Tue, 25 Mar 2014 12:54:21 -0700
8037821: Account for trampoline stubs when estimating code buffer sizes
Summary: Take into account space needed for "trampoline code" used by calls on PPC64.
Reviewed-by: kvn
Contributed-by: lutz.schmidt@sap.com
1 //
2 // Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 //
5 // This code is free software; you can redistribute it and/or modify it
6 // under the terms of the GNU General Public License version 2 only, as
7 // published by the Free Software Foundation.
8 //
9 // This code is distributed in the hope that it will be useful, but WITHOUT
10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 // version 2 for more details (a copy is included in the LICENSE file that
13 // accompanied this code).
14 //
15 // You should have received a copy of the GNU General Public License version
16 // 2 along with this work; if not, write to the Free Software Foundation,
17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 //
19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 // or visit www.oracle.com if you need additional information or have any
21 // questions.
22 //
23 //
25 // SPARC Architecture Description File
27 //----------REGISTER DEFINITION BLOCK------------------------------------------
28 // This information is used by the matcher and the register allocator to
29 // describe individual registers and classes of registers within the target
// architecture.
31 register %{
32 //----------Architecture Description Register Definitions----------------------
33 // General Registers
34 // "reg_def" name ( register save type, C convention save type,
35 // ideal register type, encoding, vm name );
36 // Register Save Types:
37 //
38 // NS = No-Save: The register allocator assumes that these registers
39 // can be used without saving upon entry to the method, &
40 // that they do not need to be saved at call sites.
41 //
42 // SOC = Save-On-Call: The register allocator assumes that these registers
43 // can be used without saving upon entry to the method,
44 // but that they must be saved at call sites.
45 //
46 // SOE = Save-On-Entry: The register allocator assumes that these registers
47 // must be saved before using them upon entry to the
48 // method, but they do not need to be saved at call
49 // sites.
50 //
51 // AS = Always-Save: The register allocator assumes that these registers
52 // must be saved before using them upon entry to the
53 // method, & that they must be saved at call sites.
54 //
55 // Ideal Register Type is used to determine how to save & restore a
56 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
57 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
58 //
59 // The encoding number is the actual bit-pattern placed into the opcodes.
62 // ----------------------------
63 // Integer/Long Registers
64 // ----------------------------
66 // Need to expose the hi/lo aspect of 64-bit registers
67 // This register set is used for both the 64-bit build and
68 // the 32-bit build with 1-register longs.
70 // Global Registers 0-7
71 reg_def R_G0H( NS, NS, Op_RegI,128, G0->as_VMReg()->next());
72 reg_def R_G0 ( NS, NS, Op_RegI, 0, G0->as_VMReg());
73 reg_def R_G1H(SOC, SOC, Op_RegI,129, G1->as_VMReg()->next());
74 reg_def R_G1 (SOC, SOC, Op_RegI, 1, G1->as_VMReg());
75 reg_def R_G2H( NS, NS, Op_RegI,130, G2->as_VMReg()->next());
76 reg_def R_G2 ( NS, NS, Op_RegI, 2, G2->as_VMReg());
77 reg_def R_G3H(SOC, SOC, Op_RegI,131, G3->as_VMReg()->next());
78 reg_def R_G3 (SOC, SOC, Op_RegI, 3, G3->as_VMReg());
79 reg_def R_G4H(SOC, SOC, Op_RegI,132, G4->as_VMReg()->next());
80 reg_def R_G4 (SOC, SOC, Op_RegI, 4, G4->as_VMReg());
81 reg_def R_G5H(SOC, SOC, Op_RegI,133, G5->as_VMReg()->next());
82 reg_def R_G5 (SOC, SOC, Op_RegI, 5, G5->as_VMReg());
83 reg_def R_G6H( NS, NS, Op_RegI,134, G6->as_VMReg()->next());
84 reg_def R_G6 ( NS, NS, Op_RegI, 6, G6->as_VMReg());
85 reg_def R_G7H( NS, NS, Op_RegI,135, G7->as_VMReg()->next());
86 reg_def R_G7 ( NS, NS, Op_RegI, 7, G7->as_VMReg());
88 // Output Registers 0-7
89 reg_def R_O0H(SOC, SOC, Op_RegI,136, O0->as_VMReg()->next());
90 reg_def R_O0 (SOC, SOC, Op_RegI, 8, O0->as_VMReg());
91 reg_def R_O1H(SOC, SOC, Op_RegI,137, O1->as_VMReg()->next());
92 reg_def R_O1 (SOC, SOC, Op_RegI, 9, O1->as_VMReg());
93 reg_def R_O2H(SOC, SOC, Op_RegI,138, O2->as_VMReg()->next());
94 reg_def R_O2 (SOC, SOC, Op_RegI, 10, O2->as_VMReg());
95 reg_def R_O3H(SOC, SOC, Op_RegI,139, O3->as_VMReg()->next());
96 reg_def R_O3 (SOC, SOC, Op_RegI, 11, O3->as_VMReg());
97 reg_def R_O4H(SOC, SOC, Op_RegI,140, O4->as_VMReg()->next());
98 reg_def R_O4 (SOC, SOC, Op_RegI, 12, O4->as_VMReg());
99 reg_def R_O5H(SOC, SOC, Op_RegI,141, O5->as_VMReg()->next());
100 reg_def R_O5 (SOC, SOC, Op_RegI, 13, O5->as_VMReg());
101 reg_def R_SPH( NS, NS, Op_RegI,142, SP->as_VMReg()->next());
102 reg_def R_SP ( NS, NS, Op_RegI, 14, SP->as_VMReg());
103 reg_def R_O7H(SOC, SOC, Op_RegI,143, O7->as_VMReg()->next());
104 reg_def R_O7 (SOC, SOC, Op_RegI, 15, O7->as_VMReg());
106 // Local Registers 0-7
107 reg_def R_L0H( NS, NS, Op_RegI,144, L0->as_VMReg()->next());
108 reg_def R_L0 ( NS, NS, Op_RegI, 16, L0->as_VMReg());
109 reg_def R_L1H( NS, NS, Op_RegI,145, L1->as_VMReg()->next());
110 reg_def R_L1 ( NS, NS, Op_RegI, 17, L1->as_VMReg());
111 reg_def R_L2H( NS, NS, Op_RegI,146, L2->as_VMReg()->next());
112 reg_def R_L2 ( NS, NS, Op_RegI, 18, L2->as_VMReg());
113 reg_def R_L3H( NS, NS, Op_RegI,147, L3->as_VMReg()->next());
114 reg_def R_L3 ( NS, NS, Op_RegI, 19, L3->as_VMReg());
115 reg_def R_L4H( NS, NS, Op_RegI,148, L4->as_VMReg()->next());
116 reg_def R_L4 ( NS, NS, Op_RegI, 20, L4->as_VMReg());
117 reg_def R_L5H( NS, NS, Op_RegI,149, L5->as_VMReg()->next());
118 reg_def R_L5 ( NS, NS, Op_RegI, 21, L5->as_VMReg());
119 reg_def R_L6H( NS, NS, Op_RegI,150, L6->as_VMReg()->next());
120 reg_def R_L6 ( NS, NS, Op_RegI, 22, L6->as_VMReg());
121 reg_def R_L7H( NS, NS, Op_RegI,151, L7->as_VMReg()->next());
122 reg_def R_L7 ( NS, NS, Op_RegI, 23, L7->as_VMReg());
124 // Input Registers 0-7
125 reg_def R_I0H( NS, NS, Op_RegI,152, I0->as_VMReg()->next());
126 reg_def R_I0 ( NS, NS, Op_RegI, 24, I0->as_VMReg());
127 reg_def R_I1H( NS, NS, Op_RegI,153, I1->as_VMReg()->next());
128 reg_def R_I1 ( NS, NS, Op_RegI, 25, I1->as_VMReg());
129 reg_def R_I2H( NS, NS, Op_RegI,154, I2->as_VMReg()->next());
130 reg_def R_I2 ( NS, NS, Op_RegI, 26, I2->as_VMReg());
131 reg_def R_I3H( NS, NS, Op_RegI,155, I3->as_VMReg()->next());
132 reg_def R_I3 ( NS, NS, Op_RegI, 27, I3->as_VMReg());
133 reg_def R_I4H( NS, NS, Op_RegI,156, I4->as_VMReg()->next());
134 reg_def R_I4 ( NS, NS, Op_RegI, 28, I4->as_VMReg());
135 reg_def R_I5H( NS, NS, Op_RegI,157, I5->as_VMReg()->next());
136 reg_def R_I5 ( NS, NS, Op_RegI, 29, I5->as_VMReg());
137 reg_def R_FPH( NS, NS, Op_RegI,158, FP->as_VMReg()->next());
138 reg_def R_FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
139 reg_def R_I7H( NS, NS, Op_RegI,159, I7->as_VMReg()->next());
140 reg_def R_I7 ( NS, NS, Op_RegI, 31, I7->as_VMReg());
142 // ----------------------------
143 // Float/Double Registers
144 // ----------------------------
146 // Float Registers
147 reg_def R_F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
148 reg_def R_F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
149 reg_def R_F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
150 reg_def R_F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
151 reg_def R_F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
152 reg_def R_F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
153 reg_def R_F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
154 reg_def R_F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
155 reg_def R_F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
156 reg_def R_F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
157 reg_def R_F10( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
158 reg_def R_F11( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
159 reg_def R_F12( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
160 reg_def R_F13( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
161 reg_def R_F14( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
162 reg_def R_F15( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
163 reg_def R_F16( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
164 reg_def R_F17( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
165 reg_def R_F18( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
166 reg_def R_F19( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
167 reg_def R_F20( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
168 reg_def R_F21( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
169 reg_def R_F22( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
170 reg_def R_F23( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
171 reg_def R_F24( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
172 reg_def R_F25( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
173 reg_def R_F26( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
174 reg_def R_F27( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
175 reg_def R_F28( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
176 reg_def R_F29( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
177 reg_def R_F30( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
178 reg_def R_F31( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
180 // Double Registers
181 // The rules of ADL require that double registers be defined in pairs.
182 // Each pair must be two 32-bit values, but not necessarily a pair of
183 // single float registers. In each pair, ADLC-assigned register numbers
184 // must be adjacent, with the lower number even. Finally, when the
185 // CPU stores such a register pair to memory, the word associated with
186 // the lower ADLC-assigned number must be stored to the lower address.
188 // These definitions specify the actual bit encodings of the sparc
189 // double fp register numbers. FloatRegisterImpl in register_sparc.hpp
190 // wants 0-63, so we have to convert every time we want to use fp regs
191 // with the macroassembler, using reg_to_DoubleFloatRegister_object().
192 // 255 is a flag meaning "don't go here".
193 // I believe we can't handle callee-save doubles D32 and up until
194 // the place in the sparc stack crawler that asserts on the 255 is
195 // fixed up.
196 reg_def R_D32 (SOC, SOC, Op_RegD, 1, F32->as_VMReg());
197 reg_def R_D32x(SOC, SOC, Op_RegD,255, F32->as_VMReg()->next());
198 reg_def R_D34 (SOC, SOC, Op_RegD, 3, F34->as_VMReg());
199 reg_def R_D34x(SOC, SOC, Op_RegD,255, F34->as_VMReg()->next());
200 reg_def R_D36 (SOC, SOC, Op_RegD, 5, F36->as_VMReg());
201 reg_def R_D36x(SOC, SOC, Op_RegD,255, F36->as_VMReg()->next());
202 reg_def R_D38 (SOC, SOC, Op_RegD, 7, F38->as_VMReg());
203 reg_def R_D38x(SOC, SOC, Op_RegD,255, F38->as_VMReg()->next());
204 reg_def R_D40 (SOC, SOC, Op_RegD, 9, F40->as_VMReg());
205 reg_def R_D40x(SOC, SOC, Op_RegD,255, F40->as_VMReg()->next());
206 reg_def R_D42 (SOC, SOC, Op_RegD, 11, F42->as_VMReg());
207 reg_def R_D42x(SOC, SOC, Op_RegD,255, F42->as_VMReg()->next());
208 reg_def R_D44 (SOC, SOC, Op_RegD, 13, F44->as_VMReg());
209 reg_def R_D44x(SOC, SOC, Op_RegD,255, F44->as_VMReg()->next());
210 reg_def R_D46 (SOC, SOC, Op_RegD, 15, F46->as_VMReg());
211 reg_def R_D46x(SOC, SOC, Op_RegD,255, F46->as_VMReg()->next());
212 reg_def R_D48 (SOC, SOC, Op_RegD, 17, F48->as_VMReg());
213 reg_def R_D48x(SOC, SOC, Op_RegD,255, F48->as_VMReg()->next());
214 reg_def R_D50 (SOC, SOC, Op_RegD, 19, F50->as_VMReg());
215 reg_def R_D50x(SOC, SOC, Op_RegD,255, F50->as_VMReg()->next());
216 reg_def R_D52 (SOC, SOC, Op_RegD, 21, F52->as_VMReg());
217 reg_def R_D52x(SOC, SOC, Op_RegD,255, F52->as_VMReg()->next());
218 reg_def R_D54 (SOC, SOC, Op_RegD, 23, F54->as_VMReg());
219 reg_def R_D54x(SOC, SOC, Op_RegD,255, F54->as_VMReg()->next());
220 reg_def R_D56 (SOC, SOC, Op_RegD, 25, F56->as_VMReg());
221 reg_def R_D56x(SOC, SOC, Op_RegD,255, F56->as_VMReg()->next());
222 reg_def R_D58 (SOC, SOC, Op_RegD, 27, F58->as_VMReg());
223 reg_def R_D58x(SOC, SOC, Op_RegD,255, F58->as_VMReg()->next());
224 reg_def R_D60 (SOC, SOC, Op_RegD, 29, F60->as_VMReg());
225 reg_def R_D60x(SOC, SOC, Op_RegD,255, F60->as_VMReg()->next());
226 reg_def R_D62 (SOC, SOC, Op_RegD, 31, F62->as_VMReg());
227 reg_def R_D62x(SOC, SOC, Op_RegD,255, F62->as_VMReg()->next());
230 // ----------------------------
231 // Special Registers
232 // Condition Codes Flag Registers
233 // I tried to break out ICC and XCC but it's not very pretty.
234 // Every Sparc instruction which defs/kills one also kills the other.
235 // Hence every compare instruction which defs one kind of flags ends
236 // up needing a kill of the other.
237 reg_def CCR (SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad());
239 reg_def FCC0(SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad());
240 reg_def FCC1(SOC, SOC, Op_RegFlags, 1, VMRegImpl::Bad());
241 reg_def FCC2(SOC, SOC, Op_RegFlags, 2, VMRegImpl::Bad());
242 reg_def FCC3(SOC, SOC, Op_RegFlags, 3, VMRegImpl::Bad());
244 // ----------------------------
245 // Specify the enum values for the registers. These enums are only used by the
246 // OptoReg "class". We can convert these enum values at will to VMReg when needed
247 // for visibility to the rest of the vm. The order of this enum influences the
248 // register allocator so having the freedom to set this order and not be stuck
249 // with the order that is natural for the rest of the vm is worth it.
250 alloc_class chunk0(
251 R_L0,R_L0H, R_L1,R_L1H, R_L2,R_L2H, R_L3,R_L3H, R_L4,R_L4H, R_L5,R_L5H, R_L6,R_L6H, R_L7,R_L7H,
252 R_G0,R_G0H, R_G1,R_G1H, R_G2,R_G2H, R_G3,R_G3H, R_G4,R_G4H, R_G5,R_G5H, R_G6,R_G6H, R_G7,R_G7H,
253 R_O7,R_O7H, R_SP,R_SPH, R_O0,R_O0H, R_O1,R_O1H, R_O2,R_O2H, R_O3,R_O3H, R_O4,R_O4H, R_O5,R_O5H,
254 R_I0,R_I0H, R_I1,R_I1H, R_I2,R_I2H, R_I3,R_I3H, R_I4,R_I4H, R_I5,R_I5H, R_FP,R_FPH, R_I7,R_I7H);
256 // Note that a register is not allocatable unless it is also mentioned
257 // in a widely-used reg_class below. Thus, R_G7 and R_G0 are outside i_reg.
259 alloc_class chunk1(
260 // The first registers listed here are those most likely to be used
261 // as temporaries. We move F0..F7 away from the front of the list,
262 // to reduce the likelihood of interferences with parameters and
263 // return values. Likewise, we avoid using F0/F1 for parameters,
264 // since they are used for return values.
265 // This FPU fine-tuning is worth about 1% on the SPEC geomean.
266 R_F8 ,R_F9 ,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
267 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,
268 R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,R_F30,R_F31,
269 R_F0 ,R_F1 ,R_F2 ,R_F3 ,R_F4 ,R_F5 ,R_F6 ,R_F7 , // used for arguments and return values
270 R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x,
271 R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x,
272 R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x,
273 R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x);
275 alloc_class chunk2(CCR, FCC0, FCC1, FCC2, FCC3);
277 //----------Architecture Description Register Classes--------------------------
278 // Several register classes are automatically defined based upon information in
279 // this architecture description.
280 // 1) reg_class inline_cache_reg ( as defined in frame section )
281 // 2) reg_class interpreter_method_oop_reg ( as defined in frame section )
282 // 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
283 //
285 // G0 is not included in integer class since it has special meaning.
286 reg_class g0_reg(R_G0);
288 // ----------------------------
289 // Integer Register Classes
290 // ----------------------------
291 // Exclusions from i_reg:
292 // R_G0: hardwired zero
293 // R_G2: reserved by HotSpot to the TLS register (invariant within Java)
294 // R_G6: reserved by Solaris ABI to tools
295 // R_G7: reserved by Solaris ABI to libthread
296 // R_O7: Used as a temp in many encodings
297 reg_class int_reg(R_G1,R_G3,R_G4,R_G5,R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
299 // Class for all integer registers, except the G registers. This is used for
300 // encodings which use G registers as temps. The regular inputs to such
301 // instructions use a "notemp_" prefix, as a hack to ensure that the allocator
302 // will not put an input into a temp register.
303 reg_class notemp_int_reg(R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
305 reg_class g1_regI(R_G1);
306 reg_class g3_regI(R_G3);
307 reg_class g4_regI(R_G4);
308 reg_class o0_regI(R_O0);
309 reg_class o7_regI(R_O7);
311 // ----------------------------
312 // Pointer Register Classes
313 // ----------------------------
314 #ifdef _LP64
315 // 64-bit build means 64-bit pointers means hi/lo pairs
316 reg_class ptr_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
317 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
318 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
319 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 );
320 // Lock encodings use G3 and G4 internally
321 reg_class lock_ptr_reg( R_G1H,R_G1, R_G5H,R_G5,
322 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
323 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
324 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 );
325 // Special class for storeP instructions, which can store SP or RPC to TLS.
326 // It is also used for memory addressing, allowing direct TLS addressing.
327 reg_class sp_ptr_reg( R_G1H,R_G1, R_G2H,R_G2, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
328 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, R_SPH,R_SP,
329 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
330 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5, R_FPH,R_FP );
331 // R_L7 is the lowest-priority callee-save (i.e., NS) register
332 // We use it to save R_G2 across calls out of Java.
333 reg_class l7_regP(R_L7H,R_L7);
335 // Other special pointer regs
336 reg_class g1_regP(R_G1H,R_G1);
337 reg_class g2_regP(R_G2H,R_G2);
338 reg_class g3_regP(R_G3H,R_G3);
339 reg_class g4_regP(R_G4H,R_G4);
340 reg_class g5_regP(R_G5H,R_G5);
341 reg_class i0_regP(R_I0H,R_I0);
342 reg_class o0_regP(R_O0H,R_O0);
343 reg_class o1_regP(R_O1H,R_O1);
344 reg_class o2_regP(R_O2H,R_O2);
345 reg_class o7_regP(R_O7H,R_O7);
347 #else // _LP64
348 // 32-bit build means 32-bit pointers means 1 register.
349 reg_class ptr_reg( R_G1, R_G3,R_G4,R_G5,
350 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
351 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
352 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
353 // Lock encodings use G3 and G4 internally
354 reg_class lock_ptr_reg(R_G1, R_G5,
355 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
356 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
357 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
358 // Special class for storeP instructions, which can store SP or RPC to TLS.
359 // It is also used for memory addressing, allowing direct TLS addressing.
360 reg_class sp_ptr_reg( R_G1,R_G2,R_G3,R_G4,R_G5,
361 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_SP,
362 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
363 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5,R_FP);
364 // R_L7 is the lowest-priority callee-save (i.e., NS) register
365 // We use it to save R_G2 across calls out of Java.
366 reg_class l7_regP(R_L7);
368 // Other special pointer regs
369 reg_class g1_regP(R_G1);
370 reg_class g2_regP(R_G2);
371 reg_class g3_regP(R_G3);
372 reg_class g4_regP(R_G4);
373 reg_class g5_regP(R_G5);
374 reg_class i0_regP(R_I0);
375 reg_class o0_regP(R_O0);
376 reg_class o1_regP(R_O1);
377 reg_class o2_regP(R_O2);
378 reg_class o7_regP(R_O7);
379 #endif // _LP64
382 // ----------------------------
383 // Long Register Classes
384 // ----------------------------
385 // Longs in 1 register. Aligned adjacent hi/lo pairs.
386 // Note: O7 is never in this class; it is sometimes used as an encoding temp.
387 reg_class long_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5
388 ,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5
389 #ifdef _LP64
390 // 64-bit, longs in 1 register: use all 64-bit integer registers
391 // 32-bit, longs in 1 register: cannot use I's and L's. Restrict to O's and G's.
392 ,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7
393 ,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5
394 #endif // _LP64
395 );
397 reg_class g1_regL(R_G1H,R_G1);
398 reg_class g3_regL(R_G3H,R_G3);
399 reg_class o2_regL(R_O2H,R_O2);
400 reg_class o7_regL(R_O7H,R_O7);
402 // ----------------------------
403 // Special Class for Condition Code Flags Register
404 reg_class int_flags(CCR);
405 reg_class float_flags(FCC0,FCC1,FCC2,FCC3);
406 reg_class float_flag0(FCC0);
409 // ----------------------------
// Floating Point Register Classes
411 // ----------------------------
412 // Skip F30/F31, they are reserved for mem-mem copies
413 reg_class sflt_reg(R_F0,R_F1,R_F2,R_F3,R_F4,R_F5,R_F6,R_F7,R_F8,R_F9,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29);
415 // Paired floating point registers--they show up in the same order as the floats,
416 // but they are used with the "Op_RegD" type, and always occur in even/odd pairs.
417 reg_class dflt_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
418 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,
419 /* Use extra V9 double registers; this AD file does not support V8 */
420 R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x,R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x,
421 R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x,R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x
422 );
424 // Paired floating point registers--they show up in the same order as the floats,
425 // but they are used with the "Op_RegD" type, and always occur in even/odd pairs.
426 // This class is usable for mis-aligned loads as happen in I2C adapters.
427 reg_class dflt_low_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
428 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29);
429 %}
431 //----------DEFINITION BLOCK---------------------------------------------------
432 // Define name --> value mappings to inform the ADLC of an integer valued name
433 // Current support includes integer values in the range [0, 0x7FFFFFFF]
434 // Format:
435 // int_def <name> ( <int_value>, <expression>);
436 // Generated Code in ad_<arch>.hpp
437 // #define <name> (<expression>)
438 // // value == <int_value>
439 // Generated code in ad_<arch>.cpp adlc_verification()
440 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
441 //
442 definitions %{
443 // The default cost (of an ALU instruction).
444 int_def DEFAULT_COST ( 100, 100);
445 int_def HUGE_COST (1000000, 1000000);
447 // Memory refs are twice as expensive as run-of-the-mill.
448 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
450 // Branches are even more expensive.
451 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
452 int_def CALL_COST ( 300, DEFAULT_COST * 3);
453 %}
456 //----------SOURCE BLOCK-------------------------------------------------------
457 // This is a block of C++ code which provides values, functions, and
458 // definitions necessary in the rest of the architecture description
459 source_hpp %{
460 // Header information of the source block.
461 // Method declarations/definitions which are used outside
462 // the ad-scope can conveniently be defined here.
463 //
464 // To keep related declarations/definitions/uses close together,
465 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
467 // Must be visible to the DFA in dfa_sparc.cpp
468 extern bool can_branch_register( Node *bol, Node *cmp );
470 extern bool use_block_zeroing(Node* count);
472 // Macros to extract hi & lo halves from a long pair.
473 // G0 is not part of any long pair, so assert on that.
474 // Prevents accidentally using G1 instead of G0.
475 #define LONG_HI_REG(x) (x)
476 #define LONG_LO_REG(x) (x)
// Platform hook used by Compile::Shorten_branches to budget code-buffer
// space for call trampoline stubs. SPARC does not use trampolines, so
// both sizes are zero (contrast with PPC64, where they are non-zero).
class CallStubImpl {

 //--------------------------------------------------------------
 //---<  Used for optimization in Compile::Shorten_branches  >---
 //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
// Platform hook providing emitters and worst-case size estimates for the
// exception and deopt handler stubs appended to compiled methods.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // Upper bound on the exception handler stub size, in bytes.
  // With TraceJumps the traceable-jump expansion is much larger,
  // so a generous guess is returned instead.
  static uint size_exception_handler() {
    if (TraceJumps) {
      return (400); // just a guess
    }
    return ( NativeJump::instruction_size ); // sethi;jmp;nop
  }

  // Upper bound on the deopt handler stub size, in bytes.
  // One extra instruction word (save) beyond a plain jump.
  static uint size_deopt_handler() {
    if (TraceJumps) {
      return (400); // just a guess
    }
    return ( 4+ NativeJump::instruction_size ); // save;sethi;jmp;restore
  }
};
518 %}
520 source %{
521 #define __ _masm.
523 // tertiary op of a LoadP or StoreP encoding
524 #define REGP_OP true
526 static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding);
527 static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding);
528 static Register reg_to_register_object(int register_encoding);
// Used by the DFA in dfa_sparc.cpp.
// Check for being able to use a V9 branch-on-register. Requires a
// compare-vs-zero, equal/not-equal, of a value which was zero- or sign-
// extended. Doesn't work following an integer ADD, for example, because of
// overflow (-1 incremented yields 0 plus a carry in the high-order word). On
// 32-bit V9 systems, interrupts currently blow away the high-order 32 bits and
// replace them with zero, which could become sign-extension in a different OS
// release. There's no obvious reason why an interrupt will ever fill these
// bits with non-zero junk (the registers are reloaded with standard LD
// instructions which either zero-fill or sign-fill).
//
// Returns true iff the Bool/Cmp pair rooted at 'bol'/'cmp' may be matched
// to a branch-on-register instruction instead of a compare-and-branch.
bool can_branch_register( Node *bol, Node *cmp ) {
  if( !BranchOnRegister ) return false;
#ifdef _LP64
  if( cmp->Opcode() == Op_CmpP )
    return true;  // No problems with pointer compares
#endif
  if( cmp->Opcode() == Op_CmpL )
    return true;  // No problems with long compares

  // Integer compares: only safe if the high 32 bits are known zero and
  // the test is a plain eq/ne against zero.
  if( !SparcV9RegsHiBitsZero ) return false;
  if( bol->as_Bool()->_test._test != BoolTest::ne &&
      bol->as_Bool()->_test._test != BoolTest::eq )
     return false;

  // Check for comparing against a 'safe' value.  Any operation which
  // clears out the high word is safe.  Thus, loads and certain shifts
  // are safe, as are non-negative constants.  Any operation which
  // preserves zero bits in the high word is safe as long as each of its
  // inputs are safe.  Thus, phis and bitwise booleans are safe if their
  // inputs are safe.  At present, the only important case to recognize
  // seems to be loads.  Constants should fold away, and shifts &
  // logicals can use the 'cc' forms.
  Node *x = cmp->in(1);
  if( x->is_Load() ) return true;
  if( x->is_Phi() ) {
    // A Phi is safe only if every one of its inputs is a (zero-filling) load.
    for( uint i = 1; i < x->req(); i++ )
      if( !x->in(i)->is_Load() )
        return false;
    return true;
  }
  return false;
}
// Decide whether block-zeroing (BIS stores) should be used for a clear.
// 'count' is the element-count node of the zeroing operation.
bool use_block_zeroing(Node* count) {
  // Use BIS for zeroing if count is not constant
  // or it is >= BlockZeroingLowLimit.
  // (find_intptr_t_con returns BlockZeroingLowLimit itself when the
  // count is not a compile-time constant, so non-constants qualify.)
  return UseBlockZeroing && (count->find_intptr_t_con(BlockZeroingLowLimit) >= BlockZeroingLowLimit);
}
579 // ****************************************************************************
581 // REQUIRED FUNCTIONALITY
583 // !!!!! Special hack to get all type of calls to specify the byte offset
584 // from the start of the call to the point where the return address
585 // will point.
586 // The "return address" is the address of the call instruction, plus 8.
588 int MachCallStaticJavaNode::ret_addr_offset() {
589 int offset = NativeCall::instruction_size; // call; delay slot
590 if (_method_handle_invoke)
591 offset += 4; // restore SP
592 return offset;
593 }
// Byte offset from the start of this dynamic-call sequence to the point
// the return address will point at. The sequence differs between
// inline-cache dispatch (vtable_index < 0) and vtable dispatch.
int MachCallDynamicJavaNode::ret_addr_offset() {
  int vtable_index = this->_vtable_index;
  if (vtable_index < 0) {
    // must be invalid_vtable_index, not nonvirtual_vtable_index
    assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
    // Inline-cache dispatch: IC constant load followed by the call.
    return (NativeMovConstReg::instruction_size +
           NativeCall::instruction_size);  // sethi; setlo; call; delay slot
  } else {
    assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
    // Vtable dispatch: klass load + vtable-entry loads + call.
    int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
    int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
    int klass_load_size;
    if (UseCompressedClassPointers) {
      assert(Universe::heap() != NULL, "java heap should be initialized");
      // Narrow klass pointer must be decoded after being loaded.
      klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
    } else {
      klass_load_size = 1*BytesPerInstWord;  // single ld_ptr
    }
    if (Assembler::is_simm13(v_off)) {
      // Vtable offset fits into a 13-bit immediate.
      return klass_load_size +
             (2*BytesPerInstWord +           // ld_ptr, ld_ptr
             NativeCall::instruction_size);  // call; delay slot
    } else {
      // Large vtable offset must first be materialized in a register.
      return klass_load_size +
             (4*BytesPerInstWord +           // set_hi, set, ld_ptr, ld_ptr
             NativeCall::instruction_size);  // call; delay slot
    }
  }
}
// Byte offset from the start of a runtime-call sequence to the point the
// return address will point at. On 64-bit, a target out of reach of a
// plain call requires the longer far-call sequence.
int MachCallRuntimeNode::ret_addr_offset() {
#ifdef _LP64
  if (MacroAssembler::is_far_target(entry_point())) {
    return NativeFarCall::instruction_size;
  } else {
    return NativeCall::instruction_size;
  }
#else
  return NativeCall::instruction_size;  // call; delay slot
#endif
}
// Indicate if the safepoint node needs the polling page as an input.
// Since Sparc does not have absolute addressing, it does: the polling
// page address must be materialized in a register first.
bool SafePointNode::needs_polling_address_input() {
  return true;
}
// emit an interrupt that is caught by the debugger (for debugging compiler)
// Emits a single breakpoint trap into 'cbuf'.
void emit_break(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ breakpoint_trap();
}
#ifndef PRODUCT
// Debug-only disassembly text for a breakpoint node ("TA" = trap always).
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream *st ) const {
  st->print("TA");
}
#endif
// Emit the breakpoint trap for this node.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  emit_break(cbuf);
}
// Size in bytes of the emitted breakpoint; defer to the generic
// emit-and-measure implementation.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Traceable jump
// Emits a register-indirect jump through the register encoded by
// 'jump_target', followed by a nop in the delay slot.
void emit_jmpl(CodeBuffer &cbuf, int jump_target) {
  MacroAssembler _masm(&cbuf);
  Register rdest = reg_to_register_object(jump_target);
  __ JMP(rdest, 0);
  __ delayed()->nop();
}
// Traceable jump and set exception pc
// Like emit_jmpl, but the delay slot computes the issuing pc
// (O7 + pc_return_offset) into Oissuing_pc for exception dispatch.
void emit_jmpl_set_exception_pc(CodeBuffer &cbuf, int jump_target) {
  MacroAssembler _masm(&cbuf);
  Register rdest = reg_to_register_object(jump_target);
  __ JMP(rdest, 0);
  __ delayed()->add(O7, frame::pc_return_offset, Oissuing_pc );
}
// Emit a single nop instruction into 'cbuf'.
void emit_nop(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ nop();
}
// Emit an illegal-instruction trap (illtrap 0) into 'cbuf'.
void emit_illtrap(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ illtrap(0);
}
// VerifyOops helper: recover the type-based offset of a memory operand by
// asking the MachNode for its base and displacement, cross-checking that
// the decoded displacement matches 'disp32'.
intptr_t get_offset_from_base(const MachNode* n, const TypePtr* atype, int disp32) {
  assert(n->rule() != loadUB_rule, "");

  intptr_t offset = 0;
  const TypePtr *adr_type = TYPE_PTR_SENTINAL;  // Check for base==RegI, disp==immP
  const Node* addr = n->get_base_and_disp(offset, adr_type);
  assert(adr_type == (const TypePtr*)-1, "VerifyOops: no support for sparc operands with base==RegI, disp==immP");
  assert(addr != NULL && addr != (Node*)-1, "invalid addr");
  assert(addr->bottom_type()->isa_oopptr() == atype, "");
  atype = atype->add_offset(offset);
  assert(disp32 == offset, "wrong disp32");
  return atype->_offset;
}
// +VerifyOops helper: alternate computation of the addressed offset,
// walking an explicit AddP address expression when present.  Used as a
// cross-check against get_offset_from_base.
intptr_t get_offset_from_base_2(const MachNode* n, const TypePtr* atype, int disp32) {
  assert(n->rule() != loadUB_rule, "");

  intptr_t offset = 0;
  Node* addr = n->in(2);
  assert(addr->bottom_type()->isa_oopptr() == atype, "");
  if (addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP) {
    Node* a = addr->in(2/*AddPNode::Address*/);
    Node* o = addr->in(3/*AddPNode::Offset*/);
    offset = o->is_Con() ? o->bottom_type()->is_intptr_t()->get_con() : Type::OffsetBot;
    atype = a->bottom_type()->is_ptr()->add_offset(offset);
    assert(atype->isa_oop_ptr(), "still an oop");
  }
  offset = atype->is_ptr()->_offset;
  if (offset != Type::OffsetBot) offset += disp32;
  return offset;
}
723 static inline jdouble replicate_immI(int con, int count, int width) {
724 // Load a constant replicated "count" times with width "width"
725 assert(count*width == 8 && width <= 4, "sanity");
726 int bit_width = width * 8;
727 jlong val = con;
728 val &= (((jlong) 1) << bit_width) - 1; // mask off sign bits
729 for (int i = 0; i < count - 1; i++) {
730 val |= (val << bit_width);
731 }
732 jdouble dval = *((jdouble*) &val); // coerce to double type
733 return dval;
734 }
736 static inline jdouble replicate_immF(float con) {
737 // Replicate float con 2 times and pack into vector.
738 int val = *((int*)&con);
739 jlong lval = val;
740 lval = (lval << 32) | (lval & 0xFFFFFFFFl);
741 jdouble dval = *((jdouble*) &lval); // coerce to double type
742 return dval;
743 }
745 // Standard Sparc opcode form2 field breakdown
746 static inline void emit2_19(CodeBuffer &cbuf, int f30, int f29, int f25, int f22, int f20, int f19, int f0 ) {
747 f0 &= (1<<19)-1; // Mask displacement to 19 bits
748 int op = (f30 << 30) |
749 (f29 << 29) |
750 (f25 << 25) |
751 (f22 << 22) |
752 (f20 << 20) |
753 (f19 << 19) |
754 (f0 << 0);
755 cbuf.insts()->emit_int32(op);
756 }
758 // Standard Sparc opcode form2 field breakdown
759 static inline void emit2_22(CodeBuffer &cbuf, int f30, int f25, int f22, int f0 ) {
760 f0 >>= 10; // Drop 10 bits
761 f0 &= (1<<22)-1; // Mask displacement to 22 bits
762 int op = (f30 << 30) |
763 (f25 << 25) |
764 (f22 << 22) |
765 (f0 << 0);
766 cbuf.insts()->emit_int32(op);
767 }
769 // Standard Sparc opcode form3 field breakdown
770 static inline void emit3(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int f5, int f0 ) {
771 int op = (f30 << 30) |
772 (f25 << 25) |
773 (f19 << 19) |
774 (f14 << 14) |
775 (f5 << 5) |
776 (f0 << 0);
777 cbuf.insts()->emit_int32(op);
778 }
780 // Standard Sparc opcode form3 field breakdown
781 static inline void emit3_simm13(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm13 ) {
782 simm13 &= (1<<13)-1; // Mask to 13 bits
783 int op = (f30 << 30) |
784 (f25 << 25) |
785 (f19 << 19) |
786 (f14 << 14) |
787 (1 << 13) | // bit to indicate immediate-mode
788 (simm13<<0);
789 cbuf.insts()->emit_int32(op);
790 }
792 static inline void emit3_simm10(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm10 ) {
793 simm10 &= (1<<10)-1; // Mask to 10 bits
794 emit3_simm13(cbuf,f30,f25,f19,f14,simm10);
795 }
#ifdef ASSERT
// Helper function for VerifyOops in emit_form3_mem_reg.
// Prints a diagnostic when a memory node's ideal opcode does not match
// the opcode class inferred from the emitted instruction.
void verify_oops_warning(const MachNode *n, int ideal_op, int mem_op) {
  warning("VerifyOops encountered unexpected instruction:");
  n->dump(2);
  warning("Instruction has ideal_Opcode==Op_%s and op_ld==Op_%s \n", NodeClassNames[ideal_op], NodeClassNames[mem_op]);
}
#endif
// Emit a SPARC form3 memory instruction: a load or store between the
// register dst_enc and [src1 + disp32] (immediate form) or [src1 + src2]
// (register form).  Under ASSERT this also implements the +VerifyOops
// checks described below.  Bails out of the compile if a stack offset
// does not fit in simm13.
void emit_form3_mem_reg(CodeBuffer &cbuf, PhaseRegAlloc* ra, const MachNode* n, int primary, int tertiary,
                        int src1_enc, int disp32, int src2_enc, int dst_enc) {

#ifdef ASSERT
  // The following code implements the +VerifyOops feature.
  // It verifies oop values which are loaded into or stored out of
  // the current method activation.  +VerifyOops complements techniques
  // like ScavengeALot, because it eagerly inspects oops in transit,
  // as they enter or leave the stack, as opposed to ScavengeALot,
  // which inspects oops "at rest", in the stack or heap, at safepoints.
  // For this reason, +VerifyOops can sometimes detect bugs very close
  // to their point of creation.  It can also serve as a cross-check
  // on the validity of oop maps, when used together with ScavengeALot.

  // It would be good to verify oops at other points, especially
  // when an oop is used as a base pointer for a load or store.
  // This is presently difficult, because it is hard to know when
  // a base address is biased or not.  (If we had such information,
  // it would be easy and useful to make a two-argument version of
  // verify_oop which unbiases the base, and performs verification.)

  assert((uint)tertiary == 0xFFFFFFFF || tertiary == REGP_OP, "valid tertiary");
  bool is_verified_oop_base  = false;
  bool is_verified_oop_load  = false;
  bool is_verified_oop_store = false;
  int tmp_enc = -1;
  if (VerifyOops && src1_enc != R_SP_enc) {
    // classify the op, mainly for an assert check
    int st_op = 0, ld_op = 0;
    switch (primary) {
    case Assembler::stb_op3:  st_op = Op_StoreB; break;
    case Assembler::sth_op3:  st_op = Op_StoreC; break;
    case Assembler::stx_op3:  // may become StoreP or stay StoreI or StoreD0
    case Assembler::stw_op3:  st_op = Op_StoreI; break;
    case Assembler::std_op3:  st_op = Op_StoreL; break;
    case Assembler::stf_op3:  st_op = Op_StoreF; break;
    case Assembler::stdf_op3: st_op = Op_StoreD; break;

    case Assembler::ldsb_op3: ld_op = Op_LoadB; break;
    case Assembler::ldub_op3: ld_op = Op_LoadUB; break;
    case Assembler::lduh_op3: ld_op = Op_LoadUS; break;
    case Assembler::ldsh_op3: ld_op = Op_LoadS; break;
    case Assembler::ldx_op3:  // may become LoadP or stay LoadI
    case Assembler::ldsw_op3: // may become LoadP or stay LoadI
    case Assembler::lduw_op3: ld_op = Op_LoadI; break;
    case Assembler::ldd_op3:  ld_op = Op_LoadL; break;
    case Assembler::ldf_op3:  ld_op = Op_LoadF; break;
    case Assembler::lddf_op3: ld_op = Op_LoadD; break;
    case Assembler::prefetch_op3: ld_op = Op_LoadI; break;

    default: ShouldNotReachHere();
    }
    if (tertiary == REGP_OP) {
      if (st_op == Op_StoreI) st_op = Op_StoreP;
      else if (ld_op == Op_LoadI) ld_op = Op_LoadP;
      else ShouldNotReachHere();
      if (st_op) {
        // a store
        // inputs are (0:control, 1:memory, 2:address, 3:value)
        Node* n2 = n->in(3);
        if (n2 != NULL) {
          const Type* t = n2->bottom_type();
          is_verified_oop_store = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
        }
      } else {
        // a load
        const Type* t = n->bottom_type();
        is_verified_oop_load = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
      }
    }

    if (ld_op) {
      // a Load
      // inputs are (0:control, 1:memory, 2:address)
      if (!(n->ideal_Opcode()==ld_op) && // Following are special cases
          !(n->ideal_Opcode()==Op_LoadPLocked && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadI && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_LoadF && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadRange && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadKlass && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadL && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadL_unaligned && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadD_unaligned && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2F && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2D && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_PrefetchRead && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_PrefetchWrite && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_PrefetchAllocation && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadVector && ld_op==Op_LoadD) &&
          !(n->rule() == loadUB_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), ld_op);
      }
    } else if (st_op) {
      // a Store
      // inputs are (0:control, 1:memory, 2:address, 3:value)
      if (!(n->ideal_Opcode()==st_op) && // Following are special cases
          !(n->ideal_Opcode()==Op_StoreCM && st_op==Op_StoreB) &&
          !(n->ideal_Opcode()==Op_StoreI && st_op==Op_StoreF) &&
          !(n->ideal_Opcode()==Op_StoreF && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_StoreL && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_StoreVector && st_op==Op_StoreD) &&
          !(n->ideal_Opcode()==Op_StoreD && st_op==Op_StoreI && n->rule() == storeD0_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), st_op);
      }
    }

    if (src2_enc == R_G0_enc && n->rule() != loadUB_rule && n->ideal_Opcode() != Op_StoreCM ) {
      Node* addr = n->in(2);
      if (!(addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP)) {
        const TypeOopPtr* atype = addr->bottom_type()->isa_instptr(); // %%% oopptr?
        if (atype != NULL) {
          intptr_t offset = get_offset_from_base(n, atype, disp32);
          intptr_t offset_2 = get_offset_from_base_2(n, atype, disp32);
          if (offset != offset_2) {
            // re-run both computations to ease debugging of the mismatch
            get_offset_from_base(n, atype, disp32);
            get_offset_from_base_2(n, atype, disp32);
          }
          assert(offset == offset_2, "different offsets");
          if (offset == disp32) {
            // we now know that src1 is a true oop pointer
            is_verified_oop_base = true;
            if (ld_op && src1_enc == dst_enc && ld_op != Op_LoadF && ld_op != Op_LoadD) {
              if( primary == Assembler::ldd_op3 ) {
                is_verified_oop_base = false; // Cannot 'ldd' into O7
              } else {
                tmp_enc = dst_enc;
                dst_enc = R_O7_enc; // Load into O7; preserve source oop
                assert(src1_enc != dst_enc, "");
              }
            }
          }
          if (st_op && (( offset == oopDesc::klass_offset_in_bytes())
                         || offset == oopDesc::mark_offset_in_bytes())) {
            // loading the mark should not be allowed either, but
            // we don't check this since it conflicts with InlineObjectHash
            // usage of LoadINode to get the mark. We could keep the
            // check if we create a new LoadMarkNode
            // but do not verify the object before its header is initialized
            ShouldNotReachHere();
          }
        }
      }
    }
  }
#endif

  // Assemble the instruction word: ld/st opcode, destination register,
  // op3 (primary), and the rs1 base register.
  uint instr;
  instr = (Assembler::ldst_op << 30)
        | (dst_enc  << 25)
        | (primary  << 19)
        | (src1_enc << 14);

  uint index = src2_enc;
  int disp = disp32;

  if (src1_enc == R_SP_enc || src1_enc == R_FP_enc) {
    disp += STACK_BIAS;
    // Quick fix for JDK-8029668: check that stack offset fits, bailout if not
    if (!Assembler::is_simm13(disp)) {
      ra->C->record_method_not_compilable("unable to handle large constant offsets");
      return;
    }
  }

  // We should have a compiler bailout here rather than a guarantee.
  // Better yet would be some mechanism to handle variable-size matches correctly.
  guarantee(Assembler::is_simm13(disp), "Do not match large constant offsets" );

  if( disp == 0 ) {
    // use reg-reg form
    // bit 13 is already zero
    instr |= index;
  } else {
    // use reg-imm form
    instr |= 0x00002000; // set bit 13 to one
    instr |= disp & 0x1FFF;
  }

  cbuf.insts()->emit_int32(instr);

#ifdef ASSERT
  {
    // Emit the verification code selected above.
    MacroAssembler _masm(&cbuf);
    if (is_verified_oop_base) {
      __ verify_oop(reg_to_register_object(src1_enc));
    }
    if (is_verified_oop_store) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
    if (tmp_enc != -1) {
      __ mov(O7, reg_to_register_object(tmp_enc));
    }
    if (is_verified_oop_load) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
  }
#endif
}
// Emit a relocated call to entry_point.  When preserve_g2 is set, G2 is
// saved to L7 in the delay slot and restored after the call.
void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false) {
  // The method which records debug information at every safepoint
  // expects the call to be the first instruction in the snippet as
  // it creates a PcDesc structure which tracks the offset of a call
  // from the start of the codeBlob. This offset is computed as
  // code_end() - code_begin() of the code which has been emitted
  // so far.
  // In this particular case we have skirted around the problem by
  // putting the "mov" instruction in the delay slot but the problem
  // may bite us again at some other point and a cleaner/generic
  // solution using relocations would be needed.
  MacroAssembler _masm(&cbuf);
  __ set_inst_mark();

  // We flush the current window just so that there is a valid stack copy
  // the fact that the current window becomes active again instantly is
  // not a problem there is nothing live in it.

#ifdef ASSERT
  int startpos = __ offset();
#endif /* ASSERT */

  __ call((address)entry_point, rtype);

  if (preserve_g2) __ delayed()->mov(G2, L7);
  else __ delayed()->nop();

  if (preserve_g2) __ mov(L7, G2);

#ifdef ASSERT
  if (preserve_g2 && (VerifyCompiledCode || VerifyOops)) {
#ifdef _LP64
    // Trash argument dump slots.
    __ set(0xb0b8ac0db0b8ac0d, G1);
    __ mov(G1, G5);
    __ stx(G1, SP, STACK_BIAS + 0x80);
    __ stx(G1, SP, STACK_BIAS + 0x88);
    __ stx(G1, SP, STACK_BIAS + 0x90);
    __ stx(G1, SP, STACK_BIAS + 0x98);
    __ stx(G1, SP, STACK_BIAS + 0xA0);
    __ stx(G1, SP, STACK_BIAS + 0xA8);
#else // _LP64
    // this is also a native call, so smash the first 7 stack locations,
    // and the various registers

    // Note: [SP+0x40] is sp[callee_aggregate_return_pointer_sp_offset],
    // while [SP+0x44..0x58] are the argument dump slots.
    __ set((intptr_t)0xbaadf00d, G1);
    __ mov(G1, G5);
    __ sllx(G1, 32, G1);
    __ or3(G1, G5, G1);
    __ mov(G1, G5);
    __ stx(G1, SP, 0x40);
    __ stx(G1, SP, 0x48);
    __ stx(G1, SP, 0x50);
    __ stw(G1, SP, 0x58); // Do not trash [SP+0x5C] which is a usable spill slot
#endif // _LP64
  }
#endif /*ASSERT*/
}
1067 //=============================================================================
1068 // REQUIRED FUNCTIONALITY for encoding
// ADLC-required encoding hook; intentionally a no-op on SPARC.
void emit_lo(CodeBuffer &cbuf, int val) { }
// ADLC-required encoding hook; intentionally a no-op on SPARC.
void emit_hi(CodeBuffer &cbuf, int val) { }
1073 //=============================================================================
// The constant table base is materialized into a pointer register.
const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask();
1076 int Compile::ConstantTable::calculate_table_base_offset() const {
1077 if (UseRDPCForConstantTableBase) {
1078 // The table base offset might be less but then it fits into
1079 // simm13 anyway and we are good (cf. MachConstantBaseNode::emit).
1080 return Assembler::min_simm13();
1081 } else {
1082 int offset = -(size() / 2);
1083 if (!Assembler::is_simm13(offset)) {
1084 offset = Assembler::min_simm13();
1085 }
1086 return offset;
1087 }
1088 }
// SPARC does not use post-register-allocation expansion for this node.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// Never called because requires_postalloc_expand() returns false.
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the constant table base address into this node's output
// register, either via RDPC plus an adjustment or via a relocated SET.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register r = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, err_msg("must be: %d == %d", constant_table.size(), consts_size));

  if (UseRDPCForConstantTableBase) {
    // For the following RDPC logic to work correctly the consts
    // section must be allocated right before the insts section. This
    // assert checks for that. The layout and the SECT_* constants
    // are defined in src/share/vm/asm/codeBuffer.hpp.
    assert(CodeBuffer::SECT_CONSTS + 1 == CodeBuffer::SECT_INSTS, "must be");
    int insts_offset = __ offset();

    // Layout:
    //
    // |----------- consts section ------------|----------- insts section -----------...
    // |------ constant table -----|- padding -|------------------x----
    //                                                            \ current PC (RDPC instruction)
    // |<------------- consts_size ----------->|<- insts_offset ->|
    //                                          \ table base
    // The table base offset is later added to the load displacement
    // so it has to be negative.
    int table_base_offset = -(consts_size + insts_offset);
    int disp;

    // If the displacement from the current PC to the constant table
    // base fits into simm13 we set the constant table base to the
    // current PC.
    if (Assembler::is_simm13(table_base_offset)) {
      constant_table.set_table_base_offset(table_base_offset);
      disp = 0;
    } else {
      // Otherwise we set the constant table base offset to the
      // maximum negative displacement of load instructions to keep
      // the disp as small as possible:
      //
      // |<------------- consts_size ----------->|<- insts_offset ->|
      // |<--------- min_simm13 --------->|<-------- disp --------->|
      //                                  \ table base
      table_base_offset = Assembler::min_simm13();
      constant_table.set_table_base_offset(table_base_offset);
      disp = (consts_size + insts_offset) + table_base_offset;
    }

    __ rdpc(r);

    if (disp != 0) {
      assert(r != O7, "need temporary");
      __ sub(r, __ ensure_simm13_or_reg(disp, O7), r);
    }
  }
  else {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    AddressLiteral base(baseaddr, rspec);
    __ set(base, r);
  }
}
1160 uint MachConstantBaseNode::size(PhaseRegAlloc*) const {
1161 if (UseRDPCForConstantTableBase) {
1162 // This is really the worst case but generally it's only 1 instruction.
1163 return (1 /*rdpc*/ + 1 /*sub*/ + MacroAssembler::worst_case_insts_for_set()) * BytesPerInstWord;
1164 } else {
1165 return MacroAssembler::worst_case_insts_for_set() * BytesPerInstWord;
1166 }
1167 }
#ifndef PRODUCT
// Debug listing of the constant-table-base materialization.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  char reg[128];
  ra_->dump_register(this, reg);
  if (UseRDPCForConstantTableBase) {
    st->print("RDPC %s\t! constant table base", reg);
  } else {
    st->print("SET &constanttable,%s\t! constant table base", reg);
  }
}
#endif
1182 //=============================================================================
#ifndef PRODUCT
// Debug listing of the method prologue (nops, thread check, stack bang,
// and the SAVE that allocates the frame).
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  for (int i = 0; i < OptoPrologueNops; i++) {
    st->print_cr("NOP"); st->print("\t");
  }

  if( VerifyThread ) {
    st->print_cr("Verify_Thread"); st->print("\t");
  }

  size_t framesize = C->frame_slots() << LogBytesPerInt;

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(framesize)) {
    st->print_cr("! stack bang"); st->print("\t");
  }

  if (Assembler::is_simm13(-framesize)) {
    st->print ("SAVE R_SP,-%d,R_SP",framesize);
  } else {
    // Frame too large for a single simm13: build the size in G3 first.
    st->print_cr("SETHI R_SP,hi%%(-%d),R_G3",framesize); st->print("\t");
    st->print_cr("ADD R_G3,lo%%(-%d),R_G3",framesize); st->print("\t");
    st->print ("SAVE R_SP,R_G3,R_SP");
  }

}
#endif
// Emit the method prologue: optional nops, thread verification, stack
// bang, and the SAVE instruction that allocates the register window
// and stack frame.  Also fixes the constant table base offset when
// RDPC-based addressing is not in use.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  for (int i = 0; i < OptoPrologueNops; i++) {
    __ nop();
  }

  __ verify_thread();

  size_t framesize = C->frame_slots() << LogBytesPerInt;
  assert(framesize >= 16*wordSize, "must have room for reg. save area");
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(framesize)) {
    __ generate_stack_overflow_check(framesize);
  }

  if (Assembler::is_simm13(-framesize)) {
    __ save(SP, -framesize, SP);
  } else {
    // Frame too large for a single simm13: build -framesize in G3.
    __ sethi(-framesize & ~0x3ff, G3);
    __ add(G3, -framesize & 0x3ff, G3);
    __ save(SP, G3, SP);
  }
  C->set_frame_complete( __ offset() );

  if (!UseRDPCForConstantTableBase && C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
// Prologue size in bytes; computed generically via a scratch emit.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Conservative upper bound on relocation entries the prologue may emit.
int MachPrologNode::reloc() const {
  return 10; // a large enough number
}
1266 //=============================================================================
1267 #ifndef PRODUCT
1268 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
1269 Compile* C = ra_->C;
1271 if( do_polling() && ra_->C->is_method_compilation() ) {
1272 st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t");
1273 #ifdef _LP64
1274 st->print("LDX [L0],G0\t!Poll for Safepointing\n\t");
1275 #else
1276 st->print("LDUW [L0],G0\t!Poll for Safepointing\n\t");
1277 #endif
1278 }
1280 if( do_polling() )
1281 st->print("RET\n\t");
1283 st->print("RESTORE");
1284 }
1285 #endif
1287 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1288 MacroAssembler _masm(&cbuf);
1289 Compile* C = ra_->C;
1291 __ verify_thread();
1293 // If this does safepoint polling, then do it here
1294 if( do_polling() && ra_->C->is_method_compilation() ) {
1295 AddressLiteral polling_page(os::get_polling_page());
1296 __ sethi(polling_page, L0);
1297 __ relocate(relocInfo::poll_return_type);
1298 __ ld_ptr( L0, 0, G0 );
1299 }
1301 // If this is a return, then stuff the restore in the delay slot
1302 if( do_polling() ) {
1303 __ ret();
1304 __ delayed()->restore();
1305 } else {
1306 __ restore();
1307 }
1308 }
// Epilogue size in bytes; computed generically via a scratch emit.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Conservative upper bound on relocation entries the epilogue may emit.
int MachEpilogNode::reloc() const {
  return 16; // a large enough number
}
// Use the generic pipeline description for the epilogue.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
// Byte offset of the safepoint poll instruction within the epilogue:
// the SETHI sequence that loads the polling page address precedes it.
int MachEpilogNode::safepoint_offset() const {
  assert( do_polling(), "no return for this epilog node");
  return MacroAssembler::insts_for_sethi(os::get_polling_page()) * BytesPerInstWord;
}
1327 //=============================================================================
1329 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
1330 enum RC { rc_bad, rc_int, rc_float, rc_stack };
1331 static enum RC rc_class( OptoReg::Name reg ) {
1332 if( !OptoReg::is_valid(reg) ) return rc_bad;
1333 if (OptoReg::is_stack(reg)) return rc_stack;
1334 VMReg r = OptoReg::as_VMReg(reg);
1335 if (r->is_Register()) return rc_int;
1336 assert(r->is_FloatRegister(), "must be");
1337 return rc_float;
1338 }
// Emit (or, in a format pass, print) one load/store between a register
// and a stack slot, used by spill-copy generation.  Returns the
// accumulated code size (each instruction is 4 bytes).
static int impl_helper(const MachNode* mach, CodeBuffer* cbuf, PhaseRegAlloc* ra, bool do_size, bool is_load, int offset, int reg, int opcode, const char *op_str, int size, outputStream* st ) {
  if (cbuf) {
    emit_form3_mem_reg(*cbuf, ra, mach, opcode, -1, R_SP_enc, offset, 0, Matcher::_regEncode[reg]);
  }
#ifndef PRODUCT
  else if (!do_size) {
    if (size != 0) st->print("\n\t");
    if (is_load) st->print("%s [R_SP + #%d],R_%s\t! spill",op_str,offset,OptoReg::regname(reg));
    else st->print("%s R_%s,[R_SP + #%d]\t! spill",op_str,OptoReg::regname(reg),offset);
  }
#endif
  return size+4;
}
// Emit (or, in a format pass, print) one register-to-register move used
// by spill-copy generation.  Returns the accumulated code size.
static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int op1, int op2, const char *op_str, int size, outputStream* st ) {
  if( cbuf ) emit3( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst], op1, 0, op2, Matcher::_regEncode[src] );
#ifndef PRODUCT
  else if( !do_size ) {
    if( size != 0 ) st->print("\n\t");
    st->print("%s R_%s,R_%s\t! spill",op_str,OptoReg::regname(src),OptoReg::regname(dst));
  }
#endif
  return size+4;
}
1365 uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
1366 PhaseRegAlloc *ra_,
1367 bool do_size,
1368 outputStream* st ) const {
1369 // Get registers to move
1370 OptoReg::Name src_second = ra_->get_reg_second(in(1));
1371 OptoReg::Name src_first = ra_->get_reg_first(in(1));
1372 OptoReg::Name dst_second = ra_->get_reg_second(this );
1373 OptoReg::Name dst_first = ra_->get_reg_first(this );
1375 enum RC src_second_rc = rc_class(src_second);
1376 enum RC src_first_rc = rc_class(src_first);
1377 enum RC dst_second_rc = rc_class(dst_second);
1378 enum RC dst_first_rc = rc_class(dst_first);
1380 assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );
1382 // Generate spill code!
1383 int size = 0;
1385 if( src_first == dst_first && src_second == dst_second )
1386 return size; // Self copy, no move
1388 // --------------------------------------
1389 // Check for mem-mem move. Load into unused float registers and fall into
1390 // the float-store case.
1391 if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
1392 int offset = ra_->reg2offset(src_first);
1393 // Further check for aligned-adjacent pair, so we can use a double load
1394 if( (src_first&1)==0 && src_first+1 == src_second ) {
1395 src_second = OptoReg::Name(R_F31_num);
1396 src_second_rc = rc_float;
1397 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::lddf_op3,"LDDF",size, st);
1398 } else {
1399 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::ldf_op3 ,"LDF ",size, st);
1400 }
1401 src_first = OptoReg::Name(R_F30_num);
1402 src_first_rc = rc_float;
1403 }
1405 if( src_second_rc == rc_stack && dst_second_rc == rc_stack ) {
1406 int offset = ra_->reg2offset(src_second);
1407 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F31_num,Assembler::ldf_op3,"LDF ",size, st);
1408 src_second = OptoReg::Name(R_F31_num);
1409 src_second_rc = rc_float;
1410 }
1412 // --------------------------------------
1413 // Check for float->int copy; requires a trip through memory
1414 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS < 3) {
1415 int offset = frame::register_save_words*wordSize;
1416 if (cbuf) {
1417 emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::sub_op3, R_SP_enc, 16 );
1418 impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
1419 impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
1420 emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::add_op3, R_SP_enc, 16 );
1421 }
1422 #ifndef PRODUCT
1423 else if (!do_size) {
1424 if (size != 0) st->print("\n\t");
1425 st->print( "SUB R_SP,16,R_SP\n");
1426 impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
1427 impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
1428 st->print("\tADD R_SP,16,R_SP\n");
1429 }
1430 #endif
1431 size += 16;
1432 }
1434 // Check for float->int copy on T4
1435 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS >= 3) {
1436 // Further check for aligned-adjacent pair, so we can use a double move
1437 if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second)
1438 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mdtox_opf,"MOVDTOX",size, st);
1439 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mstouw_opf,"MOVSTOUW",size, st);
1440 }
1441 // Check for int->float copy on T4
1442 if (src_first_rc == rc_int && dst_first_rc == rc_float && UseVIS >= 3) {
1443 // Further check for aligned-adjacent pair, so we can use a double move
1444 if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second)
1445 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mxtod_opf,"MOVXTOD",size, st);
1446 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mwtos_opf,"MOVWTOS",size, st);
1447 }
1449 // --------------------------------------
1450 // In the 32-bit 1-reg-longs build ONLY, I see mis-aligned long destinations.
1451 // In such cases, I have to do the big-endian swap. For aligned targets, the
1452 // hardware does the flop for me. Doubles are always aligned, so no problem
1453 // there. Misaligned sources only come from native-long-returns (handled
1454 // special below).
1455 #ifndef _LP64
1456 if( src_first_rc == rc_int && // source is already big-endian
1457 src_second_rc != rc_bad && // 64-bit move
1458 ((dst_first&1)!=0 || dst_second != dst_first+1) ) { // misaligned dst
1459 assert( (src_first&1)==0 && src_second == src_first+1, "source must be aligned" );
1460 // Do the big-endian flop.
1461 OptoReg::Name tmp = dst_first ; dst_first = dst_second ; dst_second = tmp ;
1462 enum RC tmp_rc = dst_first_rc; dst_first_rc = dst_second_rc; dst_second_rc = tmp_rc;
1463 }
1464 #endif
1466 // --------------------------------------
1467 // Check for integer reg-reg copy
1468 if( src_first_rc == rc_int && dst_first_rc == rc_int ) {
1469 #ifndef _LP64
1470 if( src_first == R_O0_num && src_second == R_O1_num ) { // Check for the evil O0/O1 native long-return case
1471 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value
1472 // as stored in memory. On a big-endian machine like SPARC, this means that the _second
1473 // operand contains the least significant word of the 64-bit value and vice versa.
1474 OptoReg::Name tmp = OptoReg::Name(R_O7_num);
1475 assert( (dst_first&1)==0 && dst_second == dst_first+1, "return a native O0/O1 long to an aligned-adjacent 64-bit reg" );
1476 // Shift O0 left in-place, zero-extend O1, then OR them into the dst
1477 if( cbuf ) {
1478 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tmp], Assembler::sllx_op3, Matcher::_regEncode[src_first], 0x1020 );
1479 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[src_second], Assembler::srl_op3, Matcher::_regEncode[src_second], 0x0000 );
1480 emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler:: or_op3, Matcher::_regEncode[tmp], 0, Matcher::_regEncode[src_second] );
1481 #ifndef PRODUCT
1482 } else if( !do_size ) {
1483 if( size != 0 ) st->print("\n\t");
1484 st->print("SLLX R_%s,32,R_%s\t! Move O0-first to O7-high\n\t", OptoReg::regname(src_first), OptoReg::regname(tmp));
1485 st->print("SRL R_%s, 0,R_%s\t! Zero-extend O1\n\t", OptoReg::regname(src_second), OptoReg::regname(src_second));
1486 st->print("OR R_%s,R_%s,R_%s\t! spill",OptoReg::regname(tmp), OptoReg::regname(src_second), OptoReg::regname(dst_first));
1487 #endif
1488 }
1489 return size+12;
1490 }
1491 else if( dst_first == R_I0_num && dst_second == R_I1_num ) {
1492 // returning a long value in I0/I1
1493 // a SpillCopy must be able to target a return instruction's reg_class
1494 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value
1495 // as stored in memory. On a big-endian machine like SPARC, this means that the _second
1496 // operand contains the least significant word of the 64-bit value and vice versa.
1497 OptoReg::Name tdest = dst_first;
1499 if (src_first == dst_first) {
1500 tdest = OptoReg::Name(R_O7_num);
1501 size += 4;
1502 }
1504 if( cbuf ) {
1505 assert( (src_first&1) == 0 && (src_first+1) == src_second, "return value was in an aligned-adjacent 64-bit reg");
1506 // Shift value in upper 32-bits of src to lower 32-bits of I0; move lower 32-bits to I1
1507 // ShrL_reg_imm6
1508 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tdest], Assembler::srlx_op3, Matcher::_regEncode[src_second], 32 | 0x1000 );
1509 // ShrR_reg_imm6 src, 0, dst
1510 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srl_op3, Matcher::_regEncode[src_first], 0x0000 );
1511 if (tdest != dst_first) {
1512 emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler::or_op3, 0/*G0*/, 0/*op2*/, Matcher::_regEncode[tdest] );
1513 }
1514 }
1515 #ifndef PRODUCT
1516 else if( !do_size ) {
1517 if( size != 0 ) st->print("\n\t"); // %%%%% !!!!!
1518 st->print("SRLX R_%s,32,R_%s\t! Extract MSW\n\t",OptoReg::regname(src_second),OptoReg::regname(tdest));
1519 st->print("SRL R_%s, 0,R_%s\t! Extract LSW\n\t",OptoReg::regname(src_first),OptoReg::regname(dst_second));
1520 if (tdest != dst_first) {
1521 st->print("MOV R_%s,R_%s\t! spill\n\t", OptoReg::regname(tdest), OptoReg::regname(dst_first));
1522 }
1523 }
1524 #endif // PRODUCT
1525 return size+8;
1526 }
1527 #endif // !_LP64
1528 // Else normal reg-reg copy
1529 assert( src_second != dst_first, "smashed second before evacuating it" );
1530 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::or_op3,0,"MOV ",size, st);
1531 assert( (src_first&1) == 0 && (dst_first&1) == 0, "never move second-halves of int registers" );
1532 // This moves an aligned adjacent pair.
1533 // See if we are done.
1534 if( src_first+1 == src_second && dst_first+1 == dst_second )
1535 return size;
1536 }
1538 // Check for integer store
1539 if( src_first_rc == rc_int && dst_first_rc == rc_stack ) {
1540 int offset = ra_->reg2offset(dst_first);
1541 // Further check for aligned-adjacent pair, so we can use a double store
1542 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1543 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stx_op3,"STX ",size, st);
1544 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stw_op3,"STW ",size, st);
1545 }
1547 // Check for integer load
1548 if( dst_first_rc == rc_int && src_first_rc == rc_stack ) {
1549 int offset = ra_->reg2offset(src_first);
1550 // Further check for aligned-adjacent pair, so we can use a double load
1551 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1552 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldx_op3 ,"LDX ",size, st);
1553 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
1554 }
1556 // Check for float reg-reg copy
1557 if( src_first_rc == rc_float && dst_first_rc == rc_float ) {
1558 // Further check for aligned-adjacent pair, so we can use a double move
1559 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1560 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovd_opf,"FMOVD",size, st);
1561 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovs_opf,"FMOVS",size, st);
1562 }
1564 // Check for float store
1565 if( src_first_rc == rc_float && dst_first_rc == rc_stack ) {
1566 int offset = ra_->reg2offset(dst_first);
1567 // Further check for aligned-adjacent pair, so we can use a double store
1568 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1569 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stdf_op3,"STDF",size, st);
1570 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
1571 }
1573 // Check for float load
1574 if( dst_first_rc == rc_float && src_first_rc == rc_stack ) {
1575 int offset = ra_->reg2offset(src_first);
1576 // Further check for aligned-adjacent pair, so we can use a double load
1577 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1578 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lddf_op3,"LDDF",size, st);
1579 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldf_op3 ,"LDF ",size, st);
1580 }
1582 // --------------------------------------------------------------------
1583 // Check for hi bits still needing moving. Only happens for misaligned
1584 // arguments to native calls.
1585 if( src_second == dst_second )
1586 return size; // Self copy; no move
1587 assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" );
1589 #ifndef _LP64
1590 // In the LP64 build, all registers can be moved as aligned/adjacent
1591 // pairs, so there's never any need to move the high bits separately.
1592 // The 32-bit builds have to deal with the 32-bit ABI which can force
1593 // all sorts of silly alignment problems.
1595 // Check for integer reg-reg copy. Hi bits are stuck up in the top
1596 // 32-bits of a 64-bit register, but are needed in low bits of another
1597 // register (else it's a hi-bits-to-hi-bits copy which should have
1598 // happened already as part of a 64-bit move)
1599 if( src_second_rc == rc_int && dst_second_rc == rc_int ) {
1600 assert( (src_second&1)==1, "its the evil O0/O1 native return case" );
1601 assert( (dst_second&1)==0, "should have moved with 1 64-bit move" );
1602 // Shift src_second down to dst_second's low bits.
1603 if( cbuf ) {
1604 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 );
1605 #ifndef PRODUCT
1606 } else if( !do_size ) {
1607 if( size != 0 ) st->print("\n\t");
1608 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(dst_second));
1609 #endif
1610 }
1611 return size+4;
1612 }
1614 // Check for high word integer store. Must down-shift the hi bits
1615 // into a temp register, then fall into the case of storing int bits.
1616 if( src_second_rc == rc_int && dst_second_rc == rc_stack && (src_second&1)==1 ) {
1617 // Shift src_second down to dst_second's low bits.
1618 if( cbuf ) {
1619 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[R_O7_num], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 );
1620 #ifndef PRODUCT
1621 } else if( !do_size ) {
1622 if( size != 0 ) st->print("\n\t");
1623 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(R_O7_num));
1624 #endif
1625 }
1626 size+=4;
1627 src_second = OptoReg::Name(R_O7_num); // Not R_O7H_num!
1628 }
1630 // Check for high word integer load
1631 if( dst_second_rc == rc_int && src_second_rc == rc_stack )
1632 return impl_helper(this,cbuf,ra_,do_size,true ,ra_->reg2offset(src_second),dst_second,Assembler::lduw_op3,"LDUW",size, st);
1634 // Check for high word integer store
1635 if( src_second_rc == rc_int && dst_second_rc == rc_stack )
1636 return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stw_op3 ,"STW ",size, st);
1638 // Check for high word float store
1639 if( src_second_rc == rc_float && dst_second_rc == rc_stack )
1640 return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stf_op3 ,"STF ",size, st);
1642 #endif // !_LP64
1644 Unimplemented();
1645 }
#ifndef PRODUCT
// Debug listing: print the spill copy without emitting any code.
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  implementation( NULL, ra_, false, st );
}
#endif

// Emit the spill/fill/move instructions into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}

// Size in bytes of the spill copy, computed by implementation() in
// sizing mode (do_size == true) without emitting anything.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1661 //=============================================================================
#ifndef PRODUCT
// Debug listing for nop padding; each SPARC instruction is 4 bytes.
void MachNopNode::format( PhaseRegAlloc *, outputStream *st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1668 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1669 MacroAssembler _masm(&cbuf);
1670 for(int i = 0; i < _count; i += 1) {
1671 __ nop();
1672 }
1673 }
// Pad size in bytes: 4 bytes per nop instruction.
uint MachNopNode::size(PhaseRegAlloc *ra_) const {
  return 4 * _count;
}
1680 //=============================================================================
1681 #ifndef PRODUCT
1682 void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
1683 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1684 int reg = ra_->get_reg_first(this);
1685 st->print("LEA [R_SP+#%d+BIAS],%s",offset,Matcher::regName[reg]);
1686 }
1687 #endif
1689 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1690 MacroAssembler _masm(&cbuf);
1691 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()) + STACK_BIAS;
1692 int reg = ra_->get_encode(this);
1694 if (Assembler::is_simm13(offset)) {
1695 __ add(SP, offset, reg_to_register_object(reg));
1696 } else {
1697 __ set(offset, O7);
1698 __ add(SP, O7, reg_to_register_object(reg));
1699 }
1700 }
// Size is determined by emitting into a scratch buffer, because the
// ADD vs. SET+ADD choice in emit() depends on the frame-dependent offset.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_)
  assert(ra_ == ra_->C->regalloc(), "sanity");
  return ra_->C->scratch_emit_size(this);
}
1708 //=============================================================================
#ifndef PRODUCT
// Debug listing of the Unverified Entry Point (UEP): the inline-cache
// check that loads the receiver's klass, compares it with the expected
// klass in G3, and traps on mismatch.  Must mirror MachUEPNode::emit().
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  st->print_cr("\nUEP:");
#ifdef _LP64
  if (UseCompressedClassPointers) {
    assert(Universe::heap() != NULL, "java heap should be initialized");
    // Narrow klass: load the 32-bit klass word, then decode it.
    st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
    if (Universe::narrow_klass_base() != 0) {
      st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base");
      if (Universe::narrow_klass_shift() != 0) {
        st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5");
      }
      st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
      // G6_heap_base is clobbered by the decode; restore the oop base.
      st->print_cr("\tSET Universe::narrow_ptrs_base,R_G6_heap_base");
    } else {
      st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5");
    }
  } else {
    st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  }
  st->print_cr("\tCMP R_G5,R_G3" );
  st->print ("\tTne xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
#else // _LP64
  st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  st->print_cr("\tCMP R_G5,R_G3" );
  st->print ("\tTne icc,R_G0+ST_RESERVED_FOR_USER_0+2");
#endif // _LP64
}
#endif
// Emit the UEP inline-cache check: on a klass mismatch the conditional
// trap transfers control to the IC-miss handler (ST_RESERVED_FOR_USER_0+2).
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
  Register temp_reg = G3;
  assert( G5_ic_reg != temp_reg, "conflicting registers" );

  // Load klass from receiver
  __ load_klass(O0, temp_reg);
  // Compare against expected klass
  __ cmp(temp_reg, G5_ic_reg);
  // Branch to miss code, checks xcc or icc depending
  __ trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2);
}
// Delegate to the generic MachNode size computation; the emitted UEP
// length varies with the compressed-klass configuration (see emit()).
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1758 //=============================================================================
// Emit exception handler code.
// Emits a far jump (sethi; jmp with a delayed nop) to the opto exception
// blob into a separate stub section.  Returns the offset of the handler
// within the code buffer, or 0 if the stub section could not be expanded.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
  Register temp_reg = G3;
  AddressLiteral exception_blob(OptoRuntime::exception_blob()->entry_point());
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed

  int offset = __ offset();

  __ JUMP(exception_blob, temp_reg, 0); // sethi;jmp
  __ delayed()->nop();

  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");

  __ end_a_stub();

  return offset;
}
// Emit deopt handler code.
// Saves a new frame first (so the deopted frame's registers, which may
// all be live at a poll, are preserved in the register windows), then
// far-jumps to the deopt blob's unpack entry; the restore executes in
// the jump's delay slot.  Returns the handler's offset, or 0 on failure.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
  // Can't use any of the current frame's registers as we may have deopted
  // at a poll and everything (including G3) can be live.
  Register temp_reg = L0;
  AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(size_deopt_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed

  int offset = __ offset();
  __ save_frame(0);
  __ JUMP(deopt_blob, temp_reg, 0); // sethi;jmp
  __ delayed()->restore();

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");

  __ end_a_stub();
  return offset;
}
// Given a register encoding, produce an Integer Register object.
static Register reg_to_register_object(int register_encoding) {
  // Spot-check that the AD register encodings agree with the assembler's.
  assert(L5->encoding() == R_L5_enc && G1->encoding() == R_G1_enc, "right coding");
  return as_Register(register_encoding);
}

// Given a register encoding, produce a single-precision Float Register object.
static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding) {
  assert(F5->encoding(FloatRegisterImpl::S) == R_F5_enc && F12->encoding(FloatRegisterImpl::S) == R_F12_enc, "right coding");
  return as_SingleFloatRegister(register_encoding);
}

// Given a register encoding, produce a double-precision Float Register object.
static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding) {
  assert(F4->encoding(FloatRegisterImpl::D) == R_F4_enc, "right coding");
  assert(F32->encoding(FloatRegisterImpl::D) == R_D32_enc, "right coding");
  return as_DoubleFloatRegister(register_encoding);
}
1825 const bool Matcher::match_rule_supported(int opcode) {
1826 if (!has_match_rule(opcode))
1827 return false;
1829 switch (opcode) {
1830 case Op_CountLeadingZerosI:
1831 case Op_CountLeadingZerosL:
1832 case Op_CountTrailingZerosI:
1833 case Op_CountTrailingZerosL:
1834 case Op_PopCountI:
1835 case Op_PopCountL:
1836 if (!UsePopCountInstruction)
1837 return false;
1838 case Op_CompareAndSwapL:
1839 #ifdef _LP64
1840 case Op_CompareAndSwapP:
1841 #endif
1842 if (!VM_Version::supports_cx8())
1843 return false;
1844 break;
1845 }
1847 return true; // Per default match rules are supported.
1848 }
// Map an OptoReg number of an FP register to its offset within the FP bank.
int Matcher::regnum_to_fpu_offset(int regnum) {
  return regnum - 32;  // The FP registers are in the second chunk
}

#ifdef ASSERT
// Debug-only: records the pc of the most recent Rethrow encoding, as a
// debugging aid.
address last_rethrow = NULL;
#endif

// Vector width in bytes: SPARC supports only 8-byte vectors.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  assert(MaxVectorSize == 8, "");
  return 8;
}

// Vector ideal reg: 8-byte vectors are kept in double FP registers.
const int Matcher::vector_ideal_reg(int size) {
  assert(MaxVectorSize == 8, "");
  return Op_RegD;
}

// No vector-shift instructions are supported on SPARC.
const int Matcher::vector_shift_count_ideal_reg(int size) {
  fatal("vector shift is not supported");
  return Node::NotAMachineReg;
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

// Only a single vector size is supported, so min == max.
const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt); // Same as max.
}

// SPARC doesn't support misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return false;
}

// Current (2013) SPARC platforms need to read the original key
// to construct the decryption expanded key.
const bool Matcher::pass_original_key_for_aes() {
  return true;
}

// USII supports fxtof through the whole range of numbers, USIII doesn't.
const bool Matcher::convL2FSupported(void) {
  return VM_Version::has_fast_fxtof();
}
1901 // Is this branch offset short enough that a short branch can be used?
1902 //
1903 // NOTE: If the platform does not provide any short branch variants, then
1904 // this method should return false for offset 0.
1905 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
1906 // The passed offset is relative to address of the branch.
1907 // Don't need to adjust the offset.
1908 return UseCBCond && Assembler::is_simm12(offset);
1909 }
1911 const bool Matcher::isSimpleConstant64(jlong value) {
1912 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
1913 // Depends on optimizations in MacroAssembler::setx.
1914 int hi = (int)(value >> 32);
1915 int lo = (int)(value & ~0);
1916 return (hi == 0) || (hi == -1) || (lo == 0);
1917 }
// No scaling for the parameter to the ClearArray node.
const bool Matcher::init_array_count_is_in_bytes = true;

// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;

// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }

// CMOVF/CMOVD are expensive on T4 and on SPARC64.
const int Matcher::float_cmove_cost() {
  return (VM_Version::is_T4() || VM_Version::is_sparc64()) ? ConditionalMoveLimit : 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them to
// be subsumed into complex addressing expressions or compute them into
// registers?  True for Intel but false for most RISCs.
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;

// Narrow-oop addressing is never folded into a complex operand on SPARC.
bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

// Same for narrow-klass addressing.
bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers.  Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed.  Else we split the double into 2 integer pieces and move it
// piece-by-piece.  Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
#ifdef _LP64
const bool Matcher::misaligned_doubles_ok = true;
#else
const bool Matcher::misaligned_doubles_ok = false;
#endif

// No-op on SPARC.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
}

// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during deoptimization?
// Sparc does not handle callee-save floats.
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// Note that we if-def off of _LP64.
// The relevant question is how the int is callee-saved.  In _LP64
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits, in not-_LP64 only the low 32 bits is written.
#ifdef _LP64
const bool Matcher::int_in_long = true;
#else
const bool Matcher::int_in_long = false;
#endif
// Return whether or not this register is ever used as an argument.  This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers will not be available to the callee.
bool Matcher::can_be_java_arg( int reg ) {
  // Standard sparc 6 args in registers
  if( reg == R_I0_num ||
      reg == R_I1_num ||
      reg == R_I2_num ||
      reg == R_I3_num ||
      reg == R_I4_num ||
      reg == R_I5_num ) return true;
#ifdef _LP64
  // 64-bit builds can pass 64-bit pointers and longs in
  // the high I registers
  if( reg == R_I0H_num ||
      reg == R_I1H_num ||
      reg == R_I2H_num ||
      reg == R_I3H_num ||
      reg == R_I4H_num ||
      reg == R_I5H_num ) return true;

  // G6 is also listed when compressed oops are on — presumably because it
  // carries the heap base; TODO(review): confirm against generateOptoStub.
  if ((UseCompressedOops) && (reg == R_G6_num || reg == R_G6H_num)) {
    return true;
  }

#else
  // 32-bit builds with longs-in-one-entry pass longs in G1 & G4.
  // Longs cannot be passed in O regs, because O regs become I regs
  // after a 'save' and I regs get their high bits chopped off on
  // interrupt.
  if( reg == R_G1H_num || reg == R_G1_num ) return true;
  if( reg == R_G4H_num || reg == R_G4_num ) return true;
#endif
  // A few float args in registers
  if( reg >= R_F0_num && reg <= R_F7_num ) return true;

  return false;
}
// Spillable args are exactly the java argument registers.
bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}

// Prefer the hardware SDIVX over a multiply-based division sequence
// when the CPU's integer divide is fast.
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  // Use hardware SDIVX instruction when it is
  // faster than a code which use multiply.
  return VM_Version::has_fast_idiv();
}
// Register for DIVI projection of divmodI.
// Fused div/mod nodes are never matched on SPARC (see ShouldNotReachHere),
// so none of these projection masks is expected to be queried.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
// L7 preserves the SP across a method-handle invoke.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return L7_REGP_mask();
}

// Result registers for the mathExact intrinsics: G1 holds the value.
const RegMask Matcher::mathExactI_result_proj_mask() {
  return G1_REGI_mask();
}

const RegMask Matcher::mathExactL_result_proj_mask() {
  return G1_REGL_mask();
}

// Overflow flags for mathExactI live in the integer condition codes.
const RegMask Matcher::mathExactI_flags_proj_mask() {
  return INT_FLAGS_mask();
}
2087 %}
// The intptr_t operand types, defined by textual substitution.
// (Cf. opto/type.hpp. This lets us avoid many, many other ifdefs.)
// On 64-bit builds the X types map to long-sized operands; on 32-bit
// builds they collapse to int-sized operands.
#ifdef _LP64
#define immX immL
#define immX13 immL13
#define immX13m7 immL13m7
#define iRegX iRegL
#define g1RegX g1RegL
#else
#define immX immI
#define immX13 immI13
#define immX13m7 immI13m7
#define iRegX iRegI
#define g1RegX g1RegI
#endif
2106 //----------ENCODING BLOCK-----------------------------------------------------
2107 // This block specifies the encoding classes used by the compiler to output
2108 // byte streams. Encoding classes are parameterized macros used by
2109 // Machine Instruction Nodes in order to generate the bit encoding of the
2110 // instruction. Operands specify their base encoding interface with the
// interface keyword. Four interfaces are currently supported:
2112 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
2113 // operand to generate a function which returns its register number when
2114 // queried. CONST_INTER causes an operand to generate a function which
2115 // returns the value of the constant when queried. MEMORY_INTER causes an
2116 // operand to generate four functions which return the Base Register, the
2117 // Index Register, the Scale Value, and the Offset Value of the operand when
2118 // queried. COND_INTER causes an operand to generate six functions which
2119 // return the encoding code (ie - encoding bits for the instruction)
2120 // associated with each basic boolean condition for a conditional instruction.
2121 //
2122 // Instructions specify two basic values for encoding. Again, a function
2123 // is available to check if the constant displacement is an oop. They use the
2124 // ins_encode keyword to specify their encoding classes (which must be
2125 // a sequence of enc_class names, and their parameters, specified in
2126 // the encoding block), and they use the
2127 // opcode keyword to specify, in order, their primary, secondary, and
2128 // tertiary opcode. Only the opcode sections which a particular instruction
2129 // needs for encoding need to be specified.
encode %{
  // Emits nothing in product builds; in debug builds stops with
  // "untested" so unexercised encodings are caught early.
  enc_class enc_untested %{
#ifdef ASSERT
    MacroAssembler _masm(&cbuf);
    __ untested("encoding");
#endif
  %}

  // Memory-form instruction: opcode bits from $primary/$tertiary,
  // operands from the memory operand's base/disp/index plus dst.
  enc_class form3_mem_reg( memory mem, iRegI dst ) %{
    emit_form3_mem_reg(cbuf, ra_, this, $primary, $tertiary,
                       $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
  %}

  // As form3_mem_reg, but with no tertiary opcode (-1).
  enc_class simple_form3_mem_reg( memory mem, iRegI dst ) %{
    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1,
                       $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
  %}

  // PREFETCH with function code 0 ("many reads").
  enc_class form3_mem_prefetch_read( memory mem ) %{
    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1,
                       $mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/);
  %}

  // PREFETCH with function code 2 ("many writes").
  enc_class form3_mem_prefetch_write( memory mem ) %{
    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1,
                       $mem$$base, $mem$$disp, $mem$$index, 2/*prefetch function many-writes*/);
  %}
  // Load a 64-bit long stored as two 4-byte-aligned words: the word at
  // [disp] (MSW on big-endian SPARC) goes into dst and is shifted up 32,
  // the word at [disp+4] (LSW) goes into O7, and the two are OR'd.
  enc_class form3_mem_reg_long_unaligned_marshal( memory mem, iRegL reg ) %{
    assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4");
    assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4");
    guarantee($mem$$index == R_G0_enc, "double index?");
    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, R_O7_enc );
    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg );
    emit3_simm13( cbuf, Assembler::arith_op, $reg$$reg, Assembler::sllx_op3, $reg$$reg, 0x1020 );
    emit3( cbuf, Assembler::arith_op, $reg$$reg, Assembler::or_op3, $reg$$reg, 0, R_O7_enc );
  %}

  // Access an unaligned double as two 32-bit word moves into the
  // even/odd halves of the FP register pair.
  enc_class form3_mem_reg_double_unaligned( memory mem, RegD_low reg ) %{
    assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4");
    assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4");
    guarantee($mem$$index == R_G0_enc, "double index?");
    // Load long with 2 instructions
    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg+0 );
    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, $reg$$reg+1 );
  %}

  //%%% form3_mem_plus_4_reg is a hack--get rid of it
  // Like simple_form3_mem_reg but addresses the second word (disp+4).
  enc_class form3_mem_plus_4_reg( memory mem, iRegI dst ) %{
    guarantee($mem$$disp, "cannot offset a reg-reg operand by 4");
    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp + 4, $mem$$index, $dst$$reg);
  %}
  // Reg-reg copy via OR rd, %g0, rs2; elided entirely when src == dst.
  enc_class form3_g0_rs2_rd_move( iRegI rs2, iRegI rd ) %{
    // Encode a reg-reg copy. If it is useless, then empty encoding.
    if( $rs2$$reg != $rd$$reg )
      emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, $rs2$$reg );
  %}

  // Target lo half of long: copy rs2 into the pair's low register.
  enc_class form3_g0_rs2_rd_move_lo( iRegI rs2, iRegL rd ) %{
    // Encode a reg-reg copy. If it is useless, then empty encoding.
    if( $rs2$$reg != LONG_LO_REG($rd$$reg) )
      emit3( cbuf, Assembler::arith_op, LONG_LO_REG($rd$$reg), Assembler::or_op3, 0, 0, $rs2$$reg );
  %}

  // Source lo half of long: copy the pair's low register into rd.
  enc_class form3_g0_rs2_rd_move_lo2( iRegL rs2, iRegI rd ) %{
    // Encode a reg-reg copy. If it is useless, then empty encoding.
    if( LONG_LO_REG($rs2$$reg) != $rd$$reg )
      emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_LO_REG($rs2$$reg) );
  %}

  // Target hi half of long: fill with the sign of rs1 (SRA rs1, 31).
  enc_class form3_rs1_rd_copysign_hi( iRegI rs1, iRegL rd ) %{
    emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 31 );
  %}

  // Source lo half of long, and leave it sign extended.
  enc_class form3_rs1_rd_signextend_lo1( iRegL rs1, iRegI rd ) %{
    // Sign extend low half
    emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 0, 0 );
  %}

  // Source hi half of long, and leave it sign extended.
  enc_class form3_rs1_rd_copy_hi1( iRegL rs1, iRegI rd ) %{
    // Shift high half to low half
    emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::srlx_op3, $rs1$$reg, 32 );
  %}

  // Source hi half of long: copy the pair's high register into rd.
  enc_class form3_g0_rs2_rd_move_hi2( iRegL rs2, iRegI rd ) %{
    // Encode a reg-reg copy. If it is useless, then empty encoding.
    if( LONG_HI_REG($rs2$$reg) != $rd$$reg )
      emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_HI_REG($rs2$$reg) );
  %}
  // Generic 3-register arithmetic op; op field from $secondary, op3 from $primary.
  enc_class form3_rs1_rs2_rd( iRegI rs1, iRegI rs2, iRegI rd ) %{
    emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0, $rs2$$reg );
  %}

  // dst = (src != 0) ? 1 : 0: SUBcc %g0,src sets carry iff src != 0,
  // then ADDC folds that carry into dst.
  enc_class enc_to_bool( iRegI src, iRegI dst ) %{
    emit3 ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, 0, 0, $src$$reg );
    emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::addc_op3 , 0, 0 );
  %}

  // dst = (p < q) ? -1 : 0 (signed), via compare + annulled branch.
  enc_class enc_ltmask( iRegI p, iRegI q, iRegI dst ) %{
    emit3 ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $p$$reg, 0, $q$$reg );
    // clear if nothing else is happening
    emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 0 );
    // blt,a,pn done
    emit2_19 ( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less, Assembler::bp_op2, Assembler::icc, 0/*predict not taken*/, 2 );
    // mov dst,-1 in delay slot
    emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 );
  %}

  // Shift by a 5-bit immediate (32-bit shift forms).
  enc_class form3_rs1_imm5_rd( iRegI rs1, immU5 imm5, iRegI rd ) %{
    emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $imm5$$constant & 0x1F );
  %}

  // 64-bit shift by a 6-bit immediate; the OR'd 0x1000 marks the 64-bit
  // (X) shift form, matching the sllx/srlx usages elsewhere in this file.
  enc_class form3_sd_rs1_imm6_rd( iRegL rs1, immU6 imm6, iRegL rd ) %{
    emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, ($imm6$$constant & 0x3F) | 0x1000 );
  %}

  // 64-bit shift by register; 0x80 selects the 64-bit form.
  enc_class form3_sd_rs1_rs2_rd( iRegL rs1, iRegI rs2, iRegL rd ) %{
    emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0x80, $rs2$$reg );
  %}

  // Reg + signed 13-bit immediate arithmetic.
  enc_class form3_rs1_simm13_rd( iRegI rs1, immI13 simm13, iRegI rd ) %{
    emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $simm13$$constant );
  %}

  // O1 = O7 + pc_return_offset: materialize the caller's return pc.
  enc_class move_return_pc_to_o1() %{
    emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::add_op3, R_O7_enc, frame::pc_return_offset );
  %}
#ifdef _LP64
  /* %%% merge with enc_to_bool */
  // Sets dst to 1 when src is a non-null pointer, using the 64-bit-only
  // MOVR (move on register condition) instruction.  NOTE(review): MOVR
  // leaves dst unchanged when src is zero — dst is presumably pre-zeroed
  // by the matched rule; confirm against its ins_encode.
  enc_class enc_convP2B( iRegI dst, iRegP src ) %{
    MacroAssembler _masm(&cbuf);

    Register   src_reg = reg_to_register_object($src$$reg);
    Register   dst_reg = reg_to_register_object($dst$$reg);
    __ movr(Assembler::rc_nz, src_reg, 1, dst_reg);
  %}
#endif

  // (Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)))
  enc_class enc_cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp ) %{
    MacroAssembler _masm(&cbuf);

    Register   p_reg = reg_to_register_object($p$$reg);
    Register   q_reg = reg_to_register_object($q$$reg);
    Register   y_reg = reg_to_register_object($y$$reg);
    Register tmp_reg = reg_to_register_object($tmp$$reg);

    __ subcc( p_reg, q_reg, p_reg );   // p = p - q, setting icc
    __ add  ( p_reg, y_reg, tmp_reg ); // tmp = (p - q) + y
    __ movcc( Assembler::less, false, Assembler::icc, tmp_reg, p_reg ); // p = tmp if p < q
  %}
// double -> int conversion; if src is NaN the branch falls through and
// $dst is cleared to zero (fsubs dst,dst,dst), per the "(if nan)" arms below.
enc_class form_d2i_helper(regD src, regF dst) %{
  // fcmp %fcc0,$src,$src
  emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
  // branch %fcc0 not-nan, predict taken
  emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
  // fdtoi $src,$dst
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtoi_opf, $src$$reg );
  // fitos $dst,$dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg );
  // clear $dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg );
  // carry on here...
%}

// double -> long conversion; NaN input yields a cleared (zero) $dst.
enc_class form_d2l_helper(regD src, regD dst) %{
  // fcmp %fcc0,$src,$src  check for NAN
  emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
  // branch %fcc0 not-nan, predict taken
  emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
  // fdtox $src,$dst  convert in delay slot
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtox_opf, $src$$reg );
  // fxtod $dst,$dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg );
  // clear $dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg );
  // carry on here...
%}

// float -> int conversion; NaN input yields a cleared (zero) $dst.
enc_class form_f2i_helper(regF src, regF dst) %{
  // fcmps %fcc0,$src,$src
  emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg );
  // branch %fcc0 not-nan, predict taken
  emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
  // fstoi $src,$dst
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstoi_opf, $src$$reg );
  // fitos $dst,$dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg );
  // clear $dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg );
  // carry on here...
%}

// float -> long conversion; NaN input yields a cleared (zero) $dst.
enc_class form_f2l_helper(regF src, regD dst) %{
  // fcmps %fcc0,$src,$src
  emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg );
  // branch %fcc0 not-nan, predict taken
  emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
  // fstox $src,$dst
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstox_opf, $src$$reg );
  // fxtod $dst,$dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg );
  // clear $dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg );
  // carry on here...
%}
// Single-instruction FP register-to-register forms; opcode pieces come from
// the instruct's $primary/$secondary/$tertiary fields.
enc_class form3_opf_rs2F_rdF(regF rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
enc_class form3_opf_rs2F_rdD(regF rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
enc_class form3_opf_rs2D_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
enc_class form3_opf_rs2D_rdD(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}

// "+1" selects the low (odd) half of the double-register pair.
enc_class form3_opf_rs2D_lo_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg+1); %}

enc_class form3_opf_rs2D_hi_rdD_hi(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
enc_class form3_opf_rs2D_lo_rdD_lo(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg+1,$primary,0,$tertiary,$rs2$$reg+1); %}

// Three-operand FP forms: rd = rs1 OP rs2.
enc_class form3_opf_rs1F_rs2F_rdF( regF rs1, regF rs2, regF rd ) %{
  emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
%}

enc_class form3_opf_rs1D_rs2D_rdD( regD rs1, regD rs2, regD rd ) %{
  emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
%}

// FP compares: result goes to the named fcc field instead of a register.
enc_class form3_opf_rs1F_rs2F_fcc( regF rs1, regF rs2, flagsRegF fcc ) %{
  emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
%}

enc_class form3_opf_rs1D_rs2D_fcc( regD rs1, regD rs2, flagsRegF fcc ) %{
  emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
%}

// int -> float conversion; the opf variant comes from $secondary.
enc_class form3_convI2F(regF rs2, regF rd) %{
  emit3(cbuf,Assembler::arith_op,$rd$$reg,Assembler::fpop1_op3,0,$secondary,$rs2$$reg);
%}
// Encoding class for traceable jumps
enc_class form_jmpl(g3RegP dest) %{
  emit_jmpl(cbuf, $dest$$reg);
%}

enc_class form_jmpl_set_exception_pc(g1RegP dest) %{
  emit_jmpl_set_exception_pc(cbuf, $dest$$reg);
%}

// Emit a single nop instruction.
enc_class form2_nop() %{
  emit_nop(cbuf);
%}

// Emit an illegal-trap instruction.
enc_class form2_illtrap() %{
  emit_illtrap(cbuf);
%}
// Compare longs and convert into -1, 0, 1.
enc_class cmpl_flag( iRegL src1, iRegL src2, iRegI dst ) %{
  // CMP $src1,$src2
  emit3( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $src1$$reg, 0, $src2$$reg );
  // blt,a,pn done
  emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less   , Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 5 );
  // mov dst,-1 in delay slot
  emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 );
  // bgt,a,pn done
  emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::greater, Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 3 );
  // mov dst,1 in delay slot
  emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 1 );
  // CLR $dst
  emit3( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3 , 0, 0, 0 );
%}

// Call the partial-subtype-check stub; caller examines the result.
enc_class enc_PartialSubtypeCheck() %{
  MacroAssembler _masm(&cbuf);
  __ call(StubRoutines::Sparc::partial_subtype_check(), relocInfo::runtime_call_type);
  __ delayed()->nop();
%}
// Conditional branch on integer condition codes; backward branches are
// predicted taken, forward branches predicted not taken.
enc_class enc_bp( label labl, cmpOp cmp, flagsReg cc ) %{
  MacroAssembler _masm(&cbuf);
  Label* L = $labl$$label;
  Assembler::Predict predict_taken =
    cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;

  __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
  __ delayed()->nop();
%}

// Branch on register condition (bpr) with the same backward/forward
// prediction heuristic as enc_bp.
enc_class enc_bpr( label labl, cmpOp_reg cmp, iRegI op1 ) %{
  MacroAssembler _masm(&cbuf);
  Label* L = $labl$$label;
  Assembler::Predict predict_taken =
    cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;

  __ bpr( (Assembler::RCondition)($cmp$$cmpcode), false, predict_taken, as_Register($op1$$reg), *L);
  __ delayed()->nop();
%}
// Hand-assembled MOVcc (register source) on icc/xcc; the pcc immediate
// supplies the cc1/cc0 field bits.
enc_class enc_cmov_reg( cmpOp cmp, iRegI dst, iRegI src, immI pcc) %{
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::movcc_op3 << 19) |
           (1 << 18) |                    // cc2 bit for 'icc'
           ($cmp$$cmpcode << 14) |
           (0 << 13) |                    // select register move
           ($pcc$$constant << 11) |       // cc1, cc0 bits for 'icc' or 'xcc'
           ($src$$reg << 0);
  cbuf.insts()->emit_int32(op);
%}

// MOVcc with an 11-bit signed immediate source.
enc_class enc_cmov_imm( cmpOp cmp, iRegI dst, immI11 src, immI pcc ) %{
  int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::movcc_op3 << 19) |
           (1 << 18) |                    // cc2 bit for 'icc'
           ($cmp$$cmpcode << 14) |
           (1 << 13) |                    // select immediate move
           ($pcc$$constant << 11) |       // cc1, cc0 bits for 'icc'
           (simm11 << 0);
  cbuf.insts()->emit_int32(op);
%}

// MOVcc conditioned on a floating-point condition code register.
enc_class enc_cmov_reg_f( cmpOpF cmp, iRegI dst, iRegI src, flagsRegF fcc ) %{
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::movcc_op3 << 19) |
           (0 << 18) |                    // cc2 bit for 'fccX'
           ($cmp$$cmpcode << 14) |
           (0 << 13) |                    // select register move
           ($fcc$$reg << 11) |            // cc1, cc0 bits for fcc0-fcc3
           ($src$$reg << 0);
  cbuf.insts()->emit_int32(op);
%}

// MOVcc on an fcc register with an 11-bit immediate source.
enc_class enc_cmov_imm_f( cmpOp cmp, iRegI dst, immI11 src, flagsRegF fcc ) %{
  int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::movcc_op3 << 19) |
           (0 << 18) |                    // cc2 bit for 'fccX'
           ($cmp$$cmpcode << 14) |
           (1 << 13) |                    // select immediate move
           ($fcc$$reg << 11) |            // cc1, cc0 bits for fcc0-fcc3
           (simm11 << 0);
  cbuf.insts()->emit_int32(op);
%}
// FMOVcc (FP register conditional move) on icc/xcc; $primary selects
// single, double or quad precision.
enc_class enc_cmovf_reg( cmpOp cmp, regD dst, regD src, immI pcc ) %{
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::fpop2_op3 << 19) |
           (0 << 18) |
           ($cmp$$cmpcode << 14) |
           (1 << 13) |                    // select register move
           ($pcc$$constant << 11) |       // cc1-cc0 bits for 'icc' or 'xcc'
           ($primary << 5) |              // select single, double or quad
           ($src$$reg << 0);
  cbuf.insts()->emit_int32(op);
%}

// FMOVcc conditioned on a floating-point condition code register.
enc_class enc_cmovff_reg( cmpOpF cmp, flagsRegF fcc, regD dst, regD src ) %{
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::fpop2_op3 << 19) |
           (0 << 18) |
           ($cmp$$cmpcode << 14) |
           ($fcc$$reg << 11) |            // cc2-cc0 bits for 'fccX'
           ($primary << 5) |              // select single, double or quad
           ($src$$reg << 0);
  cbuf.insts()->emit_int32(op);
%}
// Used by the MIN/MAX encodings.  Same as a CMOV, but
// the condition comes from opcode-field instead of an argument.
enc_class enc_cmov_reg_minmax( iRegI dst, iRegI src ) %{
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::movcc_op3 << 19) |
           (1 << 18) |                    // cc2 bit for 'icc'
           ($primary << 14) |
           (0 << 13) |                    // select register move
           (0 << 11) |                    // cc1, cc0 bits for 'icc'
           ($src$$reg << 0);
  cbuf.insts()->emit_int32(op);
%}

// Long (xcc) variant of the MIN/MAX conditional move.
// NOTE(review): (6 << 16) sets bits 18:17 while the cc1/cc0 field of MOVcc
// is normally at bits 12:11 — verify against the SPARC V9 MOVcc encoding
// before touching this constant.
enc_class enc_cmov_reg_minmax_long( iRegL dst, iRegL src ) %{
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::movcc_op3 << 19) |
           (6 << 16) |                    // cc2 bit for 'xcc'
           ($primary << 14) |
           (0 << 13) |                    // select register move
           (0 << 11) |                    // cc1, cc0 bits for 'icc'
           ($src$$reg << 0);
  cbuf.insts()->emit_int32(op);
%}
// rd = simm13 constant, via "or %g0, simm13, rd".
enc_class Set13( immI13 src, iRegI rd ) %{
  emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, $src$$constant );
%}

// rd = high 22 bits of the constant, via sethi.
enc_class SetHi22( immI src, iRegI rd ) %{
  emit2_22( cbuf, Assembler::branch_op, $rd$$reg, Assembler::sethi_op2, $src$$constant );
%}

// rd = arbitrary 32-bit constant; MacroAssembler::set picks the sequence.
enc_class Set32( immI src, iRegI rd ) %{
  MacroAssembler _masm(&cbuf);
  __ set($src$$constant, reg_to_register_object($rd$$reg));
%}
// Post-call check: under VerifyStackAtCalls, trap if SP+framesize != FP.
enc_class call_epilog %{
  if( VerifyStackAtCalls ) {
    MacroAssembler _masm(&cbuf);
    int framesize = ra_->C->frame_slots() << LogBytesPerInt;
    Register temp_reg = G3;
    __ add(SP, framesize, temp_reg);
    __ cmp(temp_reg, FP);
    __ breakpoint_trap(Assembler::notEqual, Assembler::ptr_cc);
  }
%}

// Long values come back from native calls in O0:O1 in the 32-bit VM, copy the value
// to G1 so the register allocator will not have to deal with the misaligned register
// pair.
enc_class adjust_long_from_native_call %{
#ifndef _LP64
  if (returns_long()) {
    // sllx O0,32,O0
    emit3_simm13( cbuf, Assembler::arith_op, R_O0_enc, Assembler::sllx_op3, R_O0_enc, 0x1020 );
    // srl O1,0,O1
    emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::srl_op3, R_O1_enc, 0x0000 );
    // or O0,O1,G1
    emit3       ( cbuf, Assembler::arith_op, R_G1_enc, Assembler:: or_op3, R_O0_enc, 0, R_O1_enc );
  }
#endif
%}
enc_class Java_To_Runtime (method meth) %{    // CALL Java_To_Runtime
  // CALL directly to the runtime
  // The user of this is responsible for ensuring that R_L7 is empty (killed).
  emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type,
                  /*preserve_g2=*/true);
%}

// Save SP into L7 around method-handle invokes.
enc_class preserve_SP %{
  MacroAssembler _masm(&cbuf);
  __ mov(SP, L7_mh_SP_save);
%}

// Restore SP from L7 after a method-handle invoke.
enc_class restore_SP %{
  MacroAssembler _masm(&cbuf);
  __ mov(L7_mh_SP_save, SP);
%}
enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
  // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
  // who we intended to call.
  if (!_method) {
    // Runtime stub, not a Java method: plain runtime call reloc.
    emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type);
  } else if (_optimized_virtual) {
    emit_call_reloc(cbuf, $meth$$method, relocInfo::opt_virtual_call_type);
  } else {
    emit_call_reloc(cbuf, $meth$$method, relocInfo::static_call_type);
  }
  if (_method) {  // Emit stub for static call.
    CompiledStaticCall::emit_to_interp_stub(cbuf);
  }
%}
enc_class Java_Dynamic_Call (method meth) %{    // JAVA DYNAMIC CALL
  MacroAssembler _masm(&cbuf);
  __ set_inst_mark();
  int vtable_index = this->_vtable_index;
  // MachCallDynamicJavaNode::ret_addr_offset uses this same test
  if (vtable_index < 0) {
    // Inline-cache dispatch path.
    // must be invalid_vtable_index, not nonvirtual_vtable_index
    assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
    Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
    assert(G5_ic_reg == G5_inline_cache_reg, "G5_inline_cache_reg used in assemble_ic_buffer_code()");
    assert(G5_ic_reg == G5_megamorphic_method, "G5_megamorphic_method used in megamorphic call stub");
    __ ic_call((address)$meth$$method);
  } else {
    // Vtable dispatch path (inline caches disabled).
    assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
    // Just go thru the vtable
    // get receiver klass (receiver already checked for non-null)
    // If we end up going thru a c2i adapter interpreter expects method in G5
    int off = __ offset();
    __ load_klass(O0, G3_scratch);
    int klass_load_size;
    if (UseCompressedClassPointers) {
      assert(Universe::heap() != NULL, "java heap should be initialized");
      klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
    } else {
      klass_load_size = 1*BytesPerInstWord;
    }
    int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
    int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
    if (Assembler::is_simm13(v_off)) {
      __ ld_ptr(G3, v_off, G5_method);
    } else {
      // Generate 2 instructions
      __ Assembler::sethi(v_off & ~0x3ff, G5_method);
      __ or3(G5_method, v_off & 0x3ff, G5_method);
      // ld_ptr, set_hi, set
      assert(__ offset() - off == klass_load_size + 2*BytesPerInstWord,
             "Unexpected instruction size(s)");
      __ ld_ptr(G3, G5_method, G5_method);
    }
    // NOTE: for vtable dispatches, the vtable entry will never be null.
    // However it may very well end up in handle_wrong_method if the
    // method is abstract for the particular class.
    __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);
    // jump to target (either compiled code or c2iadapter)
    __ jmpl(G3_scratch, G0, O7);
    __ delayed()->nop();
  }
%}
enc_class Java_Compiled_Call (method meth) %{    // JAVA COMPILED CALL
  MacroAssembler _masm(&cbuf);

  Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
  Register temp_reg  = G3;   // caller must kill G3!  We cannot reuse G5_ic_reg here because
                             // we might be calling a C2I adapter which needs it.

  assert(temp_reg != G5_ic_reg, "conflicting registers");
  // Load nmethod
  __ ld_ptr(G5_ic_reg, in_bytes(Method::from_compiled_offset()), temp_reg);

  // CALL to compiled java, indirect the contents of G3
  __ set_inst_mark();
  __ callr(temp_reg, G0);
  __ delayed()->nop();
%}
// 32-bit signed divide: sign-extend both operands, then use 64-bit sdivx.
enc_class idiv_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst) %{
  MacroAssembler _masm(&cbuf);
  Register Rdividend = reg_to_register_object($src1$$reg);
  Register Rdivisor  = reg_to_register_object($src2$$reg);
  Register Rresult   = reg_to_register_object($dst$$reg);

  __ sra(Rdivisor, 0, Rdivisor);
  __ sra(Rdividend, 0, Rdividend);
  __ sdivx(Rdividend, Rdivisor, Rresult);
%}

// 32-bit signed divide by a simm13 constant divisor.
enc_class idiv_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst) %{
  MacroAssembler _masm(&cbuf);

  Register Rdividend = reg_to_register_object($src1$$reg);
  int divisor        = $imm$$constant;
  Register Rresult   = reg_to_register_object($dst$$reg);

  __ sra(Rdividend, 0, Rdividend);
  __ sdivx(Rdividend, divisor, Rresult);
%}

// High 32 bits of a 32x32->64 signed multiply.
enc_class enc_mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2) %{
  MacroAssembler _masm(&cbuf);
  Register Rsrc1 = reg_to_register_object($src1$$reg);
  Register Rsrc2 = reg_to_register_object($src2$$reg);
  Register Rdst  = reg_to_register_object($dst$$reg);

  __ sra( Rsrc1, 0, Rsrc1 );
  __ sra( Rsrc2, 0, Rsrc2 );
  __ mulx( Rsrc1, Rsrc2, Rdst );
  __ srlx( Rdst, 32, Rdst );
%}
// 32-bit signed remainder: dst = dividend - (dividend / divisor) * divisor.
enc_class irem_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst, o7RegL scratch) %{
  MacroAssembler _masm(&cbuf);
  Register Rdividend = reg_to_register_object($src1$$reg);
  Register Rdivisor  = reg_to_register_object($src2$$reg);
  Register Rresult   = reg_to_register_object($dst$$reg);
  Register Rscratch  = reg_to_register_object($scratch$$reg);

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor  != Rscratch, "");

  __ sra(Rdividend, 0, Rdividend);
  __ sra(Rdivisor, 0, Rdivisor);
  __ sdivx(Rdividend, Rdivisor, Rscratch);
  __ mulx(Rscratch, Rdivisor, Rscratch);
  __ sub(Rdividend, Rscratch, Rresult);
%}

// Remainder by a simm13 constant divisor (same div/mul/sub scheme).
enc_class irem_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst, o7RegL scratch) %{
  MacroAssembler _masm(&cbuf);

  Register Rdividend = reg_to_register_object($src1$$reg);
  int divisor        = $imm$$constant;
  Register Rresult   = reg_to_register_object($dst$$reg);
  Register Rscratch  = reg_to_register_object($scratch$$reg);

  assert(Rdividend != Rscratch, "");

  __ sra(Rdividend, 0, Rdividend);
  __ sdivx(Rdividend, divisor, Rscratch);
  __ mulx(Rscratch, divisor, Rscratch);
  __ sub(Rdividend, Rscratch, Rresult);
%}
// Single-precision absolute value.
enc_class fabss (sflt_reg dst, sflt_reg src) %{
  MacroAssembler _masm(&cbuf);

  FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
  FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);

  __ fabs(FloatRegisterImpl::S, Fsrc, Fdst);
%}

// Double-precision absolute value.
enc_class fabsd (dflt_reg dst, dflt_reg src) %{
  MacroAssembler _masm(&cbuf);

  FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
  FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

  __ fabs(FloatRegisterImpl::D, Fsrc, Fdst);
%}

// Double-precision negate.
enc_class fnegd (dflt_reg dst, dflt_reg src) %{
  MacroAssembler _masm(&cbuf);

  FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
  FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

  __ fneg(FloatRegisterImpl::D, Fsrc, Fdst);
%}

// Single-precision square root.
enc_class fsqrts (sflt_reg dst, sflt_reg src) %{
  MacroAssembler _masm(&cbuf);

  FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
  FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);

  __ fsqrt(FloatRegisterImpl::S, Fsrc, Fdst);
%}

// Double-precision square root.
enc_class fsqrtd (dflt_reg dst, dflt_reg src) %{
  MacroAssembler _masm(&cbuf);

  FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
  FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

  __ fsqrt(FloatRegisterImpl::D, Fsrc, Fdst);
%}

// Single-precision register move.
enc_class fmovs (dflt_reg dst, dflt_reg src) %{
  MacroAssembler _masm(&cbuf);

  FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
  FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);

  __ fmov(FloatRegisterImpl::S, Fsrc, Fdst);
%}

// Double-precision register move.
enc_class fmovd (dflt_reg dst, dflt_reg src) %{
  MacroAssembler _masm(&cbuf);

  FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
  FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

  __ fmov(FloatRegisterImpl::D, Fsrc, Fdst);
%}
// Fast-path object lock; heavy lifting is in MacroAssembler::compiler_lock_object.
enc_class Fast_Lock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
  MacroAssembler _masm(&cbuf);

  Register Roop     = reg_to_register_object($oop$$reg);
  Register Rbox     = reg_to_register_object($box$$reg);
  Register Rscratch = reg_to_register_object($scratch$$reg);
  Register Rmark    = reg_to_register_object($scratch2$$reg);

  assert(Roop != Rscratch, "");
  assert(Roop != Rmark, "");
  assert(Rbox != Rscratch, "");
  assert(Rbox != Rmark, "");

  __ compiler_lock_object(Roop, Rmark, Rbox, Rscratch, _counters, UseBiasedLocking && !UseOptoBiasInlining);
%}

// Fast-path object unlock, mirroring Fast_Lock.
enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
  MacroAssembler _masm(&cbuf);

  Register Roop     = reg_to_register_object($oop$$reg);
  Register Rbox     = reg_to_register_object($box$$reg);
  Register Rscratch = reg_to_register_object($scratch$$reg);
  Register Rmark    = reg_to_register_object($scratch2$$reg);

  assert(Roop != Rscratch, "");
  assert(Roop != Rmark, "");
  assert(Rbox != Rscratch, "");
  assert(Rbox != Rmark, "");

  __ compiler_unlock_object(Roop, Rmark, Rbox, Rscratch, UseBiasedLocking && !UseOptoBiasInlining);
%}
// Pointer compare-and-swap; cmp afterwards sets flags for the caller.
enc_class enc_cas( iRegP mem, iRegP old, iRegP new ) %{
  MacroAssembler _masm(&cbuf);
  Register Rmem = reg_to_register_object($mem$$reg);
  Register Rold = reg_to_register_object($old$$reg);
  Register Rnew = reg_to_register_object($new$$reg);

  __ cas_ptr(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
  __ cmp( Rold, Rnew );
%}

// 64-bit CAS; new value is staged in O7 so Rnew is preserved for the compare.
enc_class enc_casx( iRegP mem, iRegL old, iRegL new) %{
  Register Rmem = reg_to_register_object($mem$$reg);
  Register Rold = reg_to_register_object($old$$reg);
  Register Rnew = reg_to_register_object($new$$reg);

  MacroAssembler _masm(&cbuf);
  __ mov(Rnew, O7);
  __ casx(Rmem, Rold, O7);
  __ cmp( Rold, O7 );
%}

// raw int cas, used for compareAndSwap
enc_class enc_casi( iRegP mem, iRegL old, iRegL new) %{
  Register Rmem = reg_to_register_object($mem$$reg);
  Register Rold = reg_to_register_object($old$$reg);
  Register Rnew = reg_to_register_object($new$$reg);

  MacroAssembler _masm(&cbuf);
  __ mov(Rnew, O7);
  __ cas(Rmem, Rold, O7);
  __ cmp( Rold, O7 );
%}
// res = (xcc equal) ? 0 : 1 — preload 1, movcc-zero on notEqual... note the
// movcc condition is notEqual with G0 as source, i.e. res is zeroed when the
// flags say notEqual; see enc_iflags variant for the icc version.
enc_class enc_lflags_ne_to_boolean( iRegI res ) %{
  Register Rres = reg_to_register_object($res$$reg);

  MacroAssembler _masm(&cbuf);
  __ mov(1, Rres);
  __ movcc( Assembler::notEqual, false, Assembler::xcc, G0, Rres );
%}

// Same as above but tests the 32-bit (icc) condition codes.
enc_class enc_iflags_ne_to_boolean( iRegI res ) %{
  Register Rres = reg_to_register_object($res$$reg);

  MacroAssembler _masm(&cbuf);
  __ mov(1, Rres);
  __ movcc( Assembler::notEqual, false, Assembler::icc, G0, Rres );
%}

// Three-way FP compare into an int register; $primary selects single vs double.
enc_class floating_cmp ( iRegP dst, regF src1, regF src2 ) %{
  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  FloatRegister Fsrc1 = $primary ? reg_to_SingleFloatRegister_object($src1$$reg)
                                 : reg_to_DoubleFloatRegister_object($src1$$reg);
  FloatRegister Fsrc2 = $primary ? reg_to_SingleFloatRegister_object($src2$$reg)
                                 : reg_to_DoubleFloatRegister_object($src2$$reg);

  // Convert condition code fcc0 into -1,0,1; unordered reports less-than (-1)
  __ float_cmp( $primary, -1, Fsrc1, Fsrc2, Rdst);
%}
// Lexicographic char[] compare for String.compareTo: result < 0, == 0 or > 0.
// Compares up to min(len1, len2) chars; ties are broken by the length difference
// stashed in O7.
enc_class enc_String_Compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result) %{
  Label Ldone, Lloop;
  MacroAssembler _masm(&cbuf);

  Register   str1_reg = reg_to_register_object($str1$$reg);
  Register   str2_reg = reg_to_register_object($str2$$reg);
  Register   cnt1_reg = reg_to_register_object($cnt1$$reg);
  Register   cnt2_reg = reg_to_register_object($cnt2$$reg);
  Register result_reg = reg_to_register_object($result$$reg);

  assert(result_reg != str1_reg &&
         result_reg != str2_reg &&
         result_reg != cnt1_reg &&
         result_reg != cnt2_reg ,
         "need different registers");

  // Compute the minimum of the string lengths(str1_reg) and the
  // difference of the string lengths (stack)

  // See if the lengths are different, and calculate min in str1_reg.
  // Stash diff in O7 in case we need it for a tie-breaker.
  Label Lskip;
  __ subcc(cnt1_reg, cnt2_reg, O7);
  __ sll(cnt1_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit
  __ br(Assembler::greater, true, Assembler::pt, Lskip);
  // cnt2 is shorter, so use its count:
  __ delayed()->sll(cnt2_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit
  __ bind(Lskip);

  // reallocate cnt1_reg, cnt2_reg, result_reg
  // Note:  limit_reg holds the string length pre-scaled by 2
  Register limit_reg = cnt1_reg;
  Register  chr2_reg = cnt2_reg;
  Register  chr1_reg = result_reg;
  // str{12} are the base pointers

  // Is the minimum length zero?
  __ cmp(limit_reg, (int)(0 * sizeof(jchar))); // use cast to resolve overloading ambiguity
  __ br(Assembler::equal, true, Assembler::pn, Ldone);
  __ delayed()->mov(O7, result_reg);  // result is difference in lengths

  // Load first characters
  __ lduh(str1_reg, 0, chr1_reg);
  __ lduh(str2_reg, 0, chr2_reg);

  // Compare first characters
  __ subcc(chr1_reg, chr2_reg, chr1_reg);
  __ br(Assembler::notZero, false, Assembler::pt, Ldone);
  assert(chr1_reg == result_reg, "result must be pre-placed");
  __ delayed()->nop();

  {
    // Check after comparing first character to see if strings are equivalent
    Label LSkip2;
    // Check if the strings start at same location
    __ cmp(str1_reg, str2_reg);
    __ brx(Assembler::notEqual, true, Assembler::pt, LSkip2);
    __ delayed()->nop();

    // Check if the length difference is zero (in O7)
    __ cmp(G0, O7);
    __ br(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->mov(G0, result_reg);  // result is zero

    // Strings might not be equal
    __ bind(LSkip2);
  }

  // We have no guarantee that on 64 bit the higher half of limit_reg is 0
  __ signx(limit_reg);

  __ subcc(limit_reg, 1 * sizeof(jchar), chr1_reg);
  __ br(Assembler::equal, true, Assembler::pn, Ldone);
  __ delayed()->mov(O7, result_reg);  // result is difference in lengths

  // Shift str1_reg and str2_reg to the end of the arrays, negate limit
  __ add(str1_reg, limit_reg, str1_reg);
  __ add(str2_reg, limit_reg, str2_reg);
  __ neg(chr1_reg, limit_reg);  // limit = -(limit-2)

  // Compare the rest of the characters
  __ lduh(str1_reg, limit_reg, chr1_reg);
  __ bind(Lloop);
  // __ lduh(str1_reg, limit_reg, chr1_reg); // hoisted
  __ lduh(str2_reg, limit_reg, chr2_reg);
  __ subcc(chr1_reg, chr2_reg, chr1_reg);
  __ br(Assembler::notZero, false, Assembler::pt, Ldone);
  assert(chr1_reg == result_reg, "result must be pre-placed");
  __ delayed()->inccc(limit_reg, sizeof(jchar));
  // annul LDUH if branch is not taken to prevent access past end of string
  __ br(Assembler::notZero, true, Assembler::pt, Lloop);
  __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted

  // If strings are equal up to min length, return the length difference.
  __ mov(O7, result_reg);

  // Otherwise, return the difference between the first mismatched chars.
  __ bind(Ldone);
%}
// char[] equality for String.equals: result = 1 if equal, 0 otherwise.
// Fast paths for identical pointers and zero count; 4-byte-aligned arrays
// use the word-at-a-time char_arrays_equals helper, otherwise char-by-char.
enc_class enc_String_Equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result) %{
  Label Lword_loop, Lpost_word, Lchar, Lchar_loop, Ldone;
  MacroAssembler _masm(&cbuf);

  Register   str1_reg = reg_to_register_object($str1$$reg);
  Register   str2_reg = reg_to_register_object($str2$$reg);
  Register    cnt_reg = reg_to_register_object($cnt$$reg);
  Register   tmp1_reg = O7;
  Register result_reg = reg_to_register_object($result$$reg);

  assert(result_reg != str1_reg &&
         result_reg != str2_reg &&
         result_reg != cnt_reg  &&
         result_reg != tmp1_reg ,
         "need different registers");

  __ cmp(str1_reg, str2_reg); //same char[] ?
  __ brx(Assembler::equal, true, Assembler::pn, Ldone);
  __ delayed()->add(G0, 1, result_reg);

  __ cmp_zero_and_br(Assembler::zero, cnt_reg, Ldone, true, Assembler::pn);
  __ delayed()->add(G0, 1, result_reg); // count == 0

  //rename registers
  Register limit_reg =    cnt_reg;
  Register  chr1_reg = result_reg;
  Register  chr2_reg =   tmp1_reg;

  // We have no guarantee that on 64 bit the higher half of limit_reg is 0
  __ signx(limit_reg);

  //check for alignment and position the pointers to the ends
  __ or3(str1_reg, str2_reg, chr1_reg);
  __ andcc(chr1_reg, 0x3, chr1_reg);
  // notZero means at least one not 4-byte aligned.
  // We could optimize the case when both arrays are not aligned
  // but it is not frequent case and it requires additional checks.
  __ br(Assembler::notZero, false, Assembler::pn, Lchar); // char by char compare
  __ delayed()->sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg); // set byte count

  // Compare char[] arrays aligned to 4 bytes.
  __ char_arrays_equals(str1_reg, str2_reg, limit_reg, result_reg,
                        chr1_reg, chr2_reg, Ldone);
  __ ba(Ldone);
  __ delayed()->add(G0, 1, result_reg);

  // char by char compare
  __ bind(Lchar);
  __ add(str1_reg, limit_reg, str1_reg);
  __ add(str2_reg, limit_reg, str2_reg);
  __ neg(limit_reg); //negate count

  __ lduh(str1_reg, limit_reg, chr1_reg);
  // Lchar_loop
  __ bind(Lchar_loop);
  __ lduh(str2_reg, limit_reg, chr2_reg);
  __ cmp(chr1_reg, chr2_reg);
  __ br(Assembler::notEqual, true, Assembler::pt, Ldone);
  __ delayed()->mov(G0, result_reg); //not equal
  __ inccc(limit_reg, sizeof(jchar));
  // annul LDUH if branch is not taken to prevent access past end of string
  __ br(Assembler::notZero, true, Assembler::pt, Lchar_loop);
  __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted

  __ add(G0, 1, result_reg); //equal

  __ bind(Ldone);
%}
// char-array equality for Arrays.equals: handles same-reference, null args,
// and unequal lengths before the word-wise compare; result = 1 if equal.
enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI result) %{
  Label Lvector, Ldone, Lloop;
  MacroAssembler _masm(&cbuf);

  Register   ary1_reg = reg_to_register_object($ary1$$reg);
  Register   ary2_reg = reg_to_register_object($ary2$$reg);
  Register   tmp1_reg = reg_to_register_object($tmp1$$reg);
  Register   tmp2_reg = O7;
  Register result_reg = reg_to_register_object($result$$reg);

  int length_offset  = arrayOopDesc::length_offset_in_bytes();
  int base_offset    = arrayOopDesc::base_offset_in_bytes(T_CHAR);

  // return true if the same array
  __ cmp(ary1_reg, ary2_reg);
  __ brx(Assembler::equal, true, Assembler::pn, Ldone);
  __ delayed()->add(G0, 1, result_reg); // equal

  __ br_null(ary1_reg, true, Assembler::pn, Ldone);
  __ delayed()->mov(G0, result_reg);    // not equal

  __ br_null(ary2_reg, true, Assembler::pn, Ldone);
  __ delayed()->mov(G0, result_reg);    // not equal

  //load the lengths of arrays
  __ ld(Address(ary1_reg, length_offset), tmp1_reg);
  __ ld(Address(ary2_reg, length_offset), tmp2_reg);

  // return false if the two arrays are not equal length
  __ cmp(tmp1_reg, tmp2_reg);
  __ br(Assembler::notEqual, true, Assembler::pn, Ldone);
  __ delayed()->mov(G0, result_reg);     // not equal

  __ cmp_zero_and_br(Assembler::zero, tmp1_reg, Ldone, true, Assembler::pn);
  __ delayed()->add(G0, 1, result_reg); // zero-length arrays are equal

  // load array addresses
  __ add(ary1_reg, base_offset, ary1_reg);
  __ add(ary2_reg, base_offset, ary2_reg);

  // renaming registers
  Register chr1_reg  = result_reg; // for characters in ary1
  Register chr2_reg  =   tmp2_reg; // for characters in ary2
  Register limit_reg =   tmp1_reg; // length

  // set byte count
  __ sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg);

  // Compare char[] arrays aligned to 4 bytes.
  __ char_arrays_equals(ary1_reg, ary2_reg, limit_reg, result_reg,
                        chr1_reg, chr2_reg, Ldone);
  __ add(G0, 1, result_reg); // equals

  __ bind(Ldone);
%}
3125 enc_class enc_rethrow() %{
// Tail-jump to the shared rethrow stub; the pending exception oop is
// expected in the register the stub defines (I0 must not be clobbered,
// see the assert below).
3126 cbuf.set_insts_mark();
3127 Register temp_reg = G3;
3128 AddressLiteral rethrow_stub(OptoRuntime::rethrow_stub());
3129 assert(temp_reg != reg_to_register_object(R_I0_num), "temp must not break oop_reg");
3130 MacroAssembler _masm(&cbuf);
3131 #ifdef ASSERT
// Debug builds record the PC of the jump_to into the global
// 'last_rethrow' so a crash in the stub can be traced back to its site.
// A temporary register window is used so L1/L2 are free scratch.
3132 __ save_frame(0);
3133 AddressLiteral last_rethrow_addrlit(&last_rethrow);
3134 __ sethi(last_rethrow_addrlit, L1);
3135 Address addr(L1, last_rethrow_addrlit.low10());
3136 __ rdpc(L2);
3137 __ inc(L2, 3 * BytesPerInstWord); // skip this & 2 more insns to point at jump_to
3138 __ st_ptr(L2, addr);
3139 __ restore();
3140 #endif
3141 __ JUMP(rethrow_stub, temp_reg, 0); // sethi;jmp
3142 __ delayed()->nop();
3143 %}
3145 enc_class emit_mem_nop() %{
// Memory-unit filler nop emitted as a raw opcode word.
// NOTE(review): presumably used by the scheduler to pad the load/store
// pipe without side effects (load to g0) — confirm against pipeline rules.
3146 // Generates the instruction LDUXA [o6,g0],#0x82,g0
3147 cbuf.insts()->emit_int32((unsigned int) 0xc0839040);
3148 %}
3150 enc_class emit_fadd_nop() %{
// Floating-point-unit filler nop (register-to-itself move) emitted as a
// raw opcode word; occupies the FP pipe without changing any state.
3151 // Generates the instruction FMOVS f31,f31
3152 cbuf.insts()->emit_int32((unsigned int) 0xbfa0003f);
3153 %}
3155 enc_class emit_br_nop() %{
// Branch-unit filler nop: a branch-never (BPN,PN to self) emitted as a
// raw opcode word; occupies the branch pipe without transferring control.
3156 // Generates the instruction BPN,PN .
3157 cbuf.insts()->emit_int32((unsigned int) 0x00400000);
3158 %}
3160 enc_class enc_membar_acquire %{
// Acquire barrier: orders the preceding load before all subsequent
// loads and stores (LoadStore | LoadLoad).
3161 MacroAssembler _masm(&cbuf);
3162 __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::LoadLoad) );
3163 %}
3165 enc_class enc_membar_release %{
// Release barrier: orders all preceding loads and stores before the
// subsequent store (LoadStore | StoreStore).
3166 MacroAssembler _masm(&cbuf);
3167 __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore) );
3168 %}
3170 enc_class enc_membar_volatile %{
// Full fence for volatile accesses: StoreLoad is the one ordering not
// already guaranteed by the memory model this port targets.
3171 MacroAssembler _masm(&cbuf);
3172 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
3173 %}
3175 %}
3177 //----------FRAME--------------------------------------------------------------
3178 // Definition of frame structure and management information.
3179 //
3180 // S T A C K L A Y O U T Allocators stack-slot number
3181 // | (to get allocators register number
3182 // G Owned by | | v add VMRegImpl::stack0)
3183 // r CALLER | |
3184 // o | +--------+ pad to even-align allocators stack-slot
3185 // w V | pad0 | numbers; owned by CALLER
3186 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3187 // h ^ | in | 5
3188 // | | args | 4 Holes in incoming args owned by SELF
3189 // | | | | 3
3190 // | | +--------+
3191 // V | | old out| Empty on Intel, window on Sparc
3192 // | old |preserve| Must be even aligned.
3193 // | SP-+--------+----> Matcher::_old_SP, 8 (or 16 in LP64)-byte aligned
3194 // | | in | 3 area for Intel ret address
3195 // Owned by |preserve| Empty on Sparc.
3196 // SELF +--------+
3197 // | | pad2 | 2 pad to align old SP
3198 // | +--------+ 1
3199 // | | locks | 0
3200 // | +--------+----> VMRegImpl::stack0, 8 (or 16 in LP64)-byte aligned
3201 // | | pad1 | 11 pad to align new SP
3202 // | +--------+
3203 // | | | 10
3204 // | | spills | 9 spills
3205 // V | | 8 (pad0 slot for callee)
3206 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3207 // ^ | out | 7
3208 // | | args | 6 Holes in outgoing args owned by CALLEE
3209 // Owned by +--------+
3210 // CALLEE | new out| 6 Empty on Intel, window on Sparc
3211 // | new |preserve| Must be even-aligned.
3212 // | SP-+--------+----> Matcher::_new_SP, even aligned
3213 // | | |
3214 //
3215 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3216 // known from SELF's arguments and the Java calling convention.
3217 // Region 6-7 is determined per call site.
3218 // Note 2: If the calling convention leaves holes in the incoming argument
3219 // area, those holes are owned by SELF. Holes in the outgoing area
3220 // are owned by the CALLEE. Holes should not be necessary in the
3221 // incoming area, as the Java calling convention is completely under
3222 // the control of the AD file. Doubles can be sorted and packed to
3223 // avoid holes. Holes in the outgoing arguments may be necessary for
3224 // varargs C calling conventions.
3225 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3226 // even aligned with pad0 as needed.
3227 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3228 // region 6-11 is even aligned; it may be padded out more so that
3229 // the region from SP to FP meets the minimum stack alignment.
3231 frame %{
3232 // What direction does stack grow in (assumed to be same for native & Java)
3233 stack_direction(TOWARDS_LOW);
3235 // These two registers define part of the calling convention
3236 // between compiled code and the interpreter.
3237 inline_cache_reg(R_G5); // Inline Cache Register or Method* for I2C
3238 interpreter_method_oop_reg(R_G5); // Method Oop Register when calling interpreter
3240 // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
3241 cisc_spilling_operand_name(indOffset);
3243 // Number of stack slots consumed by a Monitor enter
3244 #ifdef _LP64
3245 sync_stack_slots(2);
3246 #else
3247 sync_stack_slots(1);
3248 #endif
3250 // Compiled code's Frame Pointer
3251 frame_pointer(R_SP);
3253 // Stack alignment requirement
3254 stack_alignment(StackAlignmentInBytes);
3255 // LP64: Alignment size in bytes (128-bit -> 16 bytes)
3256 // !LP64: Alignment size in bytes (64-bit -> 8 bytes)
3258 // Number of stack slots between incoming argument block and the start of
3259 // a new frame. The PROLOG must add this many slots to the stack. The
3260 // EPILOG must remove this many slots.
3261 in_preserve_stack_slots(0);
3263 // Number of outgoing stack slots killed above the out_preserve_stack_slots
3264 // for calls to C. Supports the var-args backing area for register parms.
3265 // ADLC doesn't support parsing expressions, so I folded the math by hand.
3266 #ifdef _LP64
3267 // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (0)) * 2-stack-slots-per-word
3268 varargs_C_out_slots_killed(12);
3269 #else
3270 // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (1)) * 1-stack-slots-per-word
3271 varargs_C_out_slots_killed( 7);
3272 #endif
3274 // The after-PROLOG location of the return address. Location of
3275 // return address specifies a type (REG or STACK) and a number
3276 // representing the register number (i.e. - use a register name) or
3277 // stack slot.
3278 return_addr(REG R_I7); // Ret Addr is in register I7
3280 // Body of function which returns an OptoRegs array locating
3281 // arguments either in registers or in stack slots for calling
3282 // java
3283 calling_convention %{
3284 (void) SharedRuntime::java_calling_convention(sig_bt, regs, length, is_outgoing);
3286 %}
3288 // Body of function which returns an OptoRegs array locating
3289 // arguments either in registers or in stack slots for calling
3290 // C.
3291 c_calling_convention %{
3292 // This is obviously always outgoing
3293 (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
3294 %}
3296 // Location of native (C/C++) and interpreter return values. This is specified to
3297 // be the same as Java. In the 32-bit VM, long values are actually returned from
3298 // native calls in O0:O1 and returned to the interpreter in I0:I1. The copying
3299 // to and from the register pairs is done by the appropriate call and epilog
3300 // opcodes. This simplifies the register allocator.
3301 c_return_value %{
// Tables are indexed by ideal register type (Op_RegI..Op_RegL); lo/hi give
// the low and high halves of the OptoRegPair, in/out the caller/callee view.
3302 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3303 #ifdef _LP64
3304 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
3305 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
3306 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
3307 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
3308 #else // !_LP64
3309 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
3310 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
3311 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
3312 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
3313 #endif
3314 return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
3315 (is_outgoing?lo_out:lo_in)[ideal_reg] );
3316 %}
3318 // Location of compiled Java return values. Same as C
3319 return_value %{
3320 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3321 #ifdef _LP64
3322 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
3323 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
3324 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
3325 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
3326 #else // !_LP64
3327 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
3328 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
3329 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
3330 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
3331 #endif
3332 return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
3333 (is_outgoing?lo_out:lo_in)[ideal_reg] );
3334 %}
3336 %}
3339 //----------ATTRIBUTES---------------------------------------------------------
3340 //----------Operand Attributes-------------------------------------------------
// Default attribute values; individual operands/instructions may override.
3341 op_attrib op_cost(1); // Required cost attribute
3343 //----------Instruction Attributes---------------------------------------------
3344 ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
3345 ins_attrib ins_size(32); // Required size attribute (in bits)
3346 ins_attrib ins_avoid_back_to_back(0); // instruction should not be generated back to back
3347 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3348 // non-matching short branch variant of some
3349 // long branch?
3351 //----------OPERANDS-----------------------------------------------------------
3352 // Operand definitions must precede instruction definitions for correct parsing
3353 // in the ADLC because operands constitute user defined types which are used in
3354 // instruction definitions.
3356 //----------Simple Operands----------------------------------------------------
3357 // Immediate Operands
3358 // Integer Immediate: 32-bit
// Integer/long immediate operands. Each 'predicate' restricts which ConI/ConL
// nodes the operand may match; CONST_INTER marks them as constants for ADLC.
3359 operand immI() %{
3360 match(ConI);
3362 op_cost(0);
3363 // formats are generated automatically for constants and base registers
3364 format %{ %}
3365 interface(CONST_INTER);
3366 %}
3368 // Integer Immediate: 8-bit
3369 operand immI8() %{
3370 predicate(Assembler::is_simm8(n->get_int()));
3371 match(ConI);
3372 op_cost(0);
3373 format %{ %}
3374 interface(CONST_INTER);
3375 %}
3377 // Integer Immediate: 13-bit
3378 operand immI13() %{
3379 predicate(Assembler::is_simm13(n->get_int()));
3380 match(ConI);
3381 op_cost(0);
3383 format %{ %}
3384 interface(CONST_INTER);
3385 %}
3387 // Integer Immediate: 13-bit minus 7
// Value+7 must still fit in a simm13 field (used where the encoding adds 7).
3388 operand immI13m7() %{
3389 predicate((-4096 < n->get_int()) && ((n->get_int() + 7) <= 4095));
3390 match(ConI);
3391 op_cost(0);
3393 format %{ %}
3394 interface(CONST_INTER);
3395 %}
3397 // Integer Immediate: 16-bit
3398 operand immI16() %{
3399 predicate(Assembler::is_simm16(n->get_int()));
3400 match(ConI);
3401 op_cost(0);
3402 format %{ %}
3403 interface(CONST_INTER);
3404 %}
3406 // Unsigned Integer Immediate: 12-bit (non-negative that fits in simm13)
3407 operand immU12() %{
3408 predicate((0 <= n->get_int()) && Assembler::is_simm13(n->get_int()));
3409 match(ConI);
3410 op_cost(0);
3412 format %{ %}
3413 interface(CONST_INTER);
3414 %}
3416 // Integer Immediate: 6-bit
3417 operand immU6() %{
3418 predicate(n->get_int() >= 0 && n->get_int() <= 63);
3419 match(ConI);
3420 op_cost(0);
3421 format %{ %}
3422 interface(CONST_INTER);
3423 %}
3425 // Integer Immediate: 11-bit
3426 operand immI11() %{
3427 predicate(Assembler::is_simm11(n->get_int()));
3428 match(ConI);
3429 op_cost(0);
3430 format %{ %}
3431 interface(CONST_INTER);
3432 %}
3434 // Integer Immediate: 5-bit
3435 operand immI5() %{
3436 predicate(Assembler::is_simm5(n->get_int()));
3437 match(ConI);
3438 op_cost(0);
3439 format %{ %}
3440 interface(CONST_INTER);
3441 %}
3443 // Int Immediate non-negative
3444 operand immU31()
3445 %{
3446 predicate(n->get_int() >= 0);
3447 match(ConI);
3449 op_cost(0);
3450 format %{ %}
3451 interface(CONST_INTER);
3452 %}
3454 // Integer Immediate: 0-bit
3455 operand immI0() %{
3456 predicate(n->get_int() == 0);
3457 match(ConI);
3458 op_cost(0);
3460 format %{ %}
3461 interface(CONST_INTER);
3462 %}
3464 // Integer Immediate: the value 10
3465 operand immI10() %{
3466 predicate(n->get_int() == 10);
3467 match(ConI);
3468 op_cost(0);
3470 format %{ %}
3471 interface(CONST_INTER);
3472 %}
3474 // Integer Immediate: the values 0-31
3475 operand immU5() %{
3476 predicate(n->get_int() >= 0 && n->get_int() <= 31);
3477 match(ConI);
3478 op_cost(0);
3480 format %{ %}
3481 interface(CONST_INTER);
3482 %}
3484 // Integer Immediate: the values 1-31
3485 operand immI_1_31() %{
3486 predicate(n->get_int() >= 1 && n->get_int() <= 31);
3487 match(ConI);
3488 op_cost(0);
3490 format %{ %}
3491 interface(CONST_INTER);
3492 %}
3494 // Integer Immediate: the values 32-63
3495 operand immI_32_63() %{
3496 predicate(n->get_int() >= 32 && n->get_int() <= 63);
3497 match(ConI);
3498 op_cost(0);
3500 format %{ %}
3501 interface(CONST_INTER);
3502 %}
3504 // Immediates for special shifts (sign extend)
3506 // Integer Immediate: the value 16
3507 operand immI_16() %{
3508 predicate(n->get_int() == 16);
3509 match(ConI);
3510 op_cost(0);
3512 format %{ %}
3513 interface(CONST_INTER);
3514 %}
3516 // Integer Immediate: the value 24
3517 operand immI_24() %{
3518 predicate(n->get_int() == 24);
3519 match(ConI);
3520 op_cost(0);
3522 format %{ %}
3523 interface(CONST_INTER);
3524 %}
3526 // Integer Immediate: the value 255
3527 operand immI_255() %{
3528 predicate( n->get_int() == 255 );
3529 match(ConI);
3530 op_cost(0);
3532 format %{ %}
3533 interface(CONST_INTER);
3534 %}
3536 // Integer Immediate: the value 65535
3537 operand immI_65535() %{
3538 predicate(n->get_int() == 65535);
3539 match(ConI);
3540 op_cost(0);
3542 format %{ %}
3543 interface(CONST_INTER);
3544 %}
3546 // Long Immediate: the value FF
3547 operand immL_FF() %{
3548 predicate( n->get_long() == 0xFFL );
3549 match(ConL);
3550 op_cost(0);
3552 format %{ %}
3553 interface(CONST_INTER);
3554 %}
3556 // Long Immediate: the value FFFF
3557 operand immL_FFFF() %{
3558 predicate( n->get_long() == 0xFFFFL );
3559 match(ConL);
3560 op_cost(0);
3562 format %{ %}
3563 interface(CONST_INTER);
3564 %}
3566 // Pointer Immediate: 32 or 64-bit
3567 operand immP() %{
3568 match(ConP);
3570 op_cost(5);
3571 // formats are generated automatically for constants and base registers
3572 format %{ %}
3573 interface(CONST_INTER);
3574 %}
3576 #ifdef _LP64
3577 // Pointer Immediate: 64-bit
// Pre-Niagara2: always materialize the constant with a set() sequence.
3578 operand immP_set() %{
3579 predicate(!VM_Version::is_niagara_plus());
3580 match(ConP);
3582 op_cost(5);
3583 // formats are generated automatically for constants and base registers
3584 format %{ %}
3585 interface(CONST_INTER);
3586 %}
3588 // Pointer Immediate: 64-bit
3589 // From Niagara2 processors on a load should be better than materializing.
3590 operand immP_load() %{
3591 predicate(VM_Version::is_niagara_plus() && (n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set(n->get_ptr()) > 3)));
3592 match(ConP);
3594 op_cost(5);
3595 // formats are generated automatically for constants and base registers
3596 format %{ %}
3597 interface(CONST_INTER);
3598 %}
3600 // Pointer Immediate: 64-bit
// Non-oop pointer cheap to materialize (<= 3 instructions) on Niagara2+.
3601 operand immP_no_oop_cheap() %{
3602 predicate(VM_Version::is_niagara_plus() && !n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set(n->get_ptr()) <= 3));
3603 match(ConP);
3605 op_cost(5);
3606 // formats are generated automatically for constants and base registers
3607 format %{ %}
3608 interface(CONST_INTER);
3609 %}
3610 #endif
// Pointer constant fitting in a simm13 field.
3612 operand immP13() %{
3613 predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095));
3614 match(ConP);
3615 op_cost(0);
3617 format %{ %}
3618 interface(CONST_INTER);
3619 %}
3621 operand immP0() %{
3622 predicate(n->get_ptr() == 0);
3623 match(ConP);
3624 op_cost(0);
3626 format %{ %}
3627 interface(CONST_INTER);
3628 %}
// The safepoint polling page address, special-cased for poll instructions.
3630 operand immP_poll() %{
3631 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
3632 match(ConP);
3634 // formats are generated automatically for constants and base registers
3635 format %{ %}
3636 interface(CONST_INTER);
3637 %}
3639 // Pointer Immediate
3640 operand immN()
3641 %{
3642 match(ConN);
3644 op_cost(10);
3645 format %{ %}
3646 interface(CONST_INTER);
3647 %}
3649 operand immNKlass()
3650 %{
3651 match(ConNKlass);
3653 op_cost(10);
3654 format %{ %}
3655 interface(CONST_INTER);
3656 %}
3658 // NULL Pointer Immediate
3659 operand immN0()
3660 %{
3661 predicate(n->get_narrowcon() == 0);
3662 match(ConN);
3664 op_cost(0);
3665 format %{ %}
3666 interface(CONST_INTER);
3667 %}
3669 operand immL() %{
3670 match(ConL);
3671 op_cost(40);
3672 // formats are generated automatically for constants and base registers
3673 format %{ %}
3674 interface(CONST_INTER);
3675 %}
3677 operand immL0() %{
3678 predicate(n->get_long() == 0L);
3679 match(ConL);
3680 op_cost(0);
3681 // formats are generated automatically for constants and base registers
3682 format %{ %}
3683 interface(CONST_INTER);
3684 %}
3686 // Integer Immediate: 5-bit
3687 operand immL5() %{
3688 predicate(n->get_long() == (int)n->get_long() && Assembler::is_simm5((int)n->get_long()));
3689 match(ConL);
3690 op_cost(0);
3691 format %{ %}
3692 interface(CONST_INTER);
3693 %}
3695 // Long Immediate: 13-bit
3696 operand immL13() %{
3697 predicate((-4096L < n->get_long()) && (n->get_long() <= 4095L));
3698 match(ConL);
3699 op_cost(0);
3701 format %{ %}
3702 interface(CONST_INTER);
3703 %}
3705 // Long Immediate: 13-bit minus 7
// Value+7 must still fit in a simm13 field (used where the encoding adds 7).
3706 operand immL13m7() %{
3707 predicate((-4096L < n->get_long()) && ((n->get_long() + 7L) <= 4095L));
3708 match(ConL);
3709 op_cost(0);
3711 format %{ %}
3712 interface(CONST_INTER);
3713 %}
3715 // Long Immediate: low 32-bit mask
3716 operand immL_32bits() %{
3717 predicate(n->get_long() == 0xFFFFFFFFL);
3718 match(ConL);
3719 op_cost(0);
3721 format %{ %}
3722 interface(CONST_INTER);
3723 %}
3725 // Long Immediate: cheap (materialize in <= 3 instructions)
3726 operand immL_cheap() %{
3727 predicate(!VM_Version::is_niagara_plus() || MacroAssembler::insts_for_set64(n->get_long()) <= 3);
3728 match(ConL);
3729 op_cost(0);
3731 format %{ %}
3732 interface(CONST_INTER);
3733 %}
3735 // Long Immediate: expensive (materialize in > 3 instructions)
3736 operand immL_expensive() %{
3737 predicate(VM_Version::is_niagara_plus() && MacroAssembler::insts_for_set64(n->get_long()) > 3);
3738 match(ConL);
3739 op_cost(0);
3741 format %{ %}
3742 interface(CONST_INTER);
3743 %}
3745 // Double Immediate
3746 operand immD() %{
3747 match(ConD);
3749 op_cost(40);
3750 format %{ %}
3751 interface(CONST_INTER);
3752 %}
// Positive double zero only (+0.0, not -0.0): bit pattern is all zeros.
3754 operand immD0() %{
3755 #ifdef _LP64
3756 // on 64-bit architectures this comparison is faster
3757 predicate(jlong_cast(n->getd()) == 0);
3758 #else
3759 predicate((n->getd() == 0) && (fpclass(n->getd()) == FP_PZERO));
3760 #endif
3761 match(ConD);
3763 op_cost(0);
3764 format %{ %}
3765 interface(CONST_INTER);
3766 %}
3768 // Float Immediate
3769 operand immF() %{
3770 match(ConF);
3772 op_cost(20);
3773 format %{ %}
3774 interface(CONST_INTER);
3775 %}
3777 // Float Immediate: 0
3778 operand immF0() %{
3779 predicate((n->getf() == 0) && (fpclass(n->getf()) == FP_PZERO));
3780 match(ConF);
3782 op_cost(0);
3783 format %{ %}
3784 interface(CONST_INTER);
3785 %}
3787 // Integer Register Operands
// Register operands: 'constraint' names the allocatable register class,
// 'match' lists more-specific operands this one can stand in for.
3788 // Integer Register
3789 operand iRegI() %{
3790 constraint(ALLOC_IN_RC(int_reg));
3791 match(RegI);
3793 match(notemp_iRegI);
3794 match(g1RegI);
3795 match(o0RegI);
3796 match(iRegIsafe);
3798 format %{ %}
3799 interface(REG_INTER);
3800 %}
3802 operand notemp_iRegI() %{
3803 constraint(ALLOC_IN_RC(notemp_int_reg));
3804 match(RegI);
3806 match(o0RegI);
3808 format %{ %}
3809 interface(REG_INTER);
3810 %}
3812 operand o0RegI() %{
3813 constraint(ALLOC_IN_RC(o0_regI));
3814 match(iRegI);
3816 format %{ %}
3817 interface(REG_INTER);
3818 %}
3820 // Pointer Register
3821 operand iRegP() %{
3822 constraint(ALLOC_IN_RC(ptr_reg));
3823 match(RegP);
3825 match(lock_ptr_RegP);
3826 match(g1RegP);
3827 match(g2RegP);
3828 match(g3RegP);
3829 match(g4RegP);
3830 match(i0RegP);
3831 match(o0RegP);
3832 match(o1RegP);
3833 match(l7RegP);
3835 format %{ %}
3836 interface(REG_INTER);
3837 %}
3839 operand sp_ptr_RegP() %{
3840 constraint(ALLOC_IN_RC(sp_ptr_reg));
3841 match(RegP);
3842 match(iRegP);
3844 format %{ %}
3845 interface(REG_INTER);
3846 %}
3848 operand lock_ptr_RegP() %{
3849 constraint(ALLOC_IN_RC(lock_ptr_reg));
3850 match(RegP);
3851 match(i0RegP);
3852 match(o0RegP);
3853 match(o1RegP);
3854 match(l7RegP);
3856 format %{ %}
3857 interface(REG_INTER);
3858 %}
// Single-register operand classes below pin an operand to one named register.
3860 operand g1RegP() %{
3861 constraint(ALLOC_IN_RC(g1_regP));
3862 match(iRegP);
3864 format %{ %}
3865 interface(REG_INTER);
3866 %}
3868 operand g2RegP() %{
3869 constraint(ALLOC_IN_RC(g2_regP));
3870 match(iRegP);
3872 format %{ %}
3873 interface(REG_INTER);
3874 %}
3876 operand g3RegP() %{
3877 constraint(ALLOC_IN_RC(g3_regP));
3878 match(iRegP);
3880 format %{ %}
3881 interface(REG_INTER);
3882 %}
3884 operand g1RegI() %{
3885 constraint(ALLOC_IN_RC(g1_regI));
3886 match(iRegI);
3888 format %{ %}
3889 interface(REG_INTER);
3890 %}
3892 operand g3RegI() %{
3893 constraint(ALLOC_IN_RC(g3_regI));
3894 match(iRegI);
3896 format %{ %}
3897 interface(REG_INTER);
3898 %}
3900 operand g4RegI() %{
3901 constraint(ALLOC_IN_RC(g4_regI));
3902 match(iRegI);
3904 format %{ %}
3905 interface(REG_INTER);
3906 %}
3908 operand g4RegP() %{
3909 constraint(ALLOC_IN_RC(g4_regP));
3910 match(iRegP);
3912 format %{ %}
3913 interface(REG_INTER);
3914 %}
3916 operand i0RegP() %{
3917 constraint(ALLOC_IN_RC(i0_regP));
3918 match(iRegP);
3920 format %{ %}
3921 interface(REG_INTER);
3922 %}
3924 operand o0RegP() %{
3925 constraint(ALLOC_IN_RC(o0_regP));
3926 match(iRegP);
3928 format %{ %}
3929 interface(REG_INTER);
3930 %}
3932 operand o1RegP() %{
3933 constraint(ALLOC_IN_RC(o1_regP));
3934 match(iRegP);
3936 format %{ %}
3937 interface(REG_INTER);
3938 %}
3940 operand o2RegP() %{
3941 constraint(ALLOC_IN_RC(o2_regP));
3942 match(iRegP);
3944 format %{ %}
3945 interface(REG_INTER);
3946 %}
3948 operand o7RegP() %{
3949 constraint(ALLOC_IN_RC(o7_regP));
3950 match(iRegP);
3952 format %{ %}
3953 interface(REG_INTER);
3954 %}
3956 operand l7RegP() %{
3957 constraint(ALLOC_IN_RC(l7_regP));
3958 match(iRegP);
3960 format %{ %}
3961 interface(REG_INTER);
3962 %}
3964 operand o7RegI() %{
3965 constraint(ALLOC_IN_RC(o7_regI));
3966 match(iRegI);
3968 format %{ %}
3969 interface(REG_INTER);
3970 %}
3972 operand iRegN() %{
3973 constraint(ALLOC_IN_RC(int_reg));
3974 match(RegN);
3976 format %{ %}
3977 interface(REG_INTER);
3978 %}
3980 // Long Register
3981 operand iRegL() %{
3982 constraint(ALLOC_IN_RC(long_reg));
3983 match(RegL);
3985 format %{ %}
3986 interface(REG_INTER);
3987 %}
3989 operand o2RegL() %{
3990 constraint(ALLOC_IN_RC(o2_regL));
3991 match(iRegL);
3993 format %{ %}
3994 interface(REG_INTER);
3995 %}
3997 operand o7RegL() %{
3998 constraint(ALLOC_IN_RC(o7_regL));
3999 match(iRegL);
4001 format %{ %}
4002 interface(REG_INTER);
4003 %}
4005 operand g1RegL() %{
4006 constraint(ALLOC_IN_RC(g1_regL));
4007 match(iRegL);
4009 format %{ %}
4010 interface(REG_INTER);
4011 %}
4013 operand g3RegL() %{
4014 constraint(ALLOC_IN_RC(g3_regL));
4015 match(iRegL);
4017 format %{ %}
4018 interface(REG_INTER);
4019 %}
4021 // Int Register safe
4022 // This is 64bit safe
// An int operand allocated from the long register class, so the full
// 64-bit register is preserved across the value's lifetime.
4023 operand iRegIsafe() %{
4024 constraint(ALLOC_IN_RC(long_reg));
4026 match(iRegI);
4028 format %{ %}
4029 interface(REG_INTER);
4030 %}
4032 // Condition Code Flag Register
// The distinct flags operand types below all allocate from the same
// register class; they differ only in the comparison semantics the
// matcher associates with them (their 'format' names).
4033 operand flagsReg() %{
4034 constraint(ALLOC_IN_RC(int_flags));
4035 match(RegFlags);
4037 format %{ "ccr" %} // both ICC and XCC
4038 interface(REG_INTER);
4039 %}
4041 // Condition Code Register, unsigned comparisons.
4042 operand flagsRegU() %{
4043 constraint(ALLOC_IN_RC(int_flags));
4044 match(RegFlags);
4046 format %{ "icc_U" %}
4047 interface(REG_INTER);
4048 %}
4050 // Condition Code Register, pointer comparisons.
4051 operand flagsRegP() %{
4052 constraint(ALLOC_IN_RC(int_flags));
4053 match(RegFlags);
4055 #ifdef _LP64
4056 format %{ "xcc_P" %}
4057 #else
4058 format %{ "icc_P" %}
4059 #endif
4060 interface(REG_INTER);
4061 %}
4063 // Condition Code Register, long comparisons.
4064 operand flagsRegL() %{
4065 constraint(ALLOC_IN_RC(int_flags));
4066 match(RegFlags);
4068 format %{ "xcc_L" %}
4069 interface(REG_INTER);
4070 %}
4072 // Condition Code Register, floating comparisons, unordered same as "less".
4073 operand flagsRegF() %{
4074 constraint(ALLOC_IN_RC(float_flags));
4075 match(RegFlags);
4076 match(flagsRegF0);
4078 format %{ %}
4079 interface(REG_INTER);
4080 %}
4082 operand flagsRegF0() %{
4083 constraint(ALLOC_IN_RC(float_flag0));
4084 match(RegFlags);
4086 format %{ %}
4087 interface(REG_INTER);
4088 %}
4091 // Condition Code Flag Register used by long compare
4092 operand flagsReg_long_LTGE() %{
4093 constraint(ALLOC_IN_RC(int_flags));
4094 match(RegFlags);
4095 format %{ "icc_LTGE" %}
4096 interface(REG_INTER);
4097 %}
4098 operand flagsReg_long_EQNE() %{
4099 constraint(ALLOC_IN_RC(int_flags));
4100 match(RegFlags);
4101 format %{ "icc_EQNE" %}
4102 interface(REG_INTER);
4103 %}
4104 operand flagsReg_long_LEGT() %{
4105 constraint(ALLOC_IN_RC(int_flags));
4106 match(RegFlags);
4107 format %{ "icc_LEGT" %}
4108 interface(REG_INTER);
4109 %}
// Floating-point register operands.
4112 operand regD() %{
4113 constraint(ALLOC_IN_RC(dflt_reg));
4114 match(RegD);
4116 match(regD_low);
4118 format %{ %}
4119 interface(REG_INTER);
4120 %}
4122 operand regF() %{
4123 constraint(ALLOC_IN_RC(sflt_reg));
4124 match(RegF);
4126 format %{ %}
4127 interface(REG_INTER);
4128 %}
// Double register restricted to the low half of the FP register file.
4130 operand regD_low() %{
4131 constraint(ALLOC_IN_RC(dflt_low_reg));
4132 match(regD);
4134 format %{ %}
4135 interface(REG_INTER);
4136 %}
4138 // Special Registers
4140 // Method Register
4141 operand inline_cache_regP(iRegP reg) %{
4142 constraint(ALLOC_IN_RC(g5_regP)); // G5=inline_cache_reg but uses 2 bits instead of 1
4143 match(reg);
4144 format %{ %}
4145 interface(REG_INTER);
4146 %}
4148 operand interpreter_method_oop_regP(iRegP reg) %{
4149 constraint(ALLOC_IN_RC(g5_regP)); // G5=interpreter_method_oop_reg but uses 2 bits instead of 1
4150 match(reg);
4151 format %{ %}
4152 interface(REG_INTER);
4153 %}
4156 //----------Complex Operands---------------------------------------------------
// Memory addressing-mode operands. MEMORY_INTER exposes base/index/scale/disp
// to the matcher; SPARC supports [reg], [reg+simm13], and [reg+reg] only.
4157 // Indirect Memory Reference
4158 operand indirect(sp_ptr_RegP reg) %{
4159 constraint(ALLOC_IN_RC(sp_ptr_reg));
4160 match(reg);
4162 op_cost(100);
4163 format %{ "[$reg]" %}
4164 interface(MEMORY_INTER) %{
4165 base($reg);
4166 index(0x0);
4167 scale(0x0);
4168 disp(0x0);
4169 %}
4170 %}
4172 // Indirect with simm13 Offset
4173 operand indOffset13(sp_ptr_RegP reg, immX13 offset) %{
4174 constraint(ALLOC_IN_RC(sp_ptr_reg));
4175 match(AddP reg offset);
4177 op_cost(100);
4178 format %{ "[$reg + $offset]" %}
4179 interface(MEMORY_INTER) %{
4180 base($reg);
4181 index(0x0);
4182 scale(0x0);
4183 disp($offset);
4184 %}
4185 %}
4187 // Indirect with simm13 Offset minus 7
4188 operand indOffset13m7(sp_ptr_RegP reg, immX13m7 offset) %{
4189 constraint(ALLOC_IN_RC(sp_ptr_reg));
4190 match(AddP reg offset);
4192 op_cost(100);
4193 format %{ "[$reg + $offset]" %}
4194 interface(MEMORY_INTER) %{
4195 base($reg);
4196 index(0x0);
4197 scale(0x0);
4198 disp($offset);
4199 %}
4200 %}
4202 // Note: Intel has a swapped version also, like this:
4203 //operand indOffsetX(iRegI reg, immP offset) %{
4204 // constraint(ALLOC_IN_RC(int_reg));
4205 // match(AddP offset reg);
4206 //
4207 // op_cost(100);
4208 // format %{ "[$reg + $offset]" %}
4209 // interface(MEMORY_INTER) %{
4210 // base($reg);
4211 // index(0x0);
4212 // scale(0x0);
4213 // disp($offset);
4214 // %}
4215 //%}
4216 //// However, it doesn't make sense for SPARC, since
4217 // we have no particularly good way to embed oops in
4218 // single instructions.
4220 // Indirect with Register Index
4221 operand indIndex(iRegP addr, iRegX index) %{
4222 constraint(ALLOC_IN_RC(ptr_reg));
4223 match(AddP addr index);
4225 op_cost(100);
4226 format %{ "[$addr + $index]" %}
4227 interface(MEMORY_INTER) %{
4228 base($addr);
4229 index($index);
4230 scale(0x0);
4231 disp(0x0);
4232 %}
4233 %}
4235 //----------Special Memory Operands--------------------------------------------
4236 // Stack Slot Operand - This operand is used for loading and storing temporary
4237 // values on the stack where a match requires a value to
4238 // flow through memory.
4239 operand stackSlotI(sRegI reg) %{
4240 constraint(ALLOC_IN_RC(stack_slots));
4241 op_cost(100);
4242 //match(RegI);
4243 format %{ "[$reg]" %}
4244 interface(MEMORY_INTER) %{
4245 base(0xE); // R_SP
4246 index(0x0);
4247 scale(0x0);
4248 disp($reg); // Stack Offset
4249 %}
4250 %}
// Stack slot operand for a pointer value spilled through memory:
// addressed as [R_SP + slot offset].
4252 operand stackSlotP(sRegP reg) %{
4253 constraint(ALLOC_IN_RC(stack_slots));
4254 op_cost(100);
4255 //match(RegP);
4256 format %{ "[$reg]" %}
4257 interface(MEMORY_INTER) %{
4258 base(0xE); // R_SP
4259 index(0x0);
4260 scale(0x0);
4261 disp($reg); // Stack Offset
4262 %}
4263 %}
// Stack slot operand for a float value spilled through memory:
// addressed as [R_SP + slot offset].
4265 operand stackSlotF(sRegF reg) %{
4266 constraint(ALLOC_IN_RC(stack_slots));
4267 op_cost(100);
4268 //match(RegF);
4269 format %{ "[$reg]" %}
4270 interface(MEMORY_INTER) %{
4271 base(0xE); // R_SP
4272 index(0x0);
4273 scale(0x0);
4274 disp($reg); // Stack Offset
4275 %}
4276 %}
// Stack slot operand for a double value spilled through memory:
// addressed as [R_SP + slot offset].
4277 operand stackSlotD(sRegD reg) %{
4278 constraint(ALLOC_IN_RC(stack_slots));
4279 op_cost(100);
4280 //match(RegD);
4281 format %{ "[$reg]" %}
4282 interface(MEMORY_INTER) %{
4283 base(0xE); // R_SP
4284 index(0x0);
4285 scale(0x0);
4286 disp($reg); // Stack Offset
4287 %}
4288 %}
// Stack slot operand for a long value spilled through memory:
// addressed as [R_SP + slot offset].
4289 operand stackSlotL(sRegL reg) %{
4290 constraint(ALLOC_IN_RC(stack_slots));
4291 op_cost(100);
4292 //match(RegL);
4293 format %{ "[$reg]" %}
4294 interface(MEMORY_INTER) %{
4295 base(0xE); // R_SP
4296 index(0x0);
4297 scale(0x0);
4298 disp($reg); // Stack Offset
4299 %}
4300 %}
4302 // Operands for expressing Control Flow
4303 // NOTE: Label is a predefined operand which should not be redefined in
4304 // the AD file. It is generically handled within the ADLC.
4306 //----------Conditional Branch Operands----------------------------------------
4307 // Comparison Op - This is the operation of the comparison, and is limited to
4308 // the following set of codes:
4309 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
4310 //
4311 // Other attributes of the comparison, such as unsignedness, are specified
4312 // by the comparison instruction that sets a condition code flags register.
4313 // That result is represented by a flags operand whose subtype is appropriate
4314 // to the unsignedness (etc.) of the comparison.
4315 //
4316 // Later, the instruction which matches both the Comparison Op (a Bool) and
4317 // the flags (produced by the Cmp) specifies the coding of the comparison op
4318 // by matching a specific subtype of Bool operand below, such as cmpOpU.
// Signed comparison operand: maps each BoolTest condition to the
// SPARC integer condition-code encoding emitted in branch instructions.
4320 operand cmpOp() %{
4321 match(Bool);
4323 format %{ "" %}
4324 interface(COND_INTER) %{
4325 equal(0x1);
4326 not_equal(0x9);
4327 less(0x3);
4328 greater_equal(0xB);
4329 less_equal(0x2);
4330 greater(0xA);
4331 overflow(0x7);
4332 no_overflow(0xF);
4333 %}
4334 %}
4336 // Comparison Op, unsigned
// Unsigned comparison operand: same structure as cmpOp but with the
// unsigned condition-code encodings. The predicate excludes
// overflow/no_overflow tests, which this operand does not handle.
4337 operand cmpOpU() %{
4338 match(Bool);
4339 predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
4340 n->as_Bool()->_test._test != BoolTest::no_overflow);
4342 format %{ "u" %}
4343 interface(COND_INTER) %{
4344 equal(0x1);
4345 not_equal(0x9);
4346 less(0x5);
4347 greater_equal(0xD);
4348 less_equal(0x4);
4349 greater(0xC);
4350 overflow(0x7);
4351 no_overflow(0xF);
4352 %}
4353 %}
4355 // Comparison Op, pointer (same as unsigned)
// Pointer comparison operand: identical encodings to the unsigned case
// (pointers compare as unsigned values). Overflow tests excluded.
4356 operand cmpOpP() %{
4357 match(Bool);
4358 predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
4359 n->as_Bool()->_test._test != BoolTest::no_overflow);
4361 format %{ "p" %}
4362 interface(COND_INTER) %{
4363 equal(0x1);
4364 not_equal(0x9);
4365 less(0x5);
4366 greater_equal(0xD);
4367 less_equal(0x4);
4368 greater(0xC);
4369 overflow(0x7);
4370 no_overflow(0xF);
4371 %}
4372 %}
4374 // Comparison Op, branch-register encoding
// Comparison operand for the branch-on-register-value encoding, which
// uses a different (3-bit) condition numbering than the cc-based forms.
4375 operand cmpOp_reg() %{
4376 match(Bool);
4377 predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
4378 n->as_Bool()->_test._test != BoolTest::no_overflow);
4380 format %{ "" %}
4381 interface(COND_INTER) %{
4382 equal (0x1);
4383 not_equal (0x5);
4384 less (0x3);
4385 greater_equal(0x7);
4386 less_equal (0x2);
4387 greater (0x6);
4388 overflow(0x7); // not supported
4389 no_overflow(0xF); // not supported
4390 %}
4391 %}
4393 // Comparison Code, floating, unordered same as less
// Floating-point comparison operand; per the header comment, the
// encodings treat "unordered" the same as "less".
4394 operand cmpOpF() %{
4395 match(Bool);
4396 predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
4397 n->as_Bool()->_test._test != BoolTest::no_overflow);
4399 format %{ "fl" %}
4400 interface(COND_INTER) %{
4401 equal(0x9);
4402 not_equal(0x1);
4403 less(0x3);
4404 greater_equal(0xB);
4405 less_equal(0xE);
4406 greater(0x6);
4408 overflow(0x7); // not supported
4409 no_overflow(0xF); // not supported
4410 %}
4411 %}
4413 // Used by long compare
// Comparison operand with commuted (operand-swapped) encodings:
// less/greater and less_equal/greater_equal are exchanged relative to
// cmpOp. Used by the long-compare sequences (see header comment).
4414 operand cmpOp_commute() %{
4415 match(Bool);
4416 predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
4417 n->as_Bool()->_test._test != BoolTest::no_overflow);
4419 format %{ "" %}
4420 interface(COND_INTER) %{
4421 equal(0x1);
4422 not_equal(0x9);
4423 less(0xA);
4424 greater_equal(0x2);
4425 less_equal(0xB);
4426 greater(0x3);
4427 overflow(0x7);
4428 no_overflow(0xF);
4429 %}
4430 %}
4432 //----------OPERAND CLASSES----------------------------------------------------
4433 // Operand Classes are groups of operands that are used to simplify
4434 // instruction definitions by not requiring the AD writer to specify separate
4435 // instructions for every form of operand when the instruction accepts
4436 // multiple operand types with the same basic encoding and format. The classic
4437 // case of this is memory operands.
// 'memory' groups all general memory addressing forms accepted by
// memory instructions; 'indIndexMemory' restricts to reg+reg addressing.
4438 opclass memory( indirect, indOffset13, indIndex );
4439 opclass indIndexMemory( indIndex );
4441 //----------PIPELINE-----------------------------------------------------------
// Machine pipeline model: declares scheduling attributes, functional
// units, pipeline stages, and the pipe_class resource-usage templates
// referenced by instruct definitions via ins_pipe(...).
4442 pipeline %{
4444 //----------ATTRIBUTES---------------------------------------------------------
4445 attributes %{
4446 fixed_size_instructions; // Fixed size instructions
4447 branch_has_delay_slot; // Branch has delay slot following
4448 max_instructions_per_bundle = 4; // Up to 4 instructions per bundle
4449 instruction_unit_size = 4; // An instruction is 4 bytes long
4450 instruction_fetch_unit_size = 16; // The processor fetches one line
4451 instruction_fetch_units = 1; // of 16 bytes
4453 // List of nop instructions
4454 nops( Nop_A0, Nop_A1, Nop_MS, Nop_FA, Nop_BR );
4455 %}
4457 //----------RESOURCES----------------------------------------------------------
4458 // Resources are the functional units available to the machine
4459 resources(A0, A1, MS, BR, FA, FM, IDIV, FDIV, IALU = A0 | A1);
4461 //----------PIPELINE DESCRIPTION-----------------------------------------------
4462 // Pipeline Description specifies the stages in the machine's pipeline
4464 pipe_desc(A, P, F, B, I, J, S, R, E, C, M, W, X, T, D);
4466 //----------PIPELINE CLASSES---------------------------------------------------
4467 // Pipeline Classes describe the stages in which input and output are
4468 // referenced by the hardware pipeline.
4470 // Integer ALU reg-reg operation
4471 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
4472 single_instruction;
4473 dst : E(write);
4474 src1 : R(read);
4475 src2 : R(read);
4476 IALU : R;
4477 %}
4479 // Integer ALU reg-reg long operation
4480 pipe_class ialu_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{
4481 instruction_count(2);
4482 dst : E(write);
4483 src1 : R(read);
4484 src2 : R(read);
4485 IALU : R;
4486 IALU : R;
4487 %}
4489 // Integer ALU reg-reg long dependent operation
4490 pipe_class ialu_reg_reg_2_dep(iRegL dst, iRegL src1, iRegL src2, flagsReg cr) %{
4491 instruction_count(1); multiple_bundles;
4492 dst : E(write);
4493 src1 : R(read);
4494 src2 : R(read);
4495 cr : E(write);
4496 IALU : R(2);
4497 %}
4499 // Integer ALU reg-imm operation
4500 pipe_class ialu_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
4501 single_instruction;
4502 dst : E(write);
4503 src1 : R(read);
4504 IALU : R;
4505 %}
4507 // Integer ALU reg-reg operation with condition code
4508 pipe_class ialu_cc_reg_reg(iRegI dst, iRegI src1, iRegI src2, flagsReg cr) %{
4509 single_instruction;
4510 dst : E(write);
4511 cr : E(write);
4512 src1 : R(read);
4513 src2 : R(read);
4514 IALU : R;
4515 %}
4517 // Integer ALU reg-imm operation with condition code
4518 pipe_class ialu_cc_reg_imm(iRegI dst, iRegI src1, immI13 src2, flagsReg cr) %{
4519 single_instruction;
4520 dst : E(write);
4521 cr : E(write);
4522 src1 : R(read);
4523 IALU : R;
4524 %}
4526 // Integer ALU zero-reg operation
4527 pipe_class ialu_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{
4528 single_instruction;
4529 dst : E(write);
4530 src2 : R(read);
4531 IALU : R;
4532 %}
4534 // Integer ALU zero-reg operation with condition code only
4535 pipe_class ialu_cconly_zero_reg(flagsReg cr, iRegI src) %{
4536 single_instruction;
4537 cr : E(write);
4538 src : R(read);
4539 IALU : R;
4540 %}
4542 // Integer ALU reg-reg operation with condition code only
4543 pipe_class ialu_cconly_reg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
4544 single_instruction;
4545 cr : E(write);
4546 src1 : R(read);
4547 src2 : R(read);
4548 IALU : R;
4549 %}
4551 // Integer ALU reg-imm operation with condition code only
4552 pipe_class ialu_cconly_reg_imm(flagsReg cr, iRegI src1, immI13 src2) %{
4553 single_instruction;
4554 cr : E(write);
4555 src1 : R(read);
4556 IALU : R;
4557 %}
4559 // Integer ALU reg-reg-zero operation with condition code only
4560 pipe_class ialu_cconly_reg_reg_zero(flagsReg cr, iRegI src1, iRegI src2, immI0 zero) %{
4561 single_instruction;
4562 cr : E(write);
4563 src1 : R(read);
4564 src2 : R(read);
4565 IALU : R;
4566 %}
4568 // Integer ALU reg-imm-zero operation with condition code only
4569 pipe_class ialu_cconly_reg_imm_zero(flagsReg cr, iRegI src1, immI13 src2, immI0 zero) %{
4570 single_instruction;
4571 cr : E(write);
4572 src1 : R(read);
4573 IALU : R;
4574 %}
4576 // Integer ALU reg-reg operation with condition code, src1 modified
4577 pipe_class ialu_cc_rwreg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
4578 single_instruction;
4579 cr : E(write);
4580 src1 : E(write);
4581 src1 : R(read);
4582 src2 : R(read);
4583 IALU : R;
4584 %}
4586 // Integer ALU reg-imm operation with condition code, src1 modified
4587 pipe_class ialu_cc_rwreg_imm(flagsReg cr, iRegI src1, immI13 src2) %{
4588 single_instruction;
4589 cr : E(write);
4590 src1 : E(write);
4591 src1 : R(read);
4592 IALU : R;
4593 %}
4595 pipe_class cmpL_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg cr ) %{
4596 multiple_bundles;
4597 dst : E(write)+4;
4598 cr : E(write);
4599 src1 : R(read);
4600 src2 : R(read);
4601 IALU : R(3);
4602 BR : R(2);
4603 %}
4605 // Integer ALU operation
4606 pipe_class ialu_none(iRegI dst) %{
4607 single_instruction;
4608 dst : E(write);
4609 IALU : R;
4610 %}
4612 // Integer ALU reg operation
4613 pipe_class ialu_reg(iRegI dst, iRegI src) %{
4614 single_instruction; may_have_no_code;
4615 dst : E(write);
4616 src : R(read);
4617 IALU : R;
4618 %}
4620 // Integer ALU reg conditional operation
4621 // This instruction has a 1 cycle stall, and cannot execute
4622 // in the same cycle as the instruction setting the condition
4623 // code. We kludge this by pretending to read the condition code
4624 // 1 cycle earlier, and by marking the functional units as busy
4625 // for 2 cycles with the result available 1 cycle later than
4626 // is really the case.
4627 pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr ) %{
4628 single_instruction;
4629 op2_out : C(write);
4630 op1 : R(read);
4631 cr : R(read); // This is really E, with a 1 cycle stall
4632 BR : R(2);
4633 MS : R(2);
4634 %}
4636 #ifdef _LP64
4637 pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{
4638 instruction_count(1); multiple_bundles;
4639 dst : C(write)+1;
4640 src : R(read)+1;
4641 IALU : R(1);
4642 BR : E(2);
4643 MS : E(2);
4644 %}
4645 #endif
4647 // Integer ALU reg operation
4648 pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{
4649 single_instruction; may_have_no_code;
4650 dst : E(write);
4651 src : R(read);
4652 IALU : R;
4653 %}
4654 pipe_class ialu_move_reg_I_to_L(iRegL dst, iRegI src) %{
4655 single_instruction; may_have_no_code;
4656 dst : E(write);
4657 src : R(read);
4658 IALU : R;
4659 %}
4661 // Two integer ALU reg operations
4662 pipe_class ialu_reg_2(iRegL dst, iRegL src) %{
4663 instruction_count(2);
4664 dst : E(write);
4665 src : R(read);
4666 A0 : R;
4667 A1 : R;
4668 %}
4670 // Two integer ALU reg operations
4671 pipe_class ialu_move_reg_L_to_L(iRegL dst, iRegL src) %{
4672 instruction_count(2); may_have_no_code;
4673 dst : E(write);
4674 src : R(read);
4675 A0 : R;
4676 A1 : R;
4677 %}
4679 // Integer ALU imm operation
4680 pipe_class ialu_imm(iRegI dst, immI13 src) %{
4681 single_instruction;
4682 dst : E(write);
4683 IALU : R;
4684 %}
4686 // Integer ALU reg-reg with carry operation
4687 pipe_class ialu_reg_reg_cy(iRegI dst, iRegI src1, iRegI src2, iRegI cy) %{
4688 single_instruction;
4689 dst : E(write);
4690 src1 : R(read);
4691 src2 : R(read);
4692 IALU : R;
4693 %}
4695 // Integer ALU cc operation
4696 pipe_class ialu_cc(iRegI dst, flagsReg cc) %{
4697 single_instruction;
4698 dst : E(write);
4699 cc : R(read);
4700 IALU : R;
4701 %}
4703 // Integer ALU cc / second IALU operation
4704 pipe_class ialu_reg_ialu( iRegI dst, iRegI src ) %{
4705 instruction_count(1); multiple_bundles;
4706 dst : E(write)+1;
4707 src : R(read);
4708 IALU : R;
4709 %}
4711 // Integer ALU cc / second IALU operation
4712 pipe_class ialu_reg_reg_ialu( iRegI dst, iRegI p, iRegI q ) %{
4713 instruction_count(1); multiple_bundles;
4714 dst : E(write)+1;
4715 p : R(read);
4716 q : R(read);
4717 IALU : R;
4718 %}
4720 // Integer ALU hi-lo-reg operation
4721 pipe_class ialu_hi_lo_reg(iRegI dst, immI src) %{
4722 instruction_count(1); multiple_bundles;
4723 dst : E(write)+1;
4724 IALU : R(2);
4725 %}
4727 // Float ALU hi-lo-reg operation (with temp)
4728 pipe_class ialu_hi_lo_reg_temp(regF dst, immF src, g3RegP tmp) %{
4729 instruction_count(1); multiple_bundles;
4730 dst : E(write)+1;
4731 IALU : R(2);
4732 %}
4734 // Long Constant
4735 pipe_class loadConL( iRegL dst, immL src ) %{
4736 instruction_count(2); multiple_bundles;
4737 dst : E(write)+1;
4738 IALU : R(2);
4739 IALU : R(2);
4740 %}
4742 // Pointer Constant
4743 pipe_class loadConP( iRegP dst, immP src ) %{
4744 instruction_count(0); multiple_bundles;
4745 fixed_latency(6);
4746 %}
4748 // Polling Address
4749 pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{
4750 #ifdef _LP64
4751 instruction_count(0); multiple_bundles;
4752 fixed_latency(6);
4753 #else
4754 dst : E(write);
4755 IALU : R;
4756 #endif
4757 %}
4759 // Long Constant small
4760 pipe_class loadConLlo( iRegL dst, immL src ) %{
4761 instruction_count(2);
4762 dst : E(write);
4763 IALU : R;
4764 IALU : R;
4765 %}
4767 // [PHH] This is wrong for 64-bit. See LdImmF/D.
4768 pipe_class loadConFD(regF dst, immF src, g3RegP tmp) %{
4769 instruction_count(1); multiple_bundles;
4770 src : R(read);
4771 dst : M(write)+1;
4772 IALU : R;
4773 MS : E;
4774 %}
4776 // Integer ALU nop operation
4777 pipe_class ialu_nop() %{
4778 single_instruction;
4779 IALU : R;
4780 %}
4782 // Integer ALU nop operation
4783 pipe_class ialu_nop_A0() %{
4784 single_instruction;
4785 A0 : R;
4786 %}
4788 // Integer ALU nop operation
4789 pipe_class ialu_nop_A1() %{
4790 single_instruction;
4791 A1 : R;
4792 %}
4794 // Integer Multiply reg-reg operation
4795 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
4796 single_instruction;
4797 dst : E(write);
4798 src1 : R(read);
4799 src2 : R(read);
4800 MS : R(5);
4801 %}
4803 // Integer Multiply reg-imm operation
4804 pipe_class imul_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
4805 single_instruction;
4806 dst : E(write);
4807 src1 : R(read);
4808 MS : R(5);
4809 %}
4811 pipe_class mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
4812 single_instruction;
4813 dst : E(write)+4;
4814 src1 : R(read);
4815 src2 : R(read);
4816 MS : R(6);
4817 %}
4819 pipe_class mulL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
4820 single_instruction;
4821 dst : E(write)+4;
4822 src1 : R(read);
4823 MS : R(6);
4824 %}
4826 // Integer Divide reg-reg
4827 pipe_class sdiv_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI temp, flagsReg cr) %{
4828 instruction_count(1); multiple_bundles;
4829 dst : E(write);
4830 temp : E(write);
4831 src1 : R(read);
4832 src2 : R(read);
4833 temp : R(read);
4834 MS : R(38);
4835 %}
4837 // Integer Divide reg-imm
4838 pipe_class sdiv_reg_imm(iRegI dst, iRegI src1, immI13 src2, iRegI temp, flagsReg cr) %{
4839 instruction_count(1); multiple_bundles;
4840 dst : E(write);
4841 temp : E(write);
4842 src1 : R(read);
4843 temp : R(read);
4844 MS : R(38);
4845 %}
4847 // Long Divide
4848 pipe_class divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
4849 dst : E(write)+71;
4850 src1 : R(read);
4851 src2 : R(read)+1;
4852 MS : R(70);
4853 %}
4855 pipe_class divL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
4856 dst : E(write)+71;
4857 src1 : R(read);
4858 MS : R(70);
4859 %}
4861 // Floating Point Add Float
4862 pipe_class faddF_reg_reg(regF dst, regF src1, regF src2) %{
4863 single_instruction;
4864 dst : X(write);
4865 src1 : E(read);
4866 src2 : E(read);
4867 FA : R;
4868 %}
4870 // Floating Point Add Double
4871 pipe_class faddD_reg_reg(regD dst, regD src1, regD src2) %{
4872 single_instruction;
4873 dst : X(write);
4874 src1 : E(read);
4875 src2 : E(read);
4876 FA : R;
4877 %}
4879 // Floating Point Conditional Move based on integer flags
4880 pipe_class int_conditional_float_move (cmpOp cmp, flagsReg cr, regF dst, regF src) %{
4881 single_instruction;
4882 dst : X(write);
4883 src : E(read);
4884 cr : R(read);
4885 FA : R(2);
4886 BR : R(2);
4887 %}
4889 // Floating Point Conditional Move based on integer flags
4890 pipe_class int_conditional_double_move (cmpOp cmp, flagsReg cr, regD dst, regD src) %{
4891 single_instruction;
4892 dst : X(write);
4893 src : E(read);
4894 cr : R(read);
4895 FA : R(2);
4896 BR : R(2);
4897 %}
4899 // Floating Point Multiply Float
4900 pipe_class fmulF_reg_reg(regF dst, regF src1, regF src2) %{
4901 single_instruction;
4902 dst : X(write);
4903 src1 : E(read);
4904 src2 : E(read);
4905 FM : R;
4906 %}
4908 // Floating Point Multiply Double
4909 pipe_class fmulD_reg_reg(regD dst, regD src1, regD src2) %{
4910 single_instruction;
4911 dst : X(write);
4912 src1 : E(read);
4913 src2 : E(read);
4914 FM : R;
4915 %}
4917 // Floating Point Divide Float
4918 pipe_class fdivF_reg_reg(regF dst, regF src1, regF src2) %{
4919 single_instruction;
4920 dst : X(write);
4921 src1 : E(read);
4922 src2 : E(read);
4923 FM : R;
4924 FDIV : C(14);
4925 %}
4927 // Floating Point Divide Double
4928 pipe_class fdivD_reg_reg(regD dst, regD src1, regD src2) %{
4929 single_instruction;
4930 dst : X(write);
4931 src1 : E(read);
4932 src2 : E(read);
4933 FM : R;
4934 FDIV : C(17);
4935 %}
4937 // Floating Point Move/Negate/Abs Float
4938 pipe_class faddF_reg(regF dst, regF src) %{
4939 single_instruction;
4940 dst : W(write);
4941 src : E(read);
4942 FA : R(1);
4943 %}
4945 // Floating Point Move/Negate/Abs Double
4946 pipe_class faddD_reg(regD dst, regD src) %{
4947 single_instruction;
4948 dst : W(write);
4949 src : E(read);
4950 FA : R;
4951 %}
4953 // Floating Point Convert F->D
4954 pipe_class fcvtF2D(regD dst, regF src) %{
4955 single_instruction;
4956 dst : X(write);
4957 src : E(read);
4958 FA : R;
4959 %}
4961 // Floating Point Convert I->D
4962 pipe_class fcvtI2D(regD dst, regF src) %{
4963 single_instruction;
4964 dst : X(write);
4965 src : E(read);
4966 FA : R;
4967 %}
4969 // Floating Point Convert LHi->D
4970 pipe_class fcvtLHi2D(regD dst, regD src) %{
4971 single_instruction;
4972 dst : X(write);
4973 src : E(read);
4974 FA : R;
4975 %}
4977 // Floating Point Convert L->D
4978 pipe_class fcvtL2D(regD dst, regF src) %{
4979 single_instruction;
4980 dst : X(write);
4981 src : E(read);
4982 FA : R;
4983 %}
4985 // Floating Point Convert L->F
4986 pipe_class fcvtL2F(regD dst, regF src) %{
4987 single_instruction;
4988 dst : X(write);
4989 src : E(read);
4990 FA : R;
4991 %}
4993 // Floating Point Convert D->F
4994 pipe_class fcvtD2F(regD dst, regF src) %{
4995 single_instruction;
4996 dst : X(write);
4997 src : E(read);
4998 FA : R;
4999 %}
5001 // Floating Point Convert I->L
5002 pipe_class fcvtI2L(regD dst, regF src) %{
5003 single_instruction;
5004 dst : X(write);
5005 src : E(read);
5006 FA : R;
5007 %}
5009 // Floating Point Convert D->I
5010 pipe_class fcvtD2I(regF dst, regD src, flagsReg cr) %{
5011 instruction_count(1); multiple_bundles;
5012 dst : X(write)+6;
5013 src : E(read);
5014 FA : R;
5015 %}
5017 // Floating Point Convert D->L
5018 pipe_class fcvtD2L(regD dst, regD src, flagsReg cr) %{
5019 instruction_count(1); multiple_bundles;
5020 dst : X(write)+6;
5021 src : E(read);
5022 FA : R;
5023 %}
5025 // Floating Point Convert F->I
5026 pipe_class fcvtF2I(regF dst, regF src, flagsReg cr) %{
5027 instruction_count(1); multiple_bundles;
5028 dst : X(write)+6;
5029 src : E(read);
5030 FA : R;
5031 %}
5033 // Floating Point Convert F->L
5034 pipe_class fcvtF2L(regD dst, regF src, flagsReg cr) %{
5035 instruction_count(1); multiple_bundles;
5036 dst : X(write)+6;
5037 src : E(read);
5038 FA : R;
5039 %}
5041 // Floating Point Convert I->F
5042 pipe_class fcvtI2F(regF dst, regF src) %{
5043 single_instruction;
5044 dst : X(write);
5045 src : E(read);
5046 FA : R;
5047 %}
5049 // Floating Point Compare
5050 pipe_class faddF_fcc_reg_reg_zero(flagsRegF cr, regF src1, regF src2, immI0 zero) %{
5051 single_instruction;
5052 cr : X(write);
5053 src1 : E(read);
5054 src2 : E(read);
5055 FA : R;
5056 %}
5058 // Floating Point Compare
5059 pipe_class faddD_fcc_reg_reg_zero(flagsRegF cr, regD src1, regD src2, immI0 zero) %{
5060 single_instruction;
5061 cr : X(write);
5062 src1 : E(read);
5063 src2 : E(read);
5064 FA : R;
5065 %}
5067 // Floating Add Nop
5068 pipe_class fadd_nop() %{
5069 single_instruction;
5070 FA : R;
5071 %}
5073 // Integer Store to Memory
5074 pipe_class istore_mem_reg(memory mem, iRegI src) %{
5075 single_instruction;
5076 mem : R(read);
5077 src : C(read);
5078 MS : R;
5079 %}
5081 // Integer Store to Memory
5082 pipe_class istore_mem_spORreg(memory mem, sp_ptr_RegP src) %{
5083 single_instruction;
5084 mem : R(read);
5085 src : C(read);
5086 MS : R;
5087 %}
5089 // Integer Store Zero to Memory
5090 pipe_class istore_mem_zero(memory mem, immI0 src) %{
5091 single_instruction;
5092 mem : R(read);
5093 MS : R;
5094 %}
5096 // Special Stack Slot Store
5097 pipe_class istore_stk_reg(stackSlotI stkSlot, iRegI src) %{
5098 single_instruction;
5099 stkSlot : R(read);
5100 src : C(read);
5101 MS : R;
5102 %}
5104 // Special Stack Slot Store
5105 pipe_class lstoreI_stk_reg(stackSlotL stkSlot, iRegI src) %{
5106 instruction_count(2); multiple_bundles;
5107 stkSlot : R(read);
5108 src : C(read);
5109 MS : R(2);
5110 %}
5112 // Float Store
5113 pipe_class fstoreF_mem_reg(memory mem, RegF src) %{
5114 single_instruction;
5115 mem : R(read);
5116 src : C(read);
5117 MS : R;
5118 %}
5120 // Float Store
5121 pipe_class fstoreF_mem_zero(memory mem, immF0 src) %{
5122 single_instruction;
5123 mem : R(read);
5124 MS : R;
5125 %}
5127 // Double Store
5128 pipe_class fstoreD_mem_reg(memory mem, RegD src) %{
5129 instruction_count(1);
5130 mem : R(read);
5131 src : C(read);
5132 MS : R;
5133 %}
5135 // Double Store
5136 pipe_class fstoreD_mem_zero(memory mem, immD0 src) %{
5137 single_instruction;
5138 mem : R(read);
5139 MS : R;
5140 %}
5142 // Special Stack Slot Float Store
5143 pipe_class fstoreF_stk_reg(stackSlotI stkSlot, RegF src) %{
5144 single_instruction;
5145 stkSlot : R(read);
5146 src : C(read);
5147 MS : R;
5148 %}
5150 // Special Stack Slot Double Store
5151 pipe_class fstoreD_stk_reg(stackSlotI stkSlot, RegD src) %{
5152 single_instruction;
5153 stkSlot : R(read);
5154 src : C(read);
5155 MS : R;
5156 %}
5158 // Integer Load (when sign bit propagation not needed)
5159 pipe_class iload_mem(iRegI dst, memory mem) %{
5160 single_instruction;
5161 mem : R(read);
5162 dst : C(write);
5163 MS : R;
5164 %}
5166 // Integer Load from stack operand
5167 pipe_class iload_stkD(iRegI dst, stackSlotD mem ) %{
5168 single_instruction;
5169 mem : R(read);
5170 dst : C(write);
5171 MS : R;
5172 %}
5174 // Integer Load (when sign bit propagation or masking is needed)
5175 pipe_class iload_mask_mem(iRegI dst, memory mem) %{
5176 single_instruction;
5177 mem : R(read);
5178 dst : M(write);
5179 MS : R;
5180 %}
5182 // Float Load
5183 pipe_class floadF_mem(regF dst, memory mem) %{
5184 single_instruction;
5185 mem : R(read);
5186 dst : M(write);
5187 MS : R;
5188 %}
5190 // Float Load
5191 pipe_class floadD_mem(regD dst, memory mem) %{
5192 instruction_count(1); multiple_bundles; // Again, unaligned argument is only multiple case
5193 mem : R(read);
5194 dst : M(write);
5195 MS : R;
5196 %}
5198 // Float Load
5199 pipe_class floadF_stk(regF dst, stackSlotI stkSlot) %{
5200 single_instruction;
5201 stkSlot : R(read);
5202 dst : M(write);
5203 MS : R;
5204 %}
5206 // Float Load
5207 pipe_class floadD_stk(regD dst, stackSlotI stkSlot) %{
5208 single_instruction;
5209 stkSlot : R(read);
5210 dst : M(write);
5211 MS : R;
5212 %}
5214 // Memory Nop
5215 pipe_class mem_nop() %{
5216 single_instruction;
5217 MS : R;
5218 %}
5220 pipe_class sethi(iRegP dst, immI src) %{
5221 single_instruction;
5222 dst : E(write);
5223 IALU : R;
5224 %}
5226 pipe_class loadPollP(iRegP poll) %{
5227 single_instruction;
5228 poll : R(read);
5229 MS : R;
5230 %}
5232 pipe_class br(Universe br, label labl) %{
5233 single_instruction_with_delay_slot;
5234 BR : R;
5235 %}
5237 pipe_class br_cc(Universe br, cmpOp cmp, flagsReg cr, label labl) %{
5238 single_instruction_with_delay_slot;
5239 cr : E(read);
5240 BR : R;
5241 %}
5243 pipe_class br_reg(Universe br, cmpOp cmp, iRegI op1, label labl) %{
5244 single_instruction_with_delay_slot;
5245 op1 : E(read);
5246 BR : R;
5247 MS : R;
5248 %}
5250 // Compare and branch
5251 pipe_class cmp_br_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl, flagsReg cr) %{
5252 instruction_count(2); has_delay_slot;
5253 cr : E(write);
5254 src1 : R(read);
5255 src2 : R(read);
5256 IALU : R;
5257 BR : R;
5258 %}
5260 // Compare and branch
5261 pipe_class cmp_br_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI13 src2, label labl, flagsReg cr) %{
5262 instruction_count(2); has_delay_slot;
5263 cr : E(write);
5264 src1 : R(read);
5265 IALU : R;
5266 BR : R;
5267 %}
5269 // Compare and branch using cbcond
5270 pipe_class cbcond_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl) %{
5271 single_instruction;
5272 src1 : E(read);
5273 src2 : E(read);
5274 IALU : R;
5275 BR : R;
5276 %}
5278 // Compare and branch using cbcond
5279 pipe_class cbcond_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI5 src2, label labl) %{
5280 single_instruction;
5281 src1 : E(read);
5282 IALU : R;
5283 BR : R;
5284 %}
5286 pipe_class br_fcc(Universe br, cmpOpF cc, flagsReg cr, label labl) %{
5287 single_instruction_with_delay_slot;
5288 cr : E(read);
5289 BR : R;
5290 %}
5292 pipe_class br_nop() %{
5293 single_instruction;
5294 BR : R;
5295 %}
5297 pipe_class simple_call(method meth) %{
5298 instruction_count(2); multiple_bundles; force_serialization;
5299 fixed_latency(100);
5300 BR : R(1);
5301 MS : R(1);
5302 A0 : R(1);
5303 %}
5305 pipe_class compiled_call(method meth) %{
5306 instruction_count(1); multiple_bundles; force_serialization;
5307 fixed_latency(100);
5308 MS : R(1);
5309 %}
5311 pipe_class call(method meth) %{
5312 instruction_count(0); multiple_bundles; force_serialization;
5313 fixed_latency(100);
5314 %}
5316 pipe_class tail_call(Universe ignore, label labl) %{
5317 single_instruction; has_delay_slot;
5318 fixed_latency(100);
5319 BR : R(1);
5320 MS : R(1);
5321 %}
5323 pipe_class ret(Universe ignore) %{
5324 single_instruction; has_delay_slot;
5325 BR : R(1);
5326 MS : R(1);
5327 %}
5329 pipe_class ret_poll(g3RegP poll) %{
5330 instruction_count(3); has_delay_slot;
5331 poll : E(read);
5332 MS : R;
5333 %}
5335 // The real do-nothing guy
5336 pipe_class empty( ) %{
5337 instruction_count(0);
5338 %}
5340 pipe_class long_memory_op() %{
5341 instruction_count(0); multiple_bundles; force_serialization;
5342 fixed_latency(25);
5343 MS : R(1);
5344 %}
5346 // Check-cast
5347 pipe_class partial_subtype_check_pipe(Universe ignore, iRegP array, iRegP match ) %{
5348 array : R(read);
5349 match : R(read);
5350 IALU : R(2);
5351 BR : R(2);
5352 MS : R;
5353 %}
5355 // Convert FPU flags into +1,0,-1
5356 pipe_class floating_cmp( iRegI dst, regF src1, regF src2 ) %{
5357 src1 : E(read);
5358 src2 : E(read);
5359 dst : E(write);
5360 FA : R;
5361 MS : R(2);
5362 BR : R(2);
5363 %}
5365 // Compare for p < q, and conditionally add y
5366 pipe_class cadd_cmpltmask( iRegI p, iRegI q, iRegI y ) %{
5367 p : E(read);
5368 q : E(read);
5369 y : E(read);
// NOTE(review): no trailing ';' on the next line, unlike sibling
// pipe_classes — ADLC apparently accepts it; confirm before "fixing".
5370 IALU : R(3)
5371 %}
5373 // Perform a compare, then move conditionally in a branch delay slot.
5374 pipe_class min_max( iRegI src2, iRegI srcdst ) %{
5375 src2 : E(read);
5376 srcdst : E(read);
5377 IALU : R;
5378 BR : R;
5379 %}
5381 // Define the class for the Nop node
5382 define %{
5383 MachNop = ialu_nop;
5384 %}
5386 %}
5388 //----------INSTRUCTIONS-------------------------------------------------------
5390 //------------Special Stack Slot instructions - no match rules-----------------
// Move an int-sized stack slot into a float register (raw bit load via
// LDF). Deliberately has no match rule; used only via explicit effects.
5391 instruct stkI_to_regF(regF dst, stackSlotI src) %{
5392 // No match rule to avoid chain rule match.
5393 effect(DEF dst, USE src);
5394 ins_cost(MEMORY_REF_COST);
5395 size(4);
5396 format %{ "LDF $src,$dst\t! stkI to regF" %}
5397 opcode(Assembler::ldf_op3);
5398 ins_encode(simple_form3_mem_reg(src, dst));
5399 ins_pipe(floadF_stk);
5400 %}
// Move a long-sized stack slot into a double register (LDDF).
// No match rule; used only via explicit effects.
5402 instruct stkL_to_regD(regD dst, stackSlotL src) %{
5403 // No match rule to avoid chain rule match.
5404 effect(DEF dst, USE src);
5405 ins_cost(MEMORY_REF_COST);
5406 size(4);
5407 format %{ "LDDF $src,$dst\t! stkL to regD" %}
5408 opcode(Assembler::lddf_op3);
5409 ins_encode(simple_form3_mem_reg(src, dst));
5410 ins_pipe(floadD_stk);
5411 %}
// Store a float register to an int-sized stack slot (STF).
// No match rule; used only via explicit effects.
5413 instruct regF_to_stkI(stackSlotI dst, regF src) %{
5414 // No match rule to avoid chain rule match.
5415 effect(DEF dst, USE src);
5416 ins_cost(MEMORY_REF_COST);
5417 size(4);
5418 format %{ "STF $src,$dst\t! regF to stkI" %}
5419 opcode(Assembler::stf_op3);
5420 ins_encode(simple_form3_mem_reg(dst, src));
5421 ins_pipe(fstoreF_stk_reg);
5422 %}
// Store a double register to a long-sized stack slot (STDF).
// No match rule; used only via explicit effects.
5424 instruct regD_to_stkL(stackSlotL dst, regD src) %{
5425 // No match rule to avoid chain rule match.
5426 effect(DEF dst, USE src);
5427 ins_cost(MEMORY_REF_COST);
5428 size(4);
5429 format %{ "STDF $src,$dst\t! regD to stkL" %}
5430 opcode(Assembler::stdf_op3);
5431 ins_encode(simple_form3_mem_reg(dst, src));
5432 ins_pipe(fstoreD_stk_reg);
5433 %}
// Store an int register into the high half of a long stack slot and
// zero the low half (two STW instructions, 8 bytes total).
5435 instruct regI_to_stkLHi(stackSlotL dst, iRegI src) %{
5436 effect(DEF dst, USE src);
5437 ins_cost(MEMORY_REF_COST*2);
5438 size(8);
5439 format %{ "STW $src,$dst.hi\t! long\n\t"
5440 "STW R_G0,$dst.lo" %}
5441 opcode(Assembler::stw_op3);
5442 ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, R_G0));
5443 ins_pipe(lstoreI_stk_reg);
5444 %}
// Store a long register to a double-sized stack slot (STX).
// No match rule; used only via explicit effects.
5446 instruct regL_to_stkD(stackSlotD dst, iRegL src) %{
5447 // No match rule to avoid chain rule match.
5448 effect(DEF dst, USE src);
5449 ins_cost(MEMORY_REF_COST);
5450 size(4);
5451 format %{ "STX $src,$dst\t! regL to stkD" %}
5452 opcode(Assembler::stx_op3);
5453 ins_encode(simple_form3_mem_reg( dst, src ) );
5454 ins_pipe(istore_stk_reg);
5455 %}
5457 //---------- Chain stack slots between similar types --------
5459 // Load integer from stack slot
// Chain rule: load an int from its stack slot (LDUW).
5460 instruct stkI_to_regI( iRegI dst, stackSlotI src ) %{
5461 match(Set dst src);
5462 ins_cost(MEMORY_REF_COST);
5464 size(4);
5465 format %{ "LDUW $src,$dst\t!stk" %}
5466 opcode(Assembler::lduw_op3);
5467 ins_encode(simple_form3_mem_reg( src, dst ) );
5468 ins_pipe(iload_mem);
5469 %}
5471 // Store integer to stack slot
// Chain rule: store an int to its stack slot (STW).
5472 instruct regI_to_stkI( stackSlotI dst, iRegI src ) %{
5473 match(Set dst src);
5474 ins_cost(MEMORY_REF_COST);
5476 size(4);
5477 format %{ "STW $src,$dst\t!stk" %}
5478 opcode(Assembler::stw_op3);
5479 ins_encode(simple_form3_mem_reg( dst, src ) );
5480 ins_pipe(istore_mem_reg);
5481 %}
5483 // Load long from stack slot
// Chain rule: load a long from its stack slot (LDX).
5484 instruct stkL_to_regL( iRegL dst, stackSlotL src ) %{
5485 match(Set dst src);
5487 ins_cost(MEMORY_REF_COST);
5488 size(4);
5489 format %{ "LDX $src,$dst\t! long" %}
5490 opcode(Assembler::ldx_op3);
5491 ins_encode(simple_form3_mem_reg( src, dst ) );
5492 ins_pipe(iload_mem);
5493 %}
5495 // Store long to stack slot
5496 instruct regL_to_stkL(stackSlotL dst, iRegL src) %{
5497 match(Set dst src);
5499 ins_cost(MEMORY_REF_COST);
5500 size(4);
5501 format %{ "STX $src,$dst\t! long" %}
5502 opcode(Assembler::stx_op3);
5503 ins_encode(simple_form3_mem_reg( dst, src ) );
5504 ins_pipe(istore_mem_reg);
5505 %}
// Pointer register<->stack chain moves. The LP64 variants use 64-bit
// LDX/STX with an explicit size(4); the 32-bit variants use LDUW/STW and
// pass the ldst_op opcode class explicitly. Note: the 32-bit variants carry
// no size() annotation — presumably the default suffices; confirm against adlc.
5507 #ifdef _LP64
5508 // Load pointer from stack slot, 64-bit encoding
5509 instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
5510 match(Set dst src);
5511 ins_cost(MEMORY_REF_COST);
5512 size(4);
5513 format %{ "LDX $src,$dst\t!ptr" %}
5514 opcode(Assembler::ldx_op3);
5515 ins_encode(simple_form3_mem_reg( src, dst ) );
5516 ins_pipe(iload_mem);
5517 %}
5519 // Store pointer to stack slot
5520 instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
5521 match(Set dst src);
5522 ins_cost(MEMORY_REF_COST);
5523 size(4);
5524 format %{ "STX $src,$dst\t!ptr" %}
5525 opcode(Assembler::stx_op3);
5526 ins_encode(simple_form3_mem_reg( dst, src ) );
5527 ins_pipe(istore_mem_reg);
5528 %}
5529 #else // _LP64
5530 // Load pointer from stack slot, 32-bit encoding
5531 instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
5532 match(Set dst src);
5533 ins_cost(MEMORY_REF_COST);
5534 format %{ "LDUW $src,$dst\t!ptr" %}
5535 opcode(Assembler::lduw_op3, Assembler::ldst_op);
5536 ins_encode(simple_form3_mem_reg( src, dst ) );
5537 ins_pipe(iload_mem);
5538 %}
5540 // Store pointer to stack slot
5541 instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
5542 match(Set dst src);
5543 ins_cost(MEMORY_REF_COST);
5544 format %{ "STW $src,$dst\t!ptr" %}
5545 opcode(Assembler::stw_op3, Assembler::ldst_op);
5546 ins_encode(simple_form3_mem_reg( dst, src ) );
5547 ins_pipe(istore_mem_reg);
5548 %}
5549 #endif // _LP64
5551 //------------Special Nop instructions for bundling - no match rules-----------
// Zero-cost filler NOPs, one per functional unit, so the instruction
// scheduler can pad bundles. No match rules: only the scheduler emits them.
// The ALU variants encode NOP as "or" (or_op3/arith_op); the memory, FP-add
// and branch variants use dedicated emit helpers.
5552 // Nop using the A0 functional unit
5553 instruct Nop_A0() %{
5554 ins_cost(0);
5556 format %{ "NOP ! Alu Pipeline" %}
5557 opcode(Assembler::or_op3, Assembler::arith_op);
5558 ins_encode( form2_nop() );
5559 ins_pipe(ialu_nop_A0);
5560 %}
5562 // Nop using the A1 functional unit
5563 instruct Nop_A1( ) %{
5564 ins_cost(0);
5566 format %{ "NOP ! Alu Pipeline" %}
5567 opcode(Assembler::or_op3, Assembler::arith_op);
5568 ins_encode( form2_nop() );
5569 ins_pipe(ialu_nop_A1);
5570 %}
5572 // Nop using the memory functional unit
5573 instruct Nop_MS( ) %{
5574 ins_cost(0);
5576 format %{ "NOP ! Memory Pipeline" %}
5577 ins_encode( emit_mem_nop );
5578 ins_pipe(mem_nop);
5579 %}
5581 // Nop using the floating add functional unit
5582 instruct Nop_FA( ) %{
5583 ins_cost(0);
5585 format %{ "NOP ! Floating Add Pipeline" %}
5586 ins_encode( emit_fadd_nop );
5587 ins_pipe(fadd_nop);
5588 %}
5590 // Nop using the branch functional unit
5591 instruct Nop_BR( ) %{
5592 ins_cost(0);
5594 format %{ "NOP ! Branch Pipeline" %}
5595 ins_encode( emit_br_nop );
5596 ins_pipe(br_nop);
5597 %}
5599 //----------Load/Store/Move Instructions---------------------------------------
5600 //----------Load Instructions--------------------------------------------------
// Sub-word loads (byte/short/char) and their widening/masking combinations.
// The indOffset13m7 variants fold a shift-mask or AND-mask idiom into a single
// narrower load at a byte offset into the word: SPARC is big-endian, so the
// LSB of a 16-bit datum is at +1 and the mask patterns below rely on that.
5601 // Load Byte (8bit signed)
5602 instruct loadB(iRegI dst, memory mem) %{
5603 match(Set dst (LoadB mem));
5604 ins_cost(MEMORY_REF_COST);
5606 size(4);
5607 format %{ "LDSB $mem,$dst\t! byte" %}
5608 ins_encode %{
5609 __ ldsb($mem$$Address, $dst$$Register);
5610 %}
5611 ins_pipe(iload_mask_mem);
5612 %}
5614 // Load Byte (8bit signed) into a Long Register
5615 instruct loadB2L(iRegL dst, memory mem) %{
5616 match(Set dst (ConvI2L (LoadB mem)));
5617 ins_cost(MEMORY_REF_COST);
5619 size(4);
5620 format %{ "LDSB $mem,$dst\t! byte -> long" %}
5621 ins_encode %{
5622 __ ldsb($mem$$Address, $dst$$Register);
5623 %}
5624 ins_pipe(iload_mask_mem);
5625 %}
5627 // Load Unsigned Byte (8bit UNsigned) into an int reg
5628 instruct loadUB(iRegI dst, memory mem) %{
5629 match(Set dst (LoadUB mem));
5630 ins_cost(MEMORY_REF_COST);
5632 size(4);
5633 format %{ "LDUB $mem,$dst\t! ubyte" %}
5634 ins_encode %{
5635 __ ldub($mem$$Address, $dst$$Register);
5636 %}
5637 ins_pipe(iload_mem);
5638 %}
5640 // Load Unsigned Byte (8bit UNsigned) into a Long Register
5641 instruct loadUB2L(iRegL dst, memory mem) %{
5642 match(Set dst (ConvI2L (LoadUB mem)));
5643 ins_cost(MEMORY_REF_COST);
5645 size(4);
5646 format %{ "LDUB $mem,$dst\t! ubyte -> long" %}
5647 ins_encode %{
5648 __ ldub($mem$$Address, $dst$$Register);
5649 %}
5650 ins_pipe(iload_mem);
5651 %}
5653 // Load Unsigned Byte (8 bit UNsigned) with 8-bit mask into Long Register
// Load plus explicit AND with the immediate mask: two instructions, size 2*4.
5654 instruct loadUB2L_immI8(iRegL dst, memory mem, immI8 mask) %{
5655 match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
5656 ins_cost(MEMORY_REF_COST + DEFAULT_COST);
5658 size(2*4);
5659 format %{ "LDUB $mem,$dst\t# ubyte & 8-bit mask -> long\n\t"
5660 "AND $dst,$mask,$dst" %}
5661 ins_encode %{
5662 __ ldub($mem$$Address, $dst$$Register);
5663 __ and3($dst$$Register, $mask$$constant, $dst$$Register);
5664 %}
5665 ins_pipe(iload_mem);
5666 %}
5668 // Load Short (16bit signed)
5669 instruct loadS(iRegI dst, memory mem) %{
5670 match(Set dst (LoadS mem));
5671 ins_cost(MEMORY_REF_COST);
5673 size(4);
5674 format %{ "LDSH $mem,$dst\t! short" %}
5675 ins_encode %{
5676 __ ldsh($mem$$Address, $dst$$Register);
5677 %}
5678 ins_pipe(iload_mask_mem);
5679 %}
5681 // Load Short (16 bit signed) to Byte (8 bit signed)
// (x << 24) >> 24 of a short load == signed byte at offset +1 (big-endian).
5682 instruct loadS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
5683 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
5684 ins_cost(MEMORY_REF_COST);
5686 size(4);
5688 format %{ "LDSB $mem+1,$dst\t! short -> byte" %}
5689 ins_encode %{
5690 __ ldsb($mem$$Address, $dst$$Register, 1);
5691 %}
5692 ins_pipe(iload_mask_mem);
5693 %}
5695 // Load Short (16bit signed) into a Long Register
5696 instruct loadS2L(iRegL dst, memory mem) %{
5697 match(Set dst (ConvI2L (LoadS mem)));
5698 ins_cost(MEMORY_REF_COST);
5700 size(4);
5701 format %{ "LDSH $mem,$dst\t! short -> long" %}
5702 ins_encode %{
5703 __ ldsh($mem$$Address, $dst$$Register);
5704 %}
5705 ins_pipe(iload_mask_mem);
5706 %}
5708 // Load Unsigned Short/Char (16bit UNsigned)
5709 instruct loadUS(iRegI dst, memory mem) %{
5710 match(Set dst (LoadUS mem));
5711 ins_cost(MEMORY_REF_COST);
5713 size(4);
5714 format %{ "LDUH $mem,$dst\t! ushort/char" %}
5715 ins_encode %{
5716 __ lduh($mem$$Address, $dst$$Register);
5717 %}
5718 ins_pipe(iload_mem);
5719 %}
5721 // Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed)
5722 instruct loadUS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
5723 match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour));
5724 ins_cost(MEMORY_REF_COST);
5726 size(4);
5727 format %{ "LDSB $mem+1,$dst\t! ushort -> byte" %}
5728 ins_encode %{
5729 __ ldsb($mem$$Address, $dst$$Register, 1);
5730 %}
5731 ins_pipe(iload_mask_mem);
5732 %}
5734 // Load Unsigned Short/Char (16bit UNsigned) into a Long Register
5735 instruct loadUS2L(iRegL dst, memory mem) %{
5736 match(Set dst (ConvI2L (LoadUS mem)));
5737 ins_cost(MEMORY_REF_COST);
5739 size(4);
5740 format %{ "LDUH $mem,$dst\t! ushort/char -> long" %}
5741 ins_encode %{
5742 __ lduh($mem$$Address, $dst$$Register);
5743 %}
5744 ins_pipe(iload_mem);
5745 %}
5747 // Load Unsigned Short/Char (16bit UNsigned) with mask 0xFF into a Long Register
5748 instruct loadUS2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
5749 match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
5750 ins_cost(MEMORY_REF_COST);
5752 size(4);
5753 format %{ "LDUB $mem+1,$dst\t! ushort/char & 0xFF -> long" %}
5754 ins_encode %{
5755 __ ldub($mem$$Address, $dst$$Register, 1); // LSB is index+1 on BE
5756 %}
5757 ins_pipe(iload_mem);
5758 %}
5760 // Load Unsigned Short/Char (16bit UNsigned) with a 13-bit mask into a Long Register
// 13-bit masks fit a SPARC simm13 immediate, so a single AND suffices.
5761 instruct loadUS2L_immI13(iRegL dst, memory mem, immI13 mask) %{
5762 match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
5763 ins_cost(MEMORY_REF_COST + DEFAULT_COST);
5765 size(2*4);
5766 format %{ "LDUH $mem,$dst\t! ushort/char & 13-bit mask -> long\n\t"
5767 "AND $dst,$mask,$dst" %}
5768 ins_encode %{
5769 Register Rdst = $dst$$Register;
5770 __ lduh($mem$$Address, Rdst);
5771 __ and3(Rdst, $mask$$constant, Rdst);
5772 %}
5773 ins_pipe(iload_mem);
5774 %}
5776 // Load Unsigned Short/Char (16bit UNsigned) with a 16-bit mask into a Long Register
// Mask too wide for an immediate: materialize it with SET into a temp first.
// No size() here — SET expands to a variable number of instructions.
5777 instruct loadUS2L_immI16(iRegL dst, memory mem, immI16 mask, iRegL tmp) %{
5778 match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
5779 effect(TEMP dst, TEMP tmp);
5780 ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
5782 format %{ "LDUH $mem,$dst\t! ushort/char & 16-bit mask -> long\n\t"
5783 "SET $mask,$tmp\n\t"
5784 "AND $dst,$tmp,$dst" %}
5785 ins_encode %{
5786 Register Rdst = $dst$$Register;
5787 Register Rtmp = $tmp$$Register;
5788 __ lduh($mem$$Address, Rdst);
5789 __ set($mask$$constant, Rtmp);
5790 __ and3(Rdst, Rtmp, Rdst);
5791 %}
5792 ins_pipe(iload_mem);
5793 %}
// 32-bit integer loads, narrowing (int -> byte/short via shift/mask idioms at
// big-endian offsets +2/+3) and widening (int -> long, signed via LDSW,
// unsigned/zero-extended via LDUW, with optional AND masks).
5795 // Load Integer
5796 instruct loadI(iRegI dst, memory mem) %{
5797 match(Set dst (LoadI mem));
5798 ins_cost(MEMORY_REF_COST);
5800 size(4);
5801 format %{ "LDUW $mem,$dst\t! int" %}
5802 ins_encode %{
5803 __ lduw($mem$$Address, $dst$$Register);
5804 %}
5805 ins_pipe(iload_mem);
5806 %}
5808 // Load Integer to Byte (8 bit signed)
5809 instruct loadI2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
5810 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
5811 ins_cost(MEMORY_REF_COST);
5813 size(4);
5815 format %{ "LDSB $mem+3,$dst\t! int -> byte" %}
5816 ins_encode %{
5817 __ ldsb($mem$$Address, $dst$$Register, 3);
5818 %}
5819 ins_pipe(iload_mask_mem);
5820 %}
5822 // Load Integer to Unsigned Byte (8 bit UNsigned)
5823 instruct loadI2UB(iRegI dst, indOffset13m7 mem, immI_255 mask) %{
5824 match(Set dst (AndI (LoadI mem) mask));
5825 ins_cost(MEMORY_REF_COST);
5827 size(4);
5829 format %{ "LDUB $mem+3,$dst\t! int -> ubyte" %}
5830 ins_encode %{
5831 __ ldub($mem$$Address, $dst$$Register, 3);
5832 %}
5833 ins_pipe(iload_mask_mem);
5834 %}
5836 // Load Integer to Short (16 bit signed)
5837 instruct loadI2S(iRegI dst, indOffset13m7 mem, immI_16 sixteen) %{
5838 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
5839 ins_cost(MEMORY_REF_COST);
5841 size(4);
5843 format %{ "LDSH $mem+2,$dst\t! int -> short" %}
5844 ins_encode %{
5845 __ ldsh($mem$$Address, $dst$$Register, 2);
5846 %}
5847 ins_pipe(iload_mask_mem);
5848 %}
5850 // Load Integer to Unsigned Short (16 bit UNsigned)
5851 instruct loadI2US(iRegI dst, indOffset13m7 mem, immI_65535 mask) %{
5852 match(Set dst (AndI (LoadI mem) mask));
5853 ins_cost(MEMORY_REF_COST);
5855 size(4);
5857 format %{ "LDUH $mem+2,$dst\t! int -> ushort/char" %}
5858 ins_encode %{
5859 __ lduh($mem$$Address, $dst$$Register, 2);
5860 %}
5861 ins_pipe(iload_mask_mem);
5862 %}
5864 // Load Integer into a Long Register
// Sign-extending: ConvI2L of a signed int load uses LDSW.
5865 instruct loadI2L(iRegL dst, memory mem) %{
5866 match(Set dst (ConvI2L (LoadI mem)));
5867 ins_cost(MEMORY_REF_COST);
5869 size(4);
5870 format %{ "LDSW $mem,$dst\t! int -> long" %}
5871 ins_encode %{
5872 __ ldsw($mem$$Address, $dst$$Register);
5873 %}
5874 ins_pipe(iload_mask_mem);
5875 %}
5877 // Load Integer with mask 0xFF into a Long Register
5878 instruct loadI2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
5879 match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5880 ins_cost(MEMORY_REF_COST);
5882 size(4);
5883 format %{ "LDUB $mem+3,$dst\t! int & 0xFF -> long" %}
5884 ins_encode %{
5885 __ ldub($mem$$Address, $dst$$Register, 3); // LSB is index+3 on BE
5886 %}
5887 ins_pipe(iload_mem);
5888 %}
5890 // Load Integer with mask 0xFFFF into a Long Register
5891 instruct loadI2L_immI_65535(iRegL dst, indOffset13m7 mem, immI_65535 mask) %{
5892 match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5893 ins_cost(MEMORY_REF_COST);
5895 size(4);
5896 format %{ "LDUH $mem+2,$dst\t! int & 0xFFFF -> long" %}
5897 ins_encode %{
5898 __ lduh($mem$$Address, $dst$$Register, 2); // LSW is index+2 on BE
5899 %}
5900 ins_pipe(iload_mem);
5901 %}
5903 // Load Integer with a 12-bit mask into a Long Register
// 12-bit (unsigned) masks fit a simm13 immediate: load then a single AND.
5904 instruct loadI2L_immU12(iRegL dst, memory mem, immU12 mask) %{
5905 match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5906 ins_cost(MEMORY_REF_COST + DEFAULT_COST);
5908 size(2*4);
5909 format %{ "LDUW $mem,$dst\t! int & 12-bit mask -> long\n\t"
5910 "AND $dst,$mask,$dst" %}
5911 ins_encode %{
5912 Register Rdst = $dst$$Register;
5913 __ lduw($mem$$Address, Rdst);
5914 __ and3(Rdst, $mask$$constant, Rdst);
5915 %}
5916 ins_pipe(iload_mem);
5917 %}
5919 // Load Integer with a 31-bit mask into a Long Register
// Wide mask: SET materializes it into tmp (variable length, so no size()).
5920 instruct loadI2L_immU31(iRegL dst, memory mem, immU31 mask, iRegL tmp) %{
5921 match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5922 effect(TEMP dst, TEMP tmp);
5923 ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
5925 format %{ "LDUW $mem,$dst\t! int & 31-bit mask -> long\n\t"
5926 "SET $mask,$tmp\n\t"
5927 "AND $dst,$tmp,$dst" %}
5928 ins_encode %{
5929 Register Rdst = $dst$$Register;
5930 Register Rtmp = $tmp$$Register;
5931 __ lduw($mem$$Address, Rdst);
5932 __ set($mask$$constant, Rtmp);
5933 __ and3(Rdst, Rtmp, Rdst);
5934 %}
5935 ins_pipe(iload_mem);
5936 %}
5938 // Load Unsigned Integer into a Long Register
// (long)load & 0xFFFFFFFF folds to a single zero-extending LDUW.
5939 instruct loadUI2L(iRegL dst, memory mem, immL_32bits mask) %{
5940 match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
5941 ins_cost(MEMORY_REF_COST);
5943 size(4);
5944 format %{ "LDUW $mem,$dst\t! uint -> long" %}
5945 ins_encode %{
5946 __ lduw($mem$$Address, $dst$$Register);
5947 %}
5948 ins_pipe(iload_mem);
5949 %}
// Long, pointer, compressed-oop/klass, and floating-point loads. Unaligned
// long/double variants split into multiple narrower accesses. Pointer and
// klass loads switch between LDUW (32-bit) and LDX (64-bit) under _LP64.
5951 // Load Long - aligned
5952 instruct loadL(iRegL dst, memory mem ) %{
5953 match(Set dst (LoadL mem));
5954 ins_cost(MEMORY_REF_COST);
5956 size(4);
5957 format %{ "LDX $mem,$dst\t! long" %}
5958 ins_encode %{
5959 __ ldx($mem$$Address, $dst$$Register);
5960 %}
5961 ins_pipe(iload_mem);
5962 %}
5964 // Load Long - UNaligned
// Two 32-bit loads recombined with shift/or; clobbers O7 as scratch.
5965 instruct loadL_unaligned(iRegL dst, memory mem, o7RegI tmp) %{
5966 match(Set dst (LoadL_unaligned mem));
5967 effect(KILL tmp);
5968 ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
5969 size(16);
5970 format %{ "LDUW $mem+4,R_O7\t! misaligned long\n"
5971 "\tLDUW $mem ,$dst\n"
5972 "\tSLLX #32, $dst, $dst\n"
5973 "\tOR $dst, R_O7, $dst" %}
5974 opcode(Assembler::lduw_op3);
5975 ins_encode(form3_mem_reg_long_unaligned_marshal( mem, dst ));
5976 ins_pipe(iload_mem);
5977 %}
5979 // Load Range
5980 instruct loadRange(iRegI dst, memory mem) %{
5981 match(Set dst (LoadRange mem));
5982 ins_cost(MEMORY_REF_COST);
5984 size(4);
5985 format %{ "LDUW $mem,$dst\t! range" %}
5986 opcode(Assembler::lduw_op3);
5987 ins_encode(simple_form3_mem_reg( mem, dst ) );
5988 ins_pipe(iload_mem);
5989 %}
5991 // Load Integer into %f register (for fitos/fitod)
5992 instruct loadI_freg(regF dst, memory mem) %{
5993 match(Set dst (LoadI mem));
5994 ins_cost(MEMORY_REF_COST);
5995 size(4);
5997 format %{ "LDF $mem,$dst\t! for fitos/fitod" %}
5998 opcode(Assembler::ldf_op3);
5999 ins_encode(simple_form3_mem_reg( mem, dst ) );
6000 ins_pipe(floadF_mem);
6001 %}
6003 // Load Pointer
6004 instruct loadP(iRegP dst, memory mem) %{
6005 match(Set dst (LoadP mem));
6006 ins_cost(MEMORY_REF_COST);
6007 size(4);
6009 #ifndef _LP64
6010 format %{ "LDUW $mem,$dst\t! ptr" %}
6011 ins_encode %{
6012 __ lduw($mem$$Address, $dst$$Register);
6013 %}
6014 #else
6015 format %{ "LDX $mem,$dst\t! ptr" %}
6016 ins_encode %{
6017 __ ldx($mem$$Address, $dst$$Register);
6018 %}
6019 #endif
6020 ins_pipe(iload_mem);
6021 %}
6023 // Load Compressed Pointer
6024 instruct loadN(iRegN dst, memory mem) %{
6025 match(Set dst (LoadN mem));
6026 ins_cost(MEMORY_REF_COST);
6027 size(4);
6029 format %{ "LDUW $mem,$dst\t! compressed ptr" %}
6030 ins_encode %{
6031 __ lduw($mem$$Address, $dst$$Register);
6032 %}
6033 ins_pipe(iload_mem);
6034 %}
6036 // Load Klass Pointer
6037 instruct loadKlass(iRegP dst, memory mem) %{
6038 match(Set dst (LoadKlass mem));
6039 ins_cost(MEMORY_REF_COST);
6040 size(4);
6042 #ifndef _LP64
6043 format %{ "LDUW $mem,$dst\t! klass ptr" %}
6044 ins_encode %{
6045 __ lduw($mem$$Address, $dst$$Register);
6046 %}
6047 #else
6048 format %{ "LDX $mem,$dst\t! klass ptr" %}
6049 ins_encode %{
6050 __ ldx($mem$$Address, $dst$$Register);
6051 %}
6052 #endif
6053 ins_pipe(iload_mem);
6054 %}
6056 // Load narrow Klass Pointer
6057 instruct loadNKlass(iRegN dst, memory mem) %{
6058 match(Set dst (LoadNKlass mem));
6059 ins_cost(MEMORY_REF_COST);
6060 size(4);
6062 format %{ "LDUW $mem,$dst\t! compressed klass ptr" %}
6063 ins_encode %{
6064 __ lduw($mem$$Address, $dst$$Register);
6065 %}
6066 ins_pipe(iload_mem);
6067 %}
6069 // Load Double
6070 instruct loadD(regD dst, memory mem) %{
6071 match(Set dst (LoadD mem));
6072 ins_cost(MEMORY_REF_COST);
6074 size(4);
6075 format %{ "LDDF $mem,$dst" %}
6076 opcode(Assembler::lddf_op3);
6077 ins_encode(simple_form3_mem_reg( mem, dst ) );
6078 ins_pipe(floadD_mem);
6079 %}
6081 // Load Double - UNaligned
// Two single-precision LDFs into the hi/lo halves of a low double register.
6082 instruct loadD_unaligned(regD_low dst, memory mem ) %{
6083 match(Set dst (LoadD_unaligned mem));
6084 ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
6085 size(8);
6086 format %{ "LDF $mem ,$dst.hi\t! misaligned double\n"
6087 "\tLDF $mem+4,$dst.lo\t!" %}
6088 opcode(Assembler::ldf_op3);
6089 ins_encode( form3_mem_reg_double_unaligned( mem, dst ));
6090 ins_pipe(iload_mem);
6091 %}
6093 // Load Float
6094 instruct loadF(regF dst, memory mem) %{
6095 match(Set dst (LoadF mem));
6096 ins_cost(MEMORY_REF_COST);
6098 size(4);
6099 format %{ "LDF $mem,$dst" %}
6100 opcode(Assembler::ldf_op3);
6101 ins_encode(simple_form3_mem_reg( mem, dst ) );
6102 ins_pipe(floadF_mem);
6103 %}
// Constant materialization: integer/long immediates (simm13 via MOV, wider
// via SET/SET64 or a constant-table load), pointer constants with oop/metadata
// relocation, compressed oop/klass constants, and FP constants loaded from
// the constant table.
6105 // Load Constant
6106 instruct loadConI( iRegI dst, immI src ) %{
6107 match(Set dst src);
6108 ins_cost(DEFAULT_COST * 3/2);
6109 format %{ "SET $src,$dst" %}
6110 ins_encode( Set32(src, dst) );
6111 ins_pipe(ialu_hi_lo_reg);
6112 %}
6114 instruct loadConI13( iRegI dst, immI13 src ) %{
6115 match(Set dst src);
6117 size(4);
6118 format %{ "MOV $src,$dst" %}
6119 ins_encode( Set13( src, dst ) );
6120 ins_pipe(ialu_imm);
6121 %}
6123 #ifndef _LP64
// 32-bit: one loadConP handles all pointer constants, dispatching on the
// relocation type at encode time (oop / metadata / plain address).
6124 instruct loadConP(iRegP dst, immP con) %{
6125 match(Set dst con);
6126 ins_cost(DEFAULT_COST * 3/2);
6127 format %{ "SET $con,$dst\t!ptr" %}
6128 ins_encode %{
6129 relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
6130 intptr_t val = $con$$constant;
6131 if (constant_reloc == relocInfo::oop_type) {
6132 __ set_oop_constant((jobject) val, $dst$$Register);
6133 } else if (constant_reloc == relocInfo::metadata_type) {
6134 __ set_metadata_constant((Metadata*)val, $dst$$Register);
6135 } else { // non-oop pointers, e.g. card mark base, heap top
6136 assert(constant_reloc == relocInfo::none, "unexpected reloc type");
6137 __ set(val, $dst$$Register);
6138 }
6139 %}
6140 ins_pipe(loadConP);
6141 %}
6142 #else
// 64-bit: split by immP predicate — cheap constants SET inline,
// expensive ones load from the constant table.
6143 instruct loadConP_set(iRegP dst, immP_set con) %{
6144 match(Set dst con);
6145 ins_cost(DEFAULT_COST * 3/2);
6146 format %{ "SET $con,$dst\t! ptr" %}
6147 ins_encode %{
6148 relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
6149 intptr_t val = $con$$constant;
6150 if (constant_reloc == relocInfo::oop_type) {
6151 __ set_oop_constant((jobject) val, $dst$$Register);
6152 } else if (constant_reloc == relocInfo::metadata_type) {
6153 __ set_metadata_constant((Metadata*)val, $dst$$Register);
6154 } else { // non-oop pointers, e.g. card mark base, heap top
6155 assert(constant_reloc == relocInfo::none, "unexpected reloc type");
6156 __ set(val, $dst$$Register);
6157 }
6158 %}
6159 ins_pipe(loadConP);
6160 %}
6162 instruct loadConP_load(iRegP dst, immP_load con) %{
6163 match(Set dst con);
6164 ins_cost(MEMORY_REF_COST);
6165 format %{ "LD [$constanttablebase + $constantoffset],$dst\t! load from constant table: ptr=$con" %}
6166 ins_encode %{
6167 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
6168 __ ld_ptr($constanttablebase, con_offset, $dst$$Register);
6169 %}
6170 ins_pipe(loadConP);
6171 %}
6173 instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{
6174 match(Set dst con);
6175 ins_cost(DEFAULT_COST * 3/2);
6176 format %{ "SET $con,$dst\t! non-oop ptr" %}
6177 ins_encode %{
6178 __ set($con$$constant, $dst$$Register);
6179 %}
6180 ins_pipe(loadConP);
6181 %}
6182 #endif // _LP64
6184 instruct loadConP0(iRegP dst, immP0 src) %{
6185 match(Set dst src);
6187 size(4);
6188 format %{ "CLR $dst\t!ptr" %}
6189 ins_encode %{
6190 __ clr($dst$$Register);
6191 %}
6192 ins_pipe(ialu_imm);
6193 %}
// Polling-page address: only the SETHI half is emitted; the page is
// presumably aligned so the low bits are zero — see os::get_polling_page.
6195 instruct loadConP_poll(iRegP dst, immP_poll src) %{
6196 match(Set dst src);
6197 ins_cost(DEFAULT_COST);
6198 format %{ "SET $src,$dst\t!ptr" %}
6199 ins_encode %{
6200 AddressLiteral polling_page(os::get_polling_page());
6201 __ sethi(polling_page, reg_to_register_object($dst$$reg));
6202 %}
6203 ins_pipe(loadConP_poll);
6204 %}
6206 instruct loadConN0(iRegN dst, immN0 src) %{
6207 match(Set dst src);
6209 size(4);
6210 format %{ "CLR $dst\t! compressed NULL ptr" %}
6211 ins_encode %{
6212 __ clr($dst$$Register);
6213 %}
6214 ins_pipe(ialu_imm);
6215 %}
6217 instruct loadConN(iRegN dst, immN src) %{
6218 match(Set dst src);
6219 ins_cost(DEFAULT_COST * 3/2);
6220 format %{ "SET $src,$dst\t! compressed ptr" %}
6221 ins_encode %{
6222 Register dst = $dst$$Register;
6223 __ set_narrow_oop((jobject)$src$$constant, dst);
6224 %}
6225 ins_pipe(ialu_hi_lo_reg);
6226 %}
6228 instruct loadConNKlass(iRegN dst, immNKlass src) %{
6229 match(Set dst src);
6230 ins_cost(DEFAULT_COST * 3/2);
6231 format %{ "SET $src,$dst\t! compressed klass ptr" %}
6232 ins_encode %{
6233 Register dst = $dst$$Register;
6234 __ set_narrow_klass((Klass*)$src$$constant, dst);
6235 %}
6236 ins_pipe(ialu_hi_lo_reg);
6237 %}
6239 // Materialize long value (predicated by immL_cheap).
6240 instruct loadConL_set64(iRegL dst, immL_cheap con, o7RegL tmp) %{
6241 match(Set dst con);
6242 effect(KILL tmp);
6243 ins_cost(DEFAULT_COST * 3);
6244 format %{ "SET64 $con,$dst KILL $tmp\t! cheap long" %}
6245 ins_encode %{
6246 __ set64($con$$constant, $dst$$Register, $tmp$$Register);
6247 %}
6248 ins_pipe(loadConL);
6249 %}
6251 // Load long value from constant table (predicated by immL_expensive).
6252 instruct loadConL_ldx(iRegL dst, immL_expensive con) %{
6253 match(Set dst con);
6254 ins_cost(MEMORY_REF_COST);
6255 format %{ "LDX [$constanttablebase + $constantoffset],$dst\t! load from constant table: long=$con" %}
6256 ins_encode %{
6257 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
6258 __ ldx($constanttablebase, con_offset, $dst$$Register);
6259 %}
6260 ins_pipe(loadConL);
6261 %}
6263 instruct loadConL0( iRegL dst, immL0 src ) %{
6264 match(Set dst src);
6265 ins_cost(DEFAULT_COST);
6266 size(4);
6267 format %{ "CLR $dst\t! long" %}
6268 ins_encode( Set13( src, dst ) );
6269 ins_pipe(ialu_imm);
6270 %}
6272 instruct loadConL13( iRegL dst, immL13 src ) %{
6273 match(Set dst src);
6274 ins_cost(DEFAULT_COST * 2);
6276 size(4);
6277 format %{ "MOV $src,$dst\t! long" %}
6278 ins_encode( Set13( src, dst ) );
6279 ins_pipe(ialu_imm);
6280 %}
// FP constants always come from the constant table; O7 is killed as the
// scratch register for an out-of-range table offset.
6282 instruct loadConF(regF dst, immF con, o7RegI tmp) %{
6283 match(Set dst con);
6284 effect(KILL tmp);
6285 format %{ "LDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: float=$con" %}
6286 ins_encode %{
6287 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
6288 __ ldf(FloatRegisterImpl::S, $constanttablebase, con_offset, $dst$$FloatRegister);
6289 %}
6290 ins_pipe(loadConFD);
6291 %}
6293 instruct loadConD(regD dst, immD con, o7RegI tmp) %{
6294 match(Set dst con);
6295 effect(KILL tmp);
6296 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: double=$con" %}
6297 ins_encode %{
6298 // XXX This is a quick fix for 6833573.
6299 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset($con), $dst$$FloatRegister);
6300 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
6301 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
6302 %}
6303 ins_pipe(loadConFD);
6304 %}
6306 // Prefetch instructions.
6307 // Must be safe to execute with invalid address (cannot fault).
// Read/write prefetch hints (fcn 0 = read-many, 2 = write-many), plus the
// allocation prefetch selected by AllocatePrefetchInstr: 0 = PREFETCH hint,
// 1 = a BIS block-init store (which CAN fault, hence the TLAB-space caveat).
6309 instruct prefetchr( memory mem ) %{
6310 match( PrefetchRead mem );
6311 ins_cost(MEMORY_REF_COST);
6312 size(4);
6314 format %{ "PREFETCH $mem,0\t! Prefetch read-many" %}
6315 opcode(Assembler::prefetch_op3);
6316 ins_encode( form3_mem_prefetch_read( mem ) );
6317 ins_pipe(iload_mem);
6318 %}
6320 instruct prefetchw( memory mem ) %{
6321 match( PrefetchWrite mem );
6322 ins_cost(MEMORY_REF_COST);
6323 size(4);
6325 format %{ "PREFETCH $mem,2\t! Prefetch write-many (and read)" %}
6326 opcode(Assembler::prefetch_op3);
6327 ins_encode( form3_mem_prefetch_write( mem ) );
6328 ins_pipe(iload_mem);
6329 %}
6331 // Prefetch instructions for allocation.
6333 instruct prefetchAlloc( memory mem ) %{
6334 predicate(AllocatePrefetchInstr == 0);
6335 match( PrefetchAllocation mem );
6336 ins_cost(MEMORY_REF_COST);
6337 size(4);
6339 format %{ "PREFETCH $mem,2\t! Prefetch allocation" %}
6340 opcode(Assembler::prefetch_op3);
6341 ins_encode( form3_mem_prefetch_write( mem ) );
6342 ins_pipe(iload_mem);
6343 %}
6345 // Use BIS instruction to prefetch for allocation.
6346 // Could fault, need space at the end of TLAB.
6347 instruct prefetchAlloc_bis( iRegP dst ) %{
6348 predicate(AllocatePrefetchInstr == 1);
6349 match( PrefetchAllocation dst );
6350 ins_cost(MEMORY_REF_COST);
6351 size(4);
6353 format %{ "STXA [$dst]\t! // Prefetch allocation using BIS" %}
6354 ins_encode %{
6355 __ stxa(G0, $dst$$Register, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
6356 %}
6357 ins_pipe(istore_mem_reg);
6358 %}
6360 // Next code is used for finding next cache line address to prefetch.
// Pointer-masking AND to compute the cache-line-aligned address;
// AndI/CastP2X in 32-bit mode, AndL in 64-bit mode.
6361 #ifndef _LP64
6362 instruct cacheLineAdr( iRegP dst, iRegP src, immI13 mask ) %{
6363 match(Set dst (CastX2P (AndI (CastP2X src) mask)));
6364 ins_cost(DEFAULT_COST);
6365 size(4);
6367 format %{ "AND $src,$mask,$dst\t! next cache line address" %}
6368 ins_encode %{
6369 __ and3($src$$Register, $mask$$constant, $dst$$Register);
6370 %}
6371 ins_pipe(ialu_reg_imm);
6372 %}
6373 #else
6374 instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{
6375 match(Set dst (CastX2P (AndL (CastP2X src) mask)));
6376 ins_cost(DEFAULT_COST);
6377 size(4);
6379 format %{ "AND $src,$mask,$dst\t! next cache line address" %}
6380 ins_encode %{
6381 __ and3($src$$Register, $mask$$constant, $dst$$Register);
6382 %}
6383 ins_pipe(ialu_reg_imm);
6384 %}
6385 #endif
6387 //----------Store Instructions-------------------------------------------------
// Store rules: for each width there is a register variant and a *0 variant
// that stores a zero immediate by using the hard-wired zero register R_G0,
// scheduled on the cheaper istore_mem_zero pipeline.
6388 // Store Byte
6389 instruct storeB(memory mem, iRegI src) %{
6390 match(Set mem (StoreB mem src));
6391 ins_cost(MEMORY_REF_COST);
6393 size(4);
6394 format %{ "STB $src,$mem\t! byte" %}
6395 opcode(Assembler::stb_op3);
6396 ins_encode(simple_form3_mem_reg( mem, src ) );
6397 ins_pipe(istore_mem_reg);
6398 %}
6400 instruct storeB0(memory mem, immI0 src) %{
6401 match(Set mem (StoreB mem src));
6402 ins_cost(MEMORY_REF_COST);
6404 size(4);
6405 format %{ "STB $src,$mem\t! byte" %}
6406 opcode(Assembler::stb_op3);
6407 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6408 ins_pipe(istore_mem_zero);
6409 %}
// Card-mark byte store (StoreCM): always writes zero.
6411 instruct storeCM0(memory mem, immI0 src) %{
6412 match(Set mem (StoreCM mem src));
6413 ins_cost(MEMORY_REF_COST);
6415 size(4);
6416 format %{ "STB $src,$mem\t! CMS card-mark byte 0" %}
6417 opcode(Assembler::stb_op3);
6418 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6419 ins_pipe(istore_mem_zero);
6420 %}
6422 // Store Char/Short
6423 instruct storeC(memory mem, iRegI src) %{
6424 match(Set mem (StoreC mem src));
6425 ins_cost(MEMORY_REF_COST);
6427 size(4);
6428 format %{ "STH $src,$mem\t! short" %}
6429 opcode(Assembler::sth_op3);
6430 ins_encode(simple_form3_mem_reg( mem, src ) );
6431 ins_pipe(istore_mem_reg);
6432 %}
6434 instruct storeC0(memory mem, immI0 src) %{
6435 match(Set mem (StoreC mem src));
6436 ins_cost(MEMORY_REF_COST);
6438 size(4);
6439 format %{ "STH $src,$mem\t! short" %}
6440 opcode(Assembler::sth_op3);
6441 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6442 ins_pipe(istore_mem_zero);
6443 %}
6445 // Store Integer
6446 instruct storeI(memory mem, iRegI src) %{
6447 match(Set mem (StoreI mem src));
6448 ins_cost(MEMORY_REF_COST);
6450 size(4);
6451 format %{ "STW $src,$mem" %}
6452 opcode(Assembler::stw_op3);
6453 ins_encode(simple_form3_mem_reg( mem, src ) );
6454 ins_pipe(istore_mem_reg);
6455 %}
6457 // Store Long
6458 instruct storeL(memory mem, iRegL src) %{
6459 match(Set mem (StoreL mem src));
6460 ins_cost(MEMORY_REF_COST);
6461 size(4);
6462 format %{ "STX $src,$mem\t! long" %}
6463 opcode(Assembler::stx_op3);
6464 ins_encode(simple_form3_mem_reg( mem, src ) );
6465 ins_pipe(istore_mem_reg);
6466 %}
6468 instruct storeI0(memory mem, immI0 src) %{
6469 match(Set mem (StoreI mem src));
6470 ins_cost(MEMORY_REF_COST);
6472 size(4);
6473 format %{ "STW $src,$mem" %}
6474 opcode(Assembler::stw_op3);
6475 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6476 ins_pipe(istore_mem_zero);
6477 %}
6479 instruct storeL0(memory mem, immL0 src) %{
6480 match(Set mem (StoreL mem src));
6481 ins_cost(MEMORY_REF_COST);
6483 size(4);
6484 format %{ "STX $src,$mem" %}
6485 opcode(Assembler::stx_op3);
6486 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6487 ins_pipe(istore_mem_zero);
6488 %}
6490 // Store Integer from float register (used after fstoi)
6491 instruct storeI_Freg(memory mem, regF src) %{
6492 match(Set mem (StoreI mem src));
6493 ins_cost(MEMORY_REF_COST);
6495 size(4);
6496 format %{ "STF $src,$mem\t! after fstoi/fdtoi" %}
6497 opcode(Assembler::stf_op3);
6498 ins_encode(simple_form3_mem_reg( mem, src ) );
6499 ins_pipe(fstoreF_mem_reg);
6500 %}
6502 // Store Pointer
// STW in 32-bit mode, STX in 64-bit; REGP_OP marks the operand as a pointer.
6503 instruct storeP(memory dst, sp_ptr_RegP src) %{
6504 match(Set dst (StoreP dst src));
6505 ins_cost(MEMORY_REF_COST);
6506 size(4);
6508 #ifndef _LP64
6509 format %{ "STW $src,$dst\t! ptr" %}
6510 opcode(Assembler::stw_op3, 0, REGP_OP);
6511 #else
6512 format %{ "STX $src,$dst\t! ptr" %}
6513 opcode(Assembler::stx_op3, 0, REGP_OP);
6514 #endif
6515 ins_encode( form3_mem_reg( dst, src ) );
6516 ins_pipe(istore_mem_spORreg);
6517 %}
6519 instruct storeP0(memory dst, immP0 src) %{
6520 match(Set dst (StoreP dst src));
6521 ins_cost(MEMORY_REF_COST);
6522 size(4);
6524 #ifndef _LP64
6525 format %{ "STW $src,$dst\t! ptr" %}
6526 opcode(Assembler::stw_op3, 0, REGP_OP);
6527 #else
6528 format %{ "STX $src,$dst\t! ptr" %}
6529 opcode(Assembler::stx_op3, 0, REGP_OP);
6530 #endif
6531 ins_encode( form3_mem_reg( dst, R_G0 ) );
6532 ins_pipe(istore_mem_zero);
6533 %}
6535 // Store Compressed Pointer
// Hand-rolled encode: register+register form when an index register is
// present (index != G0), register+displacement form otherwise.
6536 instruct storeN(memory dst, iRegN src) %{
6537 match(Set dst (StoreN dst src));
6538 ins_cost(MEMORY_REF_COST);
6539 size(4);
6541 format %{ "STW $src,$dst\t! compressed ptr" %}
6542 ins_encode %{
6543 Register base = as_Register($dst$$base);
6544 Register index = as_Register($dst$$index);
6545 Register src = $src$$Register;
6546 if (index != G0) {
6547 __ stw(src, base, index);
6548 } else {
6549 __ stw(src, base, $dst$$disp);
6550 }
6551 %}
6552 ins_pipe(istore_mem_spORreg);
6553 %}
6555 instruct storeNKlass(memory dst, iRegN src) %{
6556 match(Set dst (StoreNKlass dst src));
6557 ins_cost(MEMORY_REF_COST);
6558 size(4);
6560 format %{ "STW $src,$dst\t! compressed klass ptr" %}
6561 ins_encode %{
6562 Register base = as_Register($dst$$base);
6563 Register index = as_Register($dst$$index);
6564 Register src = $src$$Register;
6565 if (index != G0) {
6566 __ stw(src, base, index);
6567 } else {
6568 __ stw(src, base, $dst$$disp);
6569 }
6570 %}
6571 ins_pipe(istore_mem_spORreg);
6572 %}
6574 instruct storeN0(memory dst, immN0 src) %{
6575 match(Set dst (StoreN dst src));
6576 ins_cost(MEMORY_REF_COST);
6577 size(4);
6579 format %{ "STW $src,$dst\t! compressed ptr" %}
6580 ins_encode %{
6581 Register base = as_Register($dst$$base);
6582 Register index = as_Register($dst$$index);
6583 if (index != G0) {
6584 __ stw(0, base, index);
6585 } else {
6586 __ stw(0, base, $dst$$disp);
6587 }
6588 %}
6589 ins_pipe(istore_mem_zero);
6590 %}
6592 // Store Double
6593 instruct storeD( memory mem, regD src) %{
6594 match(Set mem (StoreD mem src));
6595 ins_cost(MEMORY_REF_COST);
6597 size(4);
6598 format %{ "STDF $src,$mem" %}
6599 opcode(Assembler::stdf_op3);
6600 ins_encode(simple_form3_mem_reg( mem, src ) );
6601 ins_pipe(fstoreD_mem_reg);
6602 %}
// Double-zero store uses an integer STX of G0 — no FP register needed.
6604 instruct storeD0( memory mem, immD0 src) %{
6605 match(Set mem (StoreD mem src));
6606 ins_cost(MEMORY_REF_COST);
6608 size(4);
6609 format %{ "STX $src,$mem" %}
6610 opcode(Assembler::stx_op3);
6611 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6612 ins_pipe(fstoreD_mem_zero);
6613 %}
6615 // Store Float
6616 instruct storeF( memory mem, regF src) %{
6617 match(Set mem (StoreF mem src));
6618 ins_cost(MEMORY_REF_COST);
6620 size(4);
6621 format %{ "STF $src,$mem" %}
6622 opcode(Assembler::stf_op3);
6623 ins_encode(simple_form3_mem_reg( mem, src ) );
6624 ins_pipe(fstoreF_mem_reg);
6625 %}
6627 instruct storeF0( memory mem, immF0 src) %{
6628 match(Set mem (StoreF mem src));
6629 ins_cost(MEMORY_REF_COST);
6631 size(4);
6632 format %{ "STW $src,$mem\t! storeF0" %}
6633 opcode(Assembler::stw_op3);
6634 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6635 ins_pipe(fstoreF_mem_zero);
6636 %}
6638 // Convert oop pointer into compressed form
// The _not_null variants are selected by predicate when the type system
// proves the value is never null, allowing a cheaper encode/decode sequence
// inside the MacroAssembler helpers.
6639 instruct encodeHeapOop(iRegN dst, iRegP src) %{
6640 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
6641 match(Set dst (EncodeP src));
6642 format %{ "encode_heap_oop $src, $dst" %}
6643 ins_encode %{
6644 __ encode_heap_oop($src$$Register, $dst$$Register);
6645 %}
6646 ins_pipe(ialu_reg);
6647 %}
6649 instruct encodeHeapOop_not_null(iRegN dst, iRegP src) %{
6650 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
6651 match(Set dst (EncodeP src));
6652 format %{ "encode_heap_oop_not_null $src, $dst" %}
6653 ins_encode %{
6654 __ encode_heap_oop_not_null($src$$Register, $dst$$Register);
6655 %}
6656 ins_pipe(ialu_reg);
6657 %}
// Decode applies when the oop may be null; Constant pointers are also
// excluded here and handled by the not_null variant below.
6659 instruct decodeHeapOop(iRegP dst, iRegN src) %{
6660 predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
6661 n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant);
6662 match(Set dst (DecodeN src));
6663 format %{ "decode_heap_oop $src, $dst" %}
6664 ins_encode %{
6665 __ decode_heap_oop($src$$Register, $dst$$Register);
6666 %}
6667 ins_pipe(ialu_reg);
6668 %}
6670 instruct decodeHeapOop_not_null(iRegP dst, iRegN src) %{
6671 predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
6672 n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
6673 match(Set dst (DecodeN src));
6674 format %{ "decode_heap_oop_not_null $src, $dst" %}
6675 ins_encode %{
6676 __ decode_heap_oop_not_null($src$$Register, $dst$$Register);
6677 %}
6678 ins_pipe(ialu_reg);
6679 %}
// Compressed class pointers are never null, so only not_null forms exist.
6681 instruct encodeKlass_not_null(iRegN dst, iRegP src) %{
6682 match(Set dst (EncodePKlass src));
6683 format %{ "encode_klass_not_null $src, $dst" %}
6684 ins_encode %{
6685 __ encode_klass_not_null($src$$Register, $dst$$Register);
6686 %}
6687 ins_pipe(ialu_reg);
6688 %}
6690 instruct decodeKlass_not_null(iRegP dst, iRegN src) %{
6691 match(Set dst (DecodeNKlass src));
6692 format %{ "decode_klass_not_null $src, $dst" %}
6693 ins_encode %{
6694 __ decode_klass_not_null($src$$Register, $dst$$Register);
6695 %}
6696 ins_pipe(ialu_reg);
6697 %}
6699 //----------MemBar Instructions-----------------------------------------------
6700 // Memory barrier flavors
// Several flavors are free on SPARC TSO (size(0), empty encoding) because the
// required ordering is already provided by a preceding/following atomic
// (FastLock/FastUnlock CAS) or by the memory model itself.
6702 instruct membar_acquire() %{
6703 match(MemBarAcquire);
6704 match(LoadFence);
6705 ins_cost(4*MEMORY_REF_COST);
// size(0): the enc_membar_acquire helper is expected to emit nothing here --
// NOTE(review): confirm against the encoding class definition earlier in file.
6707 size(0);
6708 format %{ "MEMBAR-acquire" %}
6709 ins_encode( enc_membar_acquire );
6710 ins_pipe(long_memory_op);
6711 %}
6713 instruct membar_acquire_lock() %{
6714 match(MemBarAcquireLock);
6715 ins_cost(0);
6717 size(0);
6718 format %{ "!MEMBAR-acquire (CAS in prior FastLock so empty encoding)" %}
6719 ins_encode( );
6720 ins_pipe(empty);
6721 %}
6723 instruct membar_release() %{
6724 match(MemBarRelease);
6725 match(StoreFence);
6726 ins_cost(4*MEMORY_REF_COST);
6728 size(0);
6729 format %{ "MEMBAR-release" %}
6730 ins_encode( enc_membar_release );
6731 ins_pipe(long_memory_op);
6732 %}
6734 instruct membar_release_lock() %{
6735 match(MemBarReleaseLock);
6736 ins_cost(0);
6738 size(0);
6739 format %{ "!MEMBAR-release (CAS in succeeding FastUnlock so empty encoding)" %}
6740 ins_encode( );
6741 ins_pipe(empty);
6742 %}
// The only barrier that costs a real instruction (StoreLoad ordering).
6744 instruct membar_volatile() %{
6745 match(MemBarVolatile);
6746 ins_cost(4*MEMORY_REF_COST);
6748 size(4);
6749 format %{ "MEMBAR-volatile" %}
6750 ins_encode( enc_membar_volatile );
6751 ins_pipe(long_memory_op);
6752 %}
// Elided when the matcher proves a following atomic already orders the store.
6754 instruct unnecessary_membar_volatile() %{
6755 match(MemBarVolatile);
6756 predicate(Matcher::post_store_load_barrier(n));
6757 ins_cost(0);
6759 size(0);
6760 format %{ "!MEMBAR-volatile (unnecessary so empty encoding)" %}
6761 ins_encode( );
6762 ins_pipe(empty);
6763 %}
// StoreStore is a no-op under TSO.
6765 instruct membar_storestore() %{
6766 match(MemBarStoreStore);
6767 ins_cost(0);
6769 size(0);
6770 format %{ "!MEMBAR-storestore (empty encoding)" %}
6771 ins_encode( );
6772 ins_pipe(empty);
6773 %}
6775 //----------Register Move Instructions-----------------------------------------
// Rounding is a no-op on SPARC: FP values are always kept in IEEE format,
// unlike x87 extended precision, so RoundDouble/RoundFloat match to nothing.
6776 instruct roundDouble_nop(regD dst) %{
6777 match(Set dst (RoundDouble dst));
6778 ins_cost(0);
6779 // SPARC results are already "rounded" (i.e., normal-format IEEE)
6780 ins_encode( );
6781 ins_pipe(empty);
6782 %}
6785 instruct roundFloat_nop(regF dst) %{
6786 match(Set dst (RoundFloat dst));
6787 ins_cost(0);
6788 // SPARC results are already "rounded" (i.e., normal-format IEEE)
6789 ins_encode( );
6790 ins_pipe(empty);
6791 %}
6794 // Cast Index to Pointer for unsafe natives
// Both casts are plain register moves (OR with G0 via the move form).
6795 instruct castX2P(iRegX src, iRegP dst) %{
6796 match(Set dst (CastX2P src));
6798 format %{ "MOV $src,$dst\t! IntX->Ptr" %}
6799 ins_encode( form3_g0_rs2_rd_move( src, dst ) );
6800 ins_pipe(ialu_reg);
6801 %}
6803 // Cast Pointer to Index for unsafe natives
6804 instruct castP2X(iRegP src, iRegX dst) %{
6805 match(Set dst (CastP2X src));
6807 format %{ "MOV $src,$dst\t! Ptr->IntX" %}
6808 ins_encode( form3_g0_rs2_rd_move( src, dst ) );
6809 ins_pipe(ialu_reg);
6810 %}
// Chain rules spilling/reloading FP registers to/from stack slots.
6812 instruct stfSSD(stackSlotD stkSlot, regD src) %{
6813 // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
6814 match(Set stkSlot src); // chain rule
6815 ins_cost(MEMORY_REF_COST);
6816 format %{ "STDF $src,$stkSlot\t!stk" %}
6817 opcode(Assembler::stdf_op3);
6818 ins_encode(simple_form3_mem_reg(stkSlot, src));
6819 ins_pipe(fstoreD_stk_reg);
6820 %}
6822 instruct ldfSSD(regD dst, stackSlotD stkSlot) %{
6823 // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
6824 match(Set dst stkSlot); // chain rule
6825 ins_cost(MEMORY_REF_COST);
6826 format %{ "LDDF $stkSlot,$dst\t!stk" %}
6827 opcode(Assembler::lddf_op3);
6828 ins_encode(simple_form3_mem_reg(stkSlot, dst));
6829 ins_pipe(floadD_stk);
6830 %}
6832 instruct stfSSF(stackSlotF stkSlot, regF src) %{
6833 // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
6834 match(Set stkSlot src); // chain rule
6835 ins_cost(MEMORY_REF_COST);
6836 format %{ "STF $src,$stkSlot\t!stk" %}
6837 opcode(Assembler::stf_op3);
6838 ins_encode(simple_form3_mem_reg(stkSlot, src));
6839 ins_pipe(fstoreF_stk_reg);
6840 %}
6842 //----------Conditional Move---------------------------------------------------
6843 // Conditional move
// Naming convention: cmov<dest-type><flags-type>_{reg,imm}.  Flags types:
// P = pointer cc, I = icc (signed), Iu = icc (unsigned compare op),
// F = fcc (float compare).  Register forms cost 150, immediate forms 140.
6844 instruct cmovIP_reg(cmpOpP cmp, flagsRegP pcc, iRegI dst, iRegI src) %{
6845 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
6846 ins_cost(150);
6847 format %{ "MOV$cmp $pcc,$src,$dst" %}
6848 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6849 ins_pipe(ialu_reg);
6850 %}
6852 instruct cmovIP_imm(cmpOpP cmp, flagsRegP pcc, iRegI dst, immI11 src) %{
6853 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
6854 ins_cost(140);
6855 format %{ "MOV$cmp $pcc,$src,$dst" %}
6856 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
6857 ins_pipe(ialu_imm);
6858 %}
6860 instruct cmovII_reg(cmpOp cmp, flagsReg icc, iRegI dst, iRegI src) %{
6861 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6862 ins_cost(150);
6863 size(4);
6864 format %{ "MOV$cmp $icc,$src,$dst" %}
6865 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6866 ins_pipe(ialu_reg);
6867 %}
6869 instruct cmovII_imm(cmpOp cmp, flagsReg icc, iRegI dst, immI11 src) %{
6870 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6871 ins_cost(140);
6872 size(4);
6873 format %{ "MOV$cmp $icc,$src,$dst" %}
6874 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
6875 ins_pipe(ialu_imm);
6876 %}
// Unsigned-compare variants: same encoding, different cmpOp operand class.
6878 instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
6879 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6880 ins_cost(150);
6881 size(4);
6882 format %{ "MOV$cmp $icc,$src,$dst" %}
6883 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6884 ins_pipe(ialu_reg);
6885 %}
6887 instruct cmovIIu_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{
6888 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6889 ins_cost(140);
6890 size(4);
6891 format %{ "MOV$cmp $icc,$src,$dst" %}
6892 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
6893 ins_pipe(ialu_imm);
6894 %}
6896 instruct cmovIF_reg(cmpOpF cmp, flagsRegF fcc, iRegI dst, iRegI src) %{
6897 match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src)));
6898 ins_cost(150);
6899 size(4);
6900 format %{ "MOV$cmp $fcc,$src,$dst" %}
6901 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
6902 ins_pipe(ialu_reg);
6903 %}
6905 instruct cmovIF_imm(cmpOpF cmp, flagsRegF fcc, iRegI dst, immI11 src) %{
6906 match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src)));
6907 ins_cost(140);
6908 size(4);
6909 format %{ "MOV$cmp $fcc,$src,$dst" %}
6910 ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) );
6911 ins_pipe(ialu_imm);
6912 %}
6914 // Conditional move for RegN. Only cmov(reg,reg).
6915 instruct cmovNP_reg(cmpOpP cmp, flagsRegP pcc, iRegN dst, iRegN src) %{
6916 match(Set dst (CMoveN (Binary cmp pcc) (Binary dst src)));
6917 ins_cost(150);
6918 format %{ "MOV$cmp $pcc,$src,$dst" %}
6919 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6920 ins_pipe(ialu_reg);
6921 %}
6923 // This instruction also works with CmpN so we don't need cmovNN_reg.
6924 instruct cmovNI_reg(cmpOp cmp, flagsReg icc, iRegN dst, iRegN src) %{
6925 match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
6926 ins_cost(150);
6927 size(4);
6928 format %{ "MOV$cmp $icc,$src,$dst" %}
6929 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6930 ins_pipe(ialu_reg);
6931 %}
6933 // This instruction also works with CmpN so we don't need cmovNN_reg.
6934 instruct cmovNIu_reg(cmpOpU cmp, flagsRegU icc, iRegN dst, iRegN src) %{
6935 match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
6936 ins_cost(150);
6937 size(4);
6938 format %{ "MOV$cmp $icc,$src,$dst" %}
6939 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6940 ins_pipe(ialu_reg);
6941 %}
6943 instruct cmovNF_reg(cmpOpF cmp, flagsRegF fcc, iRegN dst, iRegN src) %{
6944 match(Set dst (CMoveN (Binary cmp fcc) (Binary dst src)));
6945 ins_cost(150);
6946 size(4);
6947 format %{ "MOV$cmp $fcc,$src,$dst" %}
6948 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
6949 ins_pipe(ialu_reg);
6950 %}
6952 // Conditional move
6953 instruct cmovPP_reg(cmpOpP cmp, flagsRegP pcc, iRegP dst, iRegP src) %{
6954 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
6955 ins_cost(150);
6956 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %}
6957 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6958 ins_pipe(ialu_reg);
6959 %}
6961 instruct cmovPP_imm(cmpOpP cmp, flagsRegP pcc, iRegP dst, immP0 src) %{
6962 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
6963 ins_cost(140);
6964 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %}
6965 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
6966 ins_pipe(ialu_imm);
6967 %}
6969 // This instruction also works with CmpN so we don't need cmovPN_reg.
6970 instruct cmovPI_reg(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src) %{
6971 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
6972 ins_cost(150);
6974 size(4);
6975 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
6976 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6977 ins_pipe(ialu_reg);
6978 %}
6980 instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{
6981 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
6982 ins_cost(150);
6984 size(4);
6985 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
6986 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6987 ins_pipe(ialu_reg);
6988 %}
6990 instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{
6991 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
6992 ins_cost(140);
6994 size(4);
6995 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
6996 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
6997 ins_pipe(ialu_imm);
6998 %}
7000 instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{
7001 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
7002 ins_cost(140);
7004 size(4);
7005 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
7006 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
7007 ins_pipe(ialu_imm);
7008 %}
7010 instruct cmovPF_reg(cmpOpF cmp, flagsRegF fcc, iRegP dst, iRegP src) %{
7011 match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
7012 ins_cost(150);
7013 size(4);
7014 format %{ "MOV$cmp $fcc,$src,$dst" %}
7015 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
// NOTE(review): this is a reg-reg move but is routed through ialu_imm,
// unlike the sibling *_reg rules which use ialu_reg — verify intent.
7016 ins_pipe(ialu_imm);
7017 %}
7019 instruct cmovPF_imm(cmpOpF cmp, flagsRegF fcc, iRegP dst, immP0 src) %{
7020 match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
7021 ins_cost(140);
7022 size(4);
7023 format %{ "MOV$cmp $fcc,$src,$dst" %}
7024 ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) );
7025 ins_pipe(ialu_imm);
7026 %}
7028 // Conditional move
// FMOVcc conditional moves of FP registers.  The opcode() value is the opf
// field consumed by enc_cmovf_reg/enc_cmovff_reg: 0x1/0x101 for single,
// 0x2/0x102 for double.
7029 instruct cmovFP_reg(cmpOpP cmp, flagsRegP pcc, regF dst, regF src) %{
7030 match(Set dst (CMoveF (Binary cmp pcc) (Binary dst src)));
7031 ins_cost(150);
7032 opcode(0x101);
// NOTE(review): format text says FMOVD although the operands are regF
// (single precision) — the emitted opcode, not this string, is authoritative.
7033 format %{ "FMOVD$cmp $pcc,$src,$dst" %}
7034 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) );
7035 ins_pipe(int_conditional_float_move);
7036 %}
7038 instruct cmovFI_reg(cmpOp cmp, flagsReg icc, regF dst, regF src) %{
7039 match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
7040 ins_cost(150);
7042 size(4);
7043 format %{ "FMOVS$cmp $icc,$src,$dst" %}
7044 opcode(0x101);
7045 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
7046 ins_pipe(int_conditional_float_move);
7047 %}
7049 instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{
7050 match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
7051 ins_cost(150);
7053 size(4);
7054 format %{ "FMOVS$cmp $icc,$src,$dst" %}
7055 opcode(0x101);
7056 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
7057 ins_pipe(int_conditional_float_move);
7058 %}
7060 // Conditional move,
// Float-conditioned float move uses the fcc-based encoding class and opf 0x1.
7061 instruct cmovFF_reg(cmpOpF cmp, flagsRegF fcc, regF dst, regF src) %{
7062 match(Set dst (CMoveF (Binary cmp fcc) (Binary dst src)));
7063 ins_cost(150);
7064 size(4);
7065 format %{ "FMOVF$cmp $fcc,$src,$dst" %}
7066 opcode(0x1);
7067 ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) );
7068 ins_pipe(int_conditional_double_move);
7069 %}
7071 // Conditional move
7072 instruct cmovDP_reg(cmpOpP cmp, flagsRegP pcc, regD dst, regD src) %{
7073 match(Set dst (CMoveD (Binary cmp pcc) (Binary dst src)));
7074 ins_cost(150);
7075 size(4);
7076 opcode(0x102);
7077 format %{ "FMOVD$cmp $pcc,$src,$dst" %}
7078 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) );
7079 ins_pipe(int_conditional_double_move);
7080 %}
7082 instruct cmovDI_reg(cmpOp cmp, flagsReg icc, regD dst, regD src) %{
7083 match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
7084 ins_cost(150);
7086 size(4);
7087 format %{ "FMOVD$cmp $icc,$src,$dst" %}
7088 opcode(0x102);
7089 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
7090 ins_pipe(int_conditional_double_move);
7091 %}
7093 instruct cmovDIu_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src) %{
7094 match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
7095 ins_cost(150);
7097 size(4);
7098 format %{ "FMOVD$cmp $icc,$src,$dst" %}
7099 opcode(0x102);
7100 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
7101 ins_pipe(int_conditional_double_move);
7102 %}
7104 // Conditional move,
7105 instruct cmovDF_reg(cmpOpF cmp, flagsRegF fcc, regD dst, regD src) %{
7106 match(Set dst (CMoveD (Binary cmp fcc) (Binary dst src)));
7107 ins_cost(150);
7108 size(4);
7109 format %{ "FMOVD$cmp $fcc,$src,$dst" %}
7110 opcode(0x2);
7111 ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) );
7112 ins_pipe(int_conditional_double_move);
7113 %}
7115 // Conditional move
// Long conditional moves reuse the integer MOVcc encodings.
7116 instruct cmovLP_reg(cmpOpP cmp, flagsRegP pcc, iRegL dst, iRegL src) %{
7117 match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
7118 ins_cost(150);
7119 format %{ "MOV$cmp $pcc,$src,$dst\t! long" %}
7120 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
7121 ins_pipe(ialu_reg);
7122 %}
7124 instruct cmovLP_imm(cmpOpP cmp, flagsRegP pcc, iRegL dst, immI11 src) %{
7125 match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
7126 ins_cost(140);
7127 format %{ "MOV$cmp $pcc,$src,$dst\t! long" %}
7128 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
7129 ins_pipe(ialu_imm);
7130 %}
7132 instruct cmovLI_reg(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src) %{
7133 match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
7134 ins_cost(150);
7136 size(4);
7137 format %{ "MOV$cmp $icc,$src,$dst\t! long" %}
7138 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
7139 ins_pipe(ialu_reg);
7140 %}
7143 instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{
7144 match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
7145 ins_cost(150);
7147 size(4);
7148 format %{ "MOV$cmp $icc,$src,$dst\t! long" %}
7149 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
7150 ins_pipe(ialu_reg);
7151 %}
7154 instruct cmovLF_reg(cmpOpF cmp, flagsRegF fcc, iRegL dst, iRegL src) %{
7155 match(Set dst (CMoveL (Binary cmp fcc) (Binary dst src)));
7156 ins_cost(150);
7158 size(4);
7159 format %{ "MOV$cmp $fcc,$src,$dst\t! long" %}
7160 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
7161 ins_pipe(ialu_reg);
7162 %}
7166 //----------OS and Locking Instructions----------------------------------------
7168 // This name is KNOWN by the ADLC and cannot be changed.
7169 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
7170 // for this guy.
// Zero-size: the current thread pointer permanently lives in G2, so
// ThreadLocal "loads" are satisfied by register allocation alone.
7171 instruct tlsLoadP(g2RegP dst) %{
7172 match(Set dst (ThreadLocal));
7174 size(0);
7175 ins_cost(0);
7176 format %{ "# TLS is in G2" %}
7177 ins_encode( /*empty encoding*/ );
7178 ins_pipe(ialu_none);
7179 %}
// The cast nodes below exist only to carry type information for the
// optimizer; they emit no machine code.
7181 instruct checkCastPP( iRegP dst ) %{
7182 match(Set dst (CheckCastPP dst));
7184 size(0);
7185 format %{ "# checkcastPP of $dst" %}
7186 ins_encode( /*empty encoding*/ );
7187 ins_pipe(empty);
7188 %}
7191 instruct castPP( iRegP dst ) %{
7192 match(Set dst (CastPP dst));
7193 format %{ "# castPP of $dst" %}
7194 ins_encode( /*empty encoding*/ );
7195 ins_pipe(empty);
7196 %}
7198 instruct castII( iRegI dst ) %{
7199 match(Set dst (CastII dst));
7200 format %{ "# castII of $dst" %}
7201 ins_encode( /*empty encoding*/ );
7202 ins_cost(0);
7203 ins_pipe(empty);
7204 %}
7206 //----------Arithmetic Instructions--------------------------------------------
7207 // Addition Instructions
7208 // Register Addition
// int, pointer and long addition all map onto the single 64-bit ADD;
// the immediate forms accept a signed 13-bit constant (SPARC simm13 field).
7209 instruct addI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7210 match(Set dst (AddI src1 src2));
7212 size(4);
7213 format %{ "ADD $src1,$src2,$dst" %}
7214 ins_encode %{
7215 __ add($src1$$Register, $src2$$Register, $dst$$Register);
7216 %}
7217 ins_pipe(ialu_reg_reg);
7218 %}
7220 // Immediate Addition
7221 instruct addI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7222 match(Set dst (AddI src1 src2));
7224 size(4);
7225 format %{ "ADD $src1,$src2,$dst" %}
7226 opcode(Assembler::add_op3, Assembler::arith_op);
7227 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7228 ins_pipe(ialu_reg_imm);
7229 %}
7231 // Pointer Register Addition
7232 instruct addP_reg_reg(iRegP dst, iRegP src1, iRegX src2) %{
7233 match(Set dst (AddP src1 src2));
7235 size(4);
7236 format %{ "ADD $src1,$src2,$dst" %}
7237 opcode(Assembler::add_op3, Assembler::arith_op);
7238 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7239 ins_pipe(ialu_reg_reg);
7240 %}
7242 // Pointer Immediate Addition
7243 instruct addP_reg_imm13(iRegP dst, iRegP src1, immX13 src2) %{
7244 match(Set dst (AddP src1 src2));
7246 size(4);
7247 format %{ "ADD $src1,$src2,$dst" %}
7248 opcode(Assembler::add_op3, Assembler::arith_op);
7249 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7250 ins_pipe(ialu_reg_imm);
7251 %}
7253 // Long Addition
7254 instruct addL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7255 match(Set dst (AddL src1 src2));
7257 size(4);
7258 format %{ "ADD $src1,$src2,$dst\t! long" %}
7259 opcode(Assembler::add_op3, Assembler::arith_op);
7260 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7261 ins_pipe(ialu_reg_reg);
7262 %}
7264 instruct addL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
7265 match(Set dst (AddL src1 con));
7267 size(4);
7268 format %{ "ADD $src1,$con,$dst" %}
7269 opcode(Assembler::add_op3, Assembler::arith_op);
7270 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
7271 ins_pipe(ialu_reg_imm);
7272 %}
7274 //----------Conditional_store--------------------------------------------------
7275 // Conditional-store of the updated heap-top.
7276 // Used during allocation of the shared heap.
7277 // Sets flags (EQ) on success. Implemented with a CASA on Sparc.
7279 // LoadP-locked. Same as a regular pointer load when used with a compare-swap
7280 instruct loadPLocked(iRegP dst, memory mem) %{
7281 match(Set dst (LoadPLocked mem));
7282 ins_cost(MEMORY_REF_COST);
// Pointer load width tracks the data model, as for the plain ptr loads.
7284 #ifndef _LP64
7285 size(4);
7286 format %{ "LDUW $mem,$dst\t! ptr" %}
7287 opcode(Assembler::lduw_op3, 0, REGP_OP);
7288 #else
7289 format %{ "LDX $mem,$dst\t! ptr" %}
7290 opcode(Assembler::ldx_op3, 0, REGP_OP);
7291 #endif
7292 ins_encode( form3_mem_reg( mem, dst ) );
7293 ins_pipe(iload_mem);
7294 %}
// The conditional stores pin newval to G3 and KILL it, since the CAS
// overwrites its source register with the memory contents.
7296 instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{
7297 match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval)));
7298 effect( KILL newval );
7299 format %{ "CASA [$heap_top_ptr],$oldval,R_G3\t! If $oldval==[$heap_top_ptr] Then store R_G3 into [$heap_top_ptr], set R_G3=[$heap_top_ptr] in any case\n\t"
7300 "CMP R_G3,$oldval\t\t! See if we made progress" %}
7301 ins_encode( enc_cas(heap_top_ptr,oldval,newval) );
7302 ins_pipe( long_memory_op );
7303 %}
7305 // Conditional-store of an int value.
7306 instruct storeIConditional( iRegP mem_ptr, iRegI oldval, g3RegI newval, flagsReg icc ) %{
7307 match(Set icc (StoreIConditional mem_ptr (Binary oldval newval)));
7308 effect( KILL newval );
7309 format %{ "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
7310 "CMP $oldval,$newval\t\t! See if we made progress" %}
7311 ins_encode( enc_cas(mem_ptr,oldval,newval) );
7312 ins_pipe( long_memory_op );
7313 %}
7315 // Conditional-store of a long value.
7316 instruct storeLConditional( iRegP mem_ptr, iRegL oldval, g3RegL newval, flagsRegL xcc ) %{
7317 match(Set xcc (StoreLConditional mem_ptr (Binary oldval newval)));
7318 effect( KILL newval );
7319 format %{ "CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
7320 "CMP $oldval,$newval\t\t! See if we made progress" %}
7321 ins_encode( enc_cas(mem_ptr,oldval,newval) );
7322 ins_pipe( long_memory_op );
7323 %}
7325 // No flag versions for CompareAndSwap{P,I,L} because matcher can't match them
// Boolean-result CAS family: each copies newval into scratch O7 (KILL tmp1),
// performs CASA/CASXA, then converts the flags into a 0/1 int result.
7327 instruct compareAndSwapL_bool(iRegP mem_ptr, iRegL oldval, iRegL newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
// 64-bit CAS requires hardware cx8-equivalent support.
7328 predicate(VM_Version::supports_cx8());
7329 match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
7330 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7331 format %{
7332 "MOV $newval,O7\n\t"
7333 "CASXA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7334 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7335 "MOV 1,$res\n\t"
7336 "MOVne xcc,R_G0,$res"
7337 %}
7338 ins_encode( enc_casx(mem_ptr, oldval, newval),
7339 enc_lflags_ne_to_boolean(res) );
7340 ins_pipe( long_memory_op );
7341 %}
7344 instruct compareAndSwapI_bool(iRegP mem_ptr, iRegI oldval, iRegI newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7345 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
7346 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7347 format %{
7348 "MOV $newval,O7\n\t"
7349 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7350 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7351 "MOV 1,$res\n\t"
7352 "MOVne icc,R_G0,$res"
7353 %}
7354 ins_encode( enc_casi(mem_ptr, oldval, newval),
7355 enc_iflags_ne_to_boolean(res) );
7356 ins_pipe( long_memory_op );
7357 %}
// Pointer CAS: word-size dependent — 64-bit path needs cx8 and uses CASXA,
// 32-bit path falls back to the 32-bit CASA encoding.
7359 instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7360 #ifdef _LP64
7361 predicate(VM_Version::supports_cx8());
7362 #endif
7363 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
7364 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7365 format %{
7366 "MOV $newval,O7\n\t"
7367 "CASA_PTR [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7368 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7369 "MOV 1,$res\n\t"
7370 "MOVne xcc,R_G0,$res"
7371 %}
7372 #ifdef _LP64
7373 ins_encode( enc_casx(mem_ptr, oldval, newval),
7374 enc_lflags_ne_to_boolean(res) );
7375 #else
7376 ins_encode( enc_casi(mem_ptr, oldval, newval),
7377 enc_iflags_ne_to_boolean(res) );
7378 #endif
7379 ins_pipe( long_memory_op );
7380 %}
// Compressed-oop CAS is always 32-bit wide, hence the int encoding.
7382 instruct compareAndSwapN_bool(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7383 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
7384 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7385 format %{
7386 "MOV $newval,O7\n\t"
7387 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7388 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7389 "MOV 1,$res\n\t"
7390 "MOVne icc,R_G0,$res"
7391 %}
7392 ins_encode( enc_casi(mem_ptr, oldval, newval),
7393 enc_iflags_ne_to_boolean(res) );
7394 ins_pipe( long_memory_op );
7395 %}
// Atomic exchange (GetAndSet*) via the SWAP instruction.  SWAP is a 32-bit
// operation, which is why xchgP exists only for the 32-bit build; compressed
// oops (xchgN) are 32 bits wide on either data model.
7397 instruct xchgI( memory mem, iRegI newval) %{
7398 match(Set newval (GetAndSetI mem newval));
7399 format %{ "SWAP [$mem],$newval" %}
7400 size(4);
7401 ins_encode %{
7402 __ swap($mem$$Address, $newval$$Register);
7403 %}
7404 ins_pipe( long_memory_op );
7405 %}
7407 #ifndef _LP64
7408 instruct xchgP( memory mem, iRegP newval) %{
7409 match(Set newval (GetAndSetP mem newval));
7410 format %{ "SWAP [$mem],$newval" %}
7411 size(4);
7412 ins_encode %{
7413 __ swap($mem$$Address, $newval$$Register);
7414 %}
7415 ins_pipe( long_memory_op );
7416 %}
7417 #endif
7419 instruct xchgN( memory mem, iRegN newval) %{
7420 match(Set newval (GetAndSetN mem newval));
7421 format %{ "SWAP [$mem],$newval" %}
7422 size(4);
7423 ins_encode %{
7424 __ swap($mem$$Address, $newval$$Register);
7425 %}
7426 ins_pipe( long_memory_op );
7427 %}
7429 //---------------------
7430 // Subtraction Instructions
7431 // Register Subtraction
7432 instruct subI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7433 match(Set dst (SubI src1 src2));
7435 size(4);
7436 format %{ "SUB $src1,$src2,$dst" %}
7437 opcode(Assembler::sub_op3, Assembler::arith_op);
7438 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7439 ins_pipe(ialu_reg_reg);
7440 %}
7442 // Immediate Subtraction
7443 instruct subI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7444 match(Set dst (SubI src1 src2));
7446 size(4);
7447 format %{ "SUB $src1,$src2,$dst" %}
7448 opcode(Assembler::sub_op3, Assembler::arith_op);
7449 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7450 ins_pipe(ialu_reg_imm);
7451 %}
// Negation is subtraction from the hardwired-zero register G0.
7453 instruct subI_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{
7454 match(Set dst (SubI zero src2));
7456 size(4);
7457 format %{ "NEG $src2,$dst" %}
7458 opcode(Assembler::sub_op3, Assembler::arith_op);
7459 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) );
7460 ins_pipe(ialu_zero_reg);
7461 %}
7463 // Long subtraction
7464 instruct subL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7465 match(Set dst (SubL src1 src2));
7467 size(4);
7468 format %{ "SUB $src1,$src2,$dst\t! long" %}
7469 opcode(Assembler::sub_op3, Assembler::arith_op);
7470 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7471 ins_pipe(ialu_reg_reg);
7472 %}
7474 // Immediate Subtraction
7475 instruct subL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
7476 match(Set dst (SubL src1 con));
7478 size(4);
7479 format %{ "SUB $src1,$con,$dst\t! long" %}
7480 opcode(Assembler::sub_op3, Assembler::arith_op);
7481 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
7482 ins_pipe(ialu_reg_imm);
7483 %}
7485 // Long negation
7486 instruct negL_reg_reg(iRegL dst, immL0 zero, iRegL src2) %{
7487 match(Set dst (SubL zero src2));
7489 size(4);
7490 format %{ "NEG $src2,$dst\t! long" %}
7491 opcode(Assembler::sub_op3, Assembler::arith_op);
7492 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) );
7493 ins_pipe(ialu_zero_reg);
7494 %}
7496 // Multiplication Instructions
7497 // Integer Multiplication
7498 // Register Multiplication
// Both int and long multiply use the 64-bit MULX; for MulI the ideal graph
// only consumes the low 32 bits of the result.
7499 instruct mulI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7500 match(Set dst (MulI src1 src2));
7502 size(4);
7503 format %{ "MULX $src1,$src2,$dst" %}
7504 opcode(Assembler::mulx_op3, Assembler::arith_op);
7505 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7506 ins_pipe(imul_reg_reg);
7507 %}
7509 // Immediate Multiplication
7510 instruct mulI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7511 match(Set dst (MulI src1 src2));
7513 size(4);
7514 format %{ "MULX $src1,$src2,$dst" %}
7515 opcode(Assembler::mulx_op3, Assembler::arith_op);
7516 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7517 ins_pipe(imul_reg_imm);
7518 %}
7520 instruct mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7521 match(Set dst (MulL src1 src2));
7522 ins_cost(DEFAULT_COST * 5);
7523 size(4);
7524 format %{ "MULX $src1,$src2,$dst\t! long" %}
7525 opcode(Assembler::mulx_op3, Assembler::arith_op);
7526 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7527 ins_pipe(mulL_reg_reg);
7528 %}
7530 // Immediate Multiplication
7531 instruct mulL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{
7532 match(Set dst (MulL src1 src2));
7533 ins_cost(DEFAULT_COST * 5);
7534 size(4);
7535 format %{ "MULX $src1,$src2,$dst" %}
7536 opcode(Assembler::mulx_op3, Assembler::arith_op);
7537 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7538 ins_pipe(mulL_reg_imm);
7539 %}
7541 // Integer Division
7542 // Register Division
// 32-bit division is done with the 64-bit SDIVX after sign-extending the
// operands (SRA reg,0) — iRegIsafe operands tolerate that in-place mutation.
7543 instruct divI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2) %{
7544 match(Set dst (DivI src1 src2));
7545 ins_cost((2+71)*DEFAULT_COST);
7547 format %{ "SRA $src2,0,$src2\n\t"
7548 "SRA $src1,0,$src1\n\t"
7549 "SDIVX $src1,$src2,$dst" %}
7550 ins_encode( idiv_reg( src1, src2, dst ) );
7551 ins_pipe(sdiv_reg_reg);
7552 %}
7554 // Immediate Division
7555 instruct divI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2) %{
7556 match(Set dst (DivI src1 src2));
7557 ins_cost((2+71)*DEFAULT_COST);
7559 format %{ "SRA $src1,0,$src1\n\t"
7560 "SDIVX $src1,$src2,$dst" %}
7561 ins_encode( idiv_imm( src1, src2, dst ) );
7562 ins_pipe(sdiv_reg_imm);
7563 %}
7565 //----------Div-By-10-Expansion------------------------------------------------
7566 // Extract hi bits of a 32x32->64 bit multiply.
7567 // Expand rule only, not matched
// Helper: dst = high 32 bits of (src1 * src2). Used by the reciprocal
// multiplication below to avoid a real divide.
7568 instruct mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2 ) %{
7569 effect( DEF dst, USE src1, USE src2 );
7570 format %{ "MULX $src1,$src2,$dst\t! Used in div-by-10\n\t"
7571 "SRLX $dst,#32,$dst\t\t! Extract only hi word of result" %}
7572 ins_encode( enc_mul_hi(dst,src1,src2));
7573 ins_pipe(sdiv_reg_reg);
7574 %}
7576 // Magic constant, reciprocal of 10
// size(8): the 32-bit constant needs a SETHI/OR pair.
7577 instruct loadConI_x66666667(iRegIsafe dst) %{
7578 effect( DEF dst );
7580 size(8);
7581 format %{ "SET 0x66666667,$dst\t! Used in div-by-10" %}
7582 ins_encode( Set32(0x66666667, dst) );
7583 ins_pipe(ialu_hi_lo_reg);
7584 %}
7586 // Register Shift Right Arithmetic Long by 32-63
// Helper: dst = sign bit of src replicated (SRA by 31).
7587 instruct sra_31( iRegI dst, iRegI src ) %{
7588 effect( DEF dst, USE src );
7589 format %{ "SRA $src,31,$dst\t! Used in div-by-10" %}
7590 ins_encode( form3_rs1_rd_copysign_hi(src,dst) );
7591 ins_pipe(ialu_reg_reg);
7592 %}
7594 // Arithmetic shift right by constant 2 (fixed shift used by div-by-10)
7595 instruct sra_reg_2( iRegI dst, iRegI src ) %{
7596 effect( DEF dst, USE src );
7597 format %{ "SRA $src,2,$dst\t! Used in div-by-10" %}
7598 opcode(Assembler::sra_op3, Assembler::arith_op);
7599 ins_encode( form3_rs1_simm13_rd( src, 0x2, dst ) );
7600 ins_pipe(ialu_reg_imm);
7601 %}
7603 // Integer DIV with 10
// Strength-reduce x/10 to a multiply by the 2^34/10 reciprocal constant:
// hi32(x * 0x66666667) >> 2, corrected by the sign bit for negative x.
7604 instruct divI_10( iRegI dst, iRegIsafe src, immI10 div ) %{
7605 match(Set dst (DivI src div));
7606 ins_cost((6+6)*DEFAULT_COST);
7607 expand %{
7608 iRegIsafe tmp1; // Killed temps;
7609 iRegIsafe tmp2; // Killed temps;
7610 iRegI tmp3; // Killed temps;
7611 iRegI tmp4; // Killed temps;
7612 loadConI_x66666667( tmp1 ); // SET 0x66666667 -> tmp1
7613 mul_hi( tmp2, src, tmp1 ); // MUL hibits(src * tmp1) -> tmp2
7614 sra_31( tmp3, src ); // SRA src,31 -> tmp3
7615 sra_reg_2( tmp4, tmp2 ); // SRA tmp2,2 -> tmp4
7616 subI_reg_reg( dst,tmp4,tmp3); // SUB tmp4 - tmp3 -> dst
7617 %}
7618 %}
7620 // Register Long Division
// 64-bit divide maps directly to a single SDIVX; no sign extension needed.
7621 instruct divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7622 match(Set dst (DivL src1 src2));
7623 ins_cost(DEFAULT_COST*71);
7624 size(4);
7625 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7626 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7627 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7628 ins_pipe(divL_reg_reg);
7629 %}
7631 // Register Long Division
// Immediate (simm13) divisor variant.
7632 instruct divL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{
7633 match(Set dst (DivL src1 src2));
7634 ins_cost(DEFAULT_COST*71);
7635 size(4);
7636 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7637 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7638 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7639 ins_pipe(divL_reg_imm);
7640 %}
7642 // Integer Remainder
7643 // Register Remainder
// SREM is a pseudo-op: the irem_reg encoding emits a divide/multiply/subtract
// sequence, using O7 as a scratch register (hence KILL temp) and clobbering
// the condition codes (KILL ccr).
7644 instruct modI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2, o7RegP temp, flagsReg ccr ) %{
7645 match(Set dst (ModI src1 src2));
7646 effect( KILL ccr, KILL temp);
7648 format %{ "SREM $src1,$src2,$dst" %}
7649 ins_encode( irem_reg(src1, src2, dst, temp) );
7650 ins_pipe(sdiv_reg_reg);
7651 %}
7653 // Immediate Remainder
7654 instruct modI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2, o7RegP temp, flagsReg ccr ) %{
7655 match(Set dst (ModI src1 src2));
7656 effect( KILL ccr, KILL temp);
7658 format %{ "SREM $src1,$src2,$dst" %}
7659 ins_encode( irem_imm(src1, src2, dst, temp) );
7660 ins_pipe(sdiv_reg_imm);
7661 %}
7663 // Long division helper (expand-only; used by the modL rules below)
7664 instruct divL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
7665 effect(DEF dst, USE src1, USE src2);
7666 size(4);
7667 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7668 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7669 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7670 ins_pipe(divL_reg_reg);
7671 %}
7673 // Register Long Division
// Immediate-divisor helper, expand-only.
7674 instruct divL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{
7675 effect(DEF dst, USE src1, USE src2);
7676 size(4);
7677 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7678 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7679 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7680 ins_pipe(divL_reg_imm);
7681 %}
// Long multiply helper, expand-only.
7683 instruct mulL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
7684 effect(DEF dst, USE src1, USE src2);
7685 size(4);
7686 format %{ "MULX $src1,$src2,$dst\t! long" %}
7687 opcode(Assembler::mulx_op3, Assembler::arith_op);
7688 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7689 ins_pipe(mulL_reg_reg);
7690 %}
7692 // Immediate Multiplication
7693 instruct mulL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{
7694 effect(DEF dst, USE src1, USE src2);
7695 size(4);
7696 format %{ "MULX $src1,$src2,$dst" %}
7697 opcode(Assembler::mulx_op3, Assembler::arith_op);
7698 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7699 ins_pipe(mulL_reg_imm);
7700 %}
// Long subtract helpers, expand-only (two copies so the two modL expansions
// below reference distinct instruct names).
7702 instruct subL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
7703 effect(DEF dst, USE src1, USE src2);
7704 size(4);
7705 format %{ "SUB $src1,$src2,$dst\t! long" %}
7706 opcode(Assembler::sub_op3, Assembler::arith_op);
7707 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7708 ins_pipe(ialu_reg_reg);
7709 %}
7711 instruct subL_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{
7712 effect(DEF dst, USE src1, USE src2);
7713 size(4);
7714 format %{ "SUB $src1,$src2,$dst\t! long" %}
7715 opcode(Assembler::sub_op3, Assembler::arith_op);
7716 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7717 ins_pipe(ialu_reg_reg);
7718 %}
7720 // Register Long Remainder
// ModL has no hardware instruction: compute src1 - (src1/src2)*src2.
7721 instruct modL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7722 match(Set dst (ModL src1 src2));
7723 ins_cost(DEFAULT_COST*(71 + 6 + 1));
7724 expand %{
7725 iRegL tmp1;
7726 iRegL tmp2;
7727 divL_reg_reg_1(tmp1, src1, src2);
7728 mulL_reg_reg_1(tmp2, tmp1, src2);
7729 subL_reg_reg_1(dst, src1, tmp2);
7730 %}
7731 %}
7733 // Register Long Remainder
7734 instruct modL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{
7735 match(Set dst (ModL src1 src2));
7736 ins_cost(DEFAULT_COST*(71 + 6 + 1));
7737 expand %{
7738 iRegL tmp1;
7739 iRegL tmp2;
7740 divL_reg_imm13_1(tmp1, src1, src2);
7741 mulL_reg_imm13_1(tmp2, tmp1, src2);
7742 subL_reg_reg_2 (dst, src1, tmp2);
7743 %}
7744 %}
7746 // Integer Shift Instructions
7747 // Register Shift Left
// 32-bit shifts use SLL/SRL/SRA; 64-bit shifts use the X forms (SLLX etc.),
// which consume a 6-bit shift count instead of 5 bits.
7748 instruct shlI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7749 match(Set dst (LShiftI src1 src2));
7751 size(4);
7752 format %{ "SLL $src1,$src2,$dst" %}
7753 opcode(Assembler::sll_op3, Assembler::arith_op);
7754 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7755 ins_pipe(ialu_reg_reg);
7756 %}
7758 // Register Shift Left Immediate
7759 instruct shlI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
7760 match(Set dst (LShiftI src1 src2));
7762 size(4);
7763 format %{ "SLL $src1,$src2,$dst" %}
7764 opcode(Assembler::sll_op3, Assembler::arith_op);
7765 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7766 ins_pipe(ialu_reg_imm);
7767 %}
7769 // Register Shift Left
7770 instruct shlL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
7771 match(Set dst (LShiftL src1 src2));
7773 size(4);
7774 format %{ "SLLX $src1,$src2,$dst" %}
7775 opcode(Assembler::sllx_op3, Assembler::arith_op);
7776 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) );
7777 ins_pipe(ialu_reg_reg);
7778 %}
7780 // Register Shift Left Immediate
7781 instruct shlL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
7782 match(Set dst (LShiftL src1 src2));
7784 size(4);
7785 format %{ "SLLX $src1,$src2,$dst" %}
7786 opcode(Assembler::sllx_op3, Assembler::arith_op);
7787 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7788 ins_pipe(ialu_reg_imm);
7789 %}
7791 // Register Arithmetic Shift Right
7792 instruct sarI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7793 match(Set dst (RShiftI src1 src2));
7794 size(4);
7795 format %{ "SRA $src1,$src2,$dst" %}
7796 opcode(Assembler::sra_op3, Assembler::arith_op);
7797 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7798 ins_pipe(ialu_reg_reg);
7799 %}
7801 // Register Arithmetic Shift Right Immediate
7802 instruct sarI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
7803 match(Set dst (RShiftI src1 src2));
7805 size(4);
7806 format %{ "SRA $src1,$src2,$dst" %}
7807 opcode(Assembler::sra_op3, Assembler::arith_op);
7808 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7809 ins_pipe(ialu_reg_imm);
7810 %}
7812 // Register Shift Right Arithmetic Long
7813 instruct sarL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
7814 match(Set dst (RShiftL src1 src2));
7816 size(4);
7817 format %{ "SRAX $src1,$src2,$dst" %}
7818 opcode(Assembler::srax_op3, Assembler::arith_op);
7819 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) );
7820 ins_pipe(ialu_reg_reg);
7821 %}
7823 // Register Arithmetic Shift Right Long Immediate
7824 instruct sarL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
7825 match(Set dst (RShiftL src1 src2));
7827 size(4);
7828 format %{ "SRAX $src1,$src2,$dst" %}
7829 opcode(Assembler::srax_op3, Assembler::arith_op);
7830 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7831 ins_pipe(ialu_reg_imm);
7832 %}
7834 // Register Shift Right
// Logical (unsigned) right shifts.
7835 instruct shrI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7836 match(Set dst (URShiftI src1 src2));
7838 size(4);
7839 format %{ "SRL $src1,$src2,$dst" %}
7840 opcode(Assembler::srl_op3, Assembler::arith_op);
7841 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7842 ins_pipe(ialu_reg_reg);
7843 %}
7845 // Register Shift Right Immediate
7846 instruct shrI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
7847 match(Set dst (URShiftI src1 src2));
7849 size(4);
7850 format %{ "SRL $src1,$src2,$dst" %}
7851 opcode(Assembler::srl_op3, Assembler::arith_op);
7852 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7853 ins_pipe(ialu_reg_imm);
7854 %}
7856 // Register Shift Right
7857 instruct shrL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
7858 match(Set dst (URShiftL src1 src2));
7860 size(4);
7861 format %{ "SRLX $src1,$src2,$dst" %}
7862 opcode(Assembler::srlx_op3, Assembler::arith_op);
7863 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) );
7864 ins_pipe(ialu_reg_reg);
7865 %}
7867 // Register Shift Right Immediate
7868 instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
7869 match(Set dst (URShiftL src1 src2));
7871 size(4);
7872 format %{ "SRLX $src1,$src2,$dst" %}
7873 opcode(Assembler::srlx_op3, Assembler::arith_op);
7874 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7875 ins_pipe(ialu_reg_imm);
7876 %}
7878 // Register Shift Right Immediate with a CastP2X
// Matching the CastP2X directly avoids a separate pointer-to-integer move.
7879 #ifdef _LP64
7880 instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{
7881 match(Set dst (URShiftL (CastP2X src1) src2));
7882 size(4);
7883 format %{ "SRLX $src1,$src2,$dst\t! Cast ptr $src1 to long and shift" %}
7884 opcode(Assembler::srlx_op3, Assembler::arith_op);
7885 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7886 ins_pipe(ialu_reg_imm);
7887 %}
7888 #else
7889 instruct shrP_reg_imm5(iRegI dst, iRegP src1, immU5 src2) %{
7890 match(Set dst (URShiftI (CastP2X src1) src2));
7891 size(4);
7892 format %{ "SRL $src1,$src2,$dst\t! Cast ptr $src1 to int and shift" %}
7893 opcode(Assembler::srl_op3, Assembler::arith_op);
7894 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7895 ins_pipe(ialu_reg_imm);
7896 %}
7897 #endif
7900 //----------Floating Point Arithmetic Instructions-----------------------------
7902 // Add float single precision
// All basic FP ops are single 4-byte FPop1 instructions; the third opcode
// field selects the specific operation (fadds/faddd/fsubs/...).
7903 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
7904 match(Set dst (AddF src1 src2));
7906 size(4);
7907 format %{ "FADDS $src1,$src2,$dst" %}
7908 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fadds_opf);
7909 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7910 ins_pipe(faddF_reg_reg);
7911 %}
7913 // Add float double precision
7914 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
7915 match(Set dst (AddD src1 src2));
7917 size(4);
7918 format %{ "FADDD $src1,$src2,$dst" %}
7919 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf);
7920 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7921 ins_pipe(faddD_reg_reg);
7922 %}
7924 // Sub float single precision
7925 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
7926 match(Set dst (SubF src1 src2));
7928 size(4);
7929 format %{ "FSUBS $src1,$src2,$dst" %}
7930 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubs_opf);
7931 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7932 ins_pipe(faddF_reg_reg);
7933 %}
7935 // Sub float double precision
7936 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
7937 match(Set dst (SubD src1 src2));
7939 size(4);
7940 format %{ "FSUBD $src1,$src2,$dst" %}
7941 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf);
7942 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7943 ins_pipe(faddD_reg_reg);
7944 %}
7946 // Mul float single precision
7947 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
7948 match(Set dst (MulF src1 src2));
7950 size(4);
7951 format %{ "FMULS $src1,$src2,$dst" %}
7952 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuls_opf);
7953 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7954 ins_pipe(fmulF_reg_reg);
7955 %}
7957 // Mul float double precision
7958 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
7959 match(Set dst (MulD src1 src2));
7961 size(4);
7962 format %{ "FMULD $src1,$src2,$dst" %}
7963 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf);
7964 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7965 ins_pipe(fmulD_reg_reg);
7966 %}
7968 // Div float single precision
7969 instruct divF_reg_reg(regF dst, regF src1, regF src2) %{
7970 match(Set dst (DivF src1 src2));
7972 size(4);
7973 format %{ "FDIVS $src1,$src2,$dst" %}
7974 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivs_opf);
7975 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7976 ins_pipe(fdivF_reg_reg);
7977 %}
7979 // Div float double precision
7980 instruct divD_reg_reg(regD dst, regD src1, regD src2) %{
7981 match(Set dst (DivD src1 src2));
7983 size(4);
7984 format %{ "FDIVD $src1,$src2,$dst" %}
7985 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivd_opf);
7986 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7987 ins_pipe(fdivD_reg_reg);
7988 %}
7990 // Absolute float double precision
// No size() here: the fabsd encoding may expand to more than one
// instruction depending on the assembler helper — TODO confirm.
7991 instruct absD_reg(regD dst, regD src) %{
7992 match(Set dst (AbsD src));
7994 format %{ "FABSd $src,$dst" %}
7995 ins_encode(fabsd(dst, src));
7996 ins_pipe(faddD_reg);
7997 %}
7999 // Absolute float single precision
8000 instruct absF_reg(regF dst, regF src) %{
8001 match(Set dst (AbsF src));
8003 format %{ "FABSs $src,$dst" %}
8004 ins_encode(fabss(dst, src));
8005 ins_pipe(faddF_reg);
8006 %}
// Negate float single precision.
8008 instruct negF_reg(regF dst, regF src) %{
8009 match(Set dst (NegF src));
8011 size(4);
8012 format %{ "FNEGs $src,$dst" %}
8013 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fnegs_opf);
8014 ins_encode(form3_opf_rs2F_rdF(src, dst));
8015 ins_pipe(faddF_reg);
8016 %}
// Negate float double precision.
8018 instruct negD_reg(regD dst, regD src) %{
8019 match(Set dst (NegD src));
8021 format %{ "FNEGd $src,$dst" %}
8022 ins_encode(fnegd(dst, src));
8023 ins_pipe(faddD_reg);
8024 %}
8026 // Sqrt float single precision
// Matches the double-rounding-safe pattern (float)sqrt((double)f) so it can
// use the single-precision FSQRTS directly.
8027 instruct sqrtF_reg_reg(regF dst, regF src) %{
8028 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
8030 size(4);
8031 format %{ "FSQRTS $src,$dst" %}
8032 ins_encode(fsqrts(dst, src));
8033 ins_pipe(fdivF_reg_reg);
8034 %}
8036 // Sqrt float double precision
8037 instruct sqrtD_reg_reg(regD dst, regD src) %{
8038 match(Set dst (SqrtD src));
8040 size(4);
8041 format %{ "FSQRTD $src,$dst" %}
8042 ins_encode(fsqrtd(dst, src));
8043 ins_pipe(fdivD_reg_reg);
8044 %}
8046 //----------Logical Instructions-----------------------------------------------
8047 // And Instructions
8048 // Register And
8049 instruct andI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
8050 match(Set dst (AndI src1 src2));
8052 size(4);
8053 format %{ "AND $src1,$src2,$dst" %}
8054 opcode(Assembler::and_op3, Assembler::arith_op);
8055 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8056 ins_pipe(ialu_reg_reg);
8057 %}
8059 // Immediate And
8060 instruct andI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
8061 match(Set dst (AndI src1 src2));
8063 size(4);
8064 format %{ "AND $src1,$src2,$dst" %}
8065 opcode(Assembler::and_op3, Assembler::arith_op);
8066 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
8067 ins_pipe(ialu_reg_imm);
8068 %}
8070 // Register And Long
// Same AND opcode as the 32-bit form: SPARC logical ops are 64-bit wide.
8071 instruct andL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
8072 match(Set dst (AndL src1 src2));
8074 ins_cost(DEFAULT_COST);
8075 size(4);
8076 format %{ "AND $src1,$src2,$dst\t! long" %}
8077 opcode(Assembler::and_op3, Assembler::arith_op);
8078 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8079 ins_pipe(ialu_reg_reg);
8080 %}
8082 instruct andL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
8083 match(Set dst (AndL src1 con));
8085 ins_cost(DEFAULT_COST);
8086 size(4);
8087 format %{ "AND $src1,$con,$dst\t! long" %}
8088 opcode(Assembler::and_op3, Assembler::arith_op);
8089 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
8090 ins_pipe(ialu_reg_imm);
8091 %}
8093 // Or Instructions
8094 // Register Or
8095 instruct orI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
8096 match(Set dst (OrI src1 src2));
8098 size(4);
8099 format %{ "OR $src1,$src2,$dst" %}
8100 opcode(Assembler::or_op3, Assembler::arith_op);
8101 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8102 ins_pipe(ialu_reg_reg);
8103 %}
8105 // Immediate Or
8106 instruct orI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
8107 match(Set dst (OrI src1 src2));
8109 size(4);
8110 format %{ "OR $src1,$src2,$dst" %}
8111 opcode(Assembler::or_op3, Assembler::arith_op);
8112 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
8113 ins_pipe(ialu_reg_imm);
8114 %}
8116 // Register Or Long
8117 instruct orL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
8118 match(Set dst (OrL src1 src2));
8120 ins_cost(DEFAULT_COST);
8121 size(4);
8122 format %{ "OR $src1,$src2,$dst\t! long" %}
8123 opcode(Assembler::or_op3, Assembler::arith_op);
8124 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8125 ins_pipe(ialu_reg_reg);
8126 %}
// Long OR with a 13-bit signed immediate.
// Fix: the block declared ins_cost twice (DEFAULT_COST*2 immediately followed
// by DEFAULT_COST). Keep the single DEFAULT_COST declaration, consistent with
// the sibling andL_reg_imm13 and xorL_reg_imm13 rules — this is one 4-byte
// ALU instruction.
8128 instruct orL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
8129 match(Set dst (OrL src1 con));
8132 ins_cost(DEFAULT_COST);
8133 size(4);
8134 format %{ "OR $src1,$con,$dst\t! long" %}
8135 opcode(Assembler::or_op3, Assembler::arith_op);
8136 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
8137 ins_pipe(ialu_reg_imm);
8138 %}
8140 #ifndef _LP64
8142 // Use sp_ptr_RegP to match G2 (TLS register) without spilling.
// OR with a pointer cast to integer; 32-bit variant.
8143 instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{
8144 match(Set dst (OrI src1 (CastP2X src2)));
8146 size(4);
8147 format %{ "OR $src1,$src2,$dst" %}
8148 opcode(Assembler::or_op3, Assembler::arith_op);
8149 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8150 ins_pipe(ialu_reg_reg);
8151 %}
8153 #else
// 64-bit variant: pointers are longs under _LP64.
8155 instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{
8156 match(Set dst (OrL src1 (CastP2X src2)));
8158 ins_cost(DEFAULT_COST);
8159 size(4);
8160 format %{ "OR $src1,$src2,$dst\t! long" %}
8161 opcode(Assembler::or_op3, Assembler::arith_op);
8162 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8163 ins_pipe(ialu_reg_reg);
8164 %}
8166 #endif
8168 // Xor Instructions
8169 // Register Xor
8170 instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
8171 match(Set dst (XorI src1 src2));
8173 size(4);
8174 format %{ "XOR $src1,$src2,$dst" %}
8175 opcode(Assembler::xor_op3, Assembler::arith_op);
8176 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8177 ins_pipe(ialu_reg_reg);
8178 %}
8180 // Immediate Xor
8181 instruct xorI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
8182 match(Set dst (XorI src1 src2));
8184 size(4);
8185 format %{ "XOR $src1,$src2,$dst" %}
8186 opcode(Assembler::xor_op3, Assembler::arith_op);
8187 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
8188 ins_pipe(ialu_reg_imm);
8189 %}
8191 // Register Xor Long
8192 instruct xorL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
8193 match(Set dst (XorL src1 src2));
8195 ins_cost(DEFAULT_COST);
8196 size(4);
8197 format %{ "XOR $src1,$src2,$dst\t! long" %}
8198 opcode(Assembler::xor_op3, Assembler::arith_op);
8199 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8200 ins_pipe(ialu_reg_reg);
8201 %}
8203 instruct xorL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
8204 match(Set dst (XorL src1 con));
8206 ins_cost(DEFAULT_COST);
8207 size(4);
8208 format %{ "XOR $src1,$con,$dst\t! long" %}
8209 opcode(Assembler::xor_op3, Assembler::arith_op);
8210 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
8211 ins_pipe(ialu_reg_imm);
8212 %}
8214 //----------Convert to Boolean-------------------------------------------------
8215 // Nice hack for 32-bit tests but doesn't work for
8216 // 64-bit pointers.
// Conv2B: dst = (src != 0) ? 1 : 0 via CMP + add-with-carry; clobbers ccr.
8217 instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{
8218 match(Set dst (Conv2B src));
8219 effect( KILL ccr );
8220 ins_cost(DEFAULT_COST*2);
8221 format %{ "CMP R_G0,$src\n\t"
8222 "ADDX R_G0,0,$dst" %}
8223 ins_encode( enc_to_bool( src, dst ) );
8224 ins_pipe(ialu_reg_ialu);
8225 %}
8227 #ifndef _LP64
8228 instruct convP2B( iRegI dst, iRegP src, flagsReg ccr ) %{
8229 match(Set dst (Conv2B src));
8230 effect( KILL ccr );
8231 ins_cost(DEFAULT_COST*2);
8232 format %{ "CMP R_G0,$src\n\t"
8233 "ADDX R_G0,0,$dst" %}
8234 ins_encode( enc_to_bool( src, dst ) );
8235 ins_pipe(ialu_reg_ialu);
8236 %}
8237 #else
// 64-bit pointer Conv2B uses MOVRNZ (conditional move on register != 0)
// instead of the carry trick, so no condition codes are clobbered.
8238 instruct convP2B( iRegI dst, iRegP src ) %{
8239 match(Set dst (Conv2B src));
8240 ins_cost(DEFAULT_COST*2);
8241 format %{ "MOV $src,$dst\n\t"
8242 "MOVRNZ $src,1,$dst" %}
8243 ins_encode( form3_g0_rs2_rd_move( src, dst ), enc_convP2B( dst, src ) );
8244 ins_pipe(ialu_clr_and_mover);
8245 %}
8246 #endif
// CmpLTMask against zero: -1 if src < 0 else 0, via arithmetic shift by 31.
// ccr is listed only to match register-allocation expectations of the
// general CmpLTMask pattern; the SRA itself does not set flags.
8248 instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{
8249 match(Set dst (CmpLTMask src zero));
8250 effect(KILL ccr);
8251 size(4);
8252 format %{ "SRA $src,#31,$dst\t# cmpLTMask0" %}
8253 ins_encode %{
8254 __ sra($src$$Register, 31, $dst$$Register);
8255 %}
8256 ins_pipe(ialu_reg_imm);
8257 %}
// General CmpLTMask: dst = (p < q) ? -1 : 0 using a compare and an
// annulled branch over the -1 move.
8259 instruct cmpLTMask_reg_reg( iRegI dst, iRegI p, iRegI q, flagsReg ccr ) %{
8260 match(Set dst (CmpLTMask p q));
8261 effect( KILL ccr );
8262 ins_cost(DEFAULT_COST*4);
8263 format %{ "CMP $p,$q\n\t"
8264 "MOV #0,$dst\n\t"
8265 "BLT,a .+8\n\t"
8266 "MOV #-1,$dst" %}
8267 ins_encode( enc_ltmask(p,q,dst) );
8268 ins_pipe(ialu_reg_reg_ialu);
8269 %}
// Fused pattern: p = (p-q) + ((p-q) < 0 ? y : 0), using a conditional move
// instead of a branch.
8271 instruct cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp, flagsReg ccr ) %{
8272 match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)));
8273 effect(KILL ccr, TEMP tmp);
8274 ins_cost(DEFAULT_COST*3);
8276 format %{ "SUBcc $p,$q,$p\t! p' = p-q\n\t"
8277 "ADD $p,$y,$tmp\t! g3=p-q+y\n\t"
8278 "MOVlt $tmp,$p\t! p' < 0 ? p'+y : p'" %}
8279 ins_encode(enc_cadd_cmpLTMask(p, q, y, tmp));
8280 ins_pipe(cadd_cmpltmask);
8281 %}
// Fused pattern: p = (p < q) ? y : 0, via compare + two conditional moves.
8283 instruct and_cmpLTMask(iRegI p, iRegI q, iRegI y, flagsReg ccr) %{
8284 match(Set p (AndI (CmpLTMask p q) y));
8285 effect(KILL ccr);
8286 ins_cost(DEFAULT_COST*3);
8288 format %{ "CMP $p,$q\n\t"
8289 "MOV $y,$p\n\t"
8290 "MOVge G0,$p" %}
8291 ins_encode %{
8292 __ cmp($p$$Register, $q$$Register);
8293 __ mov($y$$Register, $p$$Register);
8294 __ movcc(Assembler::greaterEqual, false, Assembler::icc, G0, $p$$Register);
8295 %}
8296 ins_pipe(ialu_reg_reg_ialu);
8297 %}
8299 //-----------------------------------------------------------------
8300 // Direct raw moves between float and general registers using VIS3.
// These require UseVIS >= 3 (MOVSTOUW/MOVWTOS/MOVDTOX/MOVXTOD are VIS3
// instructions); without VIS3 the stack-slot variants below are used.
8302 // ins_pipe(faddF_reg);
8303 instruct MoveF2I_reg_reg(iRegI dst, regF src) %{
8304 predicate(UseVIS >= 3);
8305 match(Set dst (MoveF2I src));
8307 format %{ "MOVSTOUW $src,$dst\t! MoveF2I" %}
8308 ins_encode %{
8309 __ movstouw($src$$FloatRegister, $dst$$Register);
8310 %}
8311 ins_pipe(ialu_reg_reg);
8312 %}
8314 instruct MoveI2F_reg_reg(regF dst, iRegI src) %{
8315 predicate(UseVIS >= 3);
8316 match(Set dst (MoveI2F src));
8318 format %{ "MOVWTOS $src,$dst\t! MoveI2F" %}
8319 ins_encode %{
8320 __ movwtos($src$$Register, $dst$$FloatRegister);
8321 %}
8322 ins_pipe(ialu_reg_reg);
8323 %}
8325 instruct MoveD2L_reg_reg(iRegL dst, regD src) %{
8326 predicate(UseVIS >= 3);
8327 match(Set dst (MoveD2L src));
8329 format %{ "MOVDTOX $src,$dst\t! MoveD2L" %}
8330 ins_encode %{
8331 __ movdtox(as_DoubleFloatRegister($src$$reg), $dst$$Register);
8332 %}
8333 ins_pipe(ialu_reg_reg);
8334 %}
8336 instruct MoveL2D_reg_reg(regD dst, iRegL src) %{
8337 predicate(UseVIS >= 3);
8338 match(Set dst (MoveL2D src));
8340 format %{ "MOVXTOD $src,$dst\t! MoveL2D" %}
8341 ins_encode %{
8342 __ movxtod($src$$Register, as_DoubleFloatRegister($dst$$reg));
8343 %}
8344 ins_pipe(ialu_reg_reg);
8345 %}
8348 // Raw moves between float and general registers using stack.
// Fallback when VIS3 is unavailable: the value is matched against a stack
// slot and reloaded into the other register file with a single load/store.
8350 instruct MoveF2I_stack_reg(iRegI dst, stackSlotF src) %{
8351 match(Set dst (MoveF2I src));
8352 effect(DEF dst, USE src);
8353 ins_cost(MEMORY_REF_COST);
8355 size(4);
8356 format %{ "LDUW $src,$dst\t! MoveF2I" %}
8357 opcode(Assembler::lduw_op3);
8358 ins_encode(simple_form3_mem_reg( src, dst ) );
8359 ins_pipe(iload_mem);
8360 %}
8362 instruct MoveI2F_stack_reg(regF dst, stackSlotI src) %{
8363 match(Set dst (MoveI2F src));
8364 effect(DEF dst, USE src);
8365 ins_cost(MEMORY_REF_COST);
8367 size(4);
8368 format %{ "LDF $src,$dst\t! MoveI2F" %}
8369 opcode(Assembler::ldf_op3);
8370 ins_encode(simple_form3_mem_reg(src, dst));
8371 ins_pipe(floadF_stk);
8372 %}
8374 instruct MoveD2L_stack_reg(iRegL dst, stackSlotD src) %{
8375 match(Set dst (MoveD2L src));
8376 effect(DEF dst, USE src);
8377 ins_cost(MEMORY_REF_COST);
8379 size(4);
8380 format %{ "LDX $src,$dst\t! MoveD2L" %}
8381 opcode(Assembler::ldx_op3);
8382 ins_encode(simple_form3_mem_reg( src, dst ) );
8383 ins_pipe(iload_mem);
8384 %}
8386 instruct MoveL2D_stack_reg(regD dst, stackSlotL src) %{
8387 match(Set dst (MoveL2D src));
8388 effect(DEF dst, USE src);
8389 ins_cost(MEMORY_REF_COST);
8391 size(4);
8392 format %{ "LDDF $src,$dst\t! MoveL2D" %}
8393 opcode(Assembler::lddf_op3);
8394 ins_encode(simple_form3_mem_reg(src, dst));
8395 ins_pipe(floadD_stk);
8396 %}
// Store direction: register source, stack-slot destination.
8398 instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{
8399 match(Set dst (MoveF2I src));
8400 effect(DEF dst, USE src);
8401 ins_cost(MEMORY_REF_COST);
8403 size(4);
8404 format %{ "STF $src,$dst\t! MoveF2I" %}
8405 opcode(Assembler::stf_op3);
8406 ins_encode(simple_form3_mem_reg(dst, src));
8407 ins_pipe(fstoreF_stk_reg);
8408 %}
8410 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{
8411 match(Set dst (MoveI2F src));
8412 effect(DEF dst, USE src);
8413 ins_cost(MEMORY_REF_COST);
8415 size(4);
8416 format %{ "STW $src,$dst\t! MoveI2F" %}
8417 opcode(Assembler::stw_op3);
8418 ins_encode(simple_form3_mem_reg( dst, src ) );
8419 ins_pipe(istore_mem_reg);
8420 %}
8422 instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{
8423 match(Set dst (MoveD2L src));
8424 effect(DEF dst, USE src);
8425 ins_cost(MEMORY_REF_COST);
8427 size(4);
8428 format %{ "STDF $src,$dst\t! MoveD2L" %}
8429 opcode(Assembler::stdf_op3);
8430 ins_encode(simple_form3_mem_reg(dst, src));
8431 ins_pipe(fstoreD_stk_reg);
8432 %}
8434 instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{
8435 match(Set dst (MoveL2D src));
8436 effect(DEF dst, USE src);
8437 ins_cost(MEMORY_REF_COST);
8439 size(4);
8440 format %{ "STX $src,$dst\t! MoveL2D" %}
8441 opcode(Assembler::stx_op3);
8442 ins_encode(simple_form3_mem_reg( dst, src ) );
8443 ins_pipe(istore_mem_reg);
8444 %}
8447 //----------Arithmetic Conversion Instructions---------------------------------
8448 // The conversions operations are all Alpha sorted. Please keep it that way!
8450 instruct convD2F_reg(regF dst, regD src) %{
8451 match(Set dst (ConvD2F src));
8452 size(4);
8453 format %{ "FDTOS $src,$dst" %}
8454 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf);
8455 ins_encode(form3_opf_rs2D_rdF(src, dst));
8456 ins_pipe(fcvtD2F);
8457 %}
8460 // Convert a double to an int in a float register.
8461 // If the double is a NAN, stuff a zero in instead.
8462 instruct convD2I_helper(regF dst, regD src, flagsRegF0 fcc0) %{
8463 effect(DEF dst, USE src, KILL fcc0);
8464 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t"
8465 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
8466 "FDTOI $src,$dst\t! convert in delay slot\n\t"
8467 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t"
8468 "FSUBs $dst,$dst,$dst\t! cleared only if nan\n"
8469 "skip:" %}
8470 ins_encode(form_d2i_helper(src,dst));
8471 ins_pipe(fcvtD2I);
8472 %}
8474 instruct convD2I_stk(stackSlotI dst, regD src) %{
8475 match(Set dst (ConvD2I src));
8476 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
8477 expand %{
8478 regF tmp;
8479 convD2I_helper(tmp, src);
8480 regF_to_stkI(dst, tmp);
8481 %}
8482 %}
8484 instruct convD2I_reg(iRegI dst, regD src) %{
8485 predicate(UseVIS >= 3);
8486 match(Set dst (ConvD2I src));
8487 ins_cost(DEFAULT_COST*2 + BRANCH_COST);
8488 expand %{
8489 regF tmp;
8490 convD2I_helper(tmp, src);
8491 MoveF2I_reg_reg(dst, tmp);
8492 %}
8493 %}
8496 // Convert a double to a long in a double register.
8497 // If the double is a NAN, stuff a zero in instead.
8498 instruct convD2L_helper(regD dst, regD src, flagsRegF0 fcc0) %{
8499 effect(DEF dst, USE src, KILL fcc0);
8500 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t"
8501 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
8502 "FDTOX $src,$dst\t! convert in delay slot\n\t"
8503 "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t"
8504 "FSUBd $dst,$dst,$dst\t! cleared only if nan\n"
8505 "skip:" %}
8506 ins_encode(form_d2l_helper(src,dst));
8507 ins_pipe(fcvtD2L);
8508 %}
8510 instruct convD2L_stk(stackSlotL dst, regD src) %{
8511 match(Set dst (ConvD2L src));
8512 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
8513 expand %{
8514 regD tmp;
8515 convD2L_helper(tmp, src);
8516 regD_to_stkL(dst, tmp);
8517 %}
8518 %}
8520 instruct convD2L_reg(iRegL dst, regD src) %{
8521 predicate(UseVIS >= 3);
8522 match(Set dst (ConvD2L src));
8523 ins_cost(DEFAULT_COST*2 + BRANCH_COST);
8524 expand %{
8525 regD tmp;
8526 convD2L_helper(tmp, src);
8527 MoveD2L_reg_reg(dst, tmp);
8528 %}
8529 %}
8532 instruct convF2D_reg(regD dst, regF src) %{
8533 match(Set dst (ConvF2D src));
8534 format %{ "FSTOD $src,$dst" %}
8535 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fstod_opf);
8536 ins_encode(form3_opf_rs2F_rdD(src, dst));
8537 ins_pipe(fcvtF2D);
8538 %}
8541 // Convert a float to an int in a float register.
8542 // If the float is a NAN, stuff a zero in instead.
8543 instruct convF2I_helper(regF dst, regF src, flagsRegF0 fcc0) %{
8544 effect(DEF dst, USE src, KILL fcc0);
8545 format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t"
8546 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
8547 "FSTOI $src,$dst\t! convert in delay slot\n\t"
8548 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t"
8549 "FSUBs $dst,$dst,$dst\t! cleared only if nan\n"
8550 "skip:" %}
8551 ins_encode(form_f2i_helper(src,dst));
8552 ins_pipe(fcvtF2I);
8553 %}
8555 instruct convF2I_stk(stackSlotI dst, regF src) %{
8556 match(Set dst (ConvF2I src));
8557 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
8558 expand %{
8559 regF tmp;
8560 convF2I_helper(tmp, src);
8561 regF_to_stkI(dst, tmp);
8562 %}
8563 %}
8565 instruct convF2I_reg(iRegI dst, regF src) %{
8566 predicate(UseVIS >= 3);
8567 match(Set dst (ConvF2I src));
8568 ins_cost(DEFAULT_COST*2 + BRANCH_COST);
8569 expand %{
8570 regF tmp;
8571 convF2I_helper(tmp, src);
8572 MoveF2I_reg_reg(dst, tmp);
8573 %}
8574 %}
8577 // Convert a float to a long in a float register.
8578 // If the float is a NAN, stuff a zero in instead.
// Same NaN->0 guard pattern as convF2I_helper, using the 64-bit FSTOX and a
// double-precision subtract to zero the result on the NaN path.
8579 instruct convF2L_helper(regD dst, regF src, flagsRegF0 fcc0) %{
8580 effect(DEF dst, USE src, KILL fcc0);
8581 format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t"
8582 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
8583 "FSTOX $src,$dst\t! convert in delay slot\n\t"
8584 "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t"
8585 "FSUBd $dst,$dst,$dst\t! cleared only if nan\n"
8586 "skip:" %}
8587 ins_encode(form_f2l_helper(src,dst));
8588 ins_pipe(fcvtF2L);
8589 %}
// Float->long through a stack slot (no VIS3).
8591 instruct convF2L_stk(stackSlotL dst, regF src) %{
8592 match(Set dst (ConvF2L src));
8593 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
8594 expand %{
8595 regD tmp;
8596 convF2L_helper(tmp, src);
8597 regD_to_stkL(dst, tmp);
8598 %}
8599 %}
// Float->long kept in registers via the VIS3 MoveD2L register move.
8601 instruct convF2L_reg(iRegL dst, regF src) %{
8602 predicate(UseVIS >= 3);
8603 match(Set dst (ConvF2L src));
8604 ins_cost(DEFAULT_COST*2 + BRANCH_COST);
8605 expand %{
8606 regD tmp;
8607 convF2L_helper(tmp, src);
8608 MoveD2L_reg_reg(dst, tmp);
8609 %}
8610 %}
// FITOD: int bit-pattern already sitting in a float register -> double.
// Shared tail of the int->double variants below.
8613 instruct convI2D_helper(regD dst, regF tmp) %{
8614 effect(USE tmp, DEF dst);
8615 format %{ "FITOD $tmp,$dst" %}
8616 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
8617 ins_encode(form3_opf_rs2F_rdD(tmp, dst));
8618 ins_pipe(fcvtI2D);
8619 %}
// Int->double where the int arrives through a stack slot.
8621 instruct convI2D_stk(stackSlotI src, regD dst) %{
8622 match(Set dst (ConvI2D src));
8623 ins_cost(DEFAULT_COST + MEMORY_REF_COST);
8624 expand %{
8625 regF tmp;
8626 stkI_to_regF(tmp, src);
8627 convI2D_helper(dst, tmp);
8628 %}
8629 %}
// Int->double fully in registers (VIS3 MoveI2F avoids the stack round-trip).
8631 instruct convI2D_reg(regD_low dst, iRegI src) %{
8632 predicate(UseVIS >= 3);
8633 match(Set dst (ConvI2D src));
8634 expand %{
8635 regF tmp;
8636 MoveI2F_reg_reg(tmp, src);
8637 convI2D_helper(dst, tmp);
8638 %}
8639 %}
// Int->double fused with the memory load: LDF straight into the FP register,
// then convert in place.
8641 instruct convI2D_mem(regD_low dst, memory mem) %{
8642 match(Set dst (ConvI2D (LoadI mem)));
8643 ins_cost(DEFAULT_COST + MEMORY_REF_COST);
8644 size(8);
8645 format %{ "LDF $mem,$dst\n\t"
8646 "FITOD $dst,$dst" %}
8647 opcode(Assembler::ldf_op3, Assembler::fitod_opf);
8648 ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
8649 ins_pipe(floadF_mem);
8650 %}
// FITOS: int bit-pattern in a float register -> single-precision float.
// Shared tail of the int->float variants below.
8653 instruct convI2F_helper(regF dst, regF tmp) %{
8654 effect(DEF dst, USE tmp);
8655 format %{ "FITOS $tmp,$dst" %}
8656 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitos_opf);
8657 ins_encode(form3_opf_rs2F_rdF(tmp, dst));
8658 ins_pipe(fcvtI2F);
8659 %}
// Int->float where the int arrives through a stack slot.
8661 instruct convI2F_stk(regF dst, stackSlotI src) %{
8662 match(Set dst (ConvI2F src));
8663 ins_cost(DEFAULT_COST + MEMORY_REF_COST);
8664 expand %{
8665 regF tmp;
8666 stkI_to_regF(tmp,src);
8667 convI2F_helper(dst, tmp);
8668 %}
8669 %}
// Int->float fully in registers (VIS3).
8671 instruct convI2F_reg(regF dst, iRegI src) %{
8672 predicate(UseVIS >= 3);
8673 match(Set dst (ConvI2F src));
8674 ins_cost(DEFAULT_COST);
8675 expand %{
8676 regF tmp;
8677 MoveI2F_reg_reg(tmp, src);
8678 convI2F_helper(dst, tmp);
8679 %}
8680 %}
// Int->float fused with the memory load (LDF + FITOS in place).
8682 instruct convI2F_mem( regF dst, memory mem ) %{
8683 match(Set dst (ConvI2F (LoadI mem)));
8684 ins_cost(DEFAULT_COST + MEMORY_REF_COST);
8685 size(8);
8686 format %{ "LDF $mem,$dst\n\t"
8687 "FITOS $dst,$dst" %}
8688 opcode(Assembler::ldf_op3, Assembler::fitos_opf);
8689 ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
8690 ins_pipe(floadF_mem);
8691 %}
// Sign-extend int to long: SRA by zero replicates bit 31 through the upper
// 32 bits of the 64-bit register.
8694 instruct convI2L_reg(iRegL dst, iRegI src) %{
8695 match(Set dst (ConvI2L src));
8696 size(4);
8697 format %{ "SRA $src,0,$dst\t! int->long" %}
8698 opcode(Assembler::sra_op3, Assembler::arith_op);
8699 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
8700 ins_pipe(ialu_reg_reg);
8701 %}
8703 // Zero-extend convert int to long
// Matches (AndL (ConvI2L src) 0xFFFFFFFF): SRL by zero clears the upper
// 32 bits, implementing the mask for free.
8704 instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{
8705 match(Set dst (AndL (ConvI2L src) mask) );
8706 size(4);
8707 format %{ "SRL $src,0,$dst\t! zero-extend int to long" %}
8708 opcode(Assembler::srl_op3, Assembler::arith_op);
8709 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
8710 ins_pipe(ialu_reg_reg);
8711 %}
8713 // Zero-extend long
// (AndL src 0xFFFFFFFF) on a long source, same SRL-by-zero trick.
8714 instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{
8715 match(Set dst (AndL src mask) );
8716 size(4);
8717 format %{ "SRL $src,0,$dst\t! zero-extend long" %}
8718 opcode(Assembler::srl_op3, Assembler::arith_op);
8719 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
8720 ins_pipe(ialu_reg_reg);
8721 %}
8724 //-----------
8725 // Long to Double conversion using V8 opcodes.
8726 // Still useful because cheetah traps and becomes
8727 // amazingly slow for some common numbers.
// The following effect-only instructs are building blocks for the slow-path
// long->double expansion (convL2D_reg_slow_fxtof below); none of them match
// ideal nodes on their own.
8729 // Magic constant, 0x43300000
// High word of the double 2^52, used to splice integer bits into a double.
8730 instruct loadConI_x43300000(iRegI dst) %{
8731 effect(DEF dst);
8732 size(4);
8733 format %{ "SETHI HI(0x43300000),$dst\t! 2^52" %}
8734 ins_encode(SetHi22(0x43300000, dst));
8735 ins_pipe(ialu_none);
8736 %}
8738 // Magic constant, 0x41f00000
// High word of the double 2^32, used to scale the high half of the long.
8739 instruct loadConI_x41f00000(iRegI dst) %{
8740 effect(DEF dst);
8741 size(4);
8742 format %{ "SETHI HI(0x41f00000),$dst\t! 2^32" %}
8743 ins_encode(SetHi22(0x41f00000, dst));
8744 ins_pipe(ialu_none);
8745 %}
8747 // Construct a double from two float halves
// Two FMOVS: high single from src1, low single from src2. All operands must
// live in the lower half of the double register file (regD_low) so the
// single-precision moves can address the halves.
8748 instruct regDHi_regDLo_to_regD(regD_low dst, regD_low src1, regD_low src2) %{
8749 effect(DEF dst, USE src1, USE src2);
8750 size(8);
8751 format %{ "FMOVS $src1.hi,$dst.hi\n\t"
8752 "FMOVS $src2.lo,$dst.lo" %}
8753 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmovs_opf);
8754 ins_encode(form3_opf_rs2D_hi_rdD_hi(src1, dst), form3_opf_rs2D_lo_rdD_lo(src2, dst));
8755 ins_pipe(faddD_reg_reg);
8756 %}
8758 // Convert integer in high half of a double register (in the lower half of
8759 // the double register file) to double
8760 instruct convI2D_regDHi_regD(regD dst, regD_low src) %{
8761 effect(DEF dst, USE src);
8762 size(4);
8763 format %{ "FITOD $src,$dst" %}
8764 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
8765 ins_encode(form3_opf_rs2D_rdD(src, dst));
8766 ins_pipe(fcvtLHi2D);
8767 %}
8769 // Add float double precision
8770 instruct addD_regD_regD(regD dst, regD src1, regD src2) %{
8771 effect(DEF dst, USE src1, USE src2);
8772 size(4);
8773 format %{ "FADDD $src1,$src2,$dst" %}
8774 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf);
8775 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
8776 ins_pipe(faddD_reg_reg);
8777 %}
8779 // Sub float double precision
8780 instruct subD_regD_regD(regD dst, regD src1, regD src2) %{
8781 effect(DEF dst, USE src1, USE src2);
8782 size(4);
8783 format %{ "FSUBD $src1,$src2,$dst" %}
8784 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf);
8785 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
8786 ins_pipe(faddD_reg_reg);
8787 %}
8789 // Mul float double precision
8790 instruct mulD_regD_regD(regD dst, regD src1, regD src2) %{
8791 effect(DEF dst, USE src1, USE src2);
8792 size(4);
8793 format %{ "FMULD $src1,$src2,$dst" %}
8794 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf);
8795 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
8796 ins_pipe(fmulD_reg_reg);
8797 %}
// Slow-path long->double without FXTOD: convert the two 32-bit halves
// separately and recombine as
//   dst = (hi_as_double * 2^32) + (splice(2^52, lo) - 2^52)
// using the magic constants 0x43300000 (2^52) and 0x41f00000 (2^32) built
// by the helper instructs above. Fallback when fast fxtof is unavailable.
8799 instruct convL2D_reg_slow_fxtof(regD dst, stackSlotL src) %{
8800 match(Set dst (ConvL2D src));
8801 ins_cost(DEFAULT_COST*8 + MEMORY_REF_COST*6);
8803 expand %{
8804 regD_low tmpsrc;
8805 iRegI ix43300000;
8806 iRegI ix41f00000;
8807 stackSlotL lx43300000;
8808 stackSlotL lx41f00000;
8809 regD_low dx43300000;
8810 regD dx41f00000;
8811 regD tmp1;
8812 regD_low tmp2;
8813 regD tmp3;
8814 regD tmp4;
8816 stkL_to_regD(tmpsrc, src);
8818 loadConI_x43300000(ix43300000);
8819 loadConI_x41f00000(ix41f00000);
8820 regI_to_stkLHi(lx43300000, ix43300000);
8821 regI_to_stkLHi(lx41f00000, ix41f00000);
8822 stkL_to_regD(dx43300000, lx43300000);
8823 stkL_to_regD(dx41f00000, lx41f00000);
8825 convI2D_regDHi_regD(tmp1, tmpsrc);
8826 regDHi_regDLo_to_regD(tmp2, dx43300000, tmpsrc);
8827 subD_regD_regD(tmp3, tmp2, dx43300000);
8828 mulD_regD_regD(tmp4, tmp1, dx41f00000);
8829 addD_regD_regD(dst, tmp3, tmp4);
8830 %}
8831 %}
8833 // Long to Double conversion using fast fxtof
// FXTOD: 64-bit integer pattern in an FP register -> double.
8834 instruct convL2D_helper(regD dst, regD tmp) %{
8835 effect(DEF dst, USE tmp);
8836 size(4);
8837 format %{ "FXTOD $tmp,$dst" %}
8838 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtod_opf);
8839 ins_encode(form3_opf_rs2D_rdD(tmp, dst));
8840 ins_pipe(fcvtL2D);
8841 %}
// Long->double from a stack slot; gated on fast fxtof (otherwise the slow
// expansion above is selected on cost).
8843 instruct convL2D_stk_fast_fxtof(regD dst, stackSlotL src) %{
8844 predicate(VM_Version::has_fast_fxtof());
8845 match(Set dst (ConvL2D src));
8846 ins_cost(DEFAULT_COST + 3 * MEMORY_REF_COST);
8847 expand %{
8848 regD tmp;
8849 stkL_to_regD(tmp, src);
8850 convL2D_helper(dst, tmp);
8851 %}
8852 %}
// Long->double fully in registers via the VIS3 MoveL2D register move.
8854 instruct convL2D_reg(regD dst, iRegL src) %{
8855 predicate(UseVIS >= 3);
8856 match(Set dst (ConvL2D src));
8857 expand %{
8858 regD tmp;
8859 MoveL2D_reg_reg(tmp, src);
8860 convL2D_helper(dst, tmp);
8861 %}
8862 %}
8864 // Long to Float conversion using fast fxtof
// FXTOS: 64-bit integer pattern in an FP register -> single float.
8865 instruct convL2F_helper(regF dst, regD tmp) %{
8866 effect(DEF dst, USE tmp);
8867 size(4);
8868 format %{ "FXTOS $tmp,$dst" %}
8869 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtos_opf);
8870 ins_encode(form3_opf_rs2D_rdF(tmp, dst));
8871 ins_pipe(fcvtL2F);
8872 %}
// Long->float from a stack slot.
// NOTE(review): unlike convL2D_stk_fast_fxtof this has no
// VM_Version::has_fast_fxtof() predicate — presumably there is no V8-style
// slow alternative for ConvL2F; confirm against the operand/instruct set.
8874 instruct convL2F_stk_fast_fxtof(regF dst, stackSlotL src) %{
8875 match(Set dst (ConvL2F src));
8876 ins_cost(DEFAULT_COST + MEMORY_REF_COST);
8877 expand %{
8878 regD tmp;
8879 stkL_to_regD(tmp, src);
8880 convL2F_helper(dst, tmp);
8881 %}
8882 %}
// Long->float fully in registers (VIS3 MoveL2D).
8884 instruct convL2F_reg(regF dst, iRegL src) %{
8885 predicate(UseVIS >= 3);
8886 match(Set dst (ConvL2F src));
8887 ins_cost(DEFAULT_COST);
8888 expand %{
8889 regD tmp;
8890 MoveL2D_reg_reg(tmp, src);
8891 convL2F_helper(dst, tmp);
8892 %}
8893 %}
8895 //-----------
// Truncate long to int. On 32-bit a register move of the low word suffices;
// on 64-bit an SRA by zero re-establishes the canonical sign-extended form.
8897 instruct convL2I_reg(iRegI dst, iRegL src) %{
8898 match(Set dst (ConvL2I src));
8899 #ifndef _LP64
8900 format %{ "MOV $src.lo,$dst\t! long->int" %}
8901 ins_encode( form3_g0_rs2_rd_move_lo2( src, dst ) );
8902 ins_pipe(ialu_move_reg_I_to_L);
8903 #else
8904 size(4);
8905 format %{ "SRA $src,R_G0,$dst\t! long->int" %}
8906 ins_encode( form3_rs1_rd_signextend_lo1( src, dst ) );
8907 ins_pipe(ialu_reg);
8908 #endif
8909 %}
8911 // Register Shift Right Immediate
// Fuses (ConvL2I (RShiftL src cnt)) for shift counts 32..63 into one SRAX.
8912 instruct shrL_reg_imm6_L2I(iRegI dst, iRegL src, immI_32_63 cnt) %{
8913 match(Set dst (ConvL2I (RShiftL src cnt)));
8915 size(4);
8916 format %{ "SRAX $src,$cnt,$dst" %}
8917 opcode(Assembler::srax_op3, Assembler::arith_op);
8918 ins_encode( form3_sd_rs1_imm6_rd( src, cnt, dst ) );
8919 ins_pipe(ialu_reg_imm);
8920 %}
8922 //----------Control Flow Instructions------------------------------------------
8923 // Compare Instructions
8924 // Compare Integers
// All compares below are SUBcc/ANDcc with destination %g0: they only set
// the condition codes, never a register result.
8925 instruct compI_iReg(flagsReg icc, iRegI op1, iRegI op2) %{
8926 match(Set icc (CmpI op1 op2));
8927 effect( DEF icc, USE op1, USE op2 );
8929 size(4);
8930 format %{ "CMP $op1,$op2" %}
8931 opcode(Assembler::subcc_op3, Assembler::arith_op);
8932 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
8933 ins_pipe(ialu_cconly_reg_reg);
8934 %}
// Unsigned compare: same SUBcc encoding, distinguished only by the
// flagsRegU operand so unsigned branch conditions are used.
8936 instruct compU_iReg(flagsRegU icc, iRegI op1, iRegI op2) %{
8937 match(Set icc (CmpU op1 op2));
8939 size(4);
8940 format %{ "CMP $op1,$op2\t! unsigned" %}
8941 opcode(Assembler::subcc_op3, Assembler::arith_op);
8942 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
8943 ins_pipe(ialu_cconly_reg_reg);
8944 %}
// Compare against a 13-bit signed immediate.
8946 instruct compI_iReg_imm13(flagsReg icc, iRegI op1, immI13 op2) %{
8947 match(Set icc (CmpI op1 op2));
8948 effect( DEF icc, USE op1 );
8950 size(4);
8951 format %{ "CMP $op1,$op2" %}
8952 opcode(Assembler::subcc_op3, Assembler::arith_op);
8953 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
8954 ins_pipe(ialu_cconly_reg_imm);
8955 %}
// Test-under-mask: (CmpI (AndI op1 op2) 0) folds into a single ANDcc.
8957 instruct testI_reg_reg( flagsReg icc, iRegI op1, iRegI op2, immI0 zero ) %{
8958 match(Set icc (CmpI (AndI op1 op2) zero));
8960 size(4);
8961 format %{ "BTST $op2,$op1" %}
8962 opcode(Assembler::andcc_op3, Assembler::arith_op);
8963 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
8964 ins_pipe(ialu_cconly_reg_reg_zero);
8965 %}
// Test-under-mask with an immediate mask.
8967 instruct testI_reg_imm( flagsReg icc, iRegI op1, immI13 op2, immI0 zero ) %{
8968 match(Set icc (CmpI (AndI op1 op2) zero));
8970 size(4);
8971 format %{ "BTST $op2,$op1" %}
8972 opcode(Assembler::andcc_op3, Assembler::arith_op);
8973 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
8974 ins_pipe(ialu_cconly_reg_imm_zero);
8975 %}
// Long compare, sets the 64-bit condition codes (xcc).
8977 instruct compL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2 ) %{
8978 match(Set xcc (CmpL op1 op2));
8979 effect( DEF xcc, USE op1, USE op2 );
8981 size(4);
8982 format %{ "CMP $op1,$op2\t\t! long" %}
8983 opcode(Assembler::subcc_op3, Assembler::arith_op);
8984 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
8985 ins_pipe(ialu_cconly_reg_reg);
8986 %}
// Long compare against a 13-bit signed immediate.
8988 instruct compL_reg_con(flagsRegL xcc, iRegL op1, immL13 con) %{
8989 match(Set xcc (CmpL op1 con));
8990 effect( DEF xcc, USE op1, USE con );
8992 size(4);
8993 format %{ "CMP $op1,$con\t\t! long" %}
8994 opcode(Assembler::subcc_op3, Assembler::arith_op);
8995 ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) );
8996 ins_pipe(ialu_cconly_reg_reg);
8997 %}
// Long test-under-mask via ANDcc.
8999 instruct testL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2, immL0 zero) %{
9000 match(Set xcc (CmpL (AndL op1 op2) zero));
9001 effect( DEF xcc, USE op1, USE op2 );
9003 size(4);
9004 format %{ "BTST $op1,$op2\t\t! long" %}
9005 opcode(Assembler::andcc_op3, Assembler::arith_op);
9006 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
9007 ins_pipe(ialu_cconly_reg_reg);
9008 %}
9010 // useful for checking the alignment of a pointer:
9011 instruct testL_reg_con(flagsRegL xcc, iRegL op1, immL13 con, immL0 zero) %{
9012 match(Set xcc (CmpL (AndL op1 con) zero));
9013 effect( DEF xcc, USE op1, USE con );
9015 size(4);
9016 format %{ "BTST $op1,$con\t\t! long" %}
9017 opcode(Assembler::andcc_op3, Assembler::arith_op);
9018 ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) );
9019 ins_pipe(ialu_cconly_reg_reg);
9020 %}
// Unsigned compare against an immediate; operand type immU12 keeps the
// constant non-negative so the SUBcc immediate form behaves unsigned.
9022 instruct compU_iReg_imm13(flagsRegU icc, iRegI op1, immU12 op2 ) %{
9023 match(Set icc (CmpU op1 op2));
9025 size(4);
9026 format %{ "CMP $op1,$op2\t! unsigned" %}
9027 opcode(Assembler::subcc_op3, Assembler::arith_op);
9028 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
9029 ins_pipe(ialu_cconly_reg_imm);
9030 %}
9032 // Compare Pointers
9033 instruct compP_iRegP(flagsRegP pcc, iRegP op1, iRegP op2 ) %{
9034 match(Set pcc (CmpP op1 op2));
9036 size(4);
9037 format %{ "CMP $op1,$op2\t! ptr" %}
9038 opcode(Assembler::subcc_op3, Assembler::arith_op);
9039 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
9040 ins_pipe(ialu_cconly_reg_reg);
9041 %}
// Pointer compare against a 13-bit immediate (e.g. NULL).
9043 instruct compP_iRegP_imm13(flagsRegP pcc, iRegP op1, immP13 op2 ) %{
9044 match(Set pcc (CmpP op1 op2));
9046 size(4);
9047 format %{ "CMP $op1,$op2\t! ptr" %}
9048 opcode(Assembler::subcc_op3, Assembler::arith_op);
9049 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
9050 ins_pipe(ialu_cconly_reg_imm);
9051 %}
9053 // Compare Narrow oops
// Compressed (32-bit) oops compare with the 32-bit icc codes.
9054 instruct compN_iRegN(flagsReg icc, iRegN op1, iRegN op2 ) %{
9055 match(Set icc (CmpN op1 op2));
9057 size(4);
9058 format %{ "CMP $op1,$op2\t! compressed ptr" %}
9059 opcode(Assembler::subcc_op3, Assembler::arith_op);
9060 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
9061 ins_pipe(ialu_cconly_reg_reg);
9062 %}
// Narrow-oop compare against the compressed null constant.
9064 instruct compN_iRegN_immN0(flagsReg icc, iRegN op1, immN0 op2 ) %{
9065 match(Set icc (CmpN op1 op2));
9067 size(4);
9068 format %{ "CMP $op1,$op2\t! compressed ptr" %}
9069 opcode(Assembler::subcc_op3, Assembler::arith_op);
9070 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
9071 ins_pipe(ialu_cconly_reg_imm);
9072 %}
9074 //----------Max and Min--------------------------------------------------------
9075 // Min Instructions
9076 // Conditional move for min
// MOVlt: overwrite op2 with op1 when the preceding compare set "less".
// op2 is USE_DEF — it is both the running result and an input.
9077 instruct cmovI_reg_lt( iRegI op2, iRegI op1, flagsReg icc ) %{
9078 effect( USE_DEF op2, USE op1, USE icc );
9080 size(4);
9081 format %{ "MOVlt icc,$op1,$op2\t! min" %}
9082 opcode(Assembler::less);
9083 ins_encode( enc_cmov_reg_minmax(op2,op1) );
9084 ins_pipe(ialu_reg_flags);
9085 %}
9087 // Min Register with Register.
// MinI = compare + conditional move; branch-free.
9088 instruct minI_eReg(iRegI op1, iRegI op2) %{
9089 match(Set op2 (MinI op1 op2));
9090 ins_cost(DEFAULT_COST*2);
9091 expand %{
9092 flagsReg icc;
9093 compI_iReg(icc,op1,op2);
9094 cmovI_reg_lt(op2,op1,icc);
9095 %}
9096 %}
9098 // Max Instructions
9099 // Conditional move for max
// Mirror of cmovI_reg_lt with the "greater" condition.
9100 instruct cmovI_reg_gt( iRegI op2, iRegI op1, flagsReg icc ) %{
9101 effect( USE_DEF op2, USE op1, USE icc );
9102 format %{ "MOVgt icc,$op1,$op2\t! max" %}
9103 opcode(Assembler::greater);
9104 ins_encode( enc_cmov_reg_minmax(op2,op1) );
9105 ins_pipe(ialu_reg_flags);
9106 %}
9108 // Max Register with Register
9109 instruct maxI_eReg(iRegI op1, iRegI op2) %{
9110 match(Set op2 (MaxI op1 op2));
9111 ins_cost(DEFAULT_COST*2);
9112 expand %{
9113 flagsReg icc;
9114 compI_iReg(icc,op1,op2);
9115 cmovI_reg_gt(op2,op1,icc);
9116 %}
9117 %}
9120 //----------Float Compares----------------------------------------------------
9121 // Compare floating, generate condition code
9122 instruct cmpF_cc(flagsRegF fcc, regF src1, regF src2) %{
9123 match(Set fcc (CmpF src1 src2));
9125 size(4);
9126 format %{ "FCMPs $fcc,$src1,$src2" %}
9127 opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmps_opf);
9128 ins_encode( form3_opf_rs1F_rs2F_fcc( src1, src2, fcc ) );
9129 ins_pipe(faddF_fcc_reg_reg_zero);
9130 %}
// Double-precision FCMPd into a float condition-code register.
9132 instruct cmpD_cc(flagsRegF fcc, regD src1, regD src2) %{
9133 match(Set fcc (CmpD src1 src2));
9135 size(4);
9136 format %{ "FCMPd $fcc,$src1,$src2" %}
9137 opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmpd_opf);
9138 ins_encode( form3_opf_rs1D_rs2D_fcc( src1, src2, fcc ) );
9139 ins_pipe(faddD_fcc_reg_reg_zero);
9140 %}
9143 // Compare floating, generate -1,0,1
// Three-way compare (CmpF3, Java fcmpl semantics) materialized into an
// integer register; clobbers fcc0. Primary opcode bit selects float vs
// double inside the floating_cmp encoding.
9144 instruct cmpF_reg(iRegI dst, regF src1, regF src2, flagsRegF0 fcc0) %{
9145 match(Set dst (CmpF3 src1 src2));
9146 effect(KILL fcc0);
9147 ins_cost(DEFAULT_COST*3+BRANCH_COST*3);
9148 format %{ "fcmpl $dst,$src1,$src2" %}
9149 // Primary = float
9150 opcode( true );
9151 ins_encode( floating_cmp( dst, src1, src2 ) );
9152 ins_pipe( floating_cmp );
9153 %}
// Double-precision three-way compare (CmpD3).
9155 instruct cmpD_reg(iRegI dst, regD src1, regD src2, flagsRegF0 fcc0) %{
9156 match(Set dst (CmpD3 src1 src2));
9157 effect(KILL fcc0);
9158 ins_cost(DEFAULT_COST*3+BRANCH_COST*3);
9159 format %{ "dcmpl $dst,$src1,$src2" %}
9160 // Primary = double (not float)
9161 opcode( false );
9162 ins_encode( floating_cmp( dst, src1, src2 ) );
9163 ins_pipe( floating_cmp );
9164 %}
9166 //----------Branches---------------------------------------------------------
9167 // Jump
9168 // (compare 'operand indIndex' and 'instruct addP_reg_reg' above)
// Table switch: load the target address from the constant-table jump table
// at [table_base + switch_val] and jump through O7.
9169 instruct jumpXtnd(iRegX switch_val, o7RegI table) %{
9170 match(Jump switch_val);
9171 effect(TEMP table);
9173 ins_cost(350);
9175 format %{ "ADD $constanttablebase, $constantoffset, O7\n\t"
9176 "LD [O7 + $switch_val], O7\n\t"
9177 "JUMP O7" %}
9178 ins_encode %{
9179 // Calculate table address into a register.
9180 Register table_reg;
9181 Register label_reg = O7;
9182 // If we are calculating the size of this instruction don't trust
9183 // zero offsets because they might change when
9184 // MachConstantBaseNode decides to optimize the constant table
9185 // base.
9186 if ((constant_offset() == 0) && !Compile::current()->in_scratch_emit_size()) {
9187 table_reg = $constanttablebase;
9188 } else {
9189 table_reg = O7;
9190 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset, O7);
9191 __ add($constanttablebase, con_offset, table_reg);
9192 }
9194 // Jump to base address + switch value
9195 __ ld_ptr(table_reg, $switch_val$$Register, label_reg);
9196 __ jmp(label_reg, G0);
9197 __ delayed()->nop();
9198 %}
9199 ins_pipe(ialu_reg_reg);
9200 %}
9202 // Direct Branch. Use V8 version with longer range.
// Unconditional BA + nop delay slot: 8 bytes.
9203 instruct branch(label labl) %{
9204 match(Goto);
9205 effect(USE labl);
9207 size(8);
9208 ins_cost(BRANCH_COST);
9209 format %{ "BA $labl" %}
9210 ins_encode %{
9211 Label* L = $labl$$label;
9212 __ ba(*L);
9213 __ delayed()->nop();
9214 %}
9215 ins_pipe(br);
9216 %}
9218 // Direct Branch, short with no delay slot
// 4-byte ba_short; short-branch variant replaced by the long form when the
// target is out of range. Marked avoid-back-to-back per CBcond erratum
// handling elsewhere in the port.
9219 instruct branch_short(label labl) %{
9220 match(Goto);
9221 predicate(UseCBCond);
9222 effect(USE labl);
9224 size(4);
9225 ins_cost(BRANCH_COST);
9226 format %{ "BA $labl\t! short branch" %}
9227 ins_encode %{
9228 Label* L = $labl$$label;
9229 assert(__ use_cbcond(*L), "back to back cbcond");
9230 __ ba_short(*L);
9231 %}
9232 ins_short_branch(1);
9233 ins_avoid_back_to_back(1);
9234 ins_pipe(cbcond_reg_imm);
9235 %}
9237 // Conditional Direct Branch
// BPcc on the 32-bit integer condition codes; 8 bytes (branch + delay nop).
9238 instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{
9239 match(If cmp icc);
9240 effect(USE labl);
9242 size(8);
9243 ins_cost(BRANCH_COST);
9244 format %{ "BP$cmp $icc,$labl" %}
9245 // Prim = bits 24-22, Secnd = bits 31-30
9246 ins_encode( enc_bp( labl, cmp, icc ) );
9247 ins_pipe(br_cc);
9248 %}
// Unsigned-condition variant of branchCon.
// NOTE(review): every sibling declares size(8) but this one does not —
// enc_bp appears to emit the same branch+delay-slot pair; confirm whether
// the omission is intentional or an oversight.
9250 instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{
9251 match(If cmp icc);
9252 effect(USE labl);
9254 ins_cost(BRANCH_COST);
9255 format %{ "BP$cmp $icc,$labl" %}
9256 // Prim = bits 24-22, Secnd = bits 31-30
9257 ins_encode( enc_bp( labl, cmp, icc ) );
9258 ins_pipe(br_cc);
9259 %}
// Pointer-condition branch; emitted inline so the predict-taken hint can be
// chosen from the branch direction (backward => predict taken).
9261 instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{
9262 match(If cmp pcc);
9263 effect(USE labl);
9265 size(8);
9266 ins_cost(BRANCH_COST);
9267 format %{ "BP$cmp $pcc,$labl" %}
9268 ins_encode %{
9269 Label* L = $labl$$label;
9270 Assembler::Predict predict_taken =
9271 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9273 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
9274 __ delayed()->nop();
9275 %}
9276 ins_pipe(br_cc);
9277 %}
// Floating-point condition branch (FBPfcc) on the given fcc register.
9279 instruct branchConF(cmpOpF cmp, flagsRegF fcc, label labl) %{
9280 match(If cmp fcc);
9281 effect(USE labl);
9283 size(8);
9284 ins_cost(BRANCH_COST);
9285 format %{ "FBP$cmp $fcc,$labl" %}
9286 ins_encode %{
9287 Label* L = $labl$$label;
9288 Assembler::Predict predict_taken =
9289 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9291 __ fbp( (Assembler::Condition)($cmp$$cmpcode), false, (Assembler::CC)($fcc$$reg), predict_taken, *L);
9292 __ delayed()->nop();
9293 %}
9294 ins_pipe(br_fcc);
9295 %}
// Loop back-edge branch; identical encoding to branchCon, kept distinct so
// the matcher can treat CountedLoopEnd specially.
9297 instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{
9298 match(CountedLoopEnd cmp icc);
9299 effect(USE labl);
9301 size(8);
9302 ins_cost(BRANCH_COST);
9303 format %{ "BP$cmp $icc,$labl\t! Loop end" %}
9304 // Prim = bits 24-22, Secnd = bits 31-30
9305 ins_encode( enc_bp( labl, cmp, icc ) );
9306 ins_pipe(br_cc);
9307 %}
// Unsigned loop back-edge branch.
9309 instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{
9310 match(CountedLoopEnd cmp icc);
9311 effect(USE labl);
9313 size(8);
9314 ins_cost(BRANCH_COST);
9315 format %{ "BP$cmp $icc,$labl\t! Loop end" %}
9316 // Prim = bits 24-22, Secnd = bits 31-30
9317 ins_encode( enc_bp( labl, cmp, icc ) );
9318 ins_pipe(br_cc);
9319 %}
9321 // Compare and branch instructions
// Fused compare+branch: CMP, BPcc, delay-slot nop — 12 bytes. The compare
// clobbers the condition codes, hence KILL icc/xcc.
9322 instruct cmpI_reg_branch(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
9323 match(If cmp (CmpI op1 op2));
9324 effect(USE labl, KILL icc);
9326 size(12);
9327 ins_cost(BRANCH_COST);
9328 format %{ "CMP $op1,$op2\t! int\n\t"
9329 "BP$cmp $labl" %}
9330 ins_encode %{
9331 Label* L = $labl$$label;
9332 Assembler::Predict predict_taken =
9333 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9334 __ cmp($op1$$Register, $op2$$Register);
9335 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9336 __ delayed()->nop();
9337 %}
9338 ins_pipe(cmp_br_reg_reg);
9339 %}
// Int compare-with-immediate and branch (immI5 keeps the constant small).
9341 instruct cmpI_imm_branch(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
9342 match(If cmp (CmpI op1 op2));
9343 effect(USE labl, KILL icc);
9345 size(12);
9346 ins_cost(BRANCH_COST);
9347 format %{ "CMP $op1,$op2\t! int\n\t"
9348 "BP$cmp $labl" %}
9349 ins_encode %{
9350 Label* L = $labl$$label;
9351 Assembler::Predict predict_taken =
9352 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9353 __ cmp($op1$$Register, $op2$$constant);
9354 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9355 __ delayed()->nop();
9356 %}
9357 ins_pipe(cmp_br_reg_imm);
9358 %}
// Unsigned compare and branch, register-register.
9360 instruct cmpU_reg_branch(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{
9361 match(If cmp (CmpU op1 op2));
9362 effect(USE labl, KILL icc);
9364 size(12);
9365 ins_cost(BRANCH_COST);
9366 format %{ "CMP $op1,$op2\t! unsigned\n\t"
9367 "BP$cmp $labl" %}
9368 ins_encode %{
9369 Label* L = $labl$$label;
9370 Assembler::Predict predict_taken =
9371 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9372 __ cmp($op1$$Register, $op2$$Register);
9373 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9374 __ delayed()->nop();
9375 %}
9376 ins_pipe(cmp_br_reg_reg);
9377 %}
// Unsigned compare-with-immediate and branch.
9379 instruct cmpU_imm_branch(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{
9380 match(If cmp (CmpU op1 op2));
9381 effect(USE labl, KILL icc);
9383 size(12);
9384 ins_cost(BRANCH_COST);
9385 format %{ "CMP $op1,$op2\t! unsigned\n\t"
9386 "BP$cmp $labl" %}
9387 ins_encode %{
9388 Label* L = $labl$$label;
9389 Assembler::Predict predict_taken =
9390 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9391 __ cmp($op1$$Register, $op2$$constant);
9392 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9393 __ delayed()->nop();
9394 %}
9395 ins_pipe(cmp_br_reg_imm);
9396 %}
// Long compare and branch on the 64-bit xcc codes.
9398 instruct cmpL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{
9399 match(If cmp (CmpL op1 op2));
9400 effect(USE labl, KILL xcc);
9402 size(12);
9403 ins_cost(BRANCH_COST);
9404 format %{ "CMP $op1,$op2\t! long\n\t"
9405 "BP$cmp $labl" %}
9406 ins_encode %{
9407 Label* L = $labl$$label;
9408 Assembler::Predict predict_taken =
9409 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9410 __ cmp($op1$$Register, $op2$$Register);
9411 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
9412 __ delayed()->nop();
9413 %}
9414 ins_pipe(cmp_br_reg_reg);
9415 %}
// Long compare-with-immediate and branch.
9417 instruct cmpL_imm_branch(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{
9418 match(If cmp (CmpL op1 op2));
9419 effect(USE labl, KILL xcc);
9421 size(12);
9422 ins_cost(BRANCH_COST);
9423 format %{ "CMP $op1,$op2\t! long\n\t"
9424 "BP$cmp $labl" %}
9425 ins_encode %{
9426 Label* L = $labl$$label;
9427 Assembler::Predict predict_taken =
9428 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9429 __ cmp($op1$$Register, $op2$$constant);
9430 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
9431 __ delayed()->nop();
9432 %}
9433 ins_pipe(cmp_br_reg_imm);
9434 %}
9436 // Compare Pointers and branch
// Pointer compare+branch on ptr_cc (xcc on 64-bit, icc on 32-bit).
9437 instruct cmpP_reg_branch(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{
9438 match(If cmp (CmpP op1 op2));
9439 effect(USE labl, KILL pcc);
9441 size(12);
9442 ins_cost(BRANCH_COST);
9443 format %{ "CMP $op1,$op2\t! ptr\n\t"
9444 "B$cmp $labl" %}
9445 ins_encode %{
9446 Label* L = $labl$$label;
9447 Assembler::Predict predict_taken =
9448 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9449 __ cmp($op1$$Register, $op2$$Register);
9450 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
9451 __ delayed()->nop();
9452 %}
9453 ins_pipe(cmp_br_reg_reg);
9454 %}
// Null-check compare+branch: compare against G0 (hard-wired zero).
9456 instruct cmpP_null_branch(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{
9457 match(If cmp (CmpP op1 null));
9458 effect(USE labl, KILL pcc);
9460 size(12);
9461 ins_cost(BRANCH_COST);
9462 format %{ "CMP $op1,0\t! ptr\n\t"
9463 "B$cmp $labl" %}
9464 ins_encode %{
9465 Label* L = $labl$$label;
9466 Assembler::Predict predict_taken =
9467 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9468 __ cmp($op1$$Register, G0);
9469 // bpr() is not used here since it has shorter distance.
9470 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
9471 __ delayed()->nop();
9472 %}
9473 ins_pipe(cmp_br_reg_reg);
9474 %}
// Narrow-oop compare+branch (32-bit icc, since compressed oops are 32-bit).
9476 instruct cmpN_reg_branch(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{
9477 match(If cmp (CmpN op1 op2));
9478 effect(USE labl, KILL icc);
9480 size(12);
9481 ins_cost(BRANCH_COST);
9482 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
9483 "BP$cmp $labl" %}
9484 ins_encode %{
9485 Label* L = $labl$$label;
9486 Assembler::Predict predict_taken =
9487 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9488 __ cmp($op1$$Register, $op2$$Register);
9489 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9490 __ delayed()->nop();
9491 %}
9492 ins_pipe(cmp_br_reg_reg);
9493 %}
// Narrow-oop null-check compare+branch against G0.
9495 instruct cmpN_null_branch(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{
9496 match(If cmp (CmpN op1 null));
9497 effect(USE labl, KILL icc);
9499 size(12);
9500 ins_cost(BRANCH_COST);
9501 format %{ "CMP $op1,0\t! compressed ptr\n\t"
9502 "BP$cmp $labl" %}
9503 ins_encode %{
9504 Label* L = $labl$$label;
9505 Assembler::Predict predict_taken =
9506 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9507 __ cmp($op1$$Register, G0);
9508 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9509 __ delayed()->nop();
9510 %}
9511 ins_pipe(cmp_br_reg_reg);
9512 %}
9514 // Loop back branch
// Fused compare+branch for counted-loop back edges; same 12-byte shape as
// cmpI_reg_branch, matched on CountedLoopEnd instead of If.
9515 instruct cmpI_reg_branchLoopEnd(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
9516 match(CountedLoopEnd cmp (CmpI op1 op2));
9517 effect(USE labl, KILL icc);
9519 size(12);
9520 ins_cost(BRANCH_COST);
9521 format %{ "CMP $op1,$op2\t! int\n\t"
9522 "BP$cmp $labl\t! Loop end" %}
9523 ins_encode %{
9524 Label* L = $labl$$label;
9525 Assembler::Predict predict_taken =
9526 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9527 __ cmp($op1$$Register, $op2$$Register);
9528 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9529 __ delayed()->nop();
9530 %}
9531 ins_pipe(cmp_br_reg_reg);
9532 %}
// Immediate variant of the loop back-edge compare+branch.
9534 instruct cmpI_imm_branchLoopEnd(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
9535 match(CountedLoopEnd cmp (CmpI op1 op2));
9536 effect(USE labl, KILL icc);
9538 size(12);
9539 ins_cost(BRANCH_COST);
9540 format %{ "CMP $op1,$op2\t! int\n\t"
9541 "BP$cmp $labl\t! Loop end" %}
9542 ins_encode %{
9543 Label* L = $labl$$label;
9544 Assembler::Predict predict_taken =
9545 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9546 __ cmp($op1$$Register, $op2$$constant);
9547 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9548 __ delayed()->nop();
9549 %}
9550 ins_pipe(cmp_br_reg_imm);
9551 %}
9553 // Short compare and branch instructions
// Single 4-byte CBCOND (compare-and-branch, no delay slot). Gated on
// UseCBCond; each is a short-branch variant (ins_short_branch) that the
// framework swaps for the 12-byte form when the target is out of range,
// and marked avoid-back-to-back since two adjacent cbconds are not allowed.
9554 instruct cmpI_reg_branch_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
9555 match(If cmp (CmpI op1 op2));
9556 predicate(UseCBCond);
9557 effect(USE labl, KILL icc);
9559 size(4);
9560 ins_cost(BRANCH_COST);
9561 format %{ "CWB$cmp $op1,$op2,$labl\t! int" %}
9562 ins_encode %{
9563 Label* L = $labl$$label;
9564 assert(__ use_cbcond(*L), "back to back cbcond");
9565 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9566 %}
9567 ins_short_branch(1);
9568 ins_avoid_back_to_back(1);
9569 ins_pipe(cbcond_reg_reg);
9570 %}
// Int CBCOND with a 5-bit immediate operand.
9572 instruct cmpI_imm_branch_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
9573 match(If cmp (CmpI op1 op2));
9574 predicate(UseCBCond);
9575 effect(USE labl, KILL icc);
9577 size(4);
9578 ins_cost(BRANCH_COST);
9579 format %{ "CWB$cmp $op1,$op2,$labl\t! int" %}
9580 ins_encode %{
9581 Label* L = $labl$$label;
9582 assert(__ use_cbcond(*L), "back to back cbcond");
9583 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
9584 %}
9585 ins_short_branch(1);
9586 ins_avoid_back_to_back(1);
9587 ins_pipe(cbcond_reg_imm);
9588 %}
// Unsigned CBCOND, register-register.
9590 instruct cmpU_reg_branch_short(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{
9591 match(If cmp (CmpU op1 op2));
9592 predicate(UseCBCond);
9593 effect(USE labl, KILL icc);
9595 size(4);
9596 ins_cost(BRANCH_COST);
9597 format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
9598 ins_encode %{
9599 Label* L = $labl$$label;
9600 assert(__ use_cbcond(*L), "back to back cbcond");
9601 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9602 %}
9603 ins_short_branch(1);
9604 ins_avoid_back_to_back(1);
9605 ins_pipe(cbcond_reg_reg);
9606 %}
// Unsigned CBCOND with a 5-bit immediate.
9608 instruct cmpU_imm_branch_short(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{
9609 match(If cmp (CmpU op1 op2));
9610 predicate(UseCBCond);
9611 effect(USE labl, KILL icc);
9613 size(4);
9614 ins_cost(BRANCH_COST);
9615 format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
9616 ins_encode %{
9617 Label* L = $labl$$label;
9618 assert(__ use_cbcond(*L), "back to back cbcond");
9619 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
9620 %}
9621 ins_short_branch(1);
9622 ins_avoid_back_to_back(1);
9623 ins_pipe(cbcond_reg_imm);
9624 %}
// Long (64-bit, xcc) CBCOND, register-register.
9626 instruct cmpL_reg_branch_short(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{
9627 match(If cmp (CmpL op1 op2));
9628 predicate(UseCBCond);
9629 effect(USE labl, KILL xcc);
9631 size(4);
9632 ins_cost(BRANCH_COST);
9633 format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
9634 ins_encode %{
9635 Label* L = $labl$$label;
9636 assert(__ use_cbcond(*L), "back to back cbcond");
9637 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$Register, *L);
9638 %}
9639 ins_short_branch(1);
9640 ins_avoid_back_to_back(1);
9641 ins_pipe(cbcond_reg_reg);
9642 %}
9644 instruct cmpL_imm_branch_short(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{
9645 match(If cmp (CmpL op1 op2));
9646 predicate(UseCBCond);
9647 effect(USE labl, KILL xcc);
9649 size(4);
9650 ins_cost(BRANCH_COST);
9651 format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
9652 ins_encode %{
9653 Label* L = $labl$$label;
9654 assert(__ use_cbcond(*L), "back to back cbcond");
9655 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L);
9656 %}
9657 ins_short_branch(1);
9658 ins_avoid_back_to_back(1);
9659 ins_pipe(cbcond_reg_imm);
9660 %}
9662 // Compare Pointers and branch
// Pointer width depends on _LP64: CXB (64-bit, xcc) vs CWB (32-bit, icc);
// the encoding uses Assembler::ptr_cc which selects the right one.
9663 instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{
9664 match(If cmp (CmpP op1 op2));
9665 predicate(UseCBCond);
9666 effect(USE labl, KILL pcc);
9668 size(4);
9669 ins_cost(BRANCH_COST);
9670 #ifdef _LP64
9671 format %{ "CXB$cmp $op1,$op2,$labl\t! ptr" %}
9672 #else
9673 format %{ "CWB$cmp $op1,$op2,$labl\t! ptr" %}
9674 #endif
9675 ins_encode %{
9676 Label* L = $labl$$label;
9677 assert(__ use_cbcond(*L), "back to back cbcond");
9678 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, $op2$$Register, *L);
9679 %}
9680 ins_short_branch(1);
9681 ins_avoid_back_to_back(1);
9682 ins_pipe(cbcond_reg_reg);
9683 %}
// Pointer-vs-null compare-and-branch: the null operand is encoded as G0
// (hardwired zero register) instead of an immediate.
9685 instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{
9686 match(If cmp (CmpP op1 null));
9687 predicate(UseCBCond);
9688 effect(USE labl, KILL pcc);
9690 size(4);
9691 ins_cost(BRANCH_COST);
9692 #ifdef _LP64
9693 format %{ "CXB$cmp $op1,0,$labl\t! ptr" %}
9694 #else
9695 format %{ "CWB$cmp $op1,0,$labl\t! ptr" %}
9696 #endif
9697 ins_encode %{
9698 Label* L = $labl$$label;
9699 assert(__ use_cbcond(*L), "back to back cbcond");
9700 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, G0, *L);
9701 %}
9702 ins_short_branch(1);
9703 ins_avoid_back_to_back(1);
9704 ins_pipe(cbcond_reg_reg);
9705 %}
// Compressed-oop (narrow pointer) compare-and-branch, register/register.
// Narrow oops are 32-bit, so the 32-bit CWB form with icc is used.
// Fix: the format string previously read "$op1,op2,$labl" — the missing '$'
// meant the second operand was printed as the literal text "op2" in
// disassembly/debug output instead of the actual register. Now "$op2",
// consistent with every other *_branch_short rule. Encoding is unchanged.
9707 instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{
9708 match(If cmp (CmpN op1 op2));
9709 predicate(UseCBCond);
9710 effect(USE labl, KILL icc);
9712 size(4);
9713 ins_cost(BRANCH_COST);
9714 format %{ "CWB$cmp $op1,$op2,$labl\t! compressed ptr" %}
9715 ins_encode %{
9716 Label* L = $labl$$label;
9717 assert(__ use_cbcond(*L), "back to back cbcond");
9718 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9719 %}
9720 ins_short_branch(1);
9721 ins_avoid_back_to_back(1);
9722 ins_pipe(cbcond_reg_reg);
9723 %}
// Compressed-oop-vs-null compare-and-branch: null is encoded as G0
// (hardwired zero register), matching cmpP_null_branch_short above.
9725 instruct cmpN_null_branch_short(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{
9726 match(If cmp (CmpN op1 null));
9727 predicate(UseCBCond);
9728 effect(USE labl, KILL icc);
9730 size(4);
9731 ins_cost(BRANCH_COST);
9732 format %{ "CWB$cmp $op1,0,$labl\t! compressed ptr" %}
9733 ins_encode %{
9734 Label* L = $labl$$label;
9735 assert(__ use_cbcond(*L), "back to back cbcond");
9736 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, G0, *L);
9737 %}
9738 ins_short_branch(1);
9739 ins_avoid_back_to_back(1);
9740 ins_pipe(cbcond_reg_reg);
9741 %}
9743 // Loop back branch
// CBCOND forms of the counted-loop back-branch (CountedLoopEnd), mirroring
// cmpI_reg_branch_short / cmpI_imm_branch_short above.
9744 instruct cmpI_reg_branchLoopEnd_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
9745 match(CountedLoopEnd cmp (CmpI op1 op2));
9746 predicate(UseCBCond);
9747 effect(USE labl, KILL icc);
9749 size(4);
9750 ins_cost(BRANCH_COST);
9751 format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %}
9752 ins_encode %{
9753 Label* L = $labl$$label;
9754 assert(__ use_cbcond(*L), "back to back cbcond");
9755 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9756 %}
9757 ins_short_branch(1);
9758 ins_avoid_back_to_back(1);
9759 ins_pipe(cbcond_reg_reg);
9760 %}
// Same as above with a 5-bit immediate second operand.
9762 instruct cmpI_imm_branchLoopEnd_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
9763 match(CountedLoopEnd cmp (CmpI op1 op2));
9764 predicate(UseCBCond);
9765 effect(USE labl, KILL icc);
9767 size(4);
9768 ins_cost(BRANCH_COST);
9769 format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %}
9770 ins_encode %{
9771 Label* L = $labl$$label;
9772 assert(__ use_cbcond(*L), "back to back cbcond");
9773 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
9774 %}
9775 ins_short_branch(1);
9776 ins_avoid_back_to_back(1);
9777 ins_pipe(cbcond_reg_imm);
9778 %}
9780 // Branch-on-register tests all 64 bits. We assume that values
9781 // in 64-bit registers always remain zero or sign extended
9782 // unless our code munges the high bits. Interrupts can chop
9783 // the high order bits to zero or sign at any time.
// These use the BPr (branch on register contents, vs. zero) form via the
// enc_bpr encoding: no flags are consumed or killed. size(8) = branch +
// delay slot. Guarded by can_branch_register() on the matched subtree.
9784 instruct branchCon_regI(cmpOp_reg cmp, iRegI op1, immI0 zero, label labl) %{
9785 match(If cmp (CmpI op1 zero));
9786 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
9787 effect(USE labl);
9789 size(8);
9790 ins_cost(BRANCH_COST);
9791 format %{ "BR$cmp $op1,$labl" %}
9792 ins_encode( enc_bpr( labl, cmp, op1 ) );
9793 ins_pipe(br_reg);
9794 %}
// Pointer-vs-null variant.
9796 instruct branchCon_regP(cmpOp_reg cmp, iRegP op1, immP0 null, label labl) %{
9797 match(If cmp (CmpP op1 null));
9798 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
9799 effect(USE labl);
9801 size(8);
9802 ins_cost(BRANCH_COST);
9803 format %{ "BR$cmp $op1,$labl" %}
9804 ins_encode( enc_bpr( labl, cmp, op1 ) );
9805 ins_pipe(br_reg);
9806 %}
// Long-vs-zero variant.
9808 instruct branchCon_regL(cmpOp_reg cmp, iRegL op1, immL0 zero, label labl) %{
9809 match(If cmp (CmpL op1 zero));
9810 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
9811 effect(USE labl);
9813 size(8);
9814 ins_cost(BRANCH_COST);
9815 format %{ "BR$cmp $op1,$labl" %}
9816 ins_encode( enc_bpr( labl, cmp, op1 ) );
9817 ins_pipe(br_reg);
9818 %}
9821 // ============================================================================
9822 // Long Compare
9823 //
9824 // Currently we hold longs in 2 registers. Comparing such values efficiently
9825 // is tricky. The flavor of compare used depends on whether we are testing
9826 // for LT, LE, or EQ. For a simple LT test we can check just the sign bit.
9827 // The GE test is the negated LT test. The LE test can be had by commuting
9828 // the operands (yielding a GE test) and then negating; negate again for the
9829 // GT test. The EQ test is done by ORcc'ing the high and low halves, and the
9830 // NE test is negated from that.
9832 // Due to a shortcoming in the ADLC, it mixes up expressions like:
9833 // (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)). Note the
9834 // difference between 'Y' and '0L'. The tree-matches for the CmpI sections
9835 // are collapsed internally in the ADLC's dfa-gen code. The match for
9836 // (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the
9837 // foo match ends up with the wrong leaf. One fix is to not match both
9838 // reg-reg and reg-zero forms of long-compare. This is unfortunate because
9839 // both forms beat the trinary form of long-compare and both are very useful
9840 // on Intel which has so few registers.
// Branch on previously-computed 64-bit (xcc) condition codes.
// BP + delay-slot NOP = size(8); prediction hint from branch direction.
9842 instruct branchCon_long(cmpOp cmp, flagsRegL xcc, label labl) %{
9843 match(If cmp xcc);
9844 effect(USE labl);
9846 size(8);
9847 ins_cost(BRANCH_COST);
9848 format %{ "BP$cmp $xcc,$labl" %}
9849 ins_encode %{
9850 Label* L = $labl$$label;
9851 Assembler::Predict predict_taken =
9852 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9854 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
9855 __ delayed()->nop();
9856 %}
9857 ins_pipe(br_cc);
9858 %}
9860 // Manifest a CmpL3 result in an integer register. Very painful.
9861 // This is the test to avoid.
// Produces -1/0/+1 from a long comparison using a 6-instruction sequence
// (size(24) covers the annulled-branch form; see the format string).
// Encoding is delegated to the cmpl_flag enc_class.
9862 instruct cmpL3_reg_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg ccr ) %{
9863 match(Set dst (CmpL3 src1 src2) );
9864 effect( KILL ccr );
9865 ins_cost(6*DEFAULT_COST);
9866 size(24);
9867 format %{ "CMP $src1,$src2\t\t! long\n"
9868 "\tBLT,a,pn done\n"
9869 "\tMOV -1,$dst\t! delay slot\n"
9870 "\tBGT,a,pn done\n"
9871 "\tMOV 1,$dst\t! delay slot\n"
9872 "\tCLR $dst\n"
9873 "done:" %}
9874 ins_encode( cmpl_flag(src1,src2,dst) );
9875 ins_pipe(cmpL_reg);
9876 %}
9878 // Conditional move
// Conditional moves predicated on the 64-bit (xcc) condition codes.
// Register sources use enc_cmov_reg, immediate sources enc_cmov_imm,
// float/double use enc_cmovf_reg (FMOVS/FMOVD). Immediate forms are
// slightly cheaper (140 vs 150).
9879 instruct cmovLL_reg(cmpOp cmp, flagsRegL xcc, iRegL dst, iRegL src) %{
9880 match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
9881 ins_cost(150);
9882 format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
9883 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9884 ins_pipe(ialu_reg);
9885 %}
9887 instruct cmovLL_imm(cmpOp cmp, flagsRegL xcc, iRegL dst, immL0 src) %{
9888 match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
9889 ins_cost(140);
9890 format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
9891 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
9892 ins_pipe(ialu_imm);
9893 %}
9895 instruct cmovIL_reg(cmpOp cmp, flagsRegL xcc, iRegI dst, iRegI src) %{
9896 match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
9897 ins_cost(150);
9898 format %{ "MOV$cmp $xcc,$src,$dst" %}
9899 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9900 ins_pipe(ialu_reg);
9901 %}
9903 instruct cmovIL_imm(cmpOp cmp, flagsRegL xcc, iRegI dst, immI11 src) %{
9904 match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
9905 ins_cost(140);
9906 format %{ "MOV$cmp $xcc,$src,$dst" %}
9907 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
9908 ins_pipe(ialu_imm);
9909 %}
// Narrow-oop conditional move.
9911 instruct cmovNL_reg(cmpOp cmp, flagsRegL xcc, iRegN dst, iRegN src) %{
9912 match(Set dst (CMoveN (Binary cmp xcc) (Binary dst src)));
9913 ins_cost(150);
9914 format %{ "MOV$cmp $xcc,$src,$dst" %}
9915 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9916 ins_pipe(ialu_reg);
9917 %}
// Pointer conditional move.
9919 instruct cmovPL_reg(cmpOp cmp, flagsRegL xcc, iRegP dst, iRegP src) %{
9920 match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
9921 ins_cost(150);
9922 format %{ "MOV$cmp $xcc,$src,$dst" %}
9923 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9924 ins_pipe(ialu_reg);
9925 %}
9927 instruct cmovPL_imm(cmpOp cmp, flagsRegL xcc, iRegP dst, immP0 src) %{
9928 match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
9929 ins_cost(140);
9930 format %{ "MOV$cmp $xcc,$src,$dst" %}
9931 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
9932 ins_pipe(ialu_imm);
9933 %}
// Float conditional move (FMOVS).
9935 instruct cmovFL_reg(cmpOp cmp, flagsRegL xcc, regF dst, regF src) %{
9936 match(Set dst (CMoveF (Binary cmp xcc) (Binary dst src)));
9937 ins_cost(150);
9938 opcode(0x101);
9939 format %{ "FMOVS$cmp $xcc,$src,$dst" %}
9940 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
9941 ins_pipe(int_conditional_float_move);
9942 %}
// Double conditional move (FMOVD).
9944 instruct cmovDL_reg(cmpOp cmp, flagsRegL xcc, regD dst, regD src) %{
9945 match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src)));
9946 ins_cost(150);
9947 opcode(0x102);
9948 format %{ "FMOVD$cmp $xcc,$src,$dst" %}
9949 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
9950 ins_pipe(int_conditional_float_move);
9951 %}
9953 // ============================================================================
9954 // Safepoint Instruction
// Safepoint poll: a pointer-sized load from the polling page into G0
// (result discarded). When the VM protects the page, the load faults and
// the thread is brought to a safepoint. The relocation marks the poll
// site for the signal handler.
9955 instruct safePoint_poll(iRegP poll) %{
9956 match(SafePoint poll);
9957 effect(USE poll);
9959 size(4);
9960 #ifdef _LP64
9961 format %{ "LDX [$poll],R_G0\t! Safepoint: poll for GC" %}
9962 #else
9963 format %{ "LDUW [$poll],R_G0\t! Safepoint: poll for GC" %}
9964 #endif
9965 ins_encode %{
9966 __ relocate(relocInfo::poll_type);
9967 __ ld_ptr($poll$$Register, 0, G0);
9968 %}
9969 ins_pipe(loadPollP);
9970 %}
9972 // ============================================================================
9973 // Call Instructions
9974 // Call Java Static Instruction
// Static Java call (non-method-handle case, excluded by the predicate).
// size(8) = CALL + delay-slot NOP per the format string.
9975 instruct CallStaticJavaDirect( method meth ) %{
9976 match(CallStaticJava);
9977 predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
9978 effect(USE meth);
9980 size(8);
9981 ins_cost(CALL_COST);
9982 format %{ "CALL,static ; NOP ==> " %}
9983 ins_encode( Java_Static_Call( meth ), call_epilog );
9984 ins_pipe(simple_call);
9985 %}
9987 // Call Java Static Instruction (method handle version)
// Method-handle invoke: additionally saves/restores SP around the call
// (preserve_SP/restore_SP) and clobbers L7, hence KILL l7_mh_SP_save
// and the larger size(16).
9988 instruct CallStaticJavaHandle(method meth, l7RegP l7_mh_SP_save) %{
9989 match(CallStaticJava);
9990 predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
9991 effect(USE meth, KILL l7_mh_SP_save);
9993 size(16);
9994 ins_cost(CALL_COST);
9995 format %{ "CALL,static/MethodHandle" %}
9996 ins_encode(preserve_SP, Java_Static_Call(meth), restore_SP, call_epilog);
9997 ins_pipe(simple_call);
9998 %}
10000 // Call Java Dynamic Instruction
// Dynamic (inline-cache) call: loads the IC holder into G5 then calls.
// No fixed size() — the encoding's length is variable.
10001 instruct CallDynamicJavaDirect( method meth ) %{
10002 match(CallDynamicJava);
10003 effect(USE meth);
10005 ins_cost(CALL_COST);
10006 format %{ "SET (empty),R_G5\n\t"
10007 "CALL,dynamic ; NOP ==> " %}
10008 ins_encode( Java_Dynamic_Call( meth ), call_epilog );
10009 ins_pipe(call);
10010 %}
10012 // Call Runtime Instruction
// Runtime call: clobbers L7 (used by the Java_To_Runtime encoding);
// adjust_long_from_native_call fixes up long return values from native ABI.
10013 instruct CallRuntimeDirect(method meth, l7RegP l7) %{
10014 match(CallRuntime);
10015 effect(USE meth, KILL l7);
10016 ins_cost(CALL_COST);
10017 format %{ "CALL,runtime" %}
10018 ins_encode( Java_To_Runtime( meth ),
10019 call_epilog, adjust_long_from_native_call );
10020 ins_pipe(simple_call);
10021 %}
10023 // Call runtime without safepoint - same as CallRuntime
10024 instruct CallLeafDirect(method meth, l7RegP l7) %{
10025 match(CallLeaf);
10026 effect(USE meth, KILL l7);
10027 ins_cost(CALL_COST);
10028 format %{ "CALL,runtime leaf" %}
10029 ins_encode( Java_To_Runtime( meth ),
10030 call_epilog,
10031 adjust_long_from_native_call );
10032 ins_pipe(simple_call);
10033 %}
10035 // Call runtime without safepoint - same as CallLeaf
10036 instruct CallLeafNoFPDirect(method meth, l7RegP l7) %{
10037 match(CallLeafNoFP);
10038 effect(USE meth, KILL l7);
10039 ins_cost(CALL_COST);
10040 format %{ "CALL,runtime leaf nofp" %}
10041 ins_encode( Java_To_Runtime( meth ),
10042 call_epilog,
10043 adjust_long_from_native_call );
10044 ins_pipe(simple_call);
10045 %}
10047 // Tail Call; Jump from runtime stub to Java code.
10048 // Also known as an 'interprocedural jump'.
10049 // Target of jump will eventually return to caller.
10050 // TailJump below removes the return address.
// Indirect jump through G3; the method oop rides along in its dedicated
// inline-cache register.
10051 instruct TailCalljmpInd(g3RegP jump_target, inline_cache_regP method_oop) %{
10052 match(TailCall jump_target method_oop );
10054 ins_cost(CALL_COST);
10055 format %{ "Jmp $jump_target ; NOP \t! $method_oop holds method oop" %}
10056 ins_encode(form_jmpl(jump_target));
10057 ins_pipe(tail_call);
10058 %}
10061 // Return Instruction
// Emits nothing: the epilogue node already issued the RET (see size(0)).
10062 instruct Ret() %{
10063 match(Return);
10065 // The epilogue node did the ret already.
10066 size(0);
10067 format %{ "! return" %}
10068 ins_encode();
10069 ins_pipe(empty);
10070 %}
10073 // Tail Jump; remove the return address; jump to target.
10074 // TailCall above leaves the return address around.
10075 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
10076 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
10077 // "restore" before this instruction (in Epilogue), we need to materialize it
10078 // in %i0.
10079 instruct tailjmpInd(g1RegP jump_target, i0RegP ex_oop) %{
10080 match( TailJump jump_target ex_oop );
10081 ins_cost(CALL_COST);
10082 format %{ "! discard R_O7\n\t"
10083 "Jmp $jump_target ; ADD O7,8,O1 \t! $ex_oop holds exc. oop" %}
10084 ins_encode(form_jmpl_set_exception_pc(jump_target));
10085 // opcode(Assembler::jmpl_op3, Assembler::arith_op);
10086 // The hack duplicates the exception oop into G3, so that CreateEx can use it there.
10087 // ins_encode( form3_rs1_simm13_rd( jump_target, 0x00, R_G0 ), move_return_pc_to_o1() );
10088 ins_pipe(tail_call);
10089 %}
10091 // Create exception oop: created by stack-crawling runtime code.
10092 // Created exception is now available to this handler, and is setup
10093 // just prior to jumping to this handler. No code emitted.
10094 instruct CreateException( o0RegP ex_oop )
10095 %{
10096 match(Set ex_oop (CreateEx));
10097 ins_cost(0);
10099 size(0);
10100 // use the following format syntax
10101 format %{ "! exception oop is in R_O0; no code emitted" %}
10102 ins_encode();
10103 ins_pipe(empty);
10104 %}
10107 // Rethrow exception:
10108 // The exception oop will come in the first argument position.
10109 // Then JUMP (not call) to the rethrow stub code.
10110 instruct RethrowException()
10111 %{
10112 match(Rethrow);
10113 ins_cost(CALL_COST);
10115 // use the following format syntax
10116 format %{ "Jmp rethrow_stub" %}
10117 ins_encode(enc_rethrow);
10118 ins_pipe(tail_call);
10119 %}
10122 // Die now
// Halt node: emits a single ILLTRAP so unreachable paths trap immediately.
10123 instruct ShouldNotReachHere( )
10124 %{
10125 match(Halt);
10126 ins_cost(CALL_COST);
10128 size(4);
10129 // Use the following format syntax
10130 format %{ "ILLTRAP ; ShouldNotReachHere" %}
10131 ins_encode( form2_illtrap() );
10132 ins_pipe(tail_call);
10133 %}
10135 // ============================================================================
10136 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
10137 // array for an instance of the superklass. Set a hidden internal cache on a
10138 // hit (cache is checked with exposed code in gen_subtype_check()). Return
10139 // not zero for a miss or zero for a hit. The encoding ALSO sets flags.
10140 instruct partialSubtypeCheck( o0RegP index, o1RegP sub, o2RegP super, flagsRegP pcc, o7RegP o7 ) %{
10141 match(Set index (PartialSubtypeCheck sub super));
10142 effect( KILL pcc, KILL o7 );
10143 ins_cost(DEFAULT_COST*10);
10144 format %{ "CALL PartialSubtypeCheck\n\tNOP" %}
10145 ins_encode( enc_PartialSubtypeCheck() );
10146 ins_pipe(partial_subtype_check_pipe);
10147 %}
// Fused form: the check's result is compared against zero directly, so only
// the condition codes are produced; the index register is clobbered instead.
10149 instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, immP0 zero, o0RegP idx, o7RegP o7 ) %{
10150 match(Set pcc (CmpP (PartialSubtypeCheck sub super) zero));
10151 effect( KILL idx, KILL o7 );
10152 ins_cost(DEFAULT_COST*10);
10153 format %{ "CALL PartialSubtypeCheck\n\tNOP\t# (sets condition codes)" %}
10154 ins_encode( enc_PartialSubtypeCheck() );
10155 ins_pipe(partial_subtype_check_pipe);
10156 %}
10159 // ============================================================================
10160 // inlined locking and unlocking
// Inlined monitor enter/exit; the heavy lifting lives in the Fast_Lock /
// Fast_Unlock enc_classes. The box register is consumed (USE_KILL) and the
// two scratch registers are clobbered; pcc receives the success/failure flags.
10162 instruct cmpFastLock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
10163 match(Set pcc (FastLock object box));
10165 effect(TEMP scratch2, USE_KILL box, KILL scratch);
10166 ins_cost(100);
10168 format %{ "FASTLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
10169 ins_encode( Fast_Lock(object, box, scratch, scratch2) );
10170 ins_pipe(long_memory_op);
10171 %}
10174 instruct cmpFastUnlock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
10175 match(Set pcc (FastUnlock object box));
10176 effect(TEMP scratch2, USE_KILL box, KILL scratch);
10177 ins_cost(100);
10179 format %{ "FASTUNLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
10180 ins_encode( Fast_Unlock(object, box, scratch, scratch2) );
10181 ins_pipe(long_memory_op);
10182 %}
10184 // The encodings are generic.
// Generic array-zeroing loop, used when block-zeroing (BIS) is not chosen
// for this size (predicate is the negation of use_block_zeroing). Walks
// backwards storing G0 a doubleword at a time, with the store in the
// branch's delay slot.
10185 instruct clear_array(iRegX cnt, iRegP base, iRegX temp, Universe dummy, flagsReg ccr) %{
10186 predicate(!use_block_zeroing(n->in(2)) );
10187 match(Set dummy (ClearArray cnt base));
10188 effect(TEMP temp, KILL ccr);
10189 ins_cost(300);
10190 format %{ "MOV $cnt,$temp\n"
10191 "loop: SUBcc $temp,8,$temp\t! Count down a dword of bytes\n"
10192 " BRge loop\t\t! Clearing loop\n"
10193 " STX G0,[$base+$temp]\t! delay slot" %}
10195 ins_encode %{
10196 // Compiler ensures base is doubleword aligned and cnt is count of doublewords
10197 Register nof_bytes_arg = $cnt$$Register;
10198 Register nof_bytes_tmp = $temp$$Register;
10199 Register base_pointer_arg = $base$$Register;
10201 Label loop;
10202 __ mov(nof_bytes_arg, nof_bytes_tmp);
10204 // Loop and clear, walking backwards through the array.
10205 // nof_bytes_tmp (if >0) is always the number of bytes to zero
10206 __ bind(loop);
10207 __ deccc(nof_bytes_tmp, 8);
10208 __ br(Assembler::greaterEqual, true, Assembler::pt, loop);
10209 __ delayed()-> stx(G0, base_pointer_arg, nof_bytes_tmp);
10210 // %%%% this mini-loop must not cross a cache boundary!
10211 %}
10212 ins_pipe(long_memory_op);
10213 %}
// Block-zeroing (BIS) variant: delegates to MacroAssembler::bis_zeroing
// with no temp register. Leading NOP separates short branches.
10215 instruct clear_array_bis(g1RegX cnt, o0RegP base, Universe dummy, flagsReg ccr) %{
10216 predicate(use_block_zeroing(n->in(2)));
10217 match(Set dummy (ClearArray cnt base));
10218 effect(USE_KILL cnt, USE_KILL base, KILL ccr);
10219 ins_cost(300);
10220 format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}
10222 ins_encode %{
10224 assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
10225 Register to = $base$$Register;
10226 Register count = $cnt$$Register;
10228 Label Ldone;
10229 __ nop(); // Separate short branches
10230 // Use BIS for zeroing (temp is not used).
10231 __ bis_zeroing(to, count, G0, Ldone);
10232 __ bind(Ldone);
10234 %}
10235 ins_pipe(long_memory_op);
10236 %}
// BIS variant with an explicit temp, used when BlockZeroingLowLimit does
// not fit in a simm13 (see the extra predicate clause).
10238 instruct clear_array_bis_2(g1RegX cnt, o0RegP base, iRegX tmp, Universe dummy, flagsReg ccr) %{
10239 predicate(use_block_zeroing(n->in(2)) && !Assembler::is_simm13((int)BlockZeroingLowLimit));
10240 match(Set dummy (ClearArray cnt base));
10241 effect(TEMP tmp, USE_KILL cnt, USE_KILL base, KILL ccr);
10242 ins_cost(300);
10243 format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}
10245 ins_encode %{
10247 assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
10248 Register to = $base$$Register;
10249 Register count = $cnt$$Register;
10250 Register temp = $tmp$$Register;
10252 Label Ldone;
10253 __ nop(); // Separate short branches
10254 // Use BIS for zeroing
10255 __ bis_zeroing(to, count, temp, Ldone);
10256 __ bind(Ldone);
10258 %}
10259 ins_pipe(long_memory_op);
10260 %}
// String/array intrinsics. Each pins its inputs to fixed registers
// (O0/O1/G3/G4), consumes them (USE_KILL), and delegates the code to the
// corresponding enc_class; O7 and the flags are clobbered.
10262 instruct string_compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
10263 o7RegI tmp, flagsReg ccr) %{
10264 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
10265 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp);
10266 ins_cost(300);
10267 format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp" %}
10268 ins_encode( enc_String_Compare(str1, str2, cnt1, cnt2, result) );
10269 ins_pipe(long_memory_op);
10270 %}
10272 instruct string_equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result,
10273 o7RegI tmp, flagsReg ccr) %{
10274 match(Set result (StrEquals (Binary str1 str2) cnt));
10275 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp, KILL ccr);
10276 ins_cost(300);
10277 format %{ "String Equals $str1,$str2,$cnt -> $result // KILL $tmp" %}
10278 ins_encode( enc_String_Equals(str1, str2, cnt, result) );
10279 ins_pipe(long_memory_op);
10280 %}
10282 instruct array_equals(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI result,
10283 o7RegI tmp2, flagsReg ccr) %{
10284 match(Set result (AryEq ary1 ary2));
10285 effect(USE_KILL ary1, USE_KILL ary2, KILL tmp1, KILL tmp2, KILL ccr);
10286 ins_cost(300);
10287 format %{ "Array Equals $ary1,$ary2 -> $result // KILL $tmp1,$tmp2" %}
10288 ins_encode( enc_Array_Equals(ary1, ary2, tmp1, result));
10289 ins_pipe(long_memory_op);
10290 %}
10293 //---------- Zeros Count Instructions ------------------------------------------
// All four rules rely on POPC and are therefore guarded by
// UsePopCountInstruction. Leading zeros: smear the highest set bit right
// (successive shift+or), popcount, and subtract from the word width.
// Trailing zeros: popc(~x & (x - 1)).
10295 instruct countLeadingZerosI(iRegIsafe dst, iRegI src, iRegI tmp, flagsReg cr) %{
10296 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
10297 match(Set dst (CountLeadingZerosI src));
10298 effect(TEMP dst, TEMP tmp, KILL cr);
10300 // x |= (x >> 1);
10301 // x |= (x >> 2);
10302 // x |= (x >> 4);
10303 // x |= (x >> 8);
10304 // x |= (x >> 16);
10305 // return (WORDBITS - popc(x));
10306 format %{ "SRL $src,1,$tmp\t! count leading zeros (int)\n\t"
10307 "SRL $src,0,$dst\t! 32-bit zero extend\n\t"
10308 "OR $dst,$tmp,$dst\n\t"
10309 "SRL $dst,2,$tmp\n\t"
10310 "OR $dst,$tmp,$dst\n\t"
10311 "SRL $dst,4,$tmp\n\t"
10312 "OR $dst,$tmp,$dst\n\t"
10313 "SRL $dst,8,$tmp\n\t"
10314 "OR $dst,$tmp,$dst\n\t"
10315 "SRL $dst,16,$tmp\n\t"
10316 "OR $dst,$tmp,$dst\n\t"
10317 "POPC $dst,$dst\n\t"
10318 "MOV 32,$tmp\n\t"
10319 "SUB $tmp,$dst,$dst" %}
10320 ins_encode %{
10321 Register Rdst = $dst$$Register;
10322 Register Rsrc = $src$$Register;
10323 Register Rtmp = $tmp$$Register;
10324 __ srl(Rsrc, 1, Rtmp);
10325 __ srl(Rsrc, 0, Rdst);
10326 __ or3(Rdst, Rtmp, Rdst);
10327 __ srl(Rdst, 2, Rtmp);
10328 __ or3(Rdst, Rtmp, Rdst);
10329 __ srl(Rdst, 4, Rtmp);
10330 __ or3(Rdst, Rtmp, Rdst);
10331 __ srl(Rdst, 8, Rtmp);
10332 __ or3(Rdst, Rtmp, Rdst);
10333 __ srl(Rdst, 16, Rtmp);
10334 __ or3(Rdst, Rtmp, Rdst);
10335 __ popc(Rdst, Rdst);
10336 __ mov(BitsPerInt, Rtmp);
10337 __ sub(Rtmp, Rdst, Rdst);
10338 %}
10339 ins_pipe(ialu_reg);
10340 %}
// 64-bit leading-zeros: same smear-then-popcount scheme using SRLX,
// with one extra >>32 step and BitsPerLong as the subtrahend.
10342 instruct countLeadingZerosL(iRegIsafe dst, iRegL src, iRegL tmp, flagsReg cr) %{
10343 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
10344 match(Set dst (CountLeadingZerosL src));
10345 effect(TEMP dst, TEMP tmp, KILL cr);
10347 // x |= (x >> 1);
10348 // x |= (x >> 2);
10349 // x |= (x >> 4);
10350 // x |= (x >> 8);
10351 // x |= (x >> 16);
10352 // x |= (x >> 32);
10353 // return (WORDBITS - popc(x));
10354 format %{ "SRLX $src,1,$tmp\t! count leading zeros (long)\n\t"
10355 "OR $src,$tmp,$dst\n\t"
10356 "SRLX $dst,2,$tmp\n\t"
10357 "OR $dst,$tmp,$dst\n\t"
10358 "SRLX $dst,4,$tmp\n\t"
10359 "OR $dst,$tmp,$dst\n\t"
10360 "SRLX $dst,8,$tmp\n\t"
10361 "OR $dst,$tmp,$dst\n\t"
10362 "SRLX $dst,16,$tmp\n\t"
10363 "OR $dst,$tmp,$dst\n\t"
10364 "SRLX $dst,32,$tmp\n\t"
10365 "OR $dst,$tmp,$dst\n\t"
10366 "POPC $dst,$dst\n\t"
10367 "MOV 64,$tmp\n\t"
10368 "SUB $tmp,$dst,$dst" %}
10369 ins_encode %{
10370 Register Rdst = $dst$$Register;
10371 Register Rsrc = $src$$Register;
10372 Register Rtmp = $tmp$$Register;
10373 __ srlx(Rsrc, 1, Rtmp);
10374 __ or3( Rsrc, Rtmp, Rdst);
10375 __ srlx(Rdst, 2, Rtmp);
10376 __ or3( Rdst, Rtmp, Rdst);
10377 __ srlx(Rdst, 4, Rtmp);
10378 __ or3( Rdst, Rtmp, Rdst);
10379 __ srlx(Rdst, 8, Rtmp);
10380 __ or3( Rdst, Rtmp, Rdst);
10381 __ srlx(Rdst, 16, Rtmp);
10382 __ or3( Rdst, Rtmp, Rdst);
10383 __ srlx(Rdst, 32, Rtmp);
10384 __ or3( Rdst, Rtmp, Rdst);
10385 __ popc(Rdst, Rdst);
10386 __ mov(BitsPerLong, Rtmp);
10387 __ sub(Rtmp, Rdst, Rdst);
10388 %}
10389 ins_pipe(ialu_reg);
10390 %}
// 32-bit trailing-zeros: the SRL by G0 (shift by 0) zero-extends to clear
// the upper 32 bits before the 64-bit POPC.
10392 instruct countTrailingZerosI(iRegIsafe dst, iRegI src, flagsReg cr) %{
10393 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
10394 match(Set dst (CountTrailingZerosI src));
10395 effect(TEMP dst, KILL cr);
10397 // return popc(~x & (x - 1));
10398 format %{ "SUB $src,1,$dst\t! count trailing zeros (int)\n\t"
10399 "ANDN $dst,$src,$dst\n\t"
10400 "SRL $dst,R_G0,$dst\n\t"
10401 "POPC $dst,$dst" %}
10402 ins_encode %{
10403 Register Rdst = $dst$$Register;
10404 Register Rsrc = $src$$Register;
10405 __ sub(Rsrc, 1, Rdst);
10406 __ andn(Rdst, Rsrc, Rdst);
10407 __ srl(Rdst, G0, Rdst);
10408 __ popc(Rdst, Rdst);
10409 %}
10410 ins_pipe(ialu_reg);
10411 %}
// 64-bit trailing-zeros: no zero-extension step needed.
10413 instruct countTrailingZerosL(iRegIsafe dst, iRegL src, flagsReg cr) %{
10414 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
10415 match(Set dst (CountTrailingZerosL src));
10416 effect(TEMP dst, KILL cr);
10418 // return popc(~x & (x - 1));
10419 format %{ "SUB $src,1,$dst\t! count trailing zeros (long)\n\t"
10420 "ANDN $dst,$src,$dst\n\t"
10421 "POPC $dst,$dst" %}
10422 ins_encode %{
10423 Register Rdst = $dst$$Register;
10424 Register Rsrc = $src$$Register;
10425 __ sub(Rsrc, 1, Rdst);
10426 __ andn(Rdst, Rsrc, Rdst);
10427 __ popc(Rdst, Rdst);
10428 %}
10429 ins_pipe(ialu_reg);
10430 %}
10433 //---------- Population Count Instructions -------------------------------------
// POPC operates on the full 64-bit register, so the int form first
// zero-extends with SRL by 0.
10435 instruct popCountI(iRegIsafe dst, iRegI src) %{
10436 predicate(UsePopCountInstruction);
10437 match(Set dst (PopCountI src));
10439 format %{ "SRL $src, G0, $dst\t! clear upper word for 64 bit POPC\n\t"
10440 "POPC $dst, $dst" %}
10441 ins_encode %{
10442 __ srl($src$$Register, G0, $dst$$Register);
10443 __ popc($dst$$Register, $dst$$Register);
10444 %}
10445 ins_pipe(ialu_reg);
10446 %}
10448 // Note: Long.bitCount(long) returns an int.
10449 instruct popCountL(iRegIsafe dst, iRegL src) %{
10450 predicate(UsePopCountInstruction);
10451 match(Set dst (PopCountL src));
10453 format %{ "POPC $src, $dst" %}
10454 ins_encode %{
10455 __ popc($src$$Register, $dst$$Register);
10456 %}
10457 ins_pipe(ialu_reg);
10458 %}
10461 // ============================================================================
10462 //------------Bytes reverse--------------------------------------------------
10464 instruct bytes_reverse_int(iRegI dst, stackSlotI src) %{
10465 match(Set dst (ReverseBytesI src));
10467 // Op cost is artificially doubled to make sure that load or store
10468 // instructions are preferred over this one which requires a spill
10469 // onto a stack slot.
10470 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
10471 format %{ "LDUWA $src, $dst\t!asi=primary_little" %}
10473 ins_encode %{
10474 __ set($src$$disp + STACK_BIAS, O7);
10475 __ lduwa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10476 %}
10477 ins_pipe( iload_mem );
10478 %}
10480 instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{
10481 match(Set dst (ReverseBytesL src));
10483 // Op cost is artificially doubled to make sure that load or store
10484 // instructions are preferred over this one which requires a spill
10485 // onto a stack slot.
10486 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
10487 format %{ "LDXA $src, $dst\t!asi=primary_little" %}
10489 ins_encode %{
10490 __ set($src$$disp + STACK_BIAS, O7);
10491 __ ldxa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10492 %}
10493 ins_pipe( iload_mem );
10494 %}
// Byte-swap an unsigned short (char): reload the spilled value with a
// little-endian ASI halfword load (LDUHA). The spill slot holds an int,
// so the load address is biased by 2 to reach the low halfword.
10496 instruct bytes_reverse_unsigned_short(iRegI dst, stackSlotI src) %{
10497 match(Set dst (ReverseBytesUS src));
10499 // Op cost is artificially doubled to make sure that load or store
10500 // instructions are preferred over this one which requires a spill
10501 // onto a stack slot.
10502 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
10503 format %{ "LDUHA $src, $dst\t!asi=primary_little\n\t" %}
10505 ins_encode %{
10506 // the value was spilled as an int so bias the load
10507 __ set($src$$disp + STACK_BIAS + 2, O7);
10508 __ lduha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10509 %}
10510 ins_pipe( iload_mem );
10511 %}
// Byte-swap a signed short: same scheme as the unsigned variant, but
// uses the sign-extending little-endian halfword load (LDSHA).
10513 instruct bytes_reverse_short(iRegI dst, stackSlotI src) %{
10514 match(Set dst (ReverseBytesS src));
10516 // Op cost is artificially doubled to make sure that load or store
10517 // instructions are preferred over this one which requires a spill
10518 // onto a stack slot.
10519 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
10520 format %{ "LDSHA $src, $dst\t!asi=primary_little\n\t" %}
10522 ins_encode %{
10523 // the value was spilled as an int so bias the load
10524 __ set($src$$disp + STACK_BIAS + 2, O7);
10525 __ ldsha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10526 %}
10527 ins_pipe( iload_mem );
10528 %}
10530 // Load Integer reversed byte order
// Fused load+byteswap of an int: a single little-endian ASI load from
// a base+index address reverses the bytes for free (no spill needed).
10531 instruct loadI_reversed(iRegI dst, indIndexMemory src) %{
10532 match(Set dst (ReverseBytesI (LoadI src)));
10534 ins_cost(DEFAULT_COST + MEMORY_REF_COST);
10535 size(4);
10536 format %{ "LDUWA $src, $dst\t!asi=primary_little" %}
10538 ins_encode %{
10539 __ lduwa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10540 %}
10541 ins_pipe(iload_mem);
10542 %}
10544 // Load Long - aligned and reversed
// Fused load+byteswap of an aligned long via a little-endian LDXA.
10545 instruct loadL_reversed(iRegL dst, indIndexMemory src) %{
10546 match(Set dst (ReverseBytesL (LoadL src)));
10548 ins_cost(MEMORY_REF_COST);
10549 size(4);
10550 format %{ "LDXA $src, $dst\t!asi=primary_little" %}
10552 ins_encode %{
10553 __ ldxa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10554 %}
10555 ins_pipe(iload_mem);
10556 %}
10558 // Load unsigned short / char reversed byte order
// Fused load+byteswap of an unsigned short/char via little-endian LDUHA.
10559 instruct loadUS_reversed(iRegI dst, indIndexMemory src) %{
10560 match(Set dst (ReverseBytesUS (LoadUS src)));
10562 ins_cost(MEMORY_REF_COST);
10563 size(4);
10564 format %{ "LDUHA $src, $dst\t!asi=primary_little" %}
10566 ins_encode %{
10567 __ lduha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10568 %}
10569 ins_pipe(iload_mem);
10570 %}
10572 // Load short reversed byte order
// Fused load+byteswap of a signed short via little-endian,
// sign-extending LDSHA.
10573 instruct loadS_reversed(iRegI dst, indIndexMemory src) %{
10574 match(Set dst (ReverseBytesS (LoadS src)));
10576 ins_cost(MEMORY_REF_COST);
10577 size(4);
10578 format %{ "LDSHA $src, $dst\t!asi=primary_little" %}
10580 ins_encode %{
10581 __ ldsha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10582 %}
10583 ins_pipe(iload_mem);
10584 %}
10586 // Store Integer reversed byte order
// Fused byteswap+store of an int: a single little-endian ASI store
// (STWA) writes the bytes reversed.
10587 instruct storeI_reversed(indIndexMemory dst, iRegI src) %{
10588 match(Set dst (StoreI dst (ReverseBytesI src)));
10590 ins_cost(MEMORY_REF_COST);
10591 size(4);
10592 format %{ "STWA $src, $dst\t!asi=primary_little" %}
10594 ins_encode %{
10595 __ stwa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
10596 %}
10597 ins_pipe(istore_mem_reg);
10598 %}
10600 // Store Long reversed byte order
// Fused byteswap+store of a long via little-endian STXA.
10601 instruct storeL_reversed(indIndexMemory dst, iRegL src) %{
10602 match(Set dst (StoreL dst (ReverseBytesL src)));
10604 ins_cost(MEMORY_REF_COST);
10605 size(4);
10606 format %{ "STXA $src, $dst\t!asi=primary_little" %}
10608 ins_encode %{
10609 __ stxa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
10610 %}
10611 ins_pipe(istore_mem_reg);
10612 %}
10614 // Store unsigned short/char reversed byte order
// Fused byteswap+store of an unsigned short/char via little-endian STHA.
10615 instruct storeUS_reversed(indIndexMemory dst, iRegI src) %{
10616 match(Set dst (StoreC dst (ReverseBytesUS src)));
10618 ins_cost(MEMORY_REF_COST);
10619 size(4);
10620 format %{ "STHA $src, $dst\t!asi=primary_little" %}
10622 ins_encode %{
10623 __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
10624 %}
10625 ins_pipe(istore_mem_reg);
10626 %}
10628 // Store short reversed byte order
// Fused byteswap+store of a signed short; identical emission to the
// unsigned variant since a halfword store ignores sign.
10629 instruct storeS_reversed(indIndexMemory dst, iRegI src) %{
10630 match(Set dst (StoreC dst (ReverseBytesS src)));
10632 ins_cost(MEMORY_REF_COST);
10633 size(4);
10634 format %{ "STHA $src, $dst\t!asi=primary_little" %}
10636 ins_encode %{
10637 __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
10638 %}
10639 ins_pipe(istore_mem_reg);
10640 %}
10642 // ====================VECTOR INSTRUCTIONS=====================================
10644 // Load Aligned Packed values into a Double Register
// Load an 8-byte vector with one double-precision FP load (LDDF).
// Predicate keys on the vector's total memory size, not element type.
10645 instruct loadV8(regD dst, memory mem) %{
10646 predicate(n->as_LoadVector()->memory_size() == 8);
10647 match(Set dst (LoadVector mem));
10648 ins_cost(MEMORY_REF_COST);
10649 size(4);
10650 format %{ "LDDF $mem,$dst\t! load vector (8 bytes)" %}
10651 ins_encode %{
10652 __ ldf(FloatRegisterImpl::D, $mem$$Address, as_DoubleFloatRegister($dst$$reg));
10653 %}
10654 ins_pipe(floadD_mem);
10655 %}
10657 // Store Vector in Double register to memory
// Store an 8-byte vector with one double-precision FP store (STDF).
10658 instruct storeV8(memory mem, regD src) %{
10659 predicate(n->as_StoreVector()->memory_size() == 8);
10660 match(Set mem (StoreVector mem src));
10661 ins_cost(MEMORY_REF_COST);
10662 size(4);
10663 format %{ "STDF $src,$mem\t! store vector (8 bytes)" %}
10664 ins_encode %{
10665 __ stf(FloatRegisterImpl::D, as_DoubleFloatRegister($src$$reg), $mem$$Address);
10666 %}
10667 ins_pipe(fstoreD_mem_reg);
10668 %}
10670 // Store Zero into vector in memory
// Store an all-zero 8-byte vector of bytes: a single 64-bit store of
// the hardwired zero register G0 covers the whole vector.
10671 instruct storeV8B_zero(memory mem, immI0 zero) %{
10672 predicate(n->as_StoreVector()->memory_size() == 8);
10673 match(Set mem (StoreVector mem (ReplicateB zero)));
10674 ins_cost(MEMORY_REF_COST);
10675 size(4);
10676 format %{ "STX $zero,$mem\t! store zero vector (8 bytes)" %}
10677 ins_encode %{
10678 __ stx(G0, $mem$$Address);
10679 %}
10680 ins_pipe(fstoreD_mem_zero);
10681 %}
// Store an all-zero vector of 4 shorts (8 bytes total) via one STX of G0.
10683 instruct storeV4S_zero(memory mem, immI0 zero) %{
10684 predicate(n->as_StoreVector()->memory_size() == 8);
10685 match(Set mem (StoreVector mem (ReplicateS zero)));
10686 ins_cost(MEMORY_REF_COST);
10687 size(4);
10688 format %{ "STX $zero,$mem\t! store zero vector (4 shorts)" %}
10689 ins_encode %{
10690 __ stx(G0, $mem$$Address);
10691 %}
10692 ins_pipe(fstoreD_mem_zero);
10693 %}
// Store an all-zero vector of 2 ints (8 bytes total) via one STX of G0.
10695 instruct storeV2I_zero(memory mem, immI0 zero) %{
10696 predicate(n->as_StoreVector()->memory_size() == 8);
10697 match(Set mem (StoreVector mem (ReplicateI zero)));
10698 ins_cost(MEMORY_REF_COST);
10699 size(4);
10700 format %{ "STX $zero,$mem\t! store zero vector (2 ints)" %}
10701 ins_encode %{
10702 __ stx(G0, $mem$$Address);
10703 %}
10704 ins_pipe(fstoreD_mem_zero);
10705 %}
// Store an all-zero vector of 2 floats (8 bytes total) via one STX of
// G0; valid because +0.0f has an all-zero bit pattern.
10707 instruct storeV2F_zero(memory mem, immF0 zero) %{
10708 predicate(n->as_StoreVector()->memory_size() == 8);
10709 match(Set mem (StoreVector mem (ReplicateF zero)));
10710 ins_cost(MEMORY_REF_COST);
10711 size(4);
10712 format %{ "STX $zero,$mem\t! store zero vector (2 floats)" %}
10713 ins_encode %{
10714 __ stx(G0, $mem$$Address);
10715 %}
10716 ins_pipe(fstoreD_mem_zero);
10717 %}
10719 // Replicate scalar to packed byte values into Double register
// Broadcast the low byte of $src into all 8 byte lanes: SLLX places the
// byte at bits 63..56, then three shift/OR steps double the populated
// width (8 -> 16 -> 32 -> 64 bits). The 64-bit pattern is moved to the
// FP double register with MOVXTOD, which needs VIS3 (UseVIS >= 3).
10720 instruct Repl8B_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10721 predicate(n->as_Vector()->length() == 8 && UseVIS >= 3);
10722 match(Set dst (ReplicateB src));
10723 effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
10724 format %{ "SLLX $src,56,$tmp\n\t"
10725 "SRLX $tmp, 8,$tmp2\n\t"
10726 "OR $tmp,$tmp2,$tmp\n\t"
10727 "SRLX $tmp,16,$tmp2\n\t"
10728 "OR $tmp,$tmp2,$tmp\n\t"
10729 "SRLX $tmp,32,$tmp2\n\t"
10730 "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t"
10731 "MOVXTOD $tmp,$dst\t! MoveL2D" %}
10732 ins_encode %{
10733 Register Rsrc = $src$$Register;
10734 Register Rtmp = $tmp$$Register;
10735 Register Rtmp2 = $tmp2$$Register;
10736 __ sllx(Rsrc, 56, Rtmp);
10737 __ srlx(Rtmp, 8, Rtmp2);
10738 __ or3 (Rtmp, Rtmp2, Rtmp);
10739 __ srlx(Rtmp, 16, Rtmp2);
10740 __ or3 (Rtmp, Rtmp2, Rtmp);
10741 __ srlx(Rtmp, 32, Rtmp2);
10742 __ or3 (Rtmp, Rtmp2, Rtmp);
10743 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg));
10744 %}
10745 ins_pipe(ialu_reg);
10746 %}
10748 // Replicate scalar to packed byte values into Double stack
// Pre-VIS3 variant of Repl8B: build the 8-byte broadcast pattern in an
// integer register with the same shift/OR doubling, then spill it to a
// double stack slot (no direct int->FP register move is available).
10749 instruct Repl8B_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10750 predicate(n->as_Vector()->length() == 8 && UseVIS < 3);
10751 match(Set dst (ReplicateB src));
10752 effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
10753 format %{ "SLLX $src,56,$tmp\n\t"
10754 "SRLX $tmp, 8,$tmp2\n\t"
10755 "OR $tmp,$tmp2,$tmp\n\t"
10756 "SRLX $tmp,16,$tmp2\n\t"
10757 "OR $tmp,$tmp2,$tmp\n\t"
10758 "SRLX $tmp,32,$tmp2\n\t"
10759 "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t"
10760 "STX $tmp,$dst\t! regL to stkD" %}
10761 ins_encode %{
10762 Register Rsrc = $src$$Register;
10763 Register Rtmp = $tmp$$Register;
10764 Register Rtmp2 = $tmp2$$Register;
10765 __ sllx(Rsrc, 56, Rtmp);
10766 __ srlx(Rtmp, 8, Rtmp2);
10767 __ or3 (Rtmp, Rtmp2, Rtmp);
10768 __ srlx(Rtmp, 16, Rtmp2);
10769 __ or3 (Rtmp, Rtmp2, Rtmp);
10770 __ srlx(Rtmp, 32, Rtmp2);
10771 __ or3 (Rtmp, Rtmp2, Rtmp);
10772 __ set ($dst$$disp + STACK_BIAS, Rtmp2);
10773 __ stx (Rtmp, Rtmp2, $dst$$base$$Register);
10774 %}
10775 ins_pipe(ialu_reg);
10776 %}
10778 // Replicate scalar constant to packed byte values in Double register
// Broadcast a byte constant: the replicated 8-byte pattern is
// materialized in the constant table and loaded with one LDDF. $tmp is
// killed as scratch in case the table offset exceeds simm13 range.
10779 instruct Repl8B_immI(regD dst, immI13 con, o7RegI tmp) %{
10780 predicate(n->as_Vector()->length() == 8);
10781 match(Set dst (ReplicateB con));
10782 effect(KILL tmp);
10783 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl8B($con)" %}
10784 ins_encode %{
10785 // XXX This is a quick fix for 6833573.
10786 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 8, 1)), $dst$$FloatRegister);
10787 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 8, 1)), $tmp$$Register);
10788 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
10789 %}
10790 ins_pipe(loadConFD);
10791 %}
10793 // Replicate scalar to packed char/short values into Double register
// Broadcast the low 16 bits of $src into all 4 short lanes: SLLX places
// the halfword at bits 63..48, two shift/OR steps double the populated
// width (16 -> 32 -> 64 bits), then MOVXTOD (VIS3) moves it to the FP
// double register.
10794 instruct Repl4S_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10795 predicate(n->as_Vector()->length() == 4 && UseVIS >= 3);
10796 match(Set dst (ReplicateS src));
10797 effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
10798 format %{ "SLLX $src,48,$tmp\n\t"
10799 "SRLX $tmp,16,$tmp2\n\t"
10800 "OR $tmp,$tmp2,$tmp\n\t"
10801 "SRLX $tmp,32,$tmp2\n\t"
10802 "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t"
10803 "MOVXTOD $tmp,$dst\t! MoveL2D" %}
10804 ins_encode %{
10805 Register Rsrc = $src$$Register;
10806 Register Rtmp = $tmp$$Register;
10807 Register Rtmp2 = $tmp2$$Register;
10808 __ sllx(Rsrc, 48, Rtmp);
10809 __ srlx(Rtmp, 16, Rtmp2);
10810 __ or3 (Rtmp, Rtmp2, Rtmp);
10811 __ srlx(Rtmp, 32, Rtmp2);
10812 __ or3 (Rtmp, Rtmp2, Rtmp);
10813 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg));
10814 %}
10815 ins_pipe(ialu_reg);
10816 %}
10818 // Replicate scalar to packed char/short values into Double stack
// Pre-VIS3 variant of Repl4S: build the 4-short broadcast pattern in an
// integer register, then store it to a double stack slot.
10819 instruct Repl4S_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10820 predicate(n->as_Vector()->length() == 4 && UseVIS < 3);
10821 match(Set dst (ReplicateS src));
10822 effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
10823 format %{ "SLLX $src,48,$tmp\n\t"
10824 "SRLX $tmp,16,$tmp2\n\t"
10825 "OR $tmp,$tmp2,$tmp\n\t"
10826 "SRLX $tmp,32,$tmp2\n\t"
10827 "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t"
10828 "STX $tmp,$dst\t! regL to stkD" %}
10829 ins_encode %{
10830 Register Rsrc = $src$$Register;
10831 Register Rtmp = $tmp$$Register;
10832 Register Rtmp2 = $tmp2$$Register;
10833 __ sllx(Rsrc, 48, Rtmp);
10834 __ srlx(Rtmp, 16, Rtmp2);
10835 __ or3 (Rtmp, Rtmp2, Rtmp);
10836 __ srlx(Rtmp, 32, Rtmp2);
10837 __ or3 (Rtmp, Rtmp2, Rtmp);
10838 __ set ($dst$$disp + STACK_BIAS, Rtmp2);
10839 __ stx (Rtmp, Rtmp2, $dst$$base$$Register);
10840 %}
10841 ins_pipe(ialu_reg);
10842 %}
10844 // Replicate scalar constant to packed char/short values in Double register
// Broadcast a short constant: load the replicated pattern from the
// constant table with one LDDF; $tmp is scratch for out-of-range
// table offsets.
10845 instruct Repl4S_immI(regD dst, immI con, o7RegI tmp) %{
10846 predicate(n->as_Vector()->length() == 4);
10847 match(Set dst (ReplicateS con));
10848 effect(KILL tmp);
10849 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl4S($con)" %}
10850 ins_encode %{
10851 // XXX This is a quick fix for 6833573.
10852 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 4, 2)), $dst$$FloatRegister);
10853 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 4, 2)), $tmp$$Register);
10854 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
10855 %}
10856 ins_pipe(loadConFD);
10857 %}
10859 // Replicate scalar to packed int values into Double register
// Broadcast a 32-bit int into both halves of a 64-bit pattern (one
// shift/OR step), then MOVXTOD (VIS3) moves it to the FP double register.
10860 instruct Repl2I_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10861 predicate(n->as_Vector()->length() == 2 && UseVIS >= 3);
10862 match(Set dst (ReplicateI src));
10863 effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
10864 format %{ "SLLX $src,32,$tmp\n\t"
10865 "SRLX $tmp,32,$tmp2\n\t"
10866 "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t"
10867 "MOVXTOD $tmp,$dst\t! MoveL2D" %}
10868 ins_encode %{
10869 Register Rsrc = $src$$Register;
10870 Register Rtmp = $tmp$$Register;
10871 Register Rtmp2 = $tmp2$$Register;
10872 __ sllx(Rsrc, 32, Rtmp);
10873 __ srlx(Rtmp, 32, Rtmp2);
10874 __ or3 (Rtmp, Rtmp2, Rtmp);
10875 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg));
10876 %}
10877 ins_pipe(ialu_reg);
10878 %}
10880 // Replicate scalar to packed int values into Double stack
// Pre-VIS3 variant of Repl2I: build the doubled 32-bit pattern in an
// integer register, then store it to a double stack slot.
10881 instruct Repl2I_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10882 predicate(n->as_Vector()->length() == 2 && UseVIS < 3);
10883 match(Set dst (ReplicateI src));
10884 effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
10885 format %{ "SLLX $src,32,$tmp\n\t"
10886 "SRLX $tmp,32,$tmp2\n\t"
10887 "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t"
10888 "STX $tmp,$dst\t! regL to stkD" %}
10889 ins_encode %{
10890 Register Rsrc = $src$$Register;
10891 Register Rtmp = $tmp$$Register;
10892 Register Rtmp2 = $tmp2$$Register;
10893 __ sllx(Rsrc, 32, Rtmp);
10894 __ srlx(Rtmp, 32, Rtmp2);
10895 __ or3 (Rtmp, Rtmp2, Rtmp);
10896 __ set ($dst$$disp + STACK_BIAS, Rtmp2);
10897 __ stx (Rtmp, Rtmp2, $dst$$base$$Register);
10898 %}
10899 ins_pipe(ialu_reg);
10900 %}
10902 // Replicate scalar constant to packed int values in Double register
// Broadcast an int constant (the operand is a general immI, not just
// zero): load the replicated pattern from the constant table with one
// LDDF; $tmp is scratch for out-of-range table offsets.
10903 instruct Repl2I_immI(regD dst, immI con, o7RegI tmp) %{
10904 predicate(n->as_Vector()->length() == 2);
10905 match(Set dst (ReplicateI con));
10906 effect(KILL tmp);
10907 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2I($con)" %}
10908 ins_encode %{
10909 // XXX This is a quick fix for 6833573.
10910 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 2, 4)), $dst$$FloatRegister);
10911 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 2, 4)), $tmp$$Register);
10912 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
10913 %}
10914 ins_pipe(loadConFD);
10915 %}
10917 // Replicate scalar to packed float values into Double stack
// Broadcast a float into a double stack slot by storing the same
// single-precision register to both halves of the slot (two STF
// stores, at disp and disp+4), hence double memory cost.
10918 instruct Repl2F_stk(stackSlotD dst, regF src) %{
10919 predicate(n->as_Vector()->length() == 2);
10920 match(Set dst (ReplicateF src));
10921 ins_cost(MEMORY_REF_COST*2);
10922 format %{ "STF $src,$dst.hi\t! packed2F\n\t"
10923 "STF $src,$dst.lo" %}
10924 opcode(Assembler::stf_op3);
10925 ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, src));
10926 ins_pipe(fstoreF_stk_reg);
10927 %}
10929 // Replicate scalar constant to packed float values in Double register
// Broadcast a float constant (the operand is a general immF): load the
// replicated pattern from the constant table with one LDDF; $tmp is
// scratch for out-of-range table offsets.
10930 instruct Repl2F_immF(regD dst, immF con, o7RegI tmp) %{
10931 predicate(n->as_Vector()->length() == 2);
10932 match(Set dst (ReplicateF con));
10933 effect(KILL tmp);
10934 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2F($con)" %}
10935 ins_encode %{
10936 // XXX This is a quick fix for 6833573.
10937 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immF($con$$constant)), $dst$$FloatRegister);
10938 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immF($con$$constant)), $tmp$$Register);
10939 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
10940 %}
10941 ins_pipe(loadConFD);
10942 %}
10944 //----------PEEPHOLE RULES-----------------------------------------------------
10945 // These must follow all instruction definitions as they use the names
10946 // defined in the instructions definitions.
10947 //
10948 // peepmatch ( root_instr_name [preceding_instruction]* );
10949 //
10950 // peepconstraint %{
10951 // (instruction_number.operand_name relational_op instruction_number.operand_name
10952 // [, ...] );
10953 // // instruction numbers are zero-based using left to right order in peepmatch
10954 //
10955 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
10956 // // provide an instruction_number.operand_name for each operand that appears
10957 // // in the replacement instruction's match rule
10958 //
10959 // ---------VM FLAGS---------------------------------------------------------
10960 //
10961 // All peephole optimizations can be turned off using -XX:-OptoPeephole
10962 //
10963 // Each peephole rule is given an identifying number starting with zero and
10964 // increasing by one in the order seen by the parser. An individual peephole
10965 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
10966 // on the command-line.
10967 //
10968 // ---------CURRENT LIMITATIONS----------------------------------------------
10969 //
10970 // Only match adjacent instructions in same basic block
10971 // Only equality constraints
10972 // Only constraints between operands, not (0.dest_reg == EAX_enc)
10973 // Only one replacement instruction
10974 //
10975 // ---------EXAMPLE----------------------------------------------------------
10976 //
10977 // // pertinent parts of existing instructions in architecture description
10978 // instruct movI(eRegI dst, eRegI src) %{
10979 // match(Set dst (CopyI src));
10980 // %}
10981 //
10982 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
10983 // match(Set dst (AddI dst src));
10984 // effect(KILL cr);
10985 // %}
10986 //
10987 // // Change (inc mov) to lea
10988 // peephole %{
10989 //   // increment preceded by register-register move
10990 // peepmatch ( incI_eReg movI );
10991 // // require that the destination register of the increment
10992 // // match the destination register of the move
10993 // peepconstraint ( 0.dst == 1.dst );
10994 // // construct a replacement instruction that sets
10995 // // the destination to ( move's source register + one )
10996 // peepreplace ( incI_eReg_immI1( 0.dst 1.src 0.src ) );
10997 // %}
10998 //
11000 // // Change load of spilled value to only a spill
11001 // instruct storeI(memory mem, eRegI src) %{
11002 // match(Set mem (StoreI mem src));
11003 // %}
11004 //
11005 // instruct loadI(eRegI dst, memory mem) %{
11006 // match(Set dst (LoadI mem));
11007 // %}
11008 //
11009 // peephole %{
11010 // peepmatch ( loadI storeI );
11011 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
11012 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
11013 // %}
11015 //----------SMARTSPILL RULES---------------------------------------------------
11016 // These must follow all instruction definitions as they use the names
11017 // defined in the instructions definitions.
11018 //
11019 // SPARC will probably not have any of these rules due to RISC instruction set.
11021 //----------PIPELINE-----------------------------------------------------------
11022 // Rules which define the behavior of the target architectures pipeline.