Mon, 26 Sep 2011 10:24:05 -0700
7081933: Use zeroing elimination optimization for large array
Summary: Don't zero new typeArray during runtime call if the allocation is followed by arraycopy into it.
Reviewed-by: twisti
1 //
2 // Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 //
5 // This code is free software; you can redistribute it and/or modify it
6 // under the terms of the GNU General Public License version 2 only, as
7 // published by the Free Software Foundation.
8 //
9 // This code is distributed in the hope that it will be useful, but WITHOUT
10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 // version 2 for more details (a copy is included in the LICENSE file that
13 // accompanied this code).
14 //
15 // You should have received a copy of the GNU General Public License version
16 // 2 along with this work; if not, write to the Free Software Foundation,
17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 //
19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 // or visit www.oracle.com if you need additional information or have any
21 // questions.
22 //
23 //
25 // SPARC Architecture Description File
27 //----------REGISTER DEFINITION BLOCK------------------------------------------
28 // This information is used by the matcher and the register allocator to
29 // describe individual registers and classes of registers within the target
// architecture.
31 register %{
32 //----------Architecture Description Register Definitions----------------------
33 // General Registers
34 // "reg_def" name ( register save type, C convention save type,
35 // ideal register type, encoding, vm name );
36 // Register Save Types:
37 //
38 // NS = No-Save: The register allocator assumes that these registers
39 // can be used without saving upon entry to the method, &
40 // that they do not need to be saved at call sites.
41 //
42 // SOC = Save-On-Call: The register allocator assumes that these registers
43 // can be used without saving upon entry to the method,
44 // but that they must be saved at call sites.
45 //
46 // SOE = Save-On-Entry: The register allocator assumes that these registers
47 // must be saved before using them upon entry to the
48 // method, but they do not need to be saved at call
49 // sites.
50 //
51 // AS = Always-Save: The register allocator assumes that these registers
52 // must be saved before using them upon entry to the
53 // method, & that they must be saved at call sites.
54 //
55 // Ideal Register Type is used to determine how to save & restore a
56 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
57 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
58 //
59 // The encoding number is the actual bit-pattern placed into the opcodes.
62 // ----------------------------
63 // Integer/Long Registers
64 // ----------------------------
66 // Need to expose the hi/lo aspect of 64-bit registers
67 // This register set is used for both the 64-bit build and
68 // the 32-bit build with 1-register longs.
70 // Global Registers 0-7
71 reg_def R_G0H( NS, NS, Op_RegI,128, G0->as_VMReg()->next());
72 reg_def R_G0 ( NS, NS, Op_RegI, 0, G0->as_VMReg());
73 reg_def R_G1H(SOC, SOC, Op_RegI,129, G1->as_VMReg()->next());
74 reg_def R_G1 (SOC, SOC, Op_RegI, 1, G1->as_VMReg());
75 reg_def R_G2H( NS, NS, Op_RegI,130, G2->as_VMReg()->next());
76 reg_def R_G2 ( NS, NS, Op_RegI, 2, G2->as_VMReg());
77 reg_def R_G3H(SOC, SOC, Op_RegI,131, G3->as_VMReg()->next());
78 reg_def R_G3 (SOC, SOC, Op_RegI, 3, G3->as_VMReg());
79 reg_def R_G4H(SOC, SOC, Op_RegI,132, G4->as_VMReg()->next());
80 reg_def R_G4 (SOC, SOC, Op_RegI, 4, G4->as_VMReg());
81 reg_def R_G5H(SOC, SOC, Op_RegI,133, G5->as_VMReg()->next());
82 reg_def R_G5 (SOC, SOC, Op_RegI, 5, G5->as_VMReg());
83 reg_def R_G6H( NS, NS, Op_RegI,134, G6->as_VMReg()->next());
84 reg_def R_G6 ( NS, NS, Op_RegI, 6, G6->as_VMReg());
85 reg_def R_G7H( NS, NS, Op_RegI,135, G7->as_VMReg()->next());
86 reg_def R_G7 ( NS, NS, Op_RegI, 7, G7->as_VMReg());
88 // Output Registers 0-7
89 reg_def R_O0H(SOC, SOC, Op_RegI,136, O0->as_VMReg()->next());
90 reg_def R_O0 (SOC, SOC, Op_RegI, 8, O0->as_VMReg());
91 reg_def R_O1H(SOC, SOC, Op_RegI,137, O1->as_VMReg()->next());
92 reg_def R_O1 (SOC, SOC, Op_RegI, 9, O1->as_VMReg());
93 reg_def R_O2H(SOC, SOC, Op_RegI,138, O2->as_VMReg()->next());
94 reg_def R_O2 (SOC, SOC, Op_RegI, 10, O2->as_VMReg());
95 reg_def R_O3H(SOC, SOC, Op_RegI,139, O3->as_VMReg()->next());
96 reg_def R_O3 (SOC, SOC, Op_RegI, 11, O3->as_VMReg());
97 reg_def R_O4H(SOC, SOC, Op_RegI,140, O4->as_VMReg()->next());
98 reg_def R_O4 (SOC, SOC, Op_RegI, 12, O4->as_VMReg());
99 reg_def R_O5H(SOC, SOC, Op_RegI,141, O5->as_VMReg()->next());
100 reg_def R_O5 (SOC, SOC, Op_RegI, 13, O5->as_VMReg());
101 reg_def R_SPH( NS, NS, Op_RegI,142, SP->as_VMReg()->next());
102 reg_def R_SP ( NS, NS, Op_RegI, 14, SP->as_VMReg());
103 reg_def R_O7H(SOC, SOC, Op_RegI,143, O7->as_VMReg()->next());
104 reg_def R_O7 (SOC, SOC, Op_RegI, 15, O7->as_VMReg());
106 // Local Registers 0-7
107 reg_def R_L0H( NS, NS, Op_RegI,144, L0->as_VMReg()->next());
108 reg_def R_L0 ( NS, NS, Op_RegI, 16, L0->as_VMReg());
109 reg_def R_L1H( NS, NS, Op_RegI,145, L1->as_VMReg()->next());
110 reg_def R_L1 ( NS, NS, Op_RegI, 17, L1->as_VMReg());
111 reg_def R_L2H( NS, NS, Op_RegI,146, L2->as_VMReg()->next());
112 reg_def R_L2 ( NS, NS, Op_RegI, 18, L2->as_VMReg());
113 reg_def R_L3H( NS, NS, Op_RegI,147, L3->as_VMReg()->next());
114 reg_def R_L3 ( NS, NS, Op_RegI, 19, L3->as_VMReg());
115 reg_def R_L4H( NS, NS, Op_RegI,148, L4->as_VMReg()->next());
116 reg_def R_L4 ( NS, NS, Op_RegI, 20, L4->as_VMReg());
117 reg_def R_L5H( NS, NS, Op_RegI,149, L5->as_VMReg()->next());
118 reg_def R_L5 ( NS, NS, Op_RegI, 21, L5->as_VMReg());
119 reg_def R_L6H( NS, NS, Op_RegI,150, L6->as_VMReg()->next());
120 reg_def R_L6 ( NS, NS, Op_RegI, 22, L6->as_VMReg());
121 reg_def R_L7H( NS, NS, Op_RegI,151, L7->as_VMReg()->next());
122 reg_def R_L7 ( NS, NS, Op_RegI, 23, L7->as_VMReg());
124 // Input Registers 0-7
125 reg_def R_I0H( NS, NS, Op_RegI,152, I0->as_VMReg()->next());
126 reg_def R_I0 ( NS, NS, Op_RegI, 24, I0->as_VMReg());
127 reg_def R_I1H( NS, NS, Op_RegI,153, I1->as_VMReg()->next());
128 reg_def R_I1 ( NS, NS, Op_RegI, 25, I1->as_VMReg());
129 reg_def R_I2H( NS, NS, Op_RegI,154, I2->as_VMReg()->next());
130 reg_def R_I2 ( NS, NS, Op_RegI, 26, I2->as_VMReg());
131 reg_def R_I3H( NS, NS, Op_RegI,155, I3->as_VMReg()->next());
132 reg_def R_I3 ( NS, NS, Op_RegI, 27, I3->as_VMReg());
133 reg_def R_I4H( NS, NS, Op_RegI,156, I4->as_VMReg()->next());
134 reg_def R_I4 ( NS, NS, Op_RegI, 28, I4->as_VMReg());
135 reg_def R_I5H( NS, NS, Op_RegI,157, I5->as_VMReg()->next());
136 reg_def R_I5 ( NS, NS, Op_RegI, 29, I5->as_VMReg());
137 reg_def R_FPH( NS, NS, Op_RegI,158, FP->as_VMReg()->next());
138 reg_def R_FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
139 reg_def R_I7H( NS, NS, Op_RegI,159, I7->as_VMReg()->next());
140 reg_def R_I7 ( NS, NS, Op_RegI, 31, I7->as_VMReg());
142 // ----------------------------
143 // Float/Double Registers
144 // ----------------------------
146 // Float Registers
147 reg_def R_F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
148 reg_def R_F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
149 reg_def R_F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
150 reg_def R_F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
151 reg_def R_F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
152 reg_def R_F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
153 reg_def R_F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
154 reg_def R_F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
155 reg_def R_F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
156 reg_def R_F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
157 reg_def R_F10( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
158 reg_def R_F11( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
159 reg_def R_F12( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
160 reg_def R_F13( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
161 reg_def R_F14( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
162 reg_def R_F15( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
163 reg_def R_F16( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
164 reg_def R_F17( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
165 reg_def R_F18( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
166 reg_def R_F19( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
167 reg_def R_F20( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
168 reg_def R_F21( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
169 reg_def R_F22( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
170 reg_def R_F23( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
171 reg_def R_F24( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
172 reg_def R_F25( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
173 reg_def R_F26( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
174 reg_def R_F27( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
175 reg_def R_F28( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
176 reg_def R_F29( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
177 reg_def R_F30( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
178 reg_def R_F31( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
180 // Double Registers
181 // The rules of ADL require that double registers be defined in pairs.
182 // Each pair must be two 32-bit values, but not necessarily a pair of
183 // single float registers. In each pair, ADLC-assigned register numbers
184 // must be adjacent, with the lower number even. Finally, when the
185 // CPU stores such a register pair to memory, the word associated with
186 // the lower ADLC-assigned number must be stored to the lower address.
188 // These definitions specify the actual bit encodings of the sparc
189 // double fp register numbers. FloatRegisterImpl in register_sparc.hpp
190 // wants 0-63, so we have to convert every time we want to use fp regs
191 // with the macroassembler, using reg_to_DoubleFloatRegister_object().
192 // 255 is a flag meaning "don't go here".
193 // I believe we can't handle callee-save doubles D32 and up until
194 // the place in the sparc stack crawler that asserts on the 255 is
195 // fixed up.
196 reg_def R_D32 (SOC, SOC, Op_RegD, 1, F32->as_VMReg());
197 reg_def R_D32x(SOC, SOC, Op_RegD,255, F32->as_VMReg()->next());
198 reg_def R_D34 (SOC, SOC, Op_RegD, 3, F34->as_VMReg());
199 reg_def R_D34x(SOC, SOC, Op_RegD,255, F34->as_VMReg()->next());
200 reg_def R_D36 (SOC, SOC, Op_RegD, 5, F36->as_VMReg());
201 reg_def R_D36x(SOC, SOC, Op_RegD,255, F36->as_VMReg()->next());
202 reg_def R_D38 (SOC, SOC, Op_RegD, 7, F38->as_VMReg());
203 reg_def R_D38x(SOC, SOC, Op_RegD,255, F38->as_VMReg()->next());
204 reg_def R_D40 (SOC, SOC, Op_RegD, 9, F40->as_VMReg());
205 reg_def R_D40x(SOC, SOC, Op_RegD,255, F40->as_VMReg()->next());
206 reg_def R_D42 (SOC, SOC, Op_RegD, 11, F42->as_VMReg());
207 reg_def R_D42x(SOC, SOC, Op_RegD,255, F42->as_VMReg()->next());
208 reg_def R_D44 (SOC, SOC, Op_RegD, 13, F44->as_VMReg());
209 reg_def R_D44x(SOC, SOC, Op_RegD,255, F44->as_VMReg()->next());
210 reg_def R_D46 (SOC, SOC, Op_RegD, 15, F46->as_VMReg());
211 reg_def R_D46x(SOC, SOC, Op_RegD,255, F46->as_VMReg()->next());
212 reg_def R_D48 (SOC, SOC, Op_RegD, 17, F48->as_VMReg());
213 reg_def R_D48x(SOC, SOC, Op_RegD,255, F48->as_VMReg()->next());
214 reg_def R_D50 (SOC, SOC, Op_RegD, 19, F50->as_VMReg());
215 reg_def R_D50x(SOC, SOC, Op_RegD,255, F50->as_VMReg()->next());
216 reg_def R_D52 (SOC, SOC, Op_RegD, 21, F52->as_VMReg());
217 reg_def R_D52x(SOC, SOC, Op_RegD,255, F52->as_VMReg()->next());
218 reg_def R_D54 (SOC, SOC, Op_RegD, 23, F54->as_VMReg());
219 reg_def R_D54x(SOC, SOC, Op_RegD,255, F54->as_VMReg()->next());
220 reg_def R_D56 (SOC, SOC, Op_RegD, 25, F56->as_VMReg());
221 reg_def R_D56x(SOC, SOC, Op_RegD,255, F56->as_VMReg()->next());
222 reg_def R_D58 (SOC, SOC, Op_RegD, 27, F58->as_VMReg());
223 reg_def R_D58x(SOC, SOC, Op_RegD,255, F58->as_VMReg()->next());
224 reg_def R_D60 (SOC, SOC, Op_RegD, 29, F60->as_VMReg());
225 reg_def R_D60x(SOC, SOC, Op_RegD,255, F60->as_VMReg()->next());
226 reg_def R_D62 (SOC, SOC, Op_RegD, 31, F62->as_VMReg());
227 reg_def R_D62x(SOC, SOC, Op_RegD,255, F62->as_VMReg()->next());
230 // ----------------------------
231 // Special Registers
232 // Condition Codes Flag Registers
233 // I tried to break out ICC and XCC but it's not very pretty.
234 // Every Sparc instruction which defs/kills one also kills the other.
235 // Hence every compare instruction which defs one kind of flags ends
236 // up needing a kill of the other.
237 reg_def CCR (SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad());
239 reg_def FCC0(SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad());
240 reg_def FCC1(SOC, SOC, Op_RegFlags, 1, VMRegImpl::Bad());
241 reg_def FCC2(SOC, SOC, Op_RegFlags, 2, VMRegImpl::Bad());
242 reg_def FCC3(SOC, SOC, Op_RegFlags, 3, VMRegImpl::Bad());
244 // ----------------------------
245 // Specify the enum values for the registers. These enums are only used by the
246 // OptoReg "class". We can convert these enum values at will to VMReg when needed
247 // for visibility to the rest of the vm. The order of this enum influences the
248 // register allocator so having the freedom to set this order and not be stuck
249 // with the order that is natural for the rest of the vm is worth it.
250 alloc_class chunk0(
251 R_L0,R_L0H, R_L1,R_L1H, R_L2,R_L2H, R_L3,R_L3H, R_L4,R_L4H, R_L5,R_L5H, R_L6,R_L6H, R_L7,R_L7H,
252 R_G0,R_G0H, R_G1,R_G1H, R_G2,R_G2H, R_G3,R_G3H, R_G4,R_G4H, R_G5,R_G5H, R_G6,R_G6H, R_G7,R_G7H,
253 R_O7,R_O7H, R_SP,R_SPH, R_O0,R_O0H, R_O1,R_O1H, R_O2,R_O2H, R_O3,R_O3H, R_O4,R_O4H, R_O5,R_O5H,
254 R_I0,R_I0H, R_I1,R_I1H, R_I2,R_I2H, R_I3,R_I3H, R_I4,R_I4H, R_I5,R_I5H, R_FP,R_FPH, R_I7,R_I7H);
256 // Note that a register is not allocatable unless it is also mentioned
257 // in a widely-used reg_class below. Thus, R_G7 and R_G0 are outside i_reg.
259 alloc_class chunk1(
260 // The first registers listed here are those most likely to be used
261 // as temporaries. We move F0..F7 away from the front of the list,
262 // to reduce the likelihood of interferences with parameters and
263 // return values. Likewise, we avoid using F0/F1 for parameters,
264 // since they are used for return values.
265 // This FPU fine-tuning is worth about 1% on the SPEC geomean.
266 R_F8 ,R_F9 ,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
267 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,
268 R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,R_F30,R_F31,
269 R_F0 ,R_F1 ,R_F2 ,R_F3 ,R_F4 ,R_F5 ,R_F6 ,R_F7 , // used for arguments and return values
270 R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x,
271 R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x,
272 R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x,
273 R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x);
275 alloc_class chunk2(CCR, FCC0, FCC1, FCC2, FCC3);
277 //----------Architecture Description Register Classes--------------------------
278 // Several register classes are automatically defined based upon information in
279 // this architecture description.
280 // 1) reg_class inline_cache_reg ( as defined in frame section )
281 // 2) reg_class interpreter_method_oop_reg ( as defined in frame section )
282 // 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
283 //
285 // G0 is not included in integer class since it has special meaning.
286 reg_class g0_reg(R_G0);
288 // ----------------------------
289 // Integer Register Classes
290 // ----------------------------
291 // Exclusions from i_reg:
292 // R_G0: hardwired zero
293 // R_G2: reserved by HotSpot to the TLS register (invariant within Java)
294 // R_G6: reserved by Solaris ABI to tools
295 // R_G7: reserved by Solaris ABI to libthread
296 // R_O7: Used as a temp in many encodings
297 reg_class int_reg(R_G1,R_G3,R_G4,R_G5,R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
299 // Class for all integer registers, except the G registers. This is used for
300 // encodings which use G registers as temps. The regular inputs to such
301 // instructions use a "notemp_" prefix, as a hack to ensure that the allocator
302 // will not put an input into a temp register.
303 reg_class notemp_int_reg(R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
305 reg_class g1_regI(R_G1);
306 reg_class g3_regI(R_G3);
307 reg_class g4_regI(R_G4);
308 reg_class o0_regI(R_O0);
309 reg_class o7_regI(R_O7);
311 // ----------------------------
312 // Pointer Register Classes
313 // ----------------------------
314 #ifdef _LP64
315 // 64-bit build means 64-bit pointers means hi/lo pairs
316 reg_class ptr_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
317 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
318 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
319 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 );
320 // Lock encodings use G3 and G4 internally
321 reg_class lock_ptr_reg( R_G1H,R_G1, R_G5H,R_G5,
322 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
323 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
324 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 );
325 // Special class for storeP instructions, which can store SP or RPC to TLS.
326 // It is also used for memory addressing, allowing direct TLS addressing.
327 reg_class sp_ptr_reg( R_G1H,R_G1, R_G2H,R_G2, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
328 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, R_SPH,R_SP,
329 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
330 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5, R_FPH,R_FP );
331 // R_L7 is the lowest-priority callee-save (i.e., NS) register
332 // We use it to save R_G2 across calls out of Java.
333 reg_class l7_regP(R_L7H,R_L7);
335 // Other special pointer regs
336 reg_class g1_regP(R_G1H,R_G1);
337 reg_class g2_regP(R_G2H,R_G2);
338 reg_class g3_regP(R_G3H,R_G3);
339 reg_class g4_regP(R_G4H,R_G4);
340 reg_class g5_regP(R_G5H,R_G5);
341 reg_class i0_regP(R_I0H,R_I0);
342 reg_class o0_regP(R_O0H,R_O0);
343 reg_class o1_regP(R_O1H,R_O1);
344 reg_class o2_regP(R_O2H,R_O2);
345 reg_class o7_regP(R_O7H,R_O7);
347 #else // _LP64
348 // 32-bit build means 32-bit pointers means 1 register.
349 reg_class ptr_reg( R_G1, R_G3,R_G4,R_G5,
350 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
351 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
352 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
353 // Lock encodings use G3 and G4 internally
354 reg_class lock_ptr_reg(R_G1, R_G5,
355 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
356 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
357 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
358 // Special class for storeP instructions, which can store SP or RPC to TLS.
359 // It is also used for memory addressing, allowing direct TLS addressing.
360 reg_class sp_ptr_reg( R_G1,R_G2,R_G3,R_G4,R_G5,
361 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_SP,
362 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
363 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5,R_FP);
364 // R_L7 is the lowest-priority callee-save (i.e., NS) register
365 // We use it to save R_G2 across calls out of Java.
366 reg_class l7_regP(R_L7);
368 // Other special pointer regs
369 reg_class g1_regP(R_G1);
370 reg_class g2_regP(R_G2);
371 reg_class g3_regP(R_G3);
372 reg_class g4_regP(R_G4);
373 reg_class g5_regP(R_G5);
374 reg_class i0_regP(R_I0);
375 reg_class o0_regP(R_O0);
376 reg_class o1_regP(R_O1);
377 reg_class o2_regP(R_O2);
378 reg_class o7_regP(R_O7);
379 #endif // _LP64
382 // ----------------------------
383 // Long Register Classes
384 // ----------------------------
385 // Longs in 1 register. Aligned adjacent hi/lo pairs.
386 // Note: O7 is never in this class; it is sometimes used as an encoding temp.
387 reg_class long_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5
388 ,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5
389 #ifdef _LP64
390 // 64-bit, longs in 1 register: use all 64-bit integer registers
391 // 32-bit, longs in 1 register: cannot use I's and L's. Restrict to O's and G's.
392 ,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7
393 ,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5
394 #endif // _LP64
395 );
397 reg_class g1_regL(R_G1H,R_G1);
398 reg_class g3_regL(R_G3H,R_G3);
399 reg_class o2_regL(R_O2H,R_O2);
400 reg_class o7_regL(R_O7H,R_O7);
402 // ----------------------------
403 // Special Class for Condition Code Flags Register
404 reg_class int_flags(CCR);
405 reg_class float_flags(FCC0,FCC1,FCC2,FCC3);
406 reg_class float_flag0(FCC0);
409 // ----------------------------
// Floating-Point Register Classes
411 // ----------------------------
412 // Skip F30/F31, they are reserved for mem-mem copies
413 reg_class sflt_reg(R_F0,R_F1,R_F2,R_F3,R_F4,R_F5,R_F6,R_F7,R_F8,R_F9,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29);
415 // Paired floating point registers--they show up in the same order as the floats,
416 // but they are used with the "Op_RegD" type, and always occur in even/odd pairs.
417 reg_class dflt_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
418 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,
419 /* Use extra V9 double registers; this AD file does not support V8 */
420 R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x,R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x,
421 R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x,R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x
422 );
424 // Paired floating point registers--they show up in the same order as the floats,
425 // but they are used with the "Op_RegD" type, and always occur in even/odd pairs.
426 // This class is usable for mis-aligned loads as happen in I2C adapters.
427 reg_class dflt_low_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
428 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29);
429 %}
431 //----------DEFINITION BLOCK---------------------------------------------------
432 // Define name --> value mappings to inform the ADLC of an integer valued name
433 // Current support includes integer values in the range [0, 0x7FFFFFFF]
434 // Format:
435 // int_def <name> ( <int_value>, <expression>);
436 // Generated Code in ad_<arch>.hpp
437 // #define <name> (<expression>)
438 // // value == <int_value>
439 // Generated code in ad_<arch>.cpp adlc_verification()
440 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
441 //
442 definitions %{
443 // The default cost (of an ALU instruction).
444 int_def DEFAULT_COST ( 100, 100);
445 int_def HUGE_COST (1000000, 1000000);
447 // Memory refs are twice as expensive as run-of-the-mill.
448 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
450 // Branches are even more expensive.
451 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
452 int_def CALL_COST ( 300, DEFAULT_COST * 3);
453 %}
456 //----------SOURCE BLOCK-------------------------------------------------------
457 // This is a block of C++ code which provides values, functions, and
458 // definitions necessary in the rest of the architecture description
source_hpp %{
// Declarations that must be visible to the generated DFA in dfa_sparc.cpp.
extern bool can_branch_register( Node *bol, Node *cmp );

extern bool use_block_zeroing(Node* count);

// Macros to extract hi & lo halves from a long pair.
// G0 is not part of any long pair, so assert on that.
// Prevents accidentally using G1 instead of G0.
// NOTE(review): both macros are currently identity mappings and perform no
// G0 assertion, contrary to the comment above — confirm whether the checks
// were intentionally dropped (e.g. for the 64-bit build).
#define LONG_HI_REG(x) (x)
#define LONG_LO_REG(x) (x)

%}
473 source %{
474 #define __ _masm.
476 // tertiary op of a LoadP or StoreP encoding
477 #define REGP_OP true
479 static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding);
480 static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding);
481 static Register reg_to_register_object(int register_encoding);
483 // Used by the DFA in dfa_sparc.cpp.
484 // Check for being able to use a V9 branch-on-register. Requires a
485 // compare-vs-zero, equal/not-equal, of a value which was zero- or sign-
486 // extended. Doesn't work following an integer ADD, for example, because of
487 // overflow (-1 incremented yields 0 plus a carry in the high-order word). On
488 // 32-bit V9 systems, interrupts currently blow away the high-order 32 bits and
489 // replace them with zero, which could become sign-extension in a different OS
490 // release. There's no obvious reason why an interrupt will ever fill these
491 // bits with non-zero junk (the registers are reloaded with standard LD
492 // instructions which either zero-fill or sign-fill).
// Decide whether a compare+branch can be implemented with a V9
// branch-on-register (BPr) instruction instead of a compare-and-branch
// on condition codes. See the block comment above for why only values
// with known-clean high 32 bits are accepted.
bool can_branch_register( Node *bol, Node *cmp ) {
  // Feature-gated by the BranchOnRegister flag.
  if( !BranchOnRegister ) return false;
#ifdef _LP64
  if( cmp->Opcode() == Op_CmpP )
    return true;  // No problems with pointer compares
#endif
  if( cmp->Opcode() == Op_CmpL )
    return true;  // No problems with long compares

  // For 32-bit integer values the high 32 bits of the 64-bit register
  // must be guaranteed zero for BPr to be correct.
  if( !SparcV9RegsHiBitsZero ) return false;
  // BPr only supports eq/ne against zero here.
  if( bol->as_Bool()->_test._test != BoolTest::ne &&
      bol->as_Bool()->_test._test != BoolTest::eq )
     return false;

  // Check for comparing against a 'safe' value.  Any operation which
  // clears out the high word is safe.  Thus, loads and certain shifts
  // are safe, as are non-negative constants.  Any operation which
  // preserves zero bits in the high word is safe as long as each of its
  // inputs are safe.  Thus, phis and bitwise booleans are safe if their
  // inputs are safe.  At present, the only important case to recognize
  // seems to be loads.  Constants should fold away, and shifts &
  // logicals can use the 'cc' forms.
  Node *x = cmp->in(1);
  if( x->is_Load() ) return true;    // loads zero- or sign-fill the high word
  if( x->is_Phi() ) {
    // A phi is safe only if every input is itself a load.
    for( uint i = 1; i < x->req(); i++ )
      if( !x->in(i)->is_Load() )
        return false;
    return true;
  }
  return false;
}
// Decide whether block-initializing stores (BIS) should be used to zero
// a region whose element count is given by 'count'.
bool use_block_zeroing(Node* count) {
  // Use BIS for zeroing if count is not constant
  // or it is >= BlockZeroingLowLimit.
  // find_intptr_t_con returns its argument (BlockZeroingLowLimit) as the
  // default when 'count' is not a compile-time constant, so non-constant
  // counts also satisfy the >= test.
  return UseBlockZeroing && (count->find_intptr_t_con(BlockZeroingLowLimit) >= BlockZeroingLowLimit);
}
532 // ****************************************************************************
534 // REQUIRED FUNCTIONALITY
// !!!!! Special hack to get all type of calls to specify the byte offset
//       from the start of the call to the point where the return address
//       will point.
//       The "return address" is the address of the call instruction, plus 8.

// Byte offset from the start of this node's code to the return address.
int MachCallStaticJavaNode::ret_addr_offset() {
  int offset = NativeCall::instruction_size;  // call; delay slot
  if (_method_handle_invoke)
    offset += 4;  // restore SP - extra instruction emitted for MH invokes
  return offset;
}
// Byte offset to the return address for a dynamic (inline-cache or
// vtable-dispatched) Java call; size depends on dispatch style and on
// how the klass pointer is loaded (compressed oops variants differ).
int MachCallDynamicJavaNode::ret_addr_offset() {
  int vtable_index = this->_vtable_index;
  if (vtable_index < 0) {
    // Inline-cache dispatch: IC load + call.
    // must be invalid_vtable_index, not nonvirtual_vtable_index
    assert(vtable_index == methodOopDesc::invalid_vtable_index, "correct sentinel value");
    return (NativeMovConstReg::instruction_size +
           NativeCall::instruction_size);  // sethi; setlo; call; delay slot
  } else {
    // Vtable-stub dispatch: klass load, vtable entry load(s), then call.
    assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
    int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
    int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
    int klass_load_size;
    if (UseCompressedOops) {
      assert(Universe::heap() != NULL, "java heap should be initialized");
      // Decoding a narrow klass takes one extra instruction when a
      // non-NULL heap base must be added.
      if (Universe::narrow_oop_base() == NULL)
        klass_load_size = 2*BytesPerInstWord; // see MacroAssembler::load_klass()
      else
        klass_load_size = 3*BytesPerInstWord;
    } else {
      klass_load_size = 1*BytesPerInstWord;
    }
    if( Assembler::is_simm13(v_off) ) {
      // Vtable offset fits in a 13-bit immediate: two loads suffice.
      return klass_load_size +
             (2*BytesPerInstWord +           // ld_ptr, ld_ptr
              NativeCall::instruction_size); // call; delay slot
    } else {
      // Large vtable offset must be materialized with set_hi/set first.
      return klass_load_size +
             (4*BytesPerInstWord +           // set_hi, set, ld_ptr, ld_ptr
              NativeCall::instruction_size); // call; delay slot
    }
  }
}
// Byte offset to the return address for a runtime call; on 64-bit a far
// target needs the longer NativeFarCall sequence.
int MachCallRuntimeNode::ret_addr_offset() {
#ifdef _LP64
  if (MacroAssembler::is_far_target(entry_point())) {
    return NativeFarCall::instruction_size;
  } else {
    return NativeCall::instruction_size;
  }
#else
  return NativeCall::instruction_size;  // call; delay slot
#endif
}
// Indicate if the safepoint node needs the polling page as an input.
// Since Sparc does not have absolute addressing, it does.
bool SafePointNode::needs_polling_address_input() {
  return true;
}
// emit an interrupt that is caught by the debugger (for debugging compiler)
void emit_break(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ breakpoint_trap();
}
#ifndef PRODUCT
// Debug-only pretty-printing of the breakpoint node ("TA" = trap always).
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream *st ) const {
  st->print("TA");
}
#endif
// Emit the breakpoint trap for this node.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  emit_break(cbuf);
}
// Code size of the breakpoint node; defer to the generic size computation.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Traceable jump: jmpl through the register encoded by 'jump_target',
// with a nop in the delay slot.
void emit_jmpl(CodeBuffer &cbuf, int jump_target) {
  MacroAssembler _masm(&cbuf);
  Register rdest = reg_to_register_object(jump_target);
  __ JMP(rdest, 0);
  __ delayed()->nop();
}
// Traceable jump and set exception pc: the delay slot computes the
// issuing pc (O7 + pc_return_offset) into Oissuing_pc.
void emit_jmpl_set_exception_pc(CodeBuffer &cbuf, int jump_target) {
  MacroAssembler _masm(&cbuf);
  Register rdest = reg_to_register_object(jump_target);
  __ JMP(rdest, 0);
  __ delayed()->add(O7, frame::pc_return_offset, Oissuing_pc );
}
// Emit a single nop instruction.
void emit_nop(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ nop();
}
// Emit an illegal-instruction trap (illtrap 0).
void emit_illtrap(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ illtrap(0);
}
// VerifyOops helper: recover the type-based offset of a memory operand by
// asking the mach node for its base and displacement, cross-checking that
// the node's disp32 agrees with what the operand reports.
intptr_t get_offset_from_base(const MachNode* n, const TypePtr* atype, int disp32) {
  assert(n->rule() != loadUB_rule, "");

  intptr_t offset = 0;
  const TypePtr *adr_type = TYPE_PTR_SENTINAL;  // Check for base==RegI, disp==immP
  const Node* addr = n->get_base_and_disp(offset, adr_type);
  assert(adr_type == (const TypePtr*)-1, "VerifyOops: no support for sparc operands with base==RegI, disp==immP");
  assert(addr != NULL && addr != (Node*)-1, "invalid addr");
  assert(addr->bottom_type()->isa_oopptr() == atype, "");
  atype = atype->add_offset(offset);
  assert(disp32 == offset, "wrong disp32");
  return atype->_offset;
}
// VerifyOops helper (variant): derive the offset by walking the address
// input directly, folding in a constant AddP offset when present, then
// adding the instruction's disp32 unless the offset is unknown (OffsetBot).
intptr_t get_offset_from_base_2(const MachNode* n, const TypePtr* atype, int disp32) {
  assert(n->rule() != loadUB_rule, "");

  intptr_t offset = 0;
  Node* addr = n->in(2);            // memory address input of the mach node
  assert(addr->bottom_type()->isa_oopptr() == atype, "");
  if (addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP) {
    // Address is base + offset; fold a constant offset into the type.
    Node* a = addr->in(2/*AddPNode::Address*/);
    Node* o = addr->in(3/*AddPNode::Offset*/);
    offset = o->is_Con() ? o->bottom_type()->is_intptr_t()->get_con() : Type::OffsetBot;
    atype = a->bottom_type()->is_ptr()->add_offset(offset);
    assert(atype->isa_oop_ptr(), "still an oop");
  }
  offset = atype->is_ptr()->_offset;
  if (offset != Type::OffsetBot) offset += disp32;
  return offset;
}
679 static inline jdouble replicate_immI(int con, int count, int width) {
680 // Load a constant replicated "count" times with width "width"
681 int bit_width = width * 8;
682 jlong elt_val = con;
683 elt_val &= (((jlong) 1) << bit_width) - 1; // mask off sign bits
684 jlong val = elt_val;
685 for (int i = 0; i < count - 1; i++) {
686 val <<= bit_width;
687 val |= elt_val;
688 }
689 jdouble dval = *((jdouble*) &val); // coerce to double type
690 return dval;
691 }
693 // Standard Sparc opcode form2 field breakdown
694 static inline void emit2_19(CodeBuffer &cbuf, int f30, int f29, int f25, int f22, int f20, int f19, int f0 ) {
695 f0 &= (1<<19)-1; // Mask displacement to 19 bits
696 int op = (f30 << 30) |
697 (f29 << 29) |
698 (f25 << 25) |
699 (f22 << 22) |
700 (f20 << 20) |
701 (f19 << 19) |
702 (f0 << 0);
703 cbuf.insts()->emit_int32(op);
704 }
706 // Standard Sparc opcode form2 field breakdown
707 static inline void emit2_22(CodeBuffer &cbuf, int f30, int f25, int f22, int f0 ) {
708 f0 >>= 10; // Drop 10 bits
709 f0 &= (1<<22)-1; // Mask displacement to 22 bits
710 int op = (f30 << 30) |
711 (f25 << 25) |
712 (f22 << 22) |
713 (f0 << 0);
714 cbuf.insts()->emit_int32(op);
715 }
717 // Standard Sparc opcode form3 field breakdown
718 static inline void emit3(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int f5, int f0 ) {
719 int op = (f30 << 30) |
720 (f25 << 25) |
721 (f19 << 19) |
722 (f14 << 14) |
723 (f5 << 5) |
724 (f0 << 0);
725 cbuf.insts()->emit_int32(op);
726 }
728 // Standard Sparc opcode form3 field breakdown
729 static inline void emit3_simm13(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm13 ) {
730 simm13 &= (1<<13)-1; // Mask to 13 bits
731 int op = (f30 << 30) |
732 (f25 << 25) |
733 (f19 << 19) |
734 (f14 << 14) |
735 (1 << 13) | // bit to indicate immediate-mode
736 (simm13<<0);
737 cbuf.insts()->emit_int32(op);
738 }
740 static inline void emit3_simm10(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm10 ) {
741 simm10 &= (1<<10)-1; // Mask to 10 bits
742 emit3_simm13(cbuf,f30,f25,f19,f14,simm10);
743 }
#ifdef ASSERT
// Helper function for VerifyOops in emit_form3_mem_reg.
// Prints a diagnostic when a memory node's ideal opcode does not match
// any of the load/store opcode combinations the verifier expects.
void verify_oops_warning(const MachNode *n, int ideal_op, int mem_op) {
  warning("VerifyOops encountered unexpected instruction:");
  n->dump(2);
  warning("Instruction has ideal_Opcode==Op_%s and op_ld==Op_%s \n", NodeClassNames[ideal_op], NodeClassNames[mem_op]);
}
#endif
// Emit a form3 memory instruction (load or store) for MachNode 'n':
//   primary  - the op3 opcode field
//   tertiary - 0xFFFFFFFF, or REGP_OP when the instruction may really be
//              a pointer load/store (see the assert below)
//   src1_enc - base register encoding (rs1 field)
//   disp32   - constant displacement
//   src2_enc - index register encoding, used only when the displacement
//              is zero (reg-reg form)
//   dst_enc  - register encoding placed in the rd field
// In ASSERT builds this also implements the +VerifyOops instrumentation
// described in the long comment below.
void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int tertiary,
                        int src1_enc, int disp32, int src2_enc, int dst_enc) {

#ifdef ASSERT
  // The following code implements the +VerifyOops feature.
  // It verifies oop values which are loaded into or stored out of
  // the current method activation.  +VerifyOops complements techniques
  // like ScavengeALot, because it eagerly inspects oops in transit,
  // as they enter or leave the stack, as opposed to ScavengeALot,
  // which inspects oops "at rest", in the stack or heap, at safepoints.
  // For this reason, +VerifyOops can sometimes detect bugs very close
  // to their point of creation.  It can also serve as a cross-check
  // on the validity of oop maps, when used toegether with ScavengeALot.

  // It would be good to verify oops at other points, especially
  // when an oop is used as a base pointer for a load or store.
  // This is presently difficult, because it is hard to know when
  // a base address is biased or not.  (If we had such information,
  // it would be easy and useful to make a two-argument version of
  // verify_oop which unbiases the base, and performs verification.)

  assert((uint)tertiary == 0xFFFFFFFF || tertiary == REGP_OP, "valid tertiary");
  bool is_verified_oop_base  = false;
  bool is_verified_oop_load  = false;
  bool is_verified_oop_store = false;
  int tmp_enc = -1;
  if (VerifyOops && src1_enc != R_SP_enc) {
    // classify the op, mainly for an assert check
    int st_op = 0, ld_op = 0;
    switch (primary) {
    case Assembler::stb_op3:  st_op = Op_StoreB; break;
    case Assembler::sth_op3:  st_op = Op_StoreC; break;
    case Assembler::stx_op3:  // may become StoreP or stay StoreI or StoreD0
    case Assembler::stw_op3:  st_op = Op_StoreI; break;
    case Assembler::std_op3:  st_op = Op_StoreL; break;
    case Assembler::stf_op3:  st_op = Op_StoreF; break;
    case Assembler::stdf_op3: st_op = Op_StoreD; break;

    case Assembler::ldsb_op3: ld_op = Op_LoadB; break;
    case Assembler::lduh_op3: ld_op = Op_LoadUS; break;
    case Assembler::ldsh_op3: ld_op = Op_LoadS; break;
    case Assembler::ldx_op3:  // may become LoadP or stay LoadI
    case Assembler::ldsw_op3: // may become LoadP or stay LoadI
    case Assembler::lduw_op3: ld_op = Op_LoadI; break;
    case Assembler::ldd_op3:  ld_op = Op_LoadL; break;
    case Assembler::ldf_op3:  ld_op = Op_LoadF; break;
    case Assembler::lddf_op3: ld_op = Op_LoadD; break;
    case Assembler::ldub_op3: ld_op = Op_LoadB; break;
    case Assembler::prefetch_op3: ld_op = Op_LoadI; break;

    default: ShouldNotReachHere();
    }
    if (tertiary == REGP_OP) {
      // Promote the generic int op to its pointer form and decide whether
      // the value being moved is a whole oop (offset 0) worth verifying.
      if (st_op == Op_StoreI)     st_op = Op_StoreP;
      else if (ld_op == Op_LoadI) ld_op = Op_LoadP;
      else                        ShouldNotReachHere();
      if (st_op) {
        // a store
        // inputs are (0:control, 1:memory, 2:address, 3:value)
        Node* n2 = n->in(3);
        if (n2 != NULL) {
          const Type* t = n2->bottom_type();
          is_verified_oop_store = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
        }
      } else {
        // a load
        const Type* t = n->bottom_type();
        is_verified_oop_load = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
      }
    }

    if (ld_op) {
      // a Load
      // inputs are (0:control, 1:memory, 2:address)
      if (!(n->ideal_Opcode()==ld_op)       && // Following are special cases
          !(n->ideal_Opcode()==Op_LoadLLocked && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadPLocked && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadI && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_LoadF && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadRange && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadKlass && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadL && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadL_unaligned && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadD_unaligned && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2F && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2D && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_PrefetchRead && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_PrefetchWrite && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_PrefetchAllocation && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_Load2I && ld_op==Op_LoadD) &&
          !(n->ideal_Opcode()==Op_Load4C && ld_op==Op_LoadD) &&
          !(n->ideal_Opcode()==Op_Load4S && ld_op==Op_LoadD) &&
          !(n->ideal_Opcode()==Op_Load8B && ld_op==Op_LoadD) &&
          !(n->rule() == loadUB_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), ld_op);
      }
    } else if (st_op) {
      // a Store
      // inputs are (0:control, 1:memory, 2:address, 3:value)
      if (!(n->ideal_Opcode()==st_op)    && // Following are special cases
          !(n->ideal_Opcode()==Op_StoreCM && st_op==Op_StoreB) &&
          !(n->ideal_Opcode()==Op_StoreI && st_op==Op_StoreF) &&
          !(n->ideal_Opcode()==Op_StoreF && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_StoreL && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_Store2I && st_op==Op_StoreD) &&
          !(n->ideal_Opcode()==Op_Store4C && st_op==Op_StoreD) &&
          !(n->ideal_Opcode()==Op_Store8B && st_op==Op_StoreD) &&
          !(n->ideal_Opcode()==Op_StoreD && st_op==Op_StoreI && n->rule() == storeD0_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), st_op);
      }
    }

    if (src2_enc == R_G0_enc && n->rule() != loadUB_rule && n->ideal_Opcode() != Op_StoreCM ) {
      Node* addr = n->in(2);
      if (!(addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP)) {
        const TypeOopPtr* atype = addr->bottom_type()->isa_instptr();  // %%% oopptr?
        if (atype != NULL) {
          // Compute the operand offset two independent ways and insist
          // that they agree.
          intptr_t offset = get_offset_from_base(n, atype, disp32);
          intptr_t offset_2 = get_offset_from_base_2(n, atype, disp32);
          if (offset != offset_2) {
            // Recompute for easier inspection under a debugger.
            get_offset_from_base(n, atype, disp32);
            get_offset_from_base_2(n, atype, disp32);
          }
          assert(offset == offset_2, "different offsets");
          if (offset == disp32) {
            // we now know that src1 is a true oop pointer
            is_verified_oop_base = true;
            if (ld_op && src1_enc == dst_enc && ld_op != Op_LoadF && ld_op != Op_LoadD) {
              if( primary == Assembler::ldd_op3 ) {
                is_verified_oop_base = false; // Cannot 'ldd' into O7
              } else {
                tmp_enc = dst_enc;
                dst_enc = R_O7_enc; // Load into O7; preserve source oop
                assert(src1_enc != dst_enc, "");
              }
            }
          }
          if (st_op && (( offset == oopDesc::klass_offset_in_bytes())
                         || offset == oopDesc::mark_offset_in_bytes())) {
            // loading the mark should not be allowed either, but
            // we don't check this since it conflicts with InlineObjectHash
            // usage of LoadINode to get the mark. We could keep the
            // check if we create a new LoadMarkNode
            // but do not verify the object before its header is initialized
            ShouldNotReachHere();
          }
        }
      }
    }
  }
#endif

  // Assemble the ldst instruction word: op=3 (ldst_op), rd, op3, rs1.
  uint instr;
  instr = (Assembler::ldst_op << 30)
        | (dst_enc  << 25)
        | (primary  << 19)
        | (src1_enc << 14);

  uint index = src2_enc;
  int disp = disp32;

  // SP/FP-relative accesses must account for the stack bias.
  if (src1_enc == R_SP_enc || src1_enc == R_FP_enc)
    disp += STACK_BIAS;

  // We should have a compiler bailout here rather than a guarantee.
  // Better yet would be some mechanism to handle variable-size matches correctly.
  guarantee(Assembler::is_simm13(disp), "Do not match large constant offsets" );

  if( disp == 0 ) {
    // use reg-reg form
    // bit 13 is already zero
    instr |= index;
  } else {
    // use reg-imm form
    instr |= 0x00002000;  // set bit 13 to one
    instr |= disp & 0x1FFF;
  }

  cbuf.insts()->emit_int32(instr);

#ifdef ASSERT
  // Emit the verify_oop instrumentation decided above.
  {
    MacroAssembler _masm(&cbuf);
    if (is_verified_oop_base) {
      __ verify_oop(reg_to_register_object(src1_enc));
    }
    if (is_verified_oop_store) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
    if (tmp_enc != -1) {
      __ mov(O7, reg_to_register_object(tmp_enc));
    }
    if (is_verified_oop_load) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
  }
#endif
}
// Emit a call to 'entry_point' with relocation type 'rtype'.  When
// preserve_g2 is set, G2 is copied to L7 in the call's delay slot and
// copied back after the call returns.
void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false) {
  // The method which records debug information at every safepoint
  // expects the call to be the first instruction in the snippet as
  // it creates a PcDesc structure which tracks the offset of a call
  // from the start of the codeBlob. This offset is computed as
  // code_end() - code_begin() of the code which has been emitted
  // so far.
  // In this particular case we have skirted around the problem by
  // putting the "mov" instruction in the delay slot but the problem
  // may bite us again at some other point and a cleaner/generic
  // solution using relocations would be needed.
  MacroAssembler _masm(&cbuf);
  __ set_inst_mark();

  // We flush the current window just so that there is a valid stack copy
  // the fact that the current window becomes active again instantly is
  // not a problem there is nothing live in it.

#ifdef ASSERT
  int startpos = __ offset();
#endif /* ASSERT */

  __ call((address)entry_point, rtype);

  if (preserve_g2) __ delayed()->mov(G2, L7);
  else             __ delayed()->nop();

  if (preserve_g2) __ mov(L7, G2);

#ifdef ASSERT
  if (preserve_g2 && (VerifyCompiledCode || VerifyOops)) {
#ifdef _LP64
    // Trash argument dump slots.
    __ set(0xb0b8ac0db0b8ac0d, G1);
    __ mov(G1, G5);
    __ stx(G1, SP, STACK_BIAS + 0x80);
    __ stx(G1, SP, STACK_BIAS + 0x88);
    __ stx(G1, SP, STACK_BIAS + 0x90);
    __ stx(G1, SP, STACK_BIAS + 0x98);
    __ stx(G1, SP, STACK_BIAS + 0xA0);
    __ stx(G1, SP, STACK_BIAS + 0xA8);
#else // _LP64
    // this is also a native call, so smash the first 7 stack locations,
    // and the various registers

    // Note: [SP+0x40] is sp[callee_aggregate_return_pointer_sp_offset],
    // while [SP+0x44..0x58] are the argument dump slots.
    __ set((intptr_t)0xbaadf00d, G1);
    __ mov(G1, G5);
    __ sllx(G1, 32, G1);
    __ or3(G1, G5, G1);
    __ mov(G1, G5);
    __ stx(G1, SP, 0x40);
    __ stx(G1, SP, 0x48);
    __ stx(G1, SP, 0x50);
    __ stw(G1, SP, 0x58); // Do not trash [SP+0x5C] which is a usable spill slot
#endif // _LP64
  }
#endif /*ASSERT*/
}
1015 //=============================================================================
1016 // REQUIRED FUNCTIONALITY for encoding
// Intentionally empty: required by the encoding interface, emits nothing.
void emit_lo(CodeBuffer &cbuf, int val) { }
// Intentionally empty: required by the encoding interface, emits nothing.
void emit_hi(CodeBuffer &cbuf, int val) { }
//=============================================================================
// Absolute addressing is not used for the constant table base on SPARC.
const bool Matcher::constant_table_absolute_addressing = false;
// The constant table base register comes from the pointer register class.
const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask;
// Materialize the constant table base address into this node's output
// register: either PC-relative via RDPC (with the table base offset
// adjusted so later load displacements fit in simm13), or by setting the
// absolute address with internal-word relocation.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register r = as_Register(ra_->get_encode(this));
  CodeSection* cs = __ code()->consts();
  int consts_size = cs->align_at_start(cs->size());

  if (UseRDPCForConstantTableBase) {
    // For the following RDPC logic to work correctly the consts
    // section must be allocated right before the insts section.  This
    // assert checks for that.  The layout and the SECT_* constants
    // are defined in src/share/vm/asm/codeBuffer.hpp.
    assert(CodeBuffer::SECT_CONSTS + 1 == CodeBuffer::SECT_INSTS, "must be");
    int offset = __ offset();
    int disp;

    // If the displacement from the current PC to the constant table
    // base fits into simm13 we set the constant table base to the
    // current PC.
    if (__ is_simm13(-(consts_size + offset))) {
      constant_table.set_table_base_offset(-(consts_size + offset));
      disp = 0;
    } else {
      // If the offset of the top constant (last entry in the table)
      // fits into simm13 we set the constant table base to the actual
      // table base.
      if (__ is_simm13(constant_table.top_offset())) {
        constant_table.set_table_base_offset(0);
        disp = consts_size + offset;
      } else {
        // Otherwise we set the constant table base in the middle of the
        // constant table.
        int half_consts_size = consts_size / 2;
        assert(half_consts_size * 2 == consts_size, "sanity");
        constant_table.set_table_base_offset(-half_consts_size);  // table base offset gets added to the load displacement.
        disp = half_consts_size + offset;
      }
    }

    __ rdpc(r);

    if (disp != 0) {
      assert(r != O7, "need temporary");  // O7 is the scratch register below
      __ sub(r, __ ensure_simm13_or_reg(disp, O7), r);
    }
  }
  else {
    // Materialize the constant table base.
    assert(constant_table.size() == consts_size, err_msg("must be: %d == %d", constant_table.size(), consts_size));
    address baseaddr = cs->start() + -(constant_table.table_base_offset());
    RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    AddressLiteral base(baseaddr, rspec);
    __ set(base, r);
  }
}
1083 uint MachConstantBaseNode::size(PhaseRegAlloc*) const {
1084 if (UseRDPCForConstantTableBase) {
1085 // This is really the worst case but generally it's only 1 instruction.
1086 return (1 /*rdpc*/ + 1 /*sub*/ + MacroAssembler::worst_case_insts_for_set()) * BytesPerInstWord;
1087 } else {
1088 return MacroAssembler::worst_case_insts_for_set() * BytesPerInstWord;
1089 }
1090 }
#ifndef PRODUCT
// Print a human-readable description of the constant-table-base setup
// emitted by MachConstantBaseNode::emit above.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  char reg[128];
  ra_->dump_register(this, reg);
  if (UseRDPCForConstantTableBase) {
    st->print("RDPC %s\t! constant table base", reg);
  } else {
    st->print("SET &constanttable,%s\t! constant table base", reg);
  }
}
#endif
//=============================================================================

#ifndef PRODUCT
// Print a human-readable listing of the prologue that
// MachPrologNode::emit below generates; the two must stay in sync.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  for (int i = 0; i < OptoPrologueNops; i++) {
    st->print_cr("NOP"); st->print("\t");
  }

  if( VerifyThread ) {
    st->print_cr("Verify_Thread"); st->print("\t");
  }

  size_t framesize = C->frame_slots() << LogBytesPerInt;

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(framesize)) {
    st->print_cr("! stack bang"); st->print("\t");
  }

  if (Assembler::is_simm13(-framesize)) {
    st->print   ("SAVE R_SP,-%d,R_SP",framesize);
  } else {
    st->print_cr("SETHI R_SP,hi%%(-%d),R_G3",framesize); st->print("\t");
    st->print_cr("ADD R_G3,lo%%(-%d),R_G3",framesize); st->print("\t");
    st->print   ("SAVE R_SP,R_G3,R_SP");
  }

}
#endif
// Emit the method prologue: optional nops, thread verification, an
// optional stack-overflow bang, and the register-window SAVE that
// allocates the frame.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  for (int i = 0; i < OptoPrologueNops; i++) {
    __ nop();
  }

  __ verify_thread();

  size_t framesize = C->frame_slots() << LogBytesPerInt;
  assert(framesize >= 16*wordSize, "must have room for reg. save area");
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(framesize)) {
    __ generate_stack_overflow_check(framesize);
  }

  if (Assembler::is_simm13(-framesize)) {
    __ save(SP, -framesize, SP);
  } else {
    // -framesize does not fit in a simm13: build it in G3 with a
    // sethi/add pair (high bits / low 10 bits), then register-form SAVE.
    __ sethi(-framesize & ~0x3ff, G3);
    __ add(G3, -framesize & 0x3ff, G3);
    __ save(SP, G3, SP);
  }
  C->set_frame_complete( __ offset() );
}
// Prologue size: delegate to the generic MachNode size computation.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Conservative upper bound on the relocation entries the prologue emits.
int MachPrologNode::reloc() const {
  return 10; // a large enough number
}
//=============================================================================
#ifndef PRODUCT
// Print a human-readable listing of the epilogue that
// MachEpilogNode::emit below generates; the two must stay in sync.
void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  if( do_polling() && ra_->C->is_method_compilation() ) {
    st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t");
#ifdef _LP64
    st->print("LDX [L0],G0\t!Poll for Safepointing\n\t");
#else
    st->print("LDUW [L0],G0\t!Poll for Safepointing\n\t");
#endif
  }

  if( do_polling() )
    st->print("RET\n\t");

  st->print("RESTORE");
}
#endif
// Emit the method epilogue: an optional safepoint-return poll, then RET
// with the RESTORE stuffed into its delay slot (or a bare RESTORE when
// this epilog node does not return).
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  Compile* C = ra_->C;

  __ verify_thread();

  // If this does safepoint polling, then do it here
  if( do_polling() && ra_->C->is_method_compilation() ) {
    AddressLiteral polling_page(os::get_polling_page());
    __ sethi(polling_page, L0);
    __ relocate(relocInfo::poll_return_type);
    // Load from the polling page into G0; the access itself is the poll.
    __ ld_ptr( L0, 0, G0 );
  }

  // If this is a return, then stuff the restore in the delay slot
  if( do_polling() ) {
    __ ret();
    __ delayed()->restore();
  } else {
    __ restore();
  }
}
// Epilogue size: delegate to the generic MachNode size computation.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Conservative upper bound on the relocation entries the epilogue emits.
int MachEpilogNode::reloc() const {
  return 16; // a large enough number
}
// Use the default pipeline description for the epilogue.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
// Offset from the start of the epilog to the safepoint poll load: the
// poll in emit() above is preceded only by the SETHI of the polling
// page address.
int MachEpilogNode::safepoint_offset() const {
  assert( do_polling(), "no return for this epilog node");
  return MacroAssembler::insts_for_sethi(os::get_polling_page()) * BytesPerInstWord;
}
1243 //=============================================================================
1245 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
1246 enum RC { rc_bad, rc_int, rc_float, rc_stack };
1247 static enum RC rc_class( OptoReg::Name reg ) {
1248 if( !OptoReg::is_valid(reg) ) return rc_bad;
1249 if (OptoReg::is_stack(reg)) return rc_stack;
1250 VMReg r = OptoReg::as_VMReg(reg);
1251 if (r->is_Register()) return rc_int;
1252 assert(r->is_FloatRegister(), "must be");
1253 return rc_float;
1254 }
// Spill-code helper: one load/store between register 'reg' and stack
// slot 'offset'.  Operates in two modes:
//   cbuf != NULL            : emit the instruction (bails out of the
//                             compile if the biased offset exceeds simm13)
//   cbuf == NULL, !do_size  : print the instruction to 'st'
// Returns the accumulated code size in bytes (size + 4).
static int impl_helper( const MachNode *mach, CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, bool is_load, int offset, int reg, int opcode, const char *op_str, int size, outputStream* st ) {
  if( cbuf ) {
    // Better yet would be some mechanism to handle variable-size matches correctly
    if (!Assembler::is_simm13(offset + STACK_BIAS)) {
      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
    } else {
      emit_form3_mem_reg(*cbuf, mach, opcode, -1, R_SP_enc, offset, 0, Matcher::_regEncode[reg]);
    }
  }
#ifndef PRODUCT
  else if( !do_size ) {
    if( size != 0 ) st->print("\n\t");
    if( is_load ) st->print("%s [R_SP + #%d],R_%s\t! spill",op_str,offset,OptoReg::regname(reg));
    else st->print("%s R_%s,[R_SP + #%d]\t! spill",op_str,OptoReg::regname(reg),offset);
  }
#endif
  return size+4;
}
1275 static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int op1, int op2, const char *op_str, int size, outputStream* st ) {
1276 if( cbuf ) emit3( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst], op1, 0, op2, Matcher::_regEncode[src] );
1277 #ifndef PRODUCT
1278 else if( !do_size ) {
1279 if( size != 0 ) st->print("\n\t");
1280 st->print("%s R_%s,R_%s\t! spill",op_str,OptoReg::regname(src),OptoReg::regname(dst));
1281 }
1282 #endif
1283 return size+4;
1284 }
1286 uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
1287 PhaseRegAlloc *ra_,
1288 bool do_size,
1289 outputStream* st ) const {
1290 // Get registers to move
1291 OptoReg::Name src_second = ra_->get_reg_second(in(1));
1292 OptoReg::Name src_first = ra_->get_reg_first(in(1));
1293 OptoReg::Name dst_second = ra_->get_reg_second(this );
1294 OptoReg::Name dst_first = ra_->get_reg_first(this );
1296 enum RC src_second_rc = rc_class(src_second);
1297 enum RC src_first_rc = rc_class(src_first);
1298 enum RC dst_second_rc = rc_class(dst_second);
1299 enum RC dst_first_rc = rc_class(dst_first);
1301 assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );
1303 // Generate spill code!
1304 int size = 0;
1306 if( src_first == dst_first && src_second == dst_second )
1307 return size; // Self copy, no move
1309 // --------------------------------------
1310 // Check for mem-mem move. Load into unused float registers and fall into
1311 // the float-store case.
1312 if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
1313 int offset = ra_->reg2offset(src_first);
1314 // Further check for aligned-adjacent pair, so we can use a double load
1315 if( (src_first&1)==0 && src_first+1 == src_second ) {
1316 src_second = OptoReg::Name(R_F31_num);
1317 src_second_rc = rc_float;
1318 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::lddf_op3,"LDDF",size, st);
1319 } else {
1320 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::ldf_op3 ,"LDF ",size, st);
1321 }
1322 src_first = OptoReg::Name(R_F30_num);
1323 src_first_rc = rc_float;
1324 }
1326 if( src_second_rc == rc_stack && dst_second_rc == rc_stack ) {
1327 int offset = ra_->reg2offset(src_second);
1328 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F31_num,Assembler::ldf_op3,"LDF ",size, st);
1329 src_second = OptoReg::Name(R_F31_num);
1330 src_second_rc = rc_float;
1331 }
1333 // --------------------------------------
1334 // Check for float->int copy; requires a trip through memory
1335 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS < 3) {
1336 int offset = frame::register_save_words*wordSize;
1337 if (cbuf) {
1338 emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::sub_op3, R_SP_enc, 16 );
1339 impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
1340 impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
1341 emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::add_op3, R_SP_enc, 16 );
1342 }
1343 #ifndef PRODUCT
1344 else if (!do_size) {
1345 if (size != 0) st->print("\n\t");
1346 st->print( "SUB R_SP,16,R_SP\n");
1347 impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
1348 impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
1349 st->print("\tADD R_SP,16,R_SP\n");
1350 }
1351 #endif
1352 size += 16;
1353 }
1355 // Check for float->int copy on T4
1356 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS >= 3) {
1357 // Further check for aligned-adjacent pair, so we can use a double move
1358 if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second)
1359 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mdtox_opf,"MOVDTOX",size, st);
1360 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mstouw_opf,"MOVSTOUW",size, st);
1361 }
1362 // Check for int->float copy on T4
1363 if (src_first_rc == rc_int && dst_first_rc == rc_float && UseVIS >= 3) {
1364 // Further check for aligned-adjacent pair, so we can use a double move
1365 if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second)
1366 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mxtod_opf,"MOVXTOD",size, st);
1367 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mwtos_opf,"MOVWTOS",size, st);
1368 }
1370 // --------------------------------------
1371 // In the 32-bit 1-reg-longs build ONLY, I see mis-aligned long destinations.
1372 // In such cases, I have to do the big-endian swap. For aligned targets, the
1373 // hardware does the flop for me. Doubles are always aligned, so no problem
1374 // there. Misaligned sources only come from native-long-returns (handled
1375 // special below).
1376 #ifndef _LP64
1377 if( src_first_rc == rc_int && // source is already big-endian
1378 src_second_rc != rc_bad && // 64-bit move
1379 ((dst_first&1)!=0 || dst_second != dst_first+1) ) { // misaligned dst
1380 assert( (src_first&1)==0 && src_second == src_first+1, "source must be aligned" );
1381 // Do the big-endian flop.
1382 OptoReg::Name tmp = dst_first ; dst_first = dst_second ; dst_second = tmp ;
1383 enum RC tmp_rc = dst_first_rc; dst_first_rc = dst_second_rc; dst_second_rc = tmp_rc;
1384 }
1385 #endif
1387 // --------------------------------------
1388 // Check for integer reg-reg copy
1389 if( src_first_rc == rc_int && dst_first_rc == rc_int ) {
1390 #ifndef _LP64
1391 if( src_first == R_O0_num && src_second == R_O1_num ) { // Check for the evil O0/O1 native long-return case
1392 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value
1393 // as stored in memory. On a big-endian machine like SPARC, this means that the _second
1394 // operand contains the least significant word of the 64-bit value and vice versa.
1395 OptoReg::Name tmp = OptoReg::Name(R_O7_num);
1396 assert( (dst_first&1)==0 && dst_second == dst_first+1, "return a native O0/O1 long to an aligned-adjacent 64-bit reg" );
1397 // Shift O0 left in-place, zero-extend O1, then OR them into the dst
1398 if( cbuf ) {
1399 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tmp], Assembler::sllx_op3, Matcher::_regEncode[src_first], 0x1020 );
1400 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[src_second], Assembler::srl_op3, Matcher::_regEncode[src_second], 0x0000 );
1401 emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler:: or_op3, Matcher::_regEncode[tmp], 0, Matcher::_regEncode[src_second] );
1402 #ifndef PRODUCT
1403 } else if( !do_size ) {
1404 if( size != 0 ) st->print("\n\t");
1405 st->print("SLLX R_%s,32,R_%s\t! Move O0-first to O7-high\n\t", OptoReg::regname(src_first), OptoReg::regname(tmp));
1406 st->print("SRL R_%s, 0,R_%s\t! Zero-extend O1\n\t", OptoReg::regname(src_second), OptoReg::regname(src_second));
1407 st->print("OR R_%s,R_%s,R_%s\t! spill",OptoReg::regname(tmp), OptoReg::regname(src_second), OptoReg::regname(dst_first));
1408 #endif
1409 }
1410 return size+12;
1411 }
1412 else if( dst_first == R_I0_num && dst_second == R_I1_num ) {
1413 // returning a long value in I0/I1
1414 // a SpillCopy must be able to target a return instruction's reg_class
1415 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value
1416 // as stored in memory. On a big-endian machine like SPARC, this means that the _second
1417 // operand contains the least significant word of the 64-bit value and vice versa.
1418 OptoReg::Name tdest = dst_first;
1420 if (src_first == dst_first) {
1421 tdest = OptoReg::Name(R_O7_num);
1422 size += 4;
1423 }
1425 if( cbuf ) {
1426 assert( (src_first&1) == 0 && (src_first+1) == src_second, "return value was in an aligned-adjacent 64-bit reg");
1427 // Shift value in upper 32-bits of src to lower 32-bits of I0; move lower 32-bits to I1
1428 // ShrL_reg_imm6
1429 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tdest], Assembler::srlx_op3, Matcher::_regEncode[src_second], 32 | 0x1000 );
1430 // ShrR_reg_imm6 src, 0, dst
1431 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srl_op3, Matcher::_regEncode[src_first], 0x0000 );
1432 if (tdest != dst_first) {
1433 emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler::or_op3, 0/*G0*/, 0/*op2*/, Matcher::_regEncode[tdest] );
1434 }
1435 }
1436 #ifndef PRODUCT
1437 else if( !do_size ) {
1438 if( size != 0 ) st->print("\n\t"); // %%%%% !!!!!
1439 st->print("SRLX R_%s,32,R_%s\t! Extract MSW\n\t",OptoReg::regname(src_second),OptoReg::regname(tdest));
1440 st->print("SRL R_%s, 0,R_%s\t! Extract LSW\n\t",OptoReg::regname(src_first),OptoReg::regname(dst_second));
1441 if (tdest != dst_first) {
1442 st->print("MOV R_%s,R_%s\t! spill\n\t", OptoReg::regname(tdest), OptoReg::regname(dst_first));
1443 }
1444 }
1445 #endif // PRODUCT
1446 return size+8;
1447 }
1448 #endif // !_LP64
1449 // Else normal reg-reg copy
1450 assert( src_second != dst_first, "smashed second before evacuating it" );
1451 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::or_op3,0,"MOV ",size, st);
1452 assert( (src_first&1) == 0 && (dst_first&1) == 0, "never move second-halves of int registers" );
1453 // This moves an aligned adjacent pair.
1454 // See if we are done.
1455 if( src_first+1 == src_second && dst_first+1 == dst_second )
1456 return size;
1457 }
1459 // Check for integer store
1460 if( src_first_rc == rc_int && dst_first_rc == rc_stack ) {
1461 int offset = ra_->reg2offset(dst_first);
1462 // Further check for aligned-adjacent pair, so we can use a double store
1463 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1464 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stx_op3,"STX ",size, st);
1465 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stw_op3,"STW ",size, st);
1466 }
1468 // Check for integer load
1469 if( dst_first_rc == rc_int && src_first_rc == rc_stack ) {
1470 int offset = ra_->reg2offset(src_first);
1471 // Further check for aligned-adjacent pair, so we can use a double load
1472 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1473 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldx_op3 ,"LDX ",size, st);
1474 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
1475 }
1477 // Check for float reg-reg copy
1478 if( src_first_rc == rc_float && dst_first_rc == rc_float ) {
1479 // Further check for aligned-adjacent pair, so we can use a double move
1480 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1481 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovd_opf,"FMOVD",size, st);
1482 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovs_opf,"FMOVS",size, st);
1483 }
1485 // Check for float store
1486 if( src_first_rc == rc_float && dst_first_rc == rc_stack ) {
1487 int offset = ra_->reg2offset(dst_first);
1488 // Further check for aligned-adjacent pair, so we can use a double store
1489 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1490 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stdf_op3,"STDF",size, st);
1491 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
1492 }
1494 // Check for float load
1495 if( dst_first_rc == rc_float && src_first_rc == rc_stack ) {
1496 int offset = ra_->reg2offset(src_first);
1497 // Further check for aligned-adjacent pair, so we can use a double load
1498 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1499 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lddf_op3,"LDDF",size, st);
1500 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldf_op3 ,"LDF ",size, st);
1501 }
1503 // --------------------------------------------------------------------
1504 // Check for hi bits still needing moving. Only happens for misaligned
1505 // arguments to native calls.
1506 if( src_second == dst_second )
1507 return size; // Self copy; no move
1508 assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" );
1510 #ifndef _LP64
1511 // In the LP64 build, all registers can be moved as aligned/adjacent
1512 // pairs, so there's never any need to move the high bits separately.
1513 // The 32-bit builds have to deal with the 32-bit ABI which can force
1514 // all sorts of silly alignment problems.
1516 // Check for integer reg-reg copy. Hi bits are stuck up in the top
1517 // 32-bits of a 64-bit register, but are needed in low bits of another
1518 // register (else it's a hi-bits-to-hi-bits copy which should have
1519 // happened already as part of a 64-bit move)
1520 if( src_second_rc == rc_int && dst_second_rc == rc_int ) {
1521 assert( (src_second&1)==1, "its the evil O0/O1 native return case" );
1522 assert( (dst_second&1)==0, "should have moved with 1 64-bit move" );
1523 // Shift src_second down to dst_second's low bits.
1524 if( cbuf ) {
1525 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 );
1526 #ifndef PRODUCT
1527 } else if( !do_size ) {
1528 if( size != 0 ) st->print("\n\t");
1529 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(dst_second));
1530 #endif
1531 }
1532 return size+4;
1533 }
1535 // Check for high word integer store. Must down-shift the hi bits
1536 // into a temp register, then fall into the case of storing int bits.
1537 if( src_second_rc == rc_int && dst_second_rc == rc_stack && (src_second&1)==1 ) {
1538 // Shift src_second down to dst_second's low bits.
1539 if( cbuf ) {
1540 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[R_O7_num], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 );
1541 #ifndef PRODUCT
1542 } else if( !do_size ) {
1543 if( size != 0 ) st->print("\n\t");
1544 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(R_O7_num));
1545 #endif
1546 }
1547 size+=4;
1548 src_second = OptoReg::Name(R_O7_num); // Not R_O7H_num!
1549 }
1551 // Check for high word integer load
1552 if( dst_second_rc == rc_int && src_second_rc == rc_stack )
1553 return impl_helper(this,cbuf,ra_,do_size,true ,ra_->reg2offset(src_second),dst_second,Assembler::lduw_op3,"LDUW",size, st);
1555 // Check for high word integer store
1556 if( src_second_rc == rc_int && dst_second_rc == rc_stack )
1557 return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stw_op3 ,"STW ",size, st);
1559 // Check for high word float store
1560 if( src_second_rc == rc_float && dst_second_rc == rc_stack )
1561 return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stf_op3 ,"STF ",size, st);
1563 #endif // !_LP64
1565 Unimplemented();
1566 }
#ifndef PRODUCT
// Pretty-print this spill copy.  Shares all logic with emit()/size() via
// the common implementation() routine (cbuf == NULL selects print mode).
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
// Emit the spill-copy instructions into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
// Size in bytes of this spill copy (do_size == true: measure, emit nothing).
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1582 //=============================================================================
#ifndef PRODUCT
// Print the NOP padding node; each NOP is one 4-byte SPARC instruction.
void MachNopNode::format( PhaseRegAlloc *, outputStream *st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1589 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1590 MacroAssembler _masm(&cbuf);
1591 for(int i = 0; i < _count; i += 1) {
1592 __ nop();
1593 }
1594 }
// Size of the emitted padding: _count instructions of 4 bytes each,
// matching what MachNopNode::emit() produces.
uint MachNopNode::size(PhaseRegAlloc *ra_) const {
  return 4 * _count;
}
1601 //=============================================================================
#ifndef PRODUCT
// Print the stack-slot address computation performed by this BoxLock node.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("LEA [R_SP+#%d+BIAS],%s",offset,Matcher::regName[reg]);
}
#endif
// Materialize the (biased) stack address of the lock slot into the
// destination register: dst = SP + offset + STACK_BIAS.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()) + STACK_BIAS;
  int reg = ra_->get_encode(this);

  if (Assembler::is_simm13(offset)) {
    // Offset fits the 13-bit immediate field: a single ADD suffices.
    __ add(SP, offset, reg_to_register_object(reg));
  } else {
    // Large offset: build the constant in O7 first, then ADD.
    __ set(offset, O7);
    __ add(SP, O7, reg_to_register_object(reg));
  }
}
// Size varies with the offset (one instruction vs a set+add sequence),
// so measure by scratch-emitting rather than returning a constant.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_)
  assert(ra_ == ra_->C->regalloc(), "sanity");
  return ra_->C->scratch_emit_size(this);
}
1629 //=============================================================================
// emit call stub, compiled java to interpretor
void emit_java_to_interp(CodeBuffer &cbuf ) {

  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  // set (empty), G5
  // jmp -1

  address mark = cbuf.insts_mark();  // get mark within main instrs section

  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL)  return;  // CodeBuffer::expand failed

  // static stub relocation stores the instruction address of the call
  __ relocate(static_stub_Relocation::spec(mark));

  // Load a placeholder oop (NULL) into the inline-cache register; patched
  // with the real callee methodOop when the stub is fixed up.
  __ set_oop(NULL, reg_to_register_object(Matcher::inline_cache_reg_encode()));

  __ set_inst_mark();
  // Placeholder jump target (-1); patched to the interpreter entry later.
  AddressLiteral addrlit(-1);
  __ JUMP(addrlit, G3, 0);

  __ delayed()->nop();

  // Update current stubs pointer and restore code_end.
  __ end_a_stub();
}
// size of call stub, compiled java to interpretor
uint size_java_to_interp() {
  // This doesn't need to be accurate but it must be larger or equal to
  // the real size of the stub.
  return (NativeMovConstReg::instruction_size +  // sethi/setlo;
          NativeJump::instruction_size + // sethi; jmp; nop
          (TraceJumps ? 20 * BytesPerInstWord : 0) );
}
// relocation entries for call stub, compiled java to interpretor
uint reloc_java_to_interp() {
  // Conservative upper bound; only needs to be >= the real count.
  return 10;  // 4 in emit_java_to_interp + 1 in Java_Static_Call
}
1676 //=============================================================================
#ifndef PRODUCT
// Print the Unverified Entry Point sequence (inline-cache klass check).
// Mirrors the code produced by MachUEPNode::emit below.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  st->print_cr("\nUEP:");
#ifdef _LP64
  if (UseCompressedOops) {
    assert(Universe::heap() != NULL, "java heap should be initialized");
    // Compressed klass: load narrow klass, decode (shift and optional base add).
    st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
    st->print_cr("\tSLL    R_G5,3,R_G5");
    if (Universe::narrow_oop_base() != NULL)
      st->print_cr("\tADD    R_G5,R_G6_heap_base,R_G5");
  } else {
    st->print_cr("\tLDX    [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  }
  st->print_cr("\tCMP    R_G5,R_G3" );
  st->print   ("\tTne    xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
#else  // _LP64
  st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  st->print_cr("\tCMP    R_G5,R_G3" );
  st->print   ("\tTne    icc,R_G0+ST_RESERVED_FOR_USER_0+2");
#endif // _LP64
}
#endif
// Emit the Unverified Entry Point: load the receiver's klass, compare it
// with the expected klass in the inline-cache register, and trap to the
// IC-miss handler on mismatch.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  Register G5_ic_reg  = reg_to_register_object(Matcher::inline_cache_reg_encode());
  Register temp_reg   = G3;
  assert( G5_ic_reg != temp_reg, "conflicting registers" );

  // Load klass from receiver
  __ load_klass(O0, temp_reg);
  // Compare against expected klass
  __ cmp(temp_reg, G5_ic_reg);
  // Branch to miss code, checks xcc or icc depending
  __ trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2);
}
// UEP code size; the generic scratch-emit measurement is sufficient here.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1719 //=============================================================================
1721 uint size_exception_handler() {
1722 if (TraceJumps) {
1723 return (400); // just a guess
1724 }
1725 return ( NativeJump::instruction_size ); // sethi;jmp;nop
1726 }
1728 uint size_deopt_handler() {
1729 if (TraceJumps) {
1730 return (400); // just a guess
1731 }
1732 return ( 4+ NativeJump::instruction_size ); // save;sethi;jmp;restore
1733 }
1735 // Emit exception handler code.
1736 int emit_exception_handler(CodeBuffer& cbuf) {
1737 Register temp_reg = G3;
1738 AddressLiteral exception_blob(OptoRuntime::exception_blob()->entry_point());
1739 MacroAssembler _masm(&cbuf);
1741 address base =
1742 __ start_a_stub(size_exception_handler());
1743 if (base == NULL) return 0; // CodeBuffer::expand failed
1745 int offset = __ offset();
1747 __ JUMP(exception_blob, temp_reg, 0); // sethi;jmp
1748 __ delayed()->nop();
1750 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
1752 __ end_a_stub();
1754 return offset;
1755 }
// Emit deopt handler code: save a frame, then jump to the deopt blob's
// unpack entry.  Returns the handler's offset in the stub section, or 0 if
// the buffer could not expand.
int emit_deopt_handler(CodeBuffer& cbuf) {
  // Can't use any of the current frame's registers as we may have deopted
  // at a poll and everything (including G3) can be live.
  Register temp_reg = L0;
  AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(size_deopt_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed

  int offset = __ offset();
  __ save_frame(0);
  __ JUMP(deopt_blob, temp_reg, 0); // sethi;jmp
  __ delayed()->restore();

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");

  __ end_a_stub();
  return offset;
}
// Given a register encoding, produce a Integer Register object
static Register reg_to_register_object(int register_encoding) {
  // Sanity check that AD-file encodings agree with the assembler's.
  assert(L5->encoding() == R_L5_enc && G1->encoding() == R_G1_enc, "right coding");
  return as_Register(register_encoding);
}
// Given a register encoding, produce a single-precision Float Register object
static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding) {
  // Sanity check that AD-file encodings agree with the assembler's.
  assert(F5->encoding(FloatRegisterImpl::S) == R_F5_enc && F12->encoding(FloatRegisterImpl::S) == R_F12_enc, "right coding");
  return as_SingleFloatRegister(register_encoding);
}
// Given a register encoding, produce a double-precision Float Register object
static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding) {
  // Sanity check that AD-file encodings agree with the assembler's.
  assert(F4->encoding(FloatRegisterImpl::D)  == R_F4_enc,  "right coding");
  assert(F32->encoding(FloatRegisterImpl::D) == R_D32_enc, "right coding");
  return as_DoubleFloatRegister(register_encoding);
}
1799 const bool Matcher::match_rule_supported(int opcode) {
1800 if (!has_match_rule(opcode))
1801 return false;
1803 switch (opcode) {
1804 case Op_CountLeadingZerosI:
1805 case Op_CountLeadingZerosL:
1806 case Op_CountTrailingZerosI:
1807 case Op_CountTrailingZerosL:
1808 if (!UsePopCountInstruction)
1809 return false;
1810 break;
1811 }
1813 return true; // Per default match rules are supported.
1814 }
// Map an allocator register number to its offset within the FPU chunk.
int Matcher::regnum_to_fpu_offset(int regnum) {
  return regnum - 32;        // The FP registers are in the second chunk
}
#ifdef ASSERT
// NOTE(review): presumably records the most recent rethrow site for
// debugging — confirm against the Rethrow encoding that writes it.
address last_rethrow = NULL;  // debugging aid for Rethrow encoding
#endif
// Vector width in bytes
const uint Matcher::vector_width_in_bytes(void) {
  return 8;  // vectors are held in 8-byte (double-word) registers here
}
// Vector ideal reg
const uint Matcher::vector_ideal_reg(void) {
  return Op_RegD;  // vectors live in double FP registers
}
// USII supports fxtof through the whole range of number, USIII doesn't
const bool Matcher::convL2FSupported(void) {
  return VM_Version::has_fast_fxtof();
}
// Is this branch offset short enough that a short branch can be used?
//
// NOTE: If the platform does not provide any short branch variants, then
//       this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // The passed offset is relative to address of the branch.
  // Don't need to adjust the offset.
  // CBCOND takes a signed 12-bit (in units of bytes here) displacement.
  return UseCBCond && Assembler::is_simm(offset, 12);
}
1849 const bool Matcher::isSimpleConstant64(jlong value) {
1850 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
1851 // Depends on optimizations in MacroAssembler::setx.
1852 int hi = (int)(value >> 32);
1853 int lo = (int)(value & ~0);
1854 return (hi == 0) || (hi == -1) || (lo == 0);
1855 }
// No scaling for the parameter of the ClearArray node.
const bool Matcher::init_array_count_is_in_bytes = true;

// Threshold size for cleararray: below this, inline stores are used
// instead of the block-clear sequence.
const int Matcher::init_array_short_size = 8 * BytesPerLong;

// Should the Matcher clone shifts on addressing modes, expecting them to
// be subsumed into complex addressing expressions or compute them into
// registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;
// Should narrow-oop decodes be folded into complex addressing modes?
// Not on this platform.
bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers.  Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed.  Else we split the double into 2 integer pieces and move it
// piece-by-piece.  Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
#ifdef _LP64
const bool Matcher::misaligned_doubles_ok = true;
#else
const bool Matcher::misaligned_doubles_ok = false;
#endif
// No-op on SPARC: no platform fixup is needed for implicit null checks.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
}
// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during deoptimization?
// Sparc does not handle callee-save floats.
bool Matcher::float_in_double() { return false; }
// Do ints take an entire long register or just half?
// Note that we if-def off of _LP64.
// The relevant question is how the int is callee-saved.  In _LP64
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits, in not-_LP64 only the low 32 bits is written.
#ifdef _LP64
const bool Matcher::int_in_long = true;
#else
const bool Matcher::int_in_long = false;
#endif
// Return whether or not this register is ever used as an argument.  This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers will not be available to the callee.
bool Matcher::can_be_java_arg( int reg ) {
  // Standard sparc 6 args in registers
  if( reg == R_I0_num ||
      reg == R_I1_num ||
      reg == R_I2_num ||
      reg == R_I3_num ||
      reg == R_I4_num ||
      reg == R_I5_num ) return true;
#ifdef _LP64
  // 64-bit builds can pass 64-bit pointers and longs in
  // the high I registers
  if( reg == R_I0H_num ||
      reg == R_I1H_num ||
      reg == R_I2H_num ||
      reg == R_I3H_num ||
      reg == R_I4H_num ||
      reg == R_I5H_num ) return true;

  // G6 carries the compressed-oops heap base, which must survive the call.
  if ((UseCompressedOops) && (reg == R_G6_num || reg == R_G6H_num)) {
    return true;
  }

#else
  // 32-bit builds with longs-in-one-entry pass longs in G1 & G4.
  // Longs cannot be passed in O regs, because O regs become I regs
  // after a 'save' and I regs get their high bits chopped off on
  // interrupt.
  if( reg == R_G1H_num || reg == R_G1_num ) return true;
  if( reg == R_G4H_num || reg == R_G4_num ) return true;
#endif
  // A few float args in registers
  if( reg >= R_F0_num && reg <= R_F7_num ) return true;

  return false;
}
// An argument register is spillable exactly when it can carry a Java arg.
bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}
// Should long division by a constant use the hardware divide instruction
// rather than a multiply-based strength reduction?
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  // Use hardware SDIVX instruction when it is
  // faster than a code which use multiply.
  return VM_Version::has_fast_idiv();
}
// Register for DIVI projection of divmodI
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();  // no combined div/mod node on this platform
  return RegMask();
}

// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();  // no combined div/mod node on this platform
  return RegMask();
}

// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();  // no combined div/mod node on this platform
  return RegMask();
}

// Register for MODL projection of divmodL
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();  // no combined div/mod node on this platform
  return RegMask();
}
// Register mask for the SP-save slot used around method-handle invokes.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return L7_REGP_mask;
}
1995 %}
// The intptr_t operand types, defined by textual substitution.
// (Cf. opto/type.hpp.  This lets us avoid many, many other ifdefs.)
// 64-bit builds map the X operands to the long variants...
#ifdef _LP64
#define immX      immL
#define immX13    immL13
#define immX13m7  immL13m7
#define iRegX     iRegL
#define g1RegX    g1RegL
#else
// ...and 32-bit builds map them to the int variants.
#define immX      immI
#define immX13    immI13
#define immX13m7  immI13m7
#define iRegX     iRegI
#define g1RegX    g1RegI
#endif
2014 //----------ENCODING BLOCK-----------------------------------------------------
2015 // This block specifies the encoding classes used by the compiler to output
2016 // byte streams. Encoding classes are parameterized macros used by
2017 // Machine Instruction Nodes in order to generate the bit encoding of the
2018 // instruction. Operands specify their base encoding interface with the
// interface keyword.  There are currently four supported interfaces,
2020 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
2021 // operand to generate a function which returns its register number when
2022 // queried. CONST_INTER causes an operand to generate a function which
2023 // returns the value of the constant when queried. MEMORY_INTER causes an
2024 // operand to generate four functions which return the Base Register, the
2025 // Index Register, the Scale Value, and the Offset Value of the operand when
2026 // queried. COND_INTER causes an operand to generate six functions which
2027 // return the encoding code (ie - encoding bits for the instruction)
2028 // associated with each basic boolean condition for a conditional instruction.
2029 //
2030 // Instructions specify two basic values for encoding. Again, a function
2031 // is available to check if the constant displacement is an oop. They use the
2032 // ins_encode keyword to specify their encoding classes (which must be
2033 // a sequence of enc_class names, and their parameters, specified in
2034 // the encoding block), and they use the
2035 // opcode keyword to specify, in order, their primary, secondary, and
2036 // tertiary opcode. Only the opcode sections which a particular instruction
2037 // needs for encoding need to be specified.
2038 encode %{
// Debug-only encoding: flags an instruction whose encoding was never exercised.
enc_class enc_untested %{
#ifdef ASSERT
  MacroAssembler _masm(&cbuf);

  __ untested("encoding");
#endif
%}

// Memory-to/from-register form using both the primary and tertiary opcodes.
enc_class form3_mem_reg( memory mem, iRegI dst ) %{
  emit_form3_mem_reg(cbuf, this, $primary, $tertiary,
                     $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
%}

// Memory-to/from-register form with no tertiary opcode (-1).
enc_class simple_form3_mem_reg( memory mem, iRegI dst ) %{
  emit_form3_mem_reg(cbuf, this, $primary, -1,
                     $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
%}

// PREFETCH for reads: rd field carries the prefetch function code 0.
enc_class form3_mem_prefetch_read( memory mem ) %{
  emit_form3_mem_reg(cbuf, this, $primary, -1,
                     $mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/);
%}

// PREFETCH for writes: rd field carries the prefetch function code 2.
enc_class form3_mem_prefetch_write( memory mem ) %{
  emit_form3_mem_reg(cbuf, this, $primary, -1,
                     $mem$$base, $mem$$disp, $mem$$index, 2/*prefetch function many-writes*/);
%}
// Load a 64-bit long from an unaligned address as two 32-bit halves, then
// marshal them into one register: dst = (hi << 32) | lo (lo staged in O7).
enc_class form3_mem_reg_long_unaligned_marshal( memory mem, iRegL reg ) %{
  assert( Assembler::is_simm13($mem$$disp  ), "need disp and disp+4" );
  assert( Assembler::is_simm13($mem$$disp+4), "need disp and disp+4" );
  guarantee($mem$$index == R_G0_enc, "double index?");
  emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, R_O7_enc );
  emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp,   R_G0_enc, $reg$$reg );
  emit3_simm13( cbuf, Assembler::arith_op, $reg$$reg, Assembler::sllx_op3, $reg$$reg, 0x1020 );
  emit3( cbuf, Assembler::arith_op, $reg$$reg, Assembler::or_op3, $reg$$reg, 0, R_O7_enc );
%}

// Load a double from an unaligned address as two 32-bit accesses into the
// even/odd halves of the double FP register pair.
enc_class form3_mem_reg_double_unaligned( memory mem, RegD_low reg ) %{
  assert( Assembler::is_simm13($mem$$disp  ), "need disp and disp+4" );
  assert( Assembler::is_simm13($mem$$disp+4), "need disp and disp+4" );
  guarantee($mem$$index == R_G0_enc, "double index?");
  // Load long with 2 instructions
  emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp,   R_G0_enc, $reg$$reg+0 );
  emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, $reg$$reg+1 );
%}

//%%% form3_mem_plus_4_reg is a hack--get rid of it
enc_class form3_mem_plus_4_reg( memory mem, iRegI dst ) %{
  guarantee($mem$$disp, "cannot offset a reg-reg operand by 4");
  emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp + 4, $mem$$index, $dst$$reg);
%}
// Reg-reg copy via OR rd, G0, rs2; emits nothing if source == destination.
enc_class form3_g0_rs2_rd_move( iRegI rs2, iRegI rd ) %{
  // Encode a reg-reg copy.  If it is useless, then empty encoding.
  if( $rs2$$reg != $rd$$reg )
    emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, $rs2$$reg );
%}

// Target lo half of long
enc_class form3_g0_rs2_rd_move_lo( iRegI rs2, iRegL rd ) %{
  // Encode a reg-reg copy.  If it is useless, then empty encoding.
  if( $rs2$$reg != LONG_LO_REG($rd$$reg) )
    emit3( cbuf, Assembler::arith_op, LONG_LO_REG($rd$$reg), Assembler::or_op3, 0, 0, $rs2$$reg );
%}

// Source lo half of long
enc_class form3_g0_rs2_rd_move_lo2( iRegL rs2, iRegI rd ) %{
  // Encode a reg-reg copy.  If it is useless, then empty encoding.
  if( LONG_LO_REG($rs2$$reg) != $rd$$reg )
    emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_LO_REG($rs2$$reg) );
%}

// Target hi half of long: fill with the sign of rs1 (SRA by 31).
enc_class form3_rs1_rd_copysign_hi( iRegI rs1, iRegL rd ) %{
  emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 31 );
%}

// Source lo half of long, and leave it sign extended.
enc_class form3_rs1_rd_signextend_lo1( iRegL rs1, iRegI rd ) %{
  // Sign extend low half
  emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 0, 0 );
%}

// Source hi half of long, and leave it sign extended.
enc_class form3_rs1_rd_copy_hi1( iRegL rs1, iRegI rd ) %{
  // Shift high half to low half
  emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::srlx_op3, $rs1$$reg, 32 );
%}

// Source hi half of long
enc_class form3_g0_rs2_rd_move_hi2( iRegL rs2, iRegI rd ) %{
  // Encode a reg-reg copy.  If it is useless, then empty encoding.
  if( LONG_HI_REG($rs2$$reg) != $rd$$reg )
    emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_HI_REG($rs2$$reg) );
%}
// Generic 3-register arithmetic form: rd = rs1 <primary-op> rs2.
enc_class form3_rs1_rs2_rd( iRegI rs1, iRegI rs2, iRegI rd ) %{
  emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0, $rs2$$reg );
%}

// dst = (src != 0) ? 1 : 0 — SUBcc of G0-src sets the carry iff src != 0,
// then ADDC of 0+0+carry captures it.  (NOTE(review): relies on SPARC
// condition-code semantics; verify against the V9 manual.)
enc_class enc_to_bool( iRegI src, iRegI dst ) %{
  emit3       ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, 0, 0, $src$$reg );
  emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::addc_op3 , 0, 0  );
%}

// dst = (p < q) ? -1 : 0 — compare, default dst to 0, then an annulled
// branch with "mov dst,-1" in the delay slot taken only when p < q.
enc_class enc_ltmask( iRegI p, iRegI q, iRegI dst ) %{
  emit3       ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $p$$reg, 0, $q$$reg );
  // clear if nothing else is happening
  emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 0 );
  // blt,a,pn done
  emit2_19    ( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less, Assembler::bp_op2, Assembler::icc, 0/*predict not taken*/, 2 );
  // mov dst,-1 in delay slot
  emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 );
%}

// 32-bit shift by a 5-bit immediate (count masked to 0..31).
enc_class form3_rs1_imm5_rd( iRegI rs1, immU5 imm5, iRegI rd ) %{
  emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $imm5$$constant & 0x1F );
%}

// 64-bit shift by a 6-bit immediate; bit 0x1000 selects the extended
// (64-bit) shift form.
enc_class form3_sd_rs1_imm6_rd( iRegL rs1, immU6 imm6, iRegL rd ) %{
  emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, ($imm6$$constant & 0x3F) | 0x1000 );
%}

// 64-bit shift by a register count; 0x80 selects the extended shift form.
enc_class form3_sd_rs1_rs2_rd( iRegL rs1, iRegI rs2, iRegL rd ) %{
  emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0x80, $rs2$$reg );
%}

// rd = rs1 <primary-op> simm13 (13-bit signed immediate form).
enc_class form3_rs1_simm13_rd( iRegI rs1, immI13 simm13, iRegI rd ) %{
  emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $simm13$$constant );
%}

// O1 = O7 + pc_return_offset: materialize the return PC for the caller.
enc_class move_return_pc_to_o1() %{
  emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::add_op3, R_O7_enc, frame::pc_return_offset );
%}
#ifdef _LP64
/* %%% merge with enc_to_bool */
// dst = (src != NULL) ? 1 : 0 using the conditional-move-on-register form.
enc_class enc_convP2B( iRegI dst, iRegP src ) %{
  MacroAssembler _masm(&cbuf);

  Register   src_reg = reg_to_register_object($src$$reg);
  Register   dst_reg = reg_to_register_object($dst$$reg);
  __ movr(Assembler::rc_nz, src_reg, 1, dst_reg);
%}
#endif

// Fused compare-LT-mask add, avoiding the branch in enc_ltmask.
enc_class enc_cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp ) %{
  // (Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)))
  MacroAssembler _masm(&cbuf);

  Register   p_reg = reg_to_register_object($p$$reg);
  Register   q_reg = reg_to_register_object($q$$reg);
  Register   y_reg = reg_to_register_object($y$$reg);
  Register tmp_reg = reg_to_register_object($tmp$$reg);

  __ subcc( p_reg, q_reg,   p_reg );   // p = p - q, setting icc
  __ add  ( p_reg, y_reg, tmp_reg );   // tmp = (p-q) + y
  __ movcc( Assembler::less, false, Assembler::icc, tmp_reg, p_reg );  // take tmp if p < q
%}
// double -> int conversion with NaN handling: a NaN input produces 0
// (dst is converted back and subtracted from itself on the NaN path).
enc_class form_d2i_helper(regD src, regF dst) %{
  // fcmp %fcc0,$src,$src
  emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
  // branch %fcc0 not-nan, predict taken
  emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
  // fdtoi $src,$dst
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtoi_opf, $src$$reg );
  // fitos $dst,$dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg );
  // clear $dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg );
  // carry on here...
%}

// double -> long conversion with NaN handling: a NaN input produces 0.
enc_class form_d2l_helper(regD src, regD dst) %{
  // fcmp %fcc0,$src,$src  check for NAN
  emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
  // branch %fcc0 not-nan, predict taken
  emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
  // fdtox $src,$dst   convert in delay slot
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtox_opf, $src$$reg );
  // fxtod $dst,$dst  (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg );
  // clear $dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg );
  // carry on here...
%}

// float -> int conversion with NaN handling: a NaN input produces 0.
enc_class form_f2i_helper(regF src, regF dst) %{
  // fcmps %fcc0,$src,$src
  emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg );
  // branch %fcc0 not-nan, predict taken
  emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
  // fstoi $src,$dst
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstoi_opf, $src$$reg );
  // fitos $dst,$dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg );
  // clear $dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg );
  // carry on here...
%}

// float -> long conversion with NaN handling: a NaN input produces 0.
enc_class form_f2l_helper(regF src, regD dst) %{
  // fcmps %fcc0,$src,$src
  emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg );
  // branch %fcc0 not-nan, predict taken
  emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
  // fstox $src,$dst
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstox_opf, $src$$reg );
  // fxtod $dst,$dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg );
  // clear $dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg );
  // carry on here...
%}
// Single-instruction FPop encodings: opf form with only rs2 and rd (rs1 field is 0).
// Opcode pieces come from the instruct's opcode() declaration via $primary/$secondary/$tertiary.
// The _lo/_hi variants use $reg+1 to address the other half of a double register pair.
2255 enc_class form3_opf_rs2F_rdF(regF rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
2256 enc_class form3_opf_rs2F_rdD(regF rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
2257 enc_class form3_opf_rs2D_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
2258 enc_class form3_opf_rs2D_rdD(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
2260 enc_class form3_opf_rs2D_lo_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg+1); %}
2262 enc_class form3_opf_rs2D_hi_rdD_hi(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
2263 enc_class form3_opf_rs2D_lo_rdD_lo(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg+1,$primary,0,$tertiary,$rs2$$reg+1); %}
// Two-source FPop encodings (rs1 op rs2 -> rd), and FP-compare variants that
// target a condition-code register ($fcc) instead of a destination register.
2265 enc_class form3_opf_rs1F_rs2F_rdF( regF rs1, regF rs2, regF rd ) %{
2266 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
2267 %}
2269 enc_class form3_opf_rs1D_rs2D_rdD( regD rs1, regD rs2, regD rd ) %{
2270 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
2271 %}
2273 enc_class form3_opf_rs1F_rs2F_fcc( regF rs1, regF rs2, flagsRegF fcc ) %{
2274 emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
2275 %}
2277 enc_class form3_opf_rs1D_rs2D_fcc( regD rs1, regD rs2, flagsRegF fcc ) %{
2278 emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
2279 %}
// int -> float conversion: opf selected by $secondary, rs1 field is 0.
2281 enc_class form3_convI2F(regF rs2, regF rd) %{
2282 emit3(cbuf,Assembler::arith_op,$rd$$reg,Assembler::fpop1_op3,0,$secondary,$rs2$$reg);
2283 %}
2285 // Encoding class for traceable jumps
2286 enc_class form_jmpl(g3RegP dest) %{
2287 emit_jmpl(cbuf, $dest$$reg);
2288 %}
// Jump that also records the exception PC (helper does the emission).
2290 enc_class form_jmpl_set_exception_pc(g1RegP dest) %{
2291 emit_jmpl_set_exception_pc(cbuf, $dest$$reg);
2292 %}
// Emit a single NOP instruction.
2294 enc_class form2_nop() %{
2295 emit_nop(cbuf);
2296 %}
// Emit an ILLTRAP (illegal instruction) — used to force a trap.
2298 enc_class form2_illtrap() %{
2299 emit_illtrap(cbuf);
2300 %}
2303 // Compare longs and convert into -1, 0, 1.
// Emits a fixed 7-instruction sequence; the branch displacements (5 and 3)
// are instruction counts into this sequence — keep the layout exactly as-is.
// Annulled branches use their delay slot only when taken, so the "in delay
// slot" moves execute only on the corresponding compare outcome.
2304 enc_class cmpl_flag( iRegL src1, iRegL src2, iRegI dst ) %{
2305 // CMP $src1,$src2
2306 emit3( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $src1$$reg, 0, $src2$$reg );
2307 // blt,a,pn done
2308 emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less , Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 5 );
2309 // mov dst,-1 in delay slot
2310 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 );
2311 // bgt,a,pn done
2312 emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::greater, Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 3 );
2313 // mov dst,1 in delay slot
2314 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 1 );
2315 // CLR $dst
2316 emit3( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3 , 0, 0, 0 );
2317 %}
// Call the shared partial-subtype-check stub; result/arg registers are
// dictated by the stub's calling convention (see StubRoutines::Sparc).
2319 enc_class enc_PartialSubtypeCheck() %{
2320 MacroAssembler _masm(&cbuf);
2321 __ call(StubRoutines::Sparc::partial_subtype_check(), relocInfo::runtime_call_type);
2322 __ delayed()->nop();
2323 %}
// Conditional branch on integer condition codes (BPcc). Backward branches are
// predicted taken (loops), forward branches predicted not taken.
2325 enc_class enc_bp( label labl, cmpOp cmp, flagsReg cc ) %{
2326 MacroAssembler _masm(&cbuf);
2327 Label* L = $labl$$label;
2328 Assembler::Predict predict_taken =
2329 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
2331 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
2332 __ delayed()->nop();
2333 %}
// Branch on register contents (BPr) — compares a register against zero
// directly, no condition codes needed. Same backward/forward prediction rule.
2335 enc_class enc_bpr( label labl, cmpOp_reg cmp, iRegI op1 ) %{
2336 MacroAssembler _masm(&cbuf);
2337 Label* L = $labl$$label;
2338 Assembler::Predict predict_taken =
2339 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
2341 __ bpr( (Assembler::RCondition)($cmp$$cmpcode), false, predict_taken, as_Register($op1$$reg), *L);
2342 __ delayed()->nop();
2343 %}
// MOVcc on integer condition codes, register source. The instruction word is
// assembled field-by-field per the SPARC V9 MOVcc format (op/rd/op3/cc2/cond/i/cc1:cc0/rs2).
2345 enc_class enc_cmov_reg( cmpOp cmp, iRegI dst, iRegI src, immI pcc) %{
2346 int op = (Assembler::arith_op << 30) |
2347 ($dst$$reg << 25) |
2348 (Assembler::movcc_op3 << 19) |
2349 (1 << 18) | // cc2 bit for 'icc'
2350 ($cmp$$cmpcode << 14) |
2351 (0 << 13) | // select register move
2352 ($pcc$$constant << 11) | // cc1, cc0 bits for 'icc' or 'xcc'
2353 ($src$$reg << 0);
2354 cbuf.insts()->emit_int32(op);
2355 %}
// MOVcc with an immediate source (simm11); i-bit (13) set selects the
// immediate form, and the constant is masked to 11 bits.
2357 enc_class enc_cmov_imm( cmpOp cmp, iRegI dst, immI11 src, immI pcc ) %{
2358 int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits
2359 int op = (Assembler::arith_op << 30) |
2360 ($dst$$reg << 25) |
2361 (Assembler::movcc_op3 << 19) |
2362 (1 << 18) | // cc2 bit for 'icc'
2363 ($cmp$$cmpcode << 14) |
2364 (1 << 13) | // select immediate move
2365 ($pcc$$constant << 11) | // cc1, cc0 bits for 'icc'
2366 (simm11 << 0);
2367 cbuf.insts()->emit_int32(op);
2368 %}
// MOVcc conditioned on a float condition-code register (fcc0-fcc3): cc2=0 and
// cc1:cc0 come from the fcc register number. Register-source form.
2370 enc_class enc_cmov_reg_f( cmpOpF cmp, iRegI dst, iRegI src, flagsRegF fcc ) %{
2371 int op = (Assembler::arith_op << 30) |
2372 ($dst$$reg << 25) |
2373 (Assembler::movcc_op3 << 19) |
2374 (0 << 18) | // cc2 bit for 'fccX'
2375 ($cmp$$cmpcode << 14) |
2376 (0 << 13) | // select register move
2377 ($fcc$$reg << 11) | // cc1, cc0 bits for fcc0-fcc3
2378 ($src$$reg << 0);
2379 cbuf.insts()->emit_int32(op);
2380 %}
// Same as above but with an 11-bit immediate source (i-bit set).
2382 enc_class enc_cmov_imm_f( cmpOp cmp, iRegI dst, immI11 src, flagsRegF fcc ) %{
2383 int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits
2384 int op = (Assembler::arith_op << 30) |
2385 ($dst$$reg << 25) |
2386 (Assembler::movcc_op3 << 19) |
2387 (0 << 18) | // cc2 bit for 'fccX'
2388 ($cmp$$cmpcode << 14) |
2389 (1 << 13) | // select immediate move
2390 ($fcc$$reg << 11) | // cc1, cc0 bits for fcc0-fcc3
2391 (simm11 << 0);
2392 cbuf.insts()->emit_int32(op);
2393 %}
// FMOVcc: conditionally move a floating-point register based on integer
// condition codes; $primary selects single/double/quad precision (opf_low field).
2395 enc_class enc_cmovf_reg( cmpOp cmp, regD dst, regD src, immI pcc ) %{
2396 int op = (Assembler::arith_op << 30) |
2397 ($dst$$reg << 25) |
2398 (Assembler::fpop2_op3 << 19) |
2399 (0 << 18) |
2400 ($cmp$$cmpcode << 14) |
2401 (1 << 13) | // select register move
2402 ($pcc$$constant << 11) | // cc1-cc0 bits for 'icc' or 'xcc'
2403 ($primary << 5) | // select single, double or quad
2404 ($src$$reg << 0);
2405 cbuf.insts()->emit_int32(op);
2406 %}
// FMOVcc conditioned on a float condition-code register (fcc0-fcc3).
2408 enc_class enc_cmovff_reg( cmpOpF cmp, flagsRegF fcc, regD dst, regD src ) %{
2409 int op = (Assembler::arith_op << 30) |
2410 ($dst$$reg << 25) |
2411 (Assembler::fpop2_op3 << 19) |
2412 (0 << 18) |
2413 ($cmp$$cmpcode << 14) |
2414 ($fcc$$reg << 11) | // cc2-cc0 bits for 'fccX'
2415 ($primary << 5) | // select single, double or quad
2416 ($src$$reg << 0);
2417 cbuf.insts()->emit_int32(op);
2418 %}
2420 // Used by the MIN/MAX encodings. Same as a CMOV, but
2421 // the condition comes from opcode-field instead of an argument.
2422 enc_class enc_cmov_reg_minmax( iRegI dst, iRegI src ) %{
2423 int op = (Assembler::arith_op << 30) |
2424 ($dst$$reg << 25) |
2425 (Assembler::movcc_op3 << 19) |
2426 (1 << 18) | // cc2 bit for 'icc'
2427 ($primary << 14) |
2428 (0 << 13) | // select register move
2429 (0 << 11) | // cc1, cc0 bits for 'icc'
2430 ($src$$reg << 0);
2431 cbuf.insts()->emit_int32(op);
2432 %}
// 64-bit MIN/MAX variant conditioned on 'xcc'.
// NOTE(review): (6 << 16) sets bits 18 and 17, i.e. it spills into the cond
// field (bits 17-14) rather than the cc1:cc0 field (bits 12-11) that the icc
// variant uses; the combined value with $primary presumably selects the xcc
// form of the condition — verify against the SPARC V9 MOVcc encoding before
// touching either this line or the $primary opcodes of the MinL/MaxL instructs.
2434 enc_class enc_cmov_reg_minmax_long( iRegL dst, iRegL src ) %{
2435 int op = (Assembler::arith_op << 30) |
2436 ($dst$$reg << 25) |
2437 (Assembler::movcc_op3 << 19) |
2438 (6 << 16) | // cc2 bit for 'xcc'
2439 ($primary << 14) |
2440 (0 << 13) | // select register move
2441 (0 << 11) | // cc1, cc0 bits for 'icc'
2442 ($src$$reg << 0);
2443 cbuf.insts()->emit_int32(op);
2444 %}
// Load a 13-bit signed immediate into a register: OR %g0, simm13, rd.
2446 enc_class Set13( immI13 src, iRegI rd ) %{
2447 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, $src$$constant );
2448 %}
// Load the high 22 bits of a constant via SETHI (low 10 bits become zero).
2450 enc_class SetHi22( immI src, iRegI rd ) %{
2451 emit2_22( cbuf, Assembler::branch_op, $rd$$reg, Assembler::sethi_op2, $src$$constant );
2452 %}
// Materialize an arbitrary 32-bit constant; the macro assembler picks the
// shortest sequence (mov, sethi, or sethi+or).
2454 enc_class Set32( immI src, iRegI rd ) %{
2455 MacroAssembler _masm(&cbuf);
2456 __ set($src$$constant, reg_to_register_object($rd$$reg));
2457 %}
// Debug-only epilogue after calls: with -XX:+VerifyStackAtCalls, check that
// SP + framesize still equals FP and trap into the debugger if it does not.
// Emits nothing in the normal case. Clobbers G3 only under the flag.
2459 enc_class call_epilog %{
2460 if( VerifyStackAtCalls ) {
2461 MacroAssembler _masm(&cbuf);
2462 int framesize = ra_->C->frame_slots() << LogBytesPerInt;
2463 Register temp_reg = G3;
2464 __ add(SP, framesize, temp_reg);
2465 __ cmp(temp_reg, FP);
2466 __ breakpoint_trap(Assembler::notEqual, Assembler::ptr_cc);
2467 }
2468 %}
2470 // Long values come back from native calls in O0:O1 in the 32-bit VM, copy the value
2471 // to G1 so the register allocator will not have to deal with the misaligned register
2472 // pair.
// 32-bit VM only (compiled out under _LP64): G1 = (O0 << 32) | zero-extend(O1).
2473 enc_class adjust_long_from_native_call %{
2474 #ifndef _LP64
2475 if (returns_long()) {
2476 // sllx O0,32,O0
2477 emit3_simm13( cbuf, Assembler::arith_op, R_O0_enc, Assembler::sllx_op3, R_O0_enc, 0x1020 );
2478 // srl O1,0,O1
2479 emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::srl_op3, R_O1_enc, 0x0000 );
2480 // or O0,O1,G1
2481 emit3 ( cbuf, Assembler::arith_op, R_G1_enc, Assembler:: or_op3, R_O0_enc, 0, R_O1_enc );
2482 }
2483 #endif
2484 %}
2486 enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime
2487 // CALL directly to the runtime
2488 // The user of this is responsible for ensuring that R_L7 is empty (killed).
2489 emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type,
2490 /*preserve_g2=*/true);
2491 %}
// Save SP into L7 around a method-handle invocation so the callee's SP
// adjustment can be undone by restore_SP below.
2493 enc_class preserve_SP %{
2494 MacroAssembler _masm(&cbuf);
2495 __ mov(SP, L7_mh_SP_save);
2496 %}
// Restore SP from the value stashed by preserve_SP.
2498 enc_class restore_SP %{
2499 MacroAssembler _masm(&cbuf);
2500 __ mov(L7_mh_SP_save, SP);
2501 %}
// Static Java call: relocation type depends on whether the target method is
// known (_method) and whether it is an optimized-virtual call site. A
// java-to-interpreter stub is emitted for resolvable (non-runtime) calls.
2503 enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
2504 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
2505 // who we intended to call.
2506 if ( !_method ) {
2507 emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type);
2508 } else if (_optimized_virtual) {
2509 emit_call_reloc(cbuf, $meth$$method, relocInfo::opt_virtual_call_type);
2510 } else {
2511 emit_call_reloc(cbuf, $meth$$method, relocInfo::static_call_type);
2512 }
2513 if( _method ) { // Emit stub for static call
2514 emit_java_to_interp(cbuf);
2515 }
2516 %}
// Dynamic (virtual/interface) Java call. Two shapes:
//  * inline-cache dispatch (vtable_index < 0): load a placeholder oop into G5
//    and emit a relocated call that the IC machinery patches later;
//  * vtable dispatch: load the receiver's klass, index into the vtable, and
//    jump through methodOop::from_compiled_offset.
// The instruction counts here must agree with
// MachCallDynamicJavaNode::ret_addr_offset — see the asserts below.
2518 enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
2519 MacroAssembler _masm(&cbuf);
2520 __ set_inst_mark();
2521 int vtable_index = this->_vtable_index;
2522 // MachCallDynamicJavaNode::ret_addr_offset uses this same test
2523 if (vtable_index < 0) {
2524 // must be invalid_vtable_index, not nonvirtual_vtable_index
2525 assert(vtable_index == methodOopDesc::invalid_vtable_index, "correct sentinel value");
2526 Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
2527 assert(G5_ic_reg == G5_inline_cache_reg, "G5_inline_cache_reg used in assemble_ic_buffer_code()");
2528 assert(G5_ic_reg == G5_megamorphic_method, "G5_megamorphic_method used in megamorphic call stub");
2529 // !!!!!
2530 // Generate "set 0x01, R_G5", placeholder instruction to load oop-info
2531 // emit_call_dynamic_prologue( cbuf );
2532 __ set_oop((jobject)Universe::non_oop_word(), G5_ic_reg);
2534 address virtual_call_oop_addr = __ inst_mark();
2535 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
2536 // who we intended to call.
2537 __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr));
2538 emit_call_reloc(cbuf, $meth$$method, relocInfo::none);
2539 } else {
2540 assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
2541 // Just go thru the vtable
2542 // get receiver klass (receiver already checked for non-null)
2543 // If we end up going thru a c2i adapter interpreter expects method in G5
2544 int off = __ offset();
2545 __ load_klass(O0, G3_scratch);
2546 int klass_load_size;
2547 if (UseCompressedOops) {
2548 assert(Universe::heap() != NULL, "java heap should be initialized");
2549 if (Universe::narrow_oop_base() == NULL)
2550 klass_load_size = 2*BytesPerInstWord;
2551 else
2552 klass_load_size = 3*BytesPerInstWord;
2553 } else {
2554 klass_load_size = 1*BytesPerInstWord;
2555 }
2556 int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
2557 int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
2558 if( __ is_simm13(v_off) ) {
2559 __ ld_ptr(G3, v_off, G5_method);
2560 } else {
2561 // Generate 2 instructions
2562 __ Assembler::sethi(v_off & ~0x3ff, G5_method);
2563 __ or3(G5_method, v_off & 0x3ff, G5_method);
2564 // ld_ptr, set_hi, set
2565 assert(__ offset() - off == klass_load_size + 2*BytesPerInstWord,
2566 "Unexpected instruction size(s)");
2567 __ ld_ptr(G3, G5_method, G5_method);
2568 }
2569 // NOTE: for vtable dispatches, the vtable entry will never be null.
2570 // However it may very well end up in handle_wrong_method if the
2571 // method is abstract for the particular class.
2572 __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3_scratch);
2573 // jump to target (either compiled code or c2iadapter)
2574 __ jmpl(G3_scratch, G0, O7);
2575 __ delayed()->nop();
2576 }
2577 %}
// Compiled-to-compiled Java call: load the entry point from the method in the
// inline-cache register (G5) and call through G3.
2579 enc_class Java_Compiled_Call (method meth) %{ // JAVA COMPILED CALL
2580 MacroAssembler _masm(&cbuf);
2582 Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
2583 Register temp_reg = G3; // caller must kill G3! We cannot reuse G5_ic_reg here because
2584 // we might be calling a C2I adapter which needs it.
2586 assert(temp_reg != G5_ic_reg, "conflicting registers");
2587 // Load nmethod
2588 __ ld_ptr(G5_ic_reg, in_bytes(methodOopDesc::from_compiled_offset()), temp_reg);
2590 // CALL to compiled java, indirect the contents of G3
2591 __ set_inst_mark();
2592 __ callr(temp_reg, G0);
2593 __ delayed()->nop();
2594 %}
// 32-bit signed division via 64-bit sdivx: operands are sign-extended with
// sra(reg, 0, reg) first. Note this writes back into the source registers
// (they are iRegIsafe operands, so the allocator permits the clobber).
2596 enc_class idiv_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst) %{
2597 MacroAssembler _masm(&cbuf);
2598 Register Rdividend = reg_to_register_object($src1$$reg);
2599 Register Rdivisor = reg_to_register_object($src2$$reg);
2600 Register Rresult = reg_to_register_object($dst$$reg);
2602 __ sra(Rdivisor, 0, Rdivisor);
2603 __ sra(Rdividend, 0, Rdividend);
2604 __ sdivx(Rdividend, Rdivisor, Rresult);
2605 %}
// Division by a 13-bit immediate constant; only the dividend needs extending.
2607 enc_class idiv_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst) %{
2608 MacroAssembler _masm(&cbuf);
2610 Register Rdividend = reg_to_register_object($src1$$reg);
2611 int divisor = $imm$$constant;
2612 Register Rresult = reg_to_register_object($dst$$reg);
2614 __ sra(Rdividend, 0, Rdividend);
2615 __ sdivx(Rdividend, divisor, Rresult);
2616 %}
// High 32 bits of a 32x32 signed multiply: sign-extend, 64-bit mulx, then
// shift the product right by 32.
2618 enc_class enc_mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2) %{
2619 MacroAssembler _masm(&cbuf);
2620 Register Rsrc1 = reg_to_register_object($src1$$reg);
2621 Register Rsrc2 = reg_to_register_object($src2$$reg);
2622 Register Rdst = reg_to_register_object($dst$$reg);
2624 __ sra( Rsrc1, 0, Rsrc1 );
2625 __ sra( Rsrc2, 0, Rsrc2 );
2626 __ mulx( Rsrc1, Rsrc2, Rdst );
2627 __ srlx( Rdst, 32, Rdst );
2628 %}
// 32-bit signed remainder: rem = dividend - (dividend / divisor) * divisor,
// computed with 64-bit sdivx/mulx after sign-extending the inputs. O7 serves
// as scratch and must not alias either input (asserted).
2630 enc_class irem_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst, o7RegL scratch) %{
2631 MacroAssembler _masm(&cbuf);
2632 Register Rdividend = reg_to_register_object($src1$$reg);
2633 Register Rdivisor = reg_to_register_object($src2$$reg);
2634 Register Rresult = reg_to_register_object($dst$$reg);
2635 Register Rscratch = reg_to_register_object($scratch$$reg);
2637 assert(Rdividend != Rscratch, "");
2638 assert(Rdivisor != Rscratch, "");
2640 __ sra(Rdividend, 0, Rdividend);
2641 __ sra(Rdivisor, 0, Rdivisor);
2642 __ sdivx(Rdividend, Rdivisor, Rscratch);
2643 __ mulx(Rscratch, Rdivisor, Rscratch);
2644 __ sub(Rdividend, Rscratch, Rresult);
2645 %}
// Remainder by a 13-bit immediate constant; same quotient-multiply-subtract shape.
2647 enc_class irem_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst, o7RegL scratch) %{
2648 MacroAssembler _masm(&cbuf);
2650 Register Rdividend = reg_to_register_object($src1$$reg);
2651 int divisor = $imm$$constant;
2652 Register Rresult = reg_to_register_object($dst$$reg);
2653 Register Rscratch = reg_to_register_object($scratch$$reg);
2655 assert(Rdividend != Rscratch, "");
2657 __ sra(Rdividend, 0, Rdividend);
2658 __ sdivx(Rdividend, divisor, Rscratch);
2659 __ mulx(Rscratch, divisor, Rscratch);
2660 __ sub(Rdividend, Rscratch, Rresult);
2661 %}
// Single-instruction floating-point unary ops (abs/neg/sqrt/move) in single
// (FloatRegisterImpl::S) and double (FloatRegisterImpl::D) precision.
2663 enc_class fabss (sflt_reg dst, sflt_reg src) %{
2664 MacroAssembler _masm(&cbuf);
2666 FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
2667 FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);
2669 __ fabs(FloatRegisterImpl::S, Fsrc, Fdst);
2670 %}
2672 enc_class fabsd (dflt_reg dst, dflt_reg src) %{
2673 MacroAssembler _masm(&cbuf);
2675 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
2676 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);
2678 __ fabs(FloatRegisterImpl::D, Fsrc, Fdst);
2679 %}
2681 enc_class fnegd (dflt_reg dst, dflt_reg src) %{
2682 MacroAssembler _masm(&cbuf);
2684 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
2685 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);
2687 __ fneg(FloatRegisterImpl::D, Fsrc, Fdst);
2688 %}
2690 enc_class fsqrts (sflt_reg dst, sflt_reg src) %{
2691 MacroAssembler _masm(&cbuf);
2693 FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
2694 FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);
2696 __ fsqrt(FloatRegisterImpl::S, Fsrc, Fdst);
2697 %}
2699 enc_class fsqrtd (dflt_reg dst, dflt_reg src) %{
2700 MacroAssembler _masm(&cbuf);
2702 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
2703 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);
2705 __ fsqrt(FloatRegisterImpl::D, Fsrc, Fdst);
2706 %}
2708 enc_class fmovs (dflt_reg dst, dflt_reg src) %{
2709 MacroAssembler _masm(&cbuf);
2711 FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
2712 FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);
2714 __ fmov(FloatRegisterImpl::S, Fsrc, Fdst);
2715 %}
2717 enc_class fmovd (dflt_reg dst, dflt_reg src) %{
2718 MacroAssembler _masm(&cbuf);
2720 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
2721 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);
2723 __ fmov(FloatRegisterImpl::D, Fsrc, Fdst);
2724 %}
// Inline monitor enter: delegates to MacroAssembler::compiler_lock_object,
// honoring biased locking unless the Opto-level bias inlining handles it.
// The aliasing asserts protect the helper's register contract.
2726 enc_class Fast_Lock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
2727 MacroAssembler _masm(&cbuf);
2729 Register Roop = reg_to_register_object($oop$$reg);
2730 Register Rbox = reg_to_register_object($box$$reg);
2731 Register Rscratch = reg_to_register_object($scratch$$reg);
2732 Register Rmark = reg_to_register_object($scratch2$$reg);
2734 assert(Roop != Rscratch, "");
2735 assert(Roop != Rmark, "");
2736 assert(Rbox != Rscratch, "");
2737 assert(Rbox != Rmark, "");
2739 __ compiler_lock_object(Roop, Rmark, Rbox, Rscratch, _counters, UseBiasedLocking && !UseOptoBiasInlining);
2740 %}
// Inline monitor exit: mirror of Fast_Lock via compiler_unlock_object.
2742 enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
2743 MacroAssembler _masm(&cbuf);
2745 Register Roop = reg_to_register_object($oop$$reg);
2746 Register Rbox = reg_to_register_object($box$$reg);
2747 Register Rscratch = reg_to_register_object($scratch$$reg);
2748 Register Rmark = reg_to_register_object($scratch2$$reg);
2750 assert(Roop != Rscratch, "");
2751 assert(Roop != Rmark, "");
2752 assert(Rbox != Rscratch, "");
2753 assert(Rbox != Rmark, "");
2755 __ compiler_unlock_object(Roop, Rmark, Rbox, Rscratch, UseBiasedLocking && !UseOptoBiasInlining);
2756 %}
// Pointer compare-and-swap; casn picks the 32- or 64-bit CAS to match the
// pointer size. The trailing cmp sets condition codes for the success test.
2758 enc_class enc_cas( iRegP mem, iRegP old, iRegP new ) %{
2759 MacroAssembler _masm(&cbuf);
2760 Register Rmem = reg_to_register_object($mem$$reg);
2761 Register Rold = reg_to_register_object($old$$reg);
2762 Register Rnew = reg_to_register_object($new$$reg);
2764 // casx_under_lock picks 1 of 3 encodings:
2765 // For 32-bit pointers you get a 32-bit CAS
2766 // For 64-bit pointers you get a 64-bit CASX
2767 __ casn(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
2768 __ cmp( Rold, Rnew );
2769 %}
// 64-bit CAS. Rnew is copied to O7 first so the original value survives
// CASX overwriting its data register; cmp against O7 tests success.
2771 enc_class enc_casx( iRegP mem, iRegL old, iRegL new) %{
2772 Register Rmem = reg_to_register_object($mem$$reg);
2773 Register Rold = reg_to_register_object($old$$reg);
2774 Register Rnew = reg_to_register_object($new$$reg);
2776 MacroAssembler _masm(&cbuf);
2777 __ mov(Rnew, O7);
2778 __ casx(Rmem, Rold, O7);
2779 __ cmp( Rold, O7 );
2780 %}
2782 // raw int cas, used for compareAndSwap
2783 enc_class enc_casi( iRegP mem, iRegL old, iRegL new) %{
2784 Register Rmem = reg_to_register_object($mem$$reg);
2785 Register Rold = reg_to_register_object($old$$reg);
2786 Register Rnew = reg_to_register_object($new$$reg);
2788 MacroAssembler _masm(&cbuf);
2789 __ mov(Rnew, O7);
2790 __ cas(Rmem, Rold, O7);
2791 __ cmp( Rold, O7 );
2792 %}
// Materialize a boolean from condition codes: preset result to 1, then
// conditionally overwrite with 0 (G0) when notEqual — so result is
// (flags == equal). xcc variant for 64-bit compares.
2794 enc_class enc_lflags_ne_to_boolean( iRegI res ) %{
2795 Register Rres = reg_to_register_object($res$$reg);
2797 MacroAssembler _masm(&cbuf);
2798 __ mov(1, Rres);
2799 __ movcc( Assembler::notEqual, false, Assembler::xcc, G0, Rres );
2800 %}
// Same as above but on the 32-bit icc condition codes.
2802 enc_class enc_iflags_ne_to_boolean( iRegI res ) %{
2803 Register Rres = reg_to_register_object($res$$reg);
2805 MacroAssembler _masm(&cbuf);
2806 __ mov(1, Rres);
2807 __ movcc( Assembler::notEqual, false, Assembler::icc, G0, Rres );
2808 %}
// Three-way float/double compare producing -1/0/1 in an integer register;
// $primary selects single (non-zero) vs double (zero) precision, and
// unordered (NaN) compares report -1.
2810 enc_class floating_cmp ( iRegP dst, regF src1, regF src2 ) %{
2811 MacroAssembler _masm(&cbuf);
2812 Register Rdst = reg_to_register_object($dst$$reg);
2813 FloatRegister Fsrc1 = $primary ? reg_to_SingleFloatRegister_object($src1$$reg)
2814 : reg_to_DoubleFloatRegister_object($src1$$reg);
2815 FloatRegister Fsrc2 = $primary ? reg_to_SingleFloatRegister_object($src2$$reg)
2816 : reg_to_DoubleFloatRegister_object($src2$$reg);
2818 // Convert condition code fcc0 into -1,0,1; unordered reports less-than (-1)
2819 __ float_cmp( $primary, -1, Fsrc1, Fsrc2, Rdst);
2820 %}
// Intrinsic for String.compareTo: compare up to min(len1, len2) chars, then
// fall back to the length difference (stashed in O7) on a tie. The char loop
// runs with negative byte offsets from the array ends, and the comparison
// loads are annulled past the loop bound. Clobbers O7; result register must
// differ from all inputs (asserted).
2823 enc_class enc_String_Compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result) %{
2824 Label Ldone, Lloop;
2825 MacroAssembler _masm(&cbuf);
2827 Register str1_reg = reg_to_register_object($str1$$reg);
2828 Register str2_reg = reg_to_register_object($str2$$reg);
2829 Register cnt1_reg = reg_to_register_object($cnt1$$reg);
2830 Register cnt2_reg = reg_to_register_object($cnt2$$reg);
2831 Register result_reg = reg_to_register_object($result$$reg);
2833 assert(result_reg != str1_reg &&
2834 result_reg != str2_reg &&
2835 result_reg != cnt1_reg &&
2836 result_reg != cnt2_reg ,
2837 "need different registers");
2839 // Compute the minimum of the string lengths(str1_reg) and the
2840 // difference of the string lengths (stack)
2842 // See if the lengths are different, and calculate min in str1_reg.
2843 // Stash diff in O7 in case we need it for a tie-breaker.
2844 Label Lskip;
2845 __ subcc(cnt1_reg, cnt2_reg, O7);
2846 __ sll(cnt1_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit
2847 __ br(Assembler::greater, true, Assembler::pt, Lskip);
2848 // cnt2 is shorter, so use its count:
2849 __ delayed()->sll(cnt2_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit
2850 __ bind(Lskip);
2852 // reallocate cnt1_reg, cnt2_reg, result_reg
2853 // Note: limit_reg holds the string length pre-scaled by 2
2854 Register limit_reg = cnt1_reg;
2855 Register chr2_reg = cnt2_reg;
2856 Register chr1_reg = result_reg;
2857 // str{12} are the base pointers
2859 // Is the minimum length zero?
2860 __ cmp(limit_reg, (int)(0 * sizeof(jchar))); // use cast to resolve overloading ambiguity
2861 __ br(Assembler::equal, true, Assembler::pn, Ldone);
2862 __ delayed()->mov(O7, result_reg); // result is difference in lengths
2864 // Load first characters
2865 __ lduh(str1_reg, 0, chr1_reg);
2866 __ lduh(str2_reg, 0, chr2_reg);
2868 // Compare first characters
2869 __ subcc(chr1_reg, chr2_reg, chr1_reg);
2870 __ br(Assembler::notZero, false, Assembler::pt, Ldone);
2871 assert(chr1_reg == result_reg, "result must be pre-placed");
2872 __ delayed()->nop();
2874 {
2875 // Check after comparing first character to see if strings are equivalent
2876 Label LSkip2;
2877 // Check if the strings start at same location
2878 __ cmp(str1_reg, str2_reg);
2879 __ brx(Assembler::notEqual, true, Assembler::pt, LSkip2);
2880 __ delayed()->nop();
2882 // Check if the length difference is zero (in O7)
2883 __ cmp(G0, O7);
2884 __ br(Assembler::equal, true, Assembler::pn, Ldone);
2885 __ delayed()->mov(G0, result_reg); // result is zero
2887 // Strings might not be equal
2888 __ bind(LSkip2);
2889 }
2891 __ subcc(limit_reg, 1 * sizeof(jchar), chr1_reg);
2892 __ br(Assembler::equal, true, Assembler::pn, Ldone);
2893 __ delayed()->mov(O7, result_reg); // result is difference in lengths
2895 // Shift str1_reg and str2_reg to the end of the arrays, negate limit
2896 __ add(str1_reg, limit_reg, str1_reg);
2897 __ add(str2_reg, limit_reg, str2_reg);
2898 __ neg(chr1_reg, limit_reg); // limit = -(limit-2)
2900 // Compare the rest of the characters
2901 __ lduh(str1_reg, limit_reg, chr1_reg);
2902 __ bind(Lloop);
2903 // __ lduh(str1_reg, limit_reg, chr1_reg); // hoisted
2904 __ lduh(str2_reg, limit_reg, chr2_reg);
2905 __ subcc(chr1_reg, chr2_reg, chr1_reg);
2906 __ br(Assembler::notZero, false, Assembler::pt, Ldone);
2907 assert(chr1_reg == result_reg, "result must be pre-placed");
2908 __ delayed()->inccc(limit_reg, sizeof(jchar));
2909 // annul LDUH if branch is not taken to prevent access past end of string
2910 __ br(Assembler::notZero, true, Assembler::pt, Lloop);
2911 __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted
2913 // If strings are equal up to min length, return the length difference.
2914 __ mov(O7, result_reg);
2916 // Otherwise, return the difference between the first mismatched chars.
2917 __ bind(Ldone);
2918 %}
// Intrinsic for String.equals on the char[] contents: fast-paths identity and
// zero count, uses the 4-byte-aligned word comparison helper when both arrays
// share alignment, otherwise falls back to a char-by-char loop with negative
// offsets from the array ends. Result is 1 (equal) or 0. Clobbers O7.
2920 enc_class enc_String_Equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result) %{
2921 Label Lword_loop, Lpost_word, Lchar, Lchar_loop, Ldone;
2922 MacroAssembler _masm(&cbuf);
2924 Register str1_reg = reg_to_register_object($str1$$reg);
2925 Register str2_reg = reg_to_register_object($str2$$reg);
2926 Register cnt_reg = reg_to_register_object($cnt$$reg);
2927 Register tmp1_reg = O7;
2928 Register result_reg = reg_to_register_object($result$$reg);
2930 assert(result_reg != str1_reg &&
2931 result_reg != str2_reg &&
2932 result_reg != cnt_reg &&
2933 result_reg != tmp1_reg ,
2934 "need different registers");
2936 __ cmp(str1_reg, str2_reg); //same char[] ?
2937 __ brx(Assembler::equal, true, Assembler::pn, Ldone);
2938 __ delayed()->add(G0, 1, result_reg);
2940 __ cmp_zero_and_br(Assembler::zero, cnt_reg, Ldone, true, Assembler::pn);
2941 __ delayed()->add(G0, 1, result_reg); // count == 0
2943 //rename registers
2944 Register limit_reg = cnt_reg;
2945 Register chr1_reg = result_reg;
2946 Register chr2_reg = tmp1_reg;
2948 //check for alignment and position the pointers to the ends
2949 __ or3(str1_reg, str2_reg, chr1_reg);
2950 __ andcc(chr1_reg, 0x3, chr1_reg);
2951 // notZero means at least one not 4-byte aligned.
2952 // We could optimize the case when both arrays are not aligned
2953 // but it is not frequent case and it requires additional checks.
2954 __ br(Assembler::notZero, false, Assembler::pn, Lchar); // char by char compare
2955 __ delayed()->sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg); // set byte count
2957 // Compare char[] arrays aligned to 4 bytes.
2958 __ char_arrays_equals(str1_reg, str2_reg, limit_reg, result_reg,
2959 chr1_reg, chr2_reg, Ldone);
2960 __ ba(Ldone);
2961 __ delayed()->add(G0, 1, result_reg);
2963 // char by char compare
2964 __ bind(Lchar);
2965 __ add(str1_reg, limit_reg, str1_reg);
2966 __ add(str2_reg, limit_reg, str2_reg);
2967 __ neg(limit_reg); //negate count
2969 __ lduh(str1_reg, limit_reg, chr1_reg);
2970 // Lchar_loop
2971 __ bind(Lchar_loop);
2972 __ lduh(str2_reg, limit_reg, chr2_reg);
2973 __ cmp(chr1_reg, chr2_reg);
2974 __ br(Assembler::notEqual, true, Assembler::pt, Ldone);
2975 __ delayed()->mov(G0, result_reg); //not equal
2976 __ inccc(limit_reg, sizeof(jchar));
2977 // annul LDUH if branch is not taken to prevent access past end of string
2978 __ br(Assembler::notZero, true, Assembler::pt, Lchar_loop);
2979 __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted
2981 __ add(G0, 1, result_reg); //equal
2983 __ bind(Ldone);
2984 %}
// Intrinsic for Arrays.equals(char[], char[]): fast-paths identity, nulls,
// length mismatch, and zero length, then compares contents with the 4-byte
// word helper (array bases are always 4-byte aligned). Result 1/0; clobbers O7.
2986 enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI result) %{
2987 Label Lvector, Ldone, Lloop;
2988 MacroAssembler _masm(&cbuf);
2990 Register ary1_reg = reg_to_register_object($ary1$$reg);
2991 Register ary2_reg = reg_to_register_object($ary2$$reg);
2992 Register tmp1_reg = reg_to_register_object($tmp1$$reg);
2993 Register tmp2_reg = O7;
2994 Register result_reg = reg_to_register_object($result$$reg);
2996 int length_offset = arrayOopDesc::length_offset_in_bytes();
2997 int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR);
2999 // return true if the same array
3000 __ cmp(ary1_reg, ary2_reg);
3001 __ brx(Assembler::equal, true, Assembler::pn, Ldone);
3002 __ delayed()->add(G0, 1, result_reg); // equal
3004 __ br_null(ary1_reg, true, Assembler::pn, Ldone);
3005 __ delayed()->mov(G0, result_reg); // not equal
3007 __ br_null(ary2_reg, true, Assembler::pn, Ldone);
3008 __ delayed()->mov(G0, result_reg); // not equal
3010 //load the lengths of arrays
3011 __ ld(Address(ary1_reg, length_offset), tmp1_reg);
3012 __ ld(Address(ary2_reg, length_offset), tmp2_reg);
3014 // return false if the two arrays are not equal length
3015 __ cmp(tmp1_reg, tmp2_reg);
3016 __ br(Assembler::notEqual, true, Assembler::pn, Ldone);
3017 __ delayed()->mov(G0, result_reg); // not equal
3019 __ cmp_zero_and_br(Assembler::zero, tmp1_reg, Ldone, true, Assembler::pn);
3020 __ delayed()->add(G0, 1, result_reg); // zero-length arrays are equal
3022 // load array addresses
3023 __ add(ary1_reg, base_offset, ary1_reg);
3024 __ add(ary2_reg, base_offset, ary2_reg);
3026 // renaming registers
3027 Register chr1_reg = result_reg; // for characters in ary1
3028 Register chr2_reg = tmp2_reg; // for characters in ary2
3029 Register limit_reg = tmp1_reg; // length
3031 // set byte count
3032 __ sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg);
3034 // Compare char[] arrays aligned to 4 bytes.
3035 __ char_arrays_equals(ary1_reg, ary2_reg, limit_reg, result_reg,
3036 chr1_reg, chr2_reg, Ldone);
3037 __ add(G0, 1, result_reg); // equals
3039 __ bind(Ldone);
3040 %}
// Jump to the rethrow stub (exception oop already in I0). In debug builds,
// first record the jump's PC in the global last_rethrow for diagnostics,
// using a temporary register window so L1/L2 are free.
3042 enc_class enc_rethrow() %{
3043 cbuf.set_insts_mark();
3044 Register temp_reg = G3;
3045 AddressLiteral rethrow_stub(OptoRuntime::rethrow_stub());
3046 assert(temp_reg != reg_to_register_object(R_I0_num), "temp must not break oop_reg");
3047 MacroAssembler _masm(&cbuf);
3048 #ifdef ASSERT
3049 __ save_frame(0);
3050 AddressLiteral last_rethrow_addrlit(&last_rethrow);
3051 __ sethi(last_rethrow_addrlit, L1);
3052 Address addr(L1, last_rethrow_addrlit.low10());
3053 __ get_pc(L2);
3054 __ inc(L2, 3 * BytesPerInstWord); // skip this & 2 more insns to point at jump_to
3055 __ st_ptr(L2, addr);
3056 __ restore();
3057 #endif
3058 __ JUMP(rethrow_stub, temp_reg, 0); // sethi;jmp
3059 __ delayed()->nop();
3060 %}
// Scheduling filler nops: pre-encoded instruction words that occupy the
// memory, FP-add, and branch pipelines respectively without side effects.
3062 enc_class emit_mem_nop() %{
3063 // Generates the instruction LDUXA [o6,g0],#0x82,g0
3064 cbuf.insts()->emit_int32((unsigned int) 0xc0839040);
3065 %}
3067 enc_class emit_fadd_nop() %{
3068 // Generates the instruction FMOVS f31,f31
3069 cbuf.insts()->emit_int32((unsigned int) 0xbfa0003f);
3070 %}
3072 enc_class emit_br_nop() %{
3073 // Generates the instruction BPN,PN .
3074 cbuf.insts()->emit_int32((unsigned int) 0x00400000);
3075 %}
3077 enc_class enc_membar_acquire %{
// Acquire barrier: later loads/stores may not move above earlier loads
// (membar #LoadStore | #LoadLoad).
3078 MacroAssembler _masm(&cbuf);
3079 __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::LoadLoad) );
3080 %}
3082 enc_class enc_membar_release %{
// Release barrier: earlier loads/stores may not move below a later store
// (membar #LoadStore | #StoreStore).
3083 MacroAssembler _masm(&cbuf);
3084 __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore) );
3085 %}
3087 enc_class enc_membar_volatile %{
// StoreLoad barrier (the only ordering TSO does not already guarantee).
3088 MacroAssembler _masm(&cbuf);
3089 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
3090 %}
3092 enc_class enc_repl8b( iRegI src, iRegL dst ) %{
// Replicate the low byte of src into all 8 bytes of the 64-bit dst:
// place the byte at bits 63..56, then successively OR in copies shifted
// right by 8, 16 and 32 bits, doubling the replicated width each step.
// Clobbers O7 as scratch.
3093 MacroAssembler _masm(&cbuf);
3094 Register src_reg = reg_to_register_object($src$$reg);
3095 Register dst_reg = reg_to_register_object($dst$$reg);
3096 __ sllx(src_reg, 56, dst_reg);
3097 __ srlx(dst_reg, 8, O7);
3098 __ or3 (dst_reg, O7, dst_reg);
3099 __ srlx(dst_reg, 16, O7);
3100 __ or3 (dst_reg, O7, dst_reg);
3101 __ srlx(dst_reg, 32, O7);
3102 __ or3 (dst_reg, O7, dst_reg);
3103 %}
3105 enc_class enc_repl4b( iRegI src, iRegL dst ) %{
// Replicate the low byte of src into the 4 bytes of the low 32 bits of dst
// (32-bit shifts only). Clobbers O7 as scratch.
3106 MacroAssembler _masm(&cbuf);
3107 Register src_reg = reg_to_register_object($src$$reg);
3108 Register dst_reg = reg_to_register_object($dst$$reg);
3109 __ sll(src_reg, 24, dst_reg);
3110 __ srl(dst_reg, 8, O7);
3111 __ or3(dst_reg, O7, dst_reg);
3112 __ srl(dst_reg, 16, O7);
3113 __ or3(dst_reg, O7, dst_reg);
3114 %}
3116 enc_class enc_repl4s( iRegI src, iRegL dst ) %{
// Replicate the low 16-bit halfword of src into all 4 halfwords of the
// 64-bit dst. Clobbers O7 as scratch.
3117 MacroAssembler _masm(&cbuf);
3118 Register src_reg = reg_to_register_object($src$$reg);
3119 Register dst_reg = reg_to_register_object($dst$$reg);
3120 __ sllx(src_reg, 48, dst_reg);
3121 __ srlx(dst_reg, 16, O7);
3122 __ or3 (dst_reg, O7, dst_reg);
3123 __ srlx(dst_reg, 32, O7);
3124 __ or3 (dst_reg, O7, dst_reg);
3125 %}
3127 enc_class enc_repl2i( iRegI src, iRegL dst ) %{
// Replicate the low 32-bit word of src into both halves of the 64-bit dst.
// Clobbers O7 as scratch.
3128 MacroAssembler _masm(&cbuf);
3129 Register src_reg = reg_to_register_object($src$$reg);
3130 Register dst_reg = reg_to_register_object($dst$$reg);
3131 __ sllx(src_reg, 32, dst_reg);
3132 __ srlx(dst_reg, 32, O7);
3133 __ or3 (dst_reg, O7, dst_reg);
3134 %}
3136 %}
3138 //----------FRAME--------------------------------------------------------------
3139 // Definition of frame structure and management information.
3140 //
3141 // S T A C K L A Y O U T Allocators stack-slot number
3142 // | (to get allocators register number
3143 // G Owned by | | v add VMRegImpl::stack0)
3144 // r CALLER | |
3145 // o | +--------+ pad to even-align allocators stack-slot
3146 // w V | pad0 | numbers; owned by CALLER
3147 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3148 // h ^ | in | 5
3149 // | | args | 4 Holes in incoming args owned by SELF
3150 // | | | | 3
3151 // | | +--------+
3152 // V | | old out| Empty on Intel, window on Sparc
3153 // | old |preserve| Must be even aligned.
3154 // | SP-+--------+----> Matcher::_old_SP, 8 (or 16 in LP64)-byte aligned
3155 // | | in | 3 area for Intel ret address
3156 // Owned by |preserve| Empty on Sparc.
3157 // SELF +--------+
3158 // | | pad2 | 2 pad to align old SP
3159 // | +--------+ 1
3160 // | | locks | 0
3161 // | +--------+----> VMRegImpl::stack0, 8 (or 16 in LP64)-byte aligned
3162 // | | pad1 | 11 pad to align new SP
3163 // | +--------+
3164 // | | | 10
3165 // | | spills | 9 spills
3166 // V | | 8 (pad0 slot for callee)
3167 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3168 // ^ | out | 7
3169 // | | args | 6 Holes in outgoing args owned by CALLEE
3170 // Owned by +--------+
3171 // CALLEE | new out| 6 Empty on Intel, window on Sparc
3172 // | new |preserve| Must be even-aligned.
3173 // | SP-+--------+----> Matcher::_new_SP, even aligned
3174 // | | |
3175 //
3176 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3177 // known from SELF's arguments and the Java calling convention.
3178 // Region 6-7 is determined per call site.
3179 // Note 2: If the calling convention leaves holes in the incoming argument
3180 // area, those holes are owned by SELF. Holes in the outgoing area
3181 // are owned by the CALLEE. Holes should not be necessary in the
3182 // incoming area, as the Java calling convention is completely under
3183 // the control of the AD file. Doubles can be sorted and packed to
3184 // avoid holes. Holes in the outgoing arguments may be necessary for
3185 // varargs C calling conventions.
3186 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3187 // even aligned with pad0 as needed.
3188 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3189 // region 6-11 is even aligned; it may be padded out more so that
3190 // the region from SP to FP meets the minimum stack alignment.
// Frame management block: tells the matcher/register allocator how the
// compiled-code stack frame is laid out and where call arguments and
// return values live (see the stack-layout diagram above).
3192 frame %{
3193 // What direction does stack grow in (assumed to be same for native & Java)
3194 stack_direction(TOWARDS_LOW);
3196 // These two registers define part of the calling convention
3197 // between compiled code and the interpreter.
3198 inline_cache_reg(R_G5); // Inline Cache Register or methodOop for I2C
3199 interpreter_method_oop_reg(R_G5); // Method Oop Register when calling interpreter
3201 // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
3202 cisc_spilling_operand_name(indOffset);
3204 // Number of stack slots consumed by a Monitor enter
3205 #ifdef _LP64
3206 sync_stack_slots(2);
3207 #else
3208 sync_stack_slots(1);
3209 #endif
3211 // Compiled code's Frame Pointer
3212 frame_pointer(R_SP);
3214 // Stack alignment requirement
3215 stack_alignment(StackAlignmentInBytes);
3216 // LP64: Alignment size in bytes (128-bit -> 16 bytes)
3217 // !LP64: Alignment size in bytes (64-bit -> 8 bytes)
3219 // Number of stack slots between incoming argument block and the start of
3220 // a new frame. The PROLOG must add this many slots to the stack. The
3221 // EPILOG must remove this many slots.
3222 in_preserve_stack_slots(0);
3224 // Number of outgoing stack slots killed above the out_preserve_stack_slots
3225 // for calls to C. Supports the var-args backing area for register parms.
3226 // ADLC doesn't support parsing expressions, so I folded the math by hand.
3227 #ifdef _LP64
3228 // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (0)) * 2-stack-slots-per-word
3229 varargs_C_out_slots_killed(12);
3230 #else
3231 // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (1)) * 1-stack-slots-per-word
3232 varargs_C_out_slots_killed( 7);
3233 #endif
3235 // The after-PROLOG location of the return address. Location of
3236 // return address specifies a type (REG or STACK) and a number
3237 // representing the register number (i.e. - use a register name) or
3238 // stack slot.
3239 return_addr(REG R_I7); // Ret Addr is in register I7
3241 // Body of function which returns an OptoRegs array locating
3242 // arguments either in registers or in stack slots for calling
3243 // java
3244 calling_convention %{
3245 (void) SharedRuntime::java_calling_convention(sig_bt, regs, length, is_outgoing);
3247 %}
3249 // Body of function which returns an OptoRegs array locating
3250 // arguments either in registers or in stack slots for calling
3251 // C.
3252 c_calling_convention %{
3253 // This is obviously always outgoing
3254 (void) SharedRuntime::c_calling_convention(sig_bt, regs, length);
3255 %}
3257 // Location of native (C/C++) and interpreter return values. This is specified to
3258 // be the same as Java. In the 32-bit VM, long values are actually returned from
3259 // native calls in O0:O1 and returned to the interpreter in I0:I1. The copying
3260 // to and from the register pairs is done by the appropriate call and epilog
3261 // opcodes. This simplifies the register allocator.
// Tables below are indexed by ideal register type (Op_RegI..Op_RegL);
// out = caller's view (O registers), in = callee's view (I registers).
3262 c_return_value %{
3263 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3264 #ifdef _LP64
3265 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
3266 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
3267 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
3268 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
3269 #else // !_LP64
3270 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
3271 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
3272 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
3273 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
3274 #endif
3275 return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
3276 (is_outgoing?lo_out:lo_in)[ideal_reg] );
3277 %}
3279 // Location of compiled Java return values. Same as C
3280 return_value %{
3281 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3282 #ifdef _LP64
3283 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
3284 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
3285 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
3286 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
3287 #else // !_LP64
3288 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
3289 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
3290 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
3291 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
3292 #endif
3293 return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
3294 (is_outgoing?lo_out:lo_in)[ideal_reg] );
3295 %}
3297 %}
3300 //----------ATTRIBUTES---------------------------------------------------------
3301 //----------Operand Attributes-------------------------------------------------
// Default attribute declarations consumed by ADLC for all operands/instructions.
3302 op_attrib op_cost(1); // Required cost attribute
3304 //----------Instruction Attributes---------------------------------------------
3305 ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
3306 ins_attrib ins_size(32); // Required size attribute (in bits)
3307 ins_attrib ins_avoid_back_to_back(0); // instruction should not be generated back to back
3308 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3309 // non-matching short branch variant of some
3310 // long branch?
3312 //----------OPERANDS-----------------------------------------------------------
3313 // Operand definitions must precede instruction definitions for correct parsing
3314 // in the ADLC because operands constitute user defined types which are used in
3315 // instruction definitions.
3317 //----------Simple Operands----------------------------------------------------
3318 // Immediate Operands
3319 // Integer Immediate: 32-bit
// Integer Immediate: any 32-bit constant.
3320 operand immI() %{
3321 match(ConI);
3323 op_cost(0);
3324 // formats are generated automatically for constants and base registers
3325 format %{ %}
3326 interface(CONST_INTER);
3327 %}
3329 // Integer Immediate: 8-bit
3330 operand immI8() %{
3331 predicate(Assembler::is_simm(n->get_int(), 8));
3332 match(ConI);
3333 op_cost(0);
3334 format %{ %}
3335 interface(CONST_INTER);
3336 %}
3338 // Integer Immediate: 13-bit
// Fits the signed 13-bit immediate field of most SPARC instructions.
3339 operand immI13() %{
3340 predicate(Assembler::is_simm13(n->get_int()));
3341 match(ConI);
3342 op_cost(0);
3344 format %{ %}
3345 interface(CONST_INTER);
3346 %}
3348 // Integer Immediate: 13-bit minus 7
// Value+7 must still fit in simm13 (headroom for small offset additions).
3349 operand immI13m7() %{
3350 predicate((-4096 < n->get_int()) && ((n->get_int() + 7) <= 4095));
3351 match(ConI);
3352 op_cost(0);
3354 format %{ %}
3355 interface(CONST_INTER);
3356 %}
3358 // Integer Immediate: 16-bit
3359 operand immI16() %{
3360 predicate(Assembler::is_simm(n->get_int(), 16));
3361 match(ConI);
3362 op_cost(0);
3363 format %{ %}
3364 interface(CONST_INTER);
3365 %}
3367 // Unsigned (positive) Integer Immediate: 13-bit
3368 operand immU13() %{
3369 predicate((0 <= n->get_int()) && Assembler::is_simm13(n->get_int()));
3370 match(ConI);
3371 op_cost(0);
3373 format %{ %}
3374 interface(CONST_INTER);
3375 %}
3377 // Integer Immediate: 6-bit
// Unsigned range 0..63 (e.g. a 64-bit shift count).
3378 operand immU6() %{
3379 predicate(n->get_int() >= 0 && n->get_int() <= 63);
3380 match(ConI);
3381 op_cost(0);
3382 format %{ %}
3383 interface(CONST_INTER);
3384 %}
3386 // Integer Immediate: 11-bit
3387 operand immI11() %{
3388 predicate(Assembler::is_simm(n->get_int(),11));
3389 match(ConI);
3390 op_cost(0);
3391 format %{ %}
3392 interface(CONST_INTER);
3393 %}
3395 // Integer Immediate: 5-bit
3396 operand immI5() %{
3397 predicate(Assembler::is_simm(n->get_int(), 5));
3398 match(ConI);
3399 op_cost(0);
3400 format %{ %}
3401 interface(CONST_INTER);
3402 %}
3404 // Integer Immediate: 0-bit
// Matches exactly the constant zero (maps to register %g0).
3405 operand immI0() %{
3406 predicate(n->get_int() == 0);
3407 match(ConI);
3408 op_cost(0);
3410 format %{ %}
3411 interface(CONST_INTER);
3412 %}
3414 // Integer Immediate: the value 10
3415 operand immI10() %{
3416 predicate(n->get_int() == 10);
3417 match(ConI);
3418 op_cost(0);
3420 format %{ %}
3421 interface(CONST_INTER);
3422 %}
3424 // Integer Immediate: the values 0-31
// Unsigned 5-bit range (e.g. a 32-bit shift count).
3425 operand immU5() %{
3426 predicate(n->get_int() >= 0 && n->get_int() <= 31);
3427 match(ConI);
3428 op_cost(0);
3430 format %{ %}
3431 interface(CONST_INTER);
3432 %}
3434 // Integer Immediate: the values 1-31
3435 operand immI_1_31() %{
3436 predicate(n->get_int() >= 1 && n->get_int() <= 31);
3437 match(ConI);
3438 op_cost(0);
3440 format %{ %}
3441 interface(CONST_INTER);
3442 %}
3444 // Integer Immediate: the values 32-63
3445 operand immI_32_63() %{
3446 predicate(n->get_int() >= 32 && n->get_int() <= 63);
3447 match(ConI);
3448 op_cost(0);
3450 format %{ %}
3451 interface(CONST_INTER);
3452 %}
3454 // Immediates for special shifts (sign extend)
3456 // Integer Immediate: the value 16
3457 operand immI_16() %{
3458 predicate(n->get_int() == 16);
3459 match(ConI);
3460 op_cost(0);
3462 format %{ %}
3463 interface(CONST_INTER);
3464 %}
3466 // Integer Immediate: the value 24
3467 operand immI_24() %{
3468 predicate(n->get_int() == 24);
3469 match(ConI);
3470 op_cost(0);
3472 format %{ %}
3473 interface(CONST_INTER);
3474 %}
3476 // Integer Immediate: the value 255
// Low-byte mask (0xFF) for zero-extension idioms.
3477 operand immI_255() %{
3478 predicate( n->get_int() == 255 );
3479 match(ConI);
3480 op_cost(0);
3482 format %{ %}
3483 interface(CONST_INTER);
3484 %}
3486 // Integer Immediate: the value 65535
// Low-halfword mask (0xFFFF) for zero-extension idioms.
3487 operand immI_65535() %{
3488 predicate(n->get_int() == 65535);
3489 match(ConI);
3490 op_cost(0);
3492 format %{ %}
3493 interface(CONST_INTER);
3494 %}
3496 // Long Immediate: the value FF
3497 operand immL_FF() %{
3498 predicate( n->get_long() == 0xFFL );
3499 match(ConL);
3500 op_cost(0);
3502 format %{ %}
3503 interface(CONST_INTER);
3504 %}
3506 // Long Immediate: the value FFFF
3507 operand immL_FFFF() %{
3508 predicate( n->get_long() == 0xFFFFL );
3509 match(ConL);
3510 op_cost(0);
3512 format %{ %}
3513 interface(CONST_INTER);
3514 %}
3516 // Pointer Immediate: 32 or 64-bit
3517 operand immP() %{
3518 match(ConP);
3520 op_cost(5);
3521 // formats are generated automatically for constants and base registers
3522 format %{ %}
3523 interface(CONST_INTER);
3524 %}
3526 #ifdef _LP64
// The three operands below partition 64-bit pointer constants so the
// matcher can pick between materializing with a set sequence and loading
// from the constant table, depending on CPU and materialization cost.
3527 // Pointer Immediate: 64-bit
3528 operand immP_set() %{
3529 predicate(!VM_Version::is_niagara_plus());
3530 match(ConP);
3532 op_cost(5);
3533 // formats are generated automatically for constants and base registers
3534 format %{ %}
3535 interface(CONST_INTER);
3536 %}
3538 // Pointer Immediate: 64-bit
3539 // From Niagara2 processors on a load should be better than materializing.
3540 operand immP_load() %{
3541 predicate(VM_Version::is_niagara_plus() && (n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set(n->get_ptr()) > 3)));
3542 match(ConP);
3544 op_cost(5);
3545 // formats are generated automatically for constants and base registers
3546 format %{ %}
3547 interface(CONST_INTER);
3548 %}
3550 // Pointer Immediate: 64-bit
// Non-oop pointer that a set sequence can materialize in <= 3 instructions.
3551 operand immP_no_oop_cheap() %{
3552 predicate(VM_Version::is_niagara_plus() && !n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set(n->get_ptr()) <= 3));
3553 match(ConP);
3555 op_cost(5);
3556 // formats are generated automatically for constants and base registers
3557 format %{ %}
3558 interface(CONST_INTER);
3559 %}
3560 #endif
// Pointer Immediate: fits in a signed 13-bit instruction field.
3562 operand immP13() %{
3563 predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095));
3564 match(ConP);
3565 op_cost(0);
3567 format %{ %}
3568 interface(CONST_INTER);
3569 %}
// NULL Pointer Immediate.
3571 operand immP0() %{
3572 predicate(n->get_ptr() == 0);
3573 match(ConP);
3574 op_cost(0);
3576 format %{ %}
3577 interface(CONST_INTER);
3578 %}
// The safepoint polling page address.
3580 operand immP_poll() %{
3581 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
3582 match(ConP);
3584 // formats are generated automatically for constants and base registers
3585 format %{ %}
3586 interface(CONST_INTER);
3587 %}
3589 // Pointer Immediate
// Narrow (compressed) oop constant.
3590 operand immN()
3591 %{
3592 match(ConN);
3594 op_cost(10);
3595 format %{ %}
3596 interface(CONST_INTER);
3597 %}
3599 // NULL Pointer Immediate
3600 operand immN0()
3601 %{
3602 predicate(n->get_narrowcon() == 0);
3603 match(ConN);
3605 op_cost(0);
3606 format %{ %}
3607 interface(CONST_INTER);
3608 %}
// Long Immediate: any 64-bit constant.
3610 operand immL() %{
3611 match(ConL);
3612 op_cost(40);
3613 // formats are generated automatically for constants and base registers
3614 format %{ %}
3615 interface(CONST_INTER);
3616 %}
// Long Immediate: zero (maps to register %g0).
3618 operand immL0() %{
3619 predicate(n->get_long() == 0L);
3620 match(ConL);
3621 op_cost(0);
3622 // formats are generated automatically for constants and base registers
3623 format %{ %}
3624 interface(CONST_INTER);
3625 %}
3627 // Long Immediate: 5-bit
3628 operand immL5() %{
3629 predicate(n->get_long() == (int)n->get_long() && Assembler::is_simm((int)n->get_long(), 5));
3630 match(ConL);
3631 op_cost(0);
3632 format %{ %}
3633 interface(CONST_INTER);
3634 %}
3636 // Long Immediate: 13-bit
3637 operand immL13() %{
3638 predicate((-4096L < n->get_long()) && (n->get_long() <= 4095L));
3639 match(ConL);
3640 op_cost(0);
3642 format %{ %}
3643 interface(CONST_INTER);
3644 %}
3646 // Long Immediate: 13-bit minus 7
// Value+7 must still fit in simm13 (headroom for small offset additions).
3647 operand immL13m7() %{
3648 predicate((-4096L < n->get_long()) && ((n->get_long() + 7L) <= 4095L));
3649 match(ConL);
3650 op_cost(0);
3652 format %{ %}
3653 interface(CONST_INTER);
3654 %}
3656 // Long Immediate: low 32-bit mask
3657 operand immL_32bits() %{
3658 predicate(n->get_long() == 0xFFFFFFFFL);
3659 match(ConL);
3660 op_cost(0);
3662 format %{ %}
3663 interface(CONST_INTER);
3664 %}
3666 // Long Immediate: cheap (materialize in <= 3 instructions)
3667 operand immL_cheap() %{
3668 predicate(!VM_Version::is_niagara_plus() || MacroAssembler::insts_for_set64(n->get_long()) <= 3);
3669 match(ConL);
3670 op_cost(0);
3672 format %{ %}
3673 interface(CONST_INTER);
3674 %}
3676 // Long Immediate: expensive (materialize in > 3 instructions)
// On Niagara-plus these are loaded from the constant table instead.
3677 operand immL_expensive() %{
3678 predicate(VM_Version::is_niagara_plus() && MacroAssembler::insts_for_set64(n->get_long()) > 3);
3679 match(ConL);
3680 op_cost(0);
3682 format %{ %}
3683 interface(CONST_INTER);
3684 %}
3686 // Double Immediate
3687 operand immD() %{
3688 match(ConD);
3690 op_cost(40);
3691 format %{ %}
3692 interface(CONST_INTER);
3693 %}
// Double Immediate: positive zero only (excludes -0.0 and NaN payloads).
3695 operand immD0() %{
3696 #ifdef _LP64
3697 // on 64-bit architectures this comparison is faster
3698 predicate(jlong_cast(n->getd()) == 0);
3699 #else
3700 predicate((n->getd() == 0) && (fpclass(n->getd()) == FP_PZERO));
3701 #endif
3702 match(ConD);
3704 op_cost(0);
3705 format %{ %}
3706 interface(CONST_INTER);
3707 %}
3709 // Float Immediate
3710 operand immF() %{
3711 match(ConF);
3713 op_cost(20);
3714 format %{ %}
3715 interface(CONST_INTER);
3716 %}
3718 // Float Immediate: 0
// Positive zero only, checked via fpclass.
3719 operand immF0() %{
3720 predicate((n->getf() == 0) && (fpclass(n->getf()) == FP_PZERO));
3721 match(ConF);
3723 op_cost(0);
3724 format %{ %}
3725 interface(CONST_INTER);
3726 %}
3728 // Integer Register Operands
3729 // Integer Register
// General int register; the extra match() lines let the matcher accept the
// more constrained register operands anywhere an iRegI is expected.
3730 operand iRegI() %{
3731 constraint(ALLOC_IN_RC(int_reg));
3732 match(RegI);
3734 match(notemp_iRegI);
3735 match(g1RegI);
3736 match(o0RegI);
3737 match(iRegIsafe);
3739 format %{ %}
3740 interface(REG_INTER);
3741 %}
3743 operand notemp_iRegI() %{
3744 constraint(ALLOC_IN_RC(notemp_int_reg));
3745 match(RegI);
3747 match(o0RegI);
3749 format %{ %}
3750 interface(REG_INTER);
3751 %}
3753 operand o0RegI() %{
3754 constraint(ALLOC_IN_RC(o0_regI));
3755 match(iRegI);
3757 format %{ %}
3758 interface(REG_INTER);
3759 %}
3761 // Pointer Register
3762 operand iRegP() %{
3763 constraint(ALLOC_IN_RC(ptr_reg));
3764 match(RegP);
3766 match(lock_ptr_RegP);
3767 match(g1RegP);
3768 match(g2RegP);
3769 match(g3RegP);
3770 match(g4RegP);
3771 match(i0RegP);
3772 match(o0RegP);
3773 match(o1RegP);
3774 match(l7RegP);
3776 format %{ %}
3777 interface(REG_INTER);
3778 %}
// Pointer register class that also admits the stack/frame pointers.
3780 operand sp_ptr_RegP() %{
3781 constraint(ALLOC_IN_RC(sp_ptr_reg));
3782 match(RegP);
3783 match(iRegP);
3785 format %{ %}
3786 interface(REG_INTER);
3787 %}
3789 operand lock_ptr_RegP() %{
3790 constraint(ALLOC_IN_RC(lock_ptr_reg));
3791 match(RegP);
3792 match(i0RegP);
3793 match(o0RegP);
3794 match(o1RegP);
3795 match(l7RegP);
3797 format %{ %}
3798 interface(REG_INTER);
3799 %}
// Single-register operand classes: each pins an operand to one specific
// machine register (named in the register-class name) for instructions
// with fixed register requirements.
3801 operand g1RegP() %{
3802 constraint(ALLOC_IN_RC(g1_regP));
3803 match(iRegP);
3805 format %{ %}
3806 interface(REG_INTER);
3807 %}
3809 operand g2RegP() %{
3810 constraint(ALLOC_IN_RC(g2_regP));
3811 match(iRegP);
3813 format %{ %}
3814 interface(REG_INTER);
3815 %}
3817 operand g3RegP() %{
3818 constraint(ALLOC_IN_RC(g3_regP));
3819 match(iRegP);
3821 format %{ %}
3822 interface(REG_INTER);
3823 %}
3825 operand g1RegI() %{
3826 constraint(ALLOC_IN_RC(g1_regI));
3827 match(iRegI);
3829 format %{ %}
3830 interface(REG_INTER);
3831 %}
3833 operand g3RegI() %{
3834 constraint(ALLOC_IN_RC(g3_regI));
3835 match(iRegI);
3837 format %{ %}
3838 interface(REG_INTER);
3839 %}
3841 operand g4RegI() %{
3842 constraint(ALLOC_IN_RC(g4_regI));
3843 match(iRegI);
3845 format %{ %}
3846 interface(REG_INTER);
3847 %}
3849 operand g4RegP() %{
3850 constraint(ALLOC_IN_RC(g4_regP));
3851 match(iRegP);
3853 format %{ %}
3854 interface(REG_INTER);
3855 %}
3857 operand i0RegP() %{
3858 constraint(ALLOC_IN_RC(i0_regP));
3859 match(iRegP);
3861 format %{ %}
3862 interface(REG_INTER);
3863 %}
3865 operand o0RegP() %{
3866 constraint(ALLOC_IN_RC(o0_regP));
3867 match(iRegP);
3869 format %{ %}
3870 interface(REG_INTER);
3871 %}
3873 operand o1RegP() %{
3874 constraint(ALLOC_IN_RC(o1_regP));
3875 match(iRegP);
3877 format %{ %}
3878 interface(REG_INTER);
3879 %}
3881 operand o2RegP() %{
3882 constraint(ALLOC_IN_RC(o2_regP));
3883 match(iRegP);
3885 format %{ %}
3886 interface(REG_INTER);
3887 %}
3889 operand o7RegP() %{
3890 constraint(ALLOC_IN_RC(o7_regP));
3891 match(iRegP);
3893 format %{ %}
3894 interface(REG_INTER);
3895 %}
3897 operand l7RegP() %{
3898 constraint(ALLOC_IN_RC(l7_regP));
3899 match(iRegP);
3901 format %{ %}
3902 interface(REG_INTER);
3903 %}
3905 operand o7RegI() %{
3906 constraint(ALLOC_IN_RC(o7_regI));
3907 match(iRegI);
3909 format %{ %}
3910 interface(REG_INTER);
3911 %}
// Narrow (compressed) oop register.
3913 operand iRegN() %{
3914 constraint(ALLOC_IN_RC(int_reg));
3915 match(RegN);
3917 format %{ %}
3918 interface(REG_INTER);
3919 %}
3921 // Long Register
3922 operand iRegL() %{
3923 constraint(ALLOC_IN_RC(long_reg));
3924 match(RegL);
3926 format %{ %}
3927 interface(REG_INTER);
3928 %}
3930 operand o2RegL() %{
3931 constraint(ALLOC_IN_RC(o2_regL));
3932 match(iRegL);
3934 format %{ %}
3935 interface(REG_INTER);
3936 %}
3938 operand o7RegL() %{
3939 constraint(ALLOC_IN_RC(o7_regL));
3940 match(iRegL);
3942 format %{ %}
3943 interface(REG_INTER);
3944 %}
3946 operand g1RegL() %{
3947 constraint(ALLOC_IN_RC(g1_regL));
3948 match(iRegL);
3950 format %{ %}
3951 interface(REG_INTER);
3952 %}
3954 operand g3RegL() %{
3955 constraint(ALLOC_IN_RC(g3_regL));
3956 match(iRegL);
3958 format %{ %}
3959 interface(REG_INTER);
3960 %}
3962 // Int Register safe
3963 // This is 64bit safe
// An int operand forced into the long register class.
3964 operand iRegIsafe() %{
3965 constraint(ALLOC_IN_RC(long_reg));
3967 match(iRegI);
3969 format %{ %}
3970 interface(REG_INTER);
3971 %}
3973 // Condition Code Flag Register
// The flags operand subtypes below all name the same physical condition
// codes; the distinct types tell the matcher which comparison flavor
// (signed/unsigned/pointer/long/float) produced the flags.
3974 operand flagsReg() %{
3975 constraint(ALLOC_IN_RC(int_flags));
3976 match(RegFlags);
3978 format %{ "ccr" %} // both ICC and XCC
3979 interface(REG_INTER);
3980 %}
3982 // Condition Code Register, unsigned comparisons.
3983 operand flagsRegU() %{
3984 constraint(ALLOC_IN_RC(int_flags));
3985 match(RegFlags);
3987 format %{ "icc_U" %}
3988 interface(REG_INTER);
3989 %}
3991 // Condition Code Register, pointer comparisons.
3992 operand flagsRegP() %{
3993 constraint(ALLOC_IN_RC(int_flags));
3994 match(RegFlags);
3996 #ifdef _LP64
3997 format %{ "xcc_P" %}
3998 #else
3999 format %{ "icc_P" %}
4000 #endif
4001 interface(REG_INTER);
4002 %}
4004 // Condition Code Register, long comparisons.
4005 operand flagsRegL() %{
4006 constraint(ALLOC_IN_RC(int_flags));
4007 match(RegFlags);
4009 format %{ "xcc_L" %}
4010 interface(REG_INTER);
4011 %}
4013 // Condition Code Register, floating comparisons, unordered same as "less".
4014 operand flagsRegF() %{
4015 constraint(ALLOC_IN_RC(float_flags));
4016 match(RegFlags);
4017 match(flagsRegF0);
4019 format %{ %}
4020 interface(REG_INTER);
4021 %}
// Floating condition codes pinned to %fcc0.
4023 operand flagsRegF0() %{
4024 constraint(ALLOC_IN_RC(float_flag0));
4025 match(RegFlags);
4027 format %{ %}
4028 interface(REG_INTER);
4029 %}
4032 // Condition Code Flag Register used by long compare
4033 operand flagsReg_long_LTGE() %{
4034 constraint(ALLOC_IN_RC(int_flags));
4035 match(RegFlags);
4036 format %{ "icc_LTGE" %}
4037 interface(REG_INTER);
4038 %}
4039 operand flagsReg_long_EQNE() %{
4040 constraint(ALLOC_IN_RC(int_flags));
4041 match(RegFlags);
4042 format %{ "icc_EQNE" %}
4043 interface(REG_INTER);
4044 %}
4045 operand flagsReg_long_LEGT() %{
4046 constraint(ALLOC_IN_RC(int_flags));
4047 match(RegFlags);
4048 format %{ "icc_LEGT" %}
4049 interface(REG_INTER);
4050 %}
// Floating-point register operands.
// Double-precision register (any double register).
4053 operand regD() %{
4054 constraint(ALLOC_IN_RC(dflt_reg));
4055 match(RegD);
4057 match(regD_low);
4059 format %{ %}
4060 interface(REG_INTER);
4061 %}
// Single-precision register.
4063 operand regF() %{
4064 constraint(ALLOC_IN_RC(sflt_reg));
4065 match(RegF);
4067 format %{ %}
4068 interface(REG_INTER);
4069 %}
// Double register restricted to the low half of the FP register file.
4071 operand regD_low() %{
4072 constraint(ALLOC_IN_RC(dflt_low_reg));
4073 match(regD);
4075 format %{ %}
4076 interface(REG_INTER);
4077 %}
4079 // Special Registers
4081 // Method Register
// Both operands pin their register to G5, matching inline_cache_reg /
// interpreter_method_oop_reg declared in the frame block above.
4082 operand inline_cache_regP(iRegP reg) %{
4083 constraint(ALLOC_IN_RC(g5_regP)); // G5=inline_cache_reg but uses 2 bits instead of 1
4084 match(reg);
4085 format %{ %}
4086 interface(REG_INTER);
4087 %}
4089 operand interpreter_method_oop_regP(iRegP reg) %{
4090 constraint(ALLOC_IN_RC(g5_regP)); // G5=interpreter_method_oop_reg but uses 2 bits instead of 1
4091 match(reg);
4092 format %{ %}
4093 interface(REG_INTER);
4094 %}
4097 //----------Complex Operands---------------------------------------------------
4098 // Indirect Memory Reference
// [reg] — register-indirect, no displacement or index.
4099 operand indirect(sp_ptr_RegP reg) %{
4100 constraint(ALLOC_IN_RC(sp_ptr_reg));
4101 match(reg);
4103 op_cost(100);
4104 format %{ "[$reg]" %}
4105 interface(MEMORY_INTER) %{
4106 base($reg);
4107 index(0x0);
4108 scale(0x0);
4109 disp(0x0);
4110 %}
4111 %}
4113 // Indirect with simm13 Offset
// [reg + simm13] — matches an AddP of base plus a 13-bit constant.
4114 operand indOffset13(sp_ptr_RegP reg, immX13 offset) %{
4115 constraint(ALLOC_IN_RC(sp_ptr_reg));
4116 match(AddP reg offset);
4118 op_cost(100);
4119 format %{ "[$reg + $offset]" %}
4120 interface(MEMORY_INTER) %{
4121 base($reg);
4122 index(0x0);
4123 scale(0x0);
4124 disp($offset);
4125 %}
4126 %}
4128 // Indirect with simm13 Offset minus 7
// Same as above, but offset+7 must also fit in simm13 so multi-word
// accesses at small additional offsets stay encodable.
4129 operand indOffset13m7(sp_ptr_RegP reg, immX13m7 offset) %{
4130 constraint(ALLOC_IN_RC(sp_ptr_reg));
4131 match(AddP reg offset);
4133 op_cost(100);
4134 format %{ "[$reg + $offset]" %}
4135 interface(MEMORY_INTER) %{
4136 base($reg);
4137 index(0x0);
4138 scale(0x0);
4139 disp($offset);
4140 %}
4141 %}
4143 // Note: Intel has a swapped version also, like this:
4144 //operand indOffsetX(iRegI reg, immP offset) %{
4145 // constraint(ALLOC_IN_RC(int_reg));
4146 // match(AddP offset reg);
4147 //
4148 // op_cost(100);
4149 // format %{ "[$reg + $offset]" %}
4150 // interface(MEMORY_INTER) %{
4151 // base($reg);
4152 // index(0x0);
4153 // scale(0x0);
4154 // disp($offset);
4155 // %}
4156 //%}
4157 //// However, it doesn't make sense for SPARC, since
4158 // we have no particularly good way to embed oops in
4159 // single instructions.
4161 // Indirect with Register Index
// [addr + index] — register + register addressing, no scaling on SPARC.
4162 operand indIndex(iRegP addr, iRegX index) %{
4163 constraint(ALLOC_IN_RC(ptr_reg));
4164 match(AddP addr index);
4166 op_cost(100);
4167 format %{ "[$addr + $index]" %}
4168 interface(MEMORY_INTER) %{
4169 base($addr);
4170 index($index);
4171 scale(0x0);
4172 disp(0x0);
4173 %}
4174 %}
4176 //----------Special Memory Operands--------------------------------------------
4177 // Stack Slot Operand - This operand is used for loading and storing temporary
4178 // values on the stack where a match requires a value to
4179 // flow through memory.
// All five variants address [SP + slot-offset]; base 0xE is the encoding
// of R_SP and disp is the allocator-assigned stack offset.
4180 operand stackSlotI(sRegI reg) %{
4181 constraint(ALLOC_IN_RC(stack_slots));
4182 op_cost(100);
4183 //match(RegI);
4184 format %{ "[$reg]" %}
4185 interface(MEMORY_INTER) %{
4186 base(0xE); // R_SP
4187 index(0x0);
4188 scale(0x0);
4189 disp($reg); // Stack Offset
4190 %}
4191 %}
4193 operand stackSlotP(sRegP reg) %{
4194 constraint(ALLOC_IN_RC(stack_slots));
4195 op_cost(100);
4196 //match(RegP);
4197 format %{ "[$reg]" %}
4198 interface(MEMORY_INTER) %{
4199 base(0xE); // R_SP
4200 index(0x0);
4201 scale(0x0);
4202 disp($reg); // Stack Offset
4203 %}
4204 %}
4206 operand stackSlotF(sRegF reg) %{
4207 constraint(ALLOC_IN_RC(stack_slots));
4208 op_cost(100);
4209 //match(RegF);
4210 format %{ "[$reg]" %}
4211 interface(MEMORY_INTER) %{
4212 base(0xE); // R_SP
4213 index(0x0);
4214 scale(0x0);
4215 disp($reg); // Stack Offset
4216 %}
4217 %}
4218 operand stackSlotD(sRegD reg) %{
4219 constraint(ALLOC_IN_RC(stack_slots));
4220 op_cost(100);
4221 //match(RegD);
4222 format %{ "[$reg]" %}
4223 interface(MEMORY_INTER) %{
4224 base(0xE); // R_SP
4225 index(0x0);
4226 scale(0x0);
4227 disp($reg); // Stack Offset
4228 %}
4229 %}
4230 operand stackSlotL(sRegL reg) %{
4231 constraint(ALLOC_IN_RC(stack_slots));
4232 op_cost(100);
4233 //match(RegL);
4234 format %{ "[$reg]" %}
4235 interface(MEMORY_INTER) %{
4236 base(0xE); // R_SP
4237 index(0x0);
4238 scale(0x0);
4239 disp($reg); // Stack Offset
4240 %}
4241 %}
4243 // Operands for expressing Control Flow
4244 // NOTE: Label is a predefined operand which should not be redefined in
4245 // the AD file. It is generically handled within the ADLC.
4247 //----------Conditional Branch Operands----------------------------------------
4248 // Comparison Op - This is the operation of the comparison, and is limited to
4249 // the following set of codes:
4250 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
4251 //
4252 // Other attributes of the comparison, such as unsignedness, are specified
4253 // by the comparison instruction that sets a condition code flags register.
4254 // That result is represented by a flags operand whose subtype is appropriate
4255 // to the unsignedness (etc.) of the comparison.
4256 //
4257 // Later, the instruction which matches both the Comparison Op (a Bool) and
4258 // the flags (produced by the Cmp) specifies the coding of the comparison op
4259 // by matching a specific subtype of Bool operand below, such as cmpOpU.
// Signed integer comparison operand: maps each Bool condition to its
// SPARC branch condition-code encoding (e.g. equal=0x1, not_equal=0x9;
// note each condition and its inverse differ by bit 3).
4261 operand cmpOp() %{
4262 match(Bool);
4264 format %{ "" %}
4265 interface(COND_INTER) %{
4266 equal(0x1);
4267 not_equal(0x9);
4268 less(0x3);
4269 greater_equal(0xB);
4270 less_equal(0x2);
4271 greater(0xA);
4272 %}
4273 %}
4275 // Comparison Op, unsigned
// Unsigned comparison operand: same equal/not_equal encodings as cmpOp,
// but the ordering conditions use the unsigned (carry-based) encodings
// (less=0x5, greater_equal=0xD, less_equal=0x4, greater=0xC).
4276 operand cmpOpU() %{
4277 match(Bool);
4279 format %{ "u" %}
4280 interface(COND_INTER) %{
4281 equal(0x1);
4282 not_equal(0x9);
4283 less(0x5);
4284 greater_equal(0xD);
4285 less_equal(0x4);
4286 greater(0xC);
4287 %}
4288 %}
4290 // Comparison Op, pointer (same as unsigned)
// Pointer comparison operand: encodings identical to cmpOpU, since
// pointers compare as unsigned values (see header comment above).
4291 operand cmpOpP() %{
4292 match(Bool);
4294 format %{ "p" %}
4295 interface(COND_INTER) %{
4296 equal(0x1);
4297 not_equal(0x9);
4298 less(0x5);
4299 greater_equal(0xD);
4300 less_equal(0x4);
4301 greater(0xC);
4302 %}
4303 %}
4305 // Comparison Op, branch-register encoding
// Comparison operand for branch-on-register instructions: uses the
// distinct condition encodings of the register-compare branch form
// (equal/not_equal = 0x1/0x5 here, unlike the CC-branch encodings).
4306 operand cmpOp_reg() %{
4307 match(Bool);
4309 format %{ "" %}
4310 interface(COND_INTER) %{
4311 equal (0x1);
4312 not_equal (0x5);
4313 less (0x3);
4314 greater_equal(0x7);
4315 less_equal (0x2);
4316 greater (0x6);
4317 %}
4318 %}
4320 // Comparison Code, floating, unordered same as less
// Floating-point comparison operand: encodings for FP condition-code
// branches; per the header comment, "unordered" is treated the same as
// "less", which is why these differ from the integer encodings.
4321 operand cmpOpF() %{
4322 match(Bool);
4324 format %{ "fl" %}
4325 interface(COND_INTER) %{
4326 equal(0x9);
4327 not_equal(0x1);
4328 less(0x3);
4329 greater_equal(0xB);
4330 less_equal(0xE);
4331 greater(0x6);
4332 %}
4333 %}
4335 // Used by long compare
// Commuted comparison operand, used by long compare: relative to cmpOp
// the ordering encodings are swapped pairwise (less<->greater = 0xA/0x3,
// less_equal<->greater_equal = 0xB/0x2), i.e. the condition with the
// operands exchanged; equal/not_equal are unchanged.
4336 operand cmpOp_commute() %{
4337 match(Bool);
4339 format %{ "" %}
4340 interface(COND_INTER) %{
4341 equal(0x1);
4342 not_equal(0x9);
4343 less(0xA);
4344 greater_equal(0x2);
4345 less_equal(0xB);
4346 greater(0x3);
4347 %}
4348 %}
4350 //----------OPERAND CLASSES----------------------------------------------------
4351 // Operand Classes are groups of operands that are used to simplify
4352 // instruction definitions by not requiring the AD writer to specify separate
4353 // instructions for every form of operand when the instruction accepts
4354 // multiple operand types with the same basic encoding and format. The classic
4355 // case of this is memory operands.
// Operand classes: "memory" accepts any of the three addressing forms
// (register-indirect, reg+13-bit-offset, reg+reg); "indIndexMemory" is
// restricted to the reg+reg indexed form only.
4356 opclass memory( indirect, indOffset13, indIndex );
4357 opclass indIndexMemory( indIndex );
4359 //----------PIPELINE-----------------------------------------------------------
// Machine pipeline model consumed by ADLC for instruction scheduling and
// bundling: declares bundle attributes, functional-unit resources, the
// ordered pipeline stages, and one pipe_class per instruction shape
// describing which stage each operand is read/written in and which
// functional unit is occupied (and for how many cycles).
4360 pipeline %{
4362 //----------ATTRIBUTES---------------------------------------------------------
4363 attributes %{
4364 fixed_size_instructions; // Fixed size instructions
4365 branch_has_delay_slot; // Branch has delay slot following
4366 max_instructions_per_bundle = 4; // Up to 4 instructions per bundle
4367 instruction_unit_size = 4; // An instruction is 4 bytes long
4368 instruction_fetch_unit_size = 16; // The processor fetches one line
4369 instruction_fetch_units = 1; // of 16 bytes
4371 // List of nop instructions
4372 nops( Nop_A0, Nop_A1, Nop_MS, Nop_FA, Nop_BR );
4373 %}
4375 //----------RESOURCES----------------------------------------------------------
4376 // Resources are the functional units available to the machine
4377 // (two integer ALUs A0/A1, memory, branch, FP add, FP multiply,
4378 // integer and FP divide; IALU means "either integer ALU").
4377 resources(A0, A1, MS, BR, FA, FM, IDIV, FDIV, IALU = A0 | A1);
4379 //----------PIPELINE DESCRIPTION-----------------------------------------------
4380 // Pipeline Description specifies the stages in the machine's pipeline
4382 pipe_desc(A, P, F, B, I, J, S, R, E, C, M, W, X, T, D);
4384 //----------PIPELINE CLASSES---------------------------------------------------
4385 // Pipeline Classes describe the stages in which input and output are
4386 // referenced by the hardware pipeline.
4388 // Integer ALU reg-reg operation
4389 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
4390 single_instruction;
4391 dst : E(write);
4392 src1 : R(read);
4393 src2 : R(read);
4394 IALU : R;
4395 %}
4397 // Integer ALU reg-reg long operation
4398 pipe_class ialu_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{
4399 instruction_count(2);
4400 dst : E(write);
4401 src1 : R(read);
4402 src2 : R(read);
4403 IALU : R;
4404 IALU : R;
4405 %}
4407 // Integer ALU reg-reg long dependent operation
4408 pipe_class ialu_reg_reg_2_dep(iRegL dst, iRegL src1, iRegL src2, flagsReg cr) %{
4409 instruction_count(1); multiple_bundles;
4410 dst : E(write);
4411 src1 : R(read);
4412 src2 : R(read);
4413 cr : E(write);
4414 IALU : R(2);
4415 %}
4417 // Integer ALU reg-imm operation
4418 pipe_class ialu_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
4419 single_instruction;
4420 dst : E(write);
4421 src1 : R(read);
4422 IALU : R;
4423 %}
4425 // Integer ALU reg-reg operation with condition code
4426 pipe_class ialu_cc_reg_reg(iRegI dst, iRegI src1, iRegI src2, flagsReg cr) %{
4427 single_instruction;
4428 dst : E(write);
4429 cr : E(write);
4430 src1 : R(read);
4431 src2 : R(read);
4432 IALU : R;
4433 %}
4435 // Integer ALU reg-imm operation with condition code
4436 pipe_class ialu_cc_reg_imm(iRegI dst, iRegI src1, immI13 src2, flagsReg cr) %{
4437 single_instruction;
4438 dst : E(write);
4439 cr : E(write);
4440 src1 : R(read);
4441 IALU : R;
4442 %}
4444 // Integer ALU zero-reg operation
4445 pipe_class ialu_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{
4446 single_instruction;
4447 dst : E(write);
4448 src2 : R(read);
4449 IALU : R;
4450 %}
4452 // Integer ALU zero-reg operation with condition code only
4453 pipe_class ialu_cconly_zero_reg(flagsReg cr, iRegI src) %{
4454 single_instruction;
4455 cr : E(write);
4456 src : R(read);
4457 IALU : R;
4458 %}
4460 // Integer ALU reg-reg operation with condition code only
4461 pipe_class ialu_cconly_reg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
4462 single_instruction;
4463 cr : E(write);
4464 src1 : R(read);
4465 src2 : R(read);
4466 IALU : R;
4467 %}
4469 // Integer ALU reg-imm operation with condition code only
4470 pipe_class ialu_cconly_reg_imm(flagsReg cr, iRegI src1, immI13 src2) %{
4471 single_instruction;
4472 cr : E(write);
4473 src1 : R(read);
4474 IALU : R;
4475 %}
4477 // Integer ALU reg-reg-zero operation with condition code only
4478 pipe_class ialu_cconly_reg_reg_zero(flagsReg cr, iRegI src1, iRegI src2, immI0 zero) %{
4479 single_instruction;
4480 cr : E(write);
4481 src1 : R(read);
4482 src2 : R(read);
4483 IALU : R;
4484 %}
4486 // Integer ALU reg-imm-zero operation with condition code only
4487 pipe_class ialu_cconly_reg_imm_zero(flagsReg cr, iRegI src1, immI13 src2, immI0 zero) %{
4488 single_instruction;
4489 cr : E(write);
4490 src1 : R(read);
4491 IALU : R;
4492 %}
4494 // Integer ALU reg-reg operation with condition code, src1 modified
4495 pipe_class ialu_cc_rwreg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
4496 single_instruction;
4497 cr : E(write);
4498 src1 : E(write);
4499 src1 : R(read);
4500 src2 : R(read);
4501 IALU : R;
4502 %}
4504 // Integer ALU reg-imm operation with condition code, src1 modified
4505 pipe_class ialu_cc_rwreg_imm(flagsReg cr, iRegI src1, immI13 src2) %{
4506 single_instruction;
4507 cr : E(write);
4508 src1 : E(write);
4509 src1 : R(read);
4510 IALU : R;
4511 %}
4513 pipe_class cmpL_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg cr ) %{
4514 multiple_bundles;
4515 dst : E(write)+4;
4516 cr : E(write);
4517 src1 : R(read);
4518 src2 : R(read);
4519 IALU : R(3);
4520 BR : R(2);
4521 %}
4523 // Integer ALU operation
4524 pipe_class ialu_none(iRegI dst) %{
4525 single_instruction;
4526 dst : E(write);
4527 IALU : R;
4528 %}
4530 // Integer ALU reg operation
4531 pipe_class ialu_reg(iRegI dst, iRegI src) %{
4532 single_instruction; may_have_no_code;
4533 dst : E(write);
4534 src : R(read);
4535 IALU : R;
4536 %}
4538 // Integer ALU reg conditional operation
4539 // This instruction has a 1 cycle stall, and cannot execute
4540 // in the same cycle as the instruction setting the condition
4541 // code. We kludge this by pretending to read the condition code
4542 // 1 cycle earlier, and by marking the functional units as busy
4543 // for 2 cycles with the result available 1 cycle later than
4544 // is really the case.
4545 pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr ) %{
4546 single_instruction;
4547 op2_out : C(write);
4548 op1 : R(read);
4549 cr : R(read); // This is really E, with a 1 cycle stall
4550 BR : R(2);
4551 MS : R(2);
4552 %}
4554 #ifdef _LP64
4555 pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{
4556 instruction_count(1); multiple_bundles;
4557 dst : C(write)+1;
4558 src : R(read)+1;
4559 IALU : R(1);
4560 BR : E(2);
4561 MS : E(2);
4562 %}
4563 #endif
4565 // Integer ALU reg operation
4566 pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{
4567 single_instruction; may_have_no_code;
4568 dst : E(write);
4569 src : R(read);
4570 IALU : R;
4571 %}
4572 pipe_class ialu_move_reg_I_to_L(iRegL dst, iRegI src) %{
4573 single_instruction; may_have_no_code;
4574 dst : E(write);
4575 src : R(read);
4576 IALU : R;
4577 %}
4579 // Two integer ALU reg operations
4580 pipe_class ialu_reg_2(iRegL dst, iRegL src) %{
4581 instruction_count(2);
4582 dst : E(write);
4583 src : R(read);
4584 A0 : R;
4585 A1 : R;
4586 %}
4588 // Two integer ALU reg operations
4589 pipe_class ialu_move_reg_L_to_L(iRegL dst, iRegL src) %{
4590 instruction_count(2); may_have_no_code;
4591 dst : E(write);
4592 src : R(read);
4593 A0 : R;
4594 A1 : R;
4595 %}
4597 // Integer ALU imm operation
4598 pipe_class ialu_imm(iRegI dst, immI13 src) %{
4599 single_instruction;
4600 dst : E(write);
4601 IALU : R;
4602 %}
4604 // Integer ALU reg-reg with carry operation
4605 pipe_class ialu_reg_reg_cy(iRegI dst, iRegI src1, iRegI src2, iRegI cy) %{
4606 single_instruction;
4607 dst : E(write);
4608 src1 : R(read);
4609 src2 : R(read);
4610 IALU : R;
4611 %}
4613 // Integer ALU cc operation
4614 pipe_class ialu_cc(iRegI dst, flagsReg cc) %{
4615 single_instruction;
4616 dst : E(write);
4617 cc : R(read);
4618 IALU : R;
4619 %}
4621 // Integer ALU cc / second IALU operation
4622 pipe_class ialu_reg_ialu( iRegI dst, iRegI src ) %{
4623 instruction_count(1); multiple_bundles;
4624 dst : E(write)+1;
4625 src : R(read);
4626 IALU : R;
4627 %}
4629 // Integer ALU cc / second IALU operation
4630 pipe_class ialu_reg_reg_ialu( iRegI dst, iRegI p, iRegI q ) %{
4631 instruction_count(1); multiple_bundles;
4632 dst : E(write)+1;
4633 p : R(read);
4634 q : R(read);
4635 IALU : R;
4636 %}
4638 // Integer ALU hi-lo-reg operation
4639 pipe_class ialu_hi_lo_reg(iRegI dst, immI src) %{
4640 instruction_count(1); multiple_bundles;
4641 dst : E(write)+1;
4642 IALU : R(2);
4643 %}
4645 // Float ALU hi-lo-reg operation (with temp)
4646 pipe_class ialu_hi_lo_reg_temp(regF dst, immF src, g3RegP tmp) %{
4647 instruction_count(1); multiple_bundles;
4648 dst : E(write)+1;
4649 IALU : R(2);
4650 %}
4652 // Long Constant
4653 pipe_class loadConL( iRegL dst, immL src ) %{
4654 instruction_count(2); multiple_bundles;
4655 dst : E(write)+1;
4656 IALU : R(2);
4657 IALU : R(2);
4658 %}
4660 // Pointer Constant
4661 pipe_class loadConP( iRegP dst, immP src ) %{
4662 instruction_count(0); multiple_bundles;
4663 fixed_latency(6);
4664 %}
4666 // Polling Address
4667 pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{
4668 #ifdef _LP64
4669 instruction_count(0); multiple_bundles;
4670 fixed_latency(6);
4671 #else
4672 dst : E(write);
4673 IALU : R;
4674 #endif
4675 %}
4677 // Long Constant small
4678 pipe_class loadConLlo( iRegL dst, immL src ) %{
4679 instruction_count(2);
4680 dst : E(write);
4681 IALU : R;
4682 IALU : R;
4683 %}
4685 // [PHH] This is wrong for 64-bit. See LdImmF/D.
4686 pipe_class loadConFD(regF dst, immF src, g3RegP tmp) %{
4687 instruction_count(1); multiple_bundles;
4688 src : R(read);
4689 dst : M(write)+1;
4690 IALU : R;
4691 MS : E;
4692 %}
4694 // Integer ALU nop operation
4695 pipe_class ialu_nop() %{
4696 single_instruction;
4697 IALU : R;
4698 %}
4700 // Integer ALU nop operation
4701 pipe_class ialu_nop_A0() %{
4702 single_instruction;
4703 A0 : R;
4704 %}
4706 // Integer ALU nop operation
4707 pipe_class ialu_nop_A1() %{
4708 single_instruction;
4709 A1 : R;
4710 %}
4712 // Integer Multiply reg-reg operation
4713 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
4714 single_instruction;
4715 dst : E(write);
4716 src1 : R(read);
4717 src2 : R(read);
4718 MS : R(5);
4719 %}
4721 // Integer Multiply reg-imm operation
4722 pipe_class imul_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
4723 single_instruction;
4724 dst : E(write);
4725 src1 : R(read);
4726 MS : R(5);
4727 %}
4729 pipe_class mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
4730 single_instruction;
4731 dst : E(write)+4;
4732 src1 : R(read);
4733 src2 : R(read);
4734 MS : R(6);
4735 %}
4737 pipe_class mulL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
4738 single_instruction;
4739 dst : E(write)+4;
4740 src1 : R(read);
4741 MS : R(6);
4742 %}
4744 // Integer Divide reg-reg
4745 pipe_class sdiv_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI temp, flagsReg cr) %{
4746 instruction_count(1); multiple_bundles;
4747 dst : E(write);
4748 temp : E(write);
4749 src1 : R(read);
4750 src2 : R(read);
4751 temp : R(read);
4752 MS : R(38);
4753 %}
4755 // Integer Divide reg-imm
4756 pipe_class sdiv_reg_imm(iRegI dst, iRegI src1, immI13 src2, iRegI temp, flagsReg cr) %{
4757 instruction_count(1); multiple_bundles;
4758 dst : E(write);
4759 temp : E(write);
4760 src1 : R(read);
4761 temp : R(read);
4762 MS : R(38);
4763 %}
4765 // Long Divide
4766 pipe_class divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
4767 dst : E(write)+71;
4768 src1 : R(read);
4769 src2 : R(read)+1;
4770 MS : R(70);
4771 %}
4773 pipe_class divL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
4774 dst : E(write)+71;
4775 src1 : R(read);
4776 MS : R(70);
4777 %}
4779 // Floating Point Add Float
4780 pipe_class faddF_reg_reg(regF dst, regF src1, regF src2) %{
4781 single_instruction;
4782 dst : X(write);
4783 src1 : E(read);
4784 src2 : E(read);
4785 FA : R;
4786 %}
4788 // Floating Point Add Double
4789 pipe_class faddD_reg_reg(regD dst, regD src1, regD src2) %{
4790 single_instruction;
4791 dst : X(write);
4792 src1 : E(read);
4793 src2 : E(read);
4794 FA : R;
4795 %}
4797 // Floating Point Conditional Move based on integer flags
4798 pipe_class int_conditional_float_move (cmpOp cmp, flagsReg cr, regF dst, regF src) %{
4799 single_instruction;
4800 dst : X(write);
4801 src : E(read);
4802 cr : R(read);
4803 FA : R(2);
4804 BR : R(2);
4805 %}
4807 // Floating Point Conditional Move based on integer flags
4808 pipe_class int_conditional_double_move (cmpOp cmp, flagsReg cr, regD dst, regD src) %{
4809 single_instruction;
4810 dst : X(write);
4811 src : E(read);
4812 cr : R(read);
4813 FA : R(2);
4814 BR : R(2);
4815 %}
4817 // Floating Point Multiply Float
4818 pipe_class fmulF_reg_reg(regF dst, regF src1, regF src2) %{
4819 single_instruction;
4820 dst : X(write);
4821 src1 : E(read);
4822 src2 : E(read);
4823 FM : R;
4824 %}
4826 // Floating Point Multiply Double
4827 pipe_class fmulD_reg_reg(regD dst, regD src1, regD src2) %{
4828 single_instruction;
4829 dst : X(write);
4830 src1 : E(read);
4831 src2 : E(read);
4832 FM : R;
4833 %}
4835 // Floating Point Divide Float
4836 pipe_class fdivF_reg_reg(regF dst, regF src1, regF src2) %{
4837 single_instruction;
4838 dst : X(write);
4839 src1 : E(read);
4840 src2 : E(read);
4841 FM : R;
4842 FDIV : C(14);
4843 %}
4845 // Floating Point Divide Double
4846 pipe_class fdivD_reg_reg(regD dst, regD src1, regD src2) %{
4847 single_instruction;
4848 dst : X(write);
4849 src1 : E(read);
4850 src2 : E(read);
4851 FM : R;
4852 FDIV : C(17);
4853 %}
4855 // Floating Point Move/Negate/Abs Float
4856 pipe_class faddF_reg(regF dst, regF src) %{
4857 single_instruction;
4858 dst : W(write);
4859 src : E(read);
4860 FA : R(1);
4861 %}
4863 // Floating Point Move/Negate/Abs Double
4864 pipe_class faddD_reg(regD dst, regD src) %{
4865 single_instruction;
4866 dst : W(write);
4867 src : E(read);
4868 FA : R;
4869 %}
4871 // Floating Point Convert F->D
4872 pipe_class fcvtF2D(regD dst, regF src) %{
4873 single_instruction;
4874 dst : X(write);
4875 src : E(read);
4876 FA : R;
4877 %}
4879 // Floating Point Convert I->D
4880 pipe_class fcvtI2D(regD dst, regF src) %{
4881 single_instruction;
4882 dst : X(write);
4883 src : E(read);
4884 FA : R;
4885 %}
4887 // Floating Point Convert LHi->D
4888 pipe_class fcvtLHi2D(regD dst, regD src) %{
4889 single_instruction;
4890 dst : X(write);
4891 src : E(read);
4892 FA : R;
4893 %}
4895 // Floating Point Convert L->D
4896 pipe_class fcvtL2D(regD dst, regF src) %{
4897 single_instruction;
4898 dst : X(write);
4899 src : E(read);
4900 FA : R;
4901 %}
4903 // Floating Point Convert L->F
4904 pipe_class fcvtL2F(regD dst, regF src) %{
4905 single_instruction;
4906 dst : X(write);
4907 src : E(read);
4908 FA : R;
4909 %}
4911 // Floating Point Convert D->F
4912 pipe_class fcvtD2F(regD dst, regF src) %{
4913 single_instruction;
4914 dst : X(write);
4915 src : E(read);
4916 FA : R;
4917 %}
4919 // Floating Point Convert I->L
4920 pipe_class fcvtI2L(regD dst, regF src) %{
4921 single_instruction;
4922 dst : X(write);
4923 src : E(read);
4924 FA : R;
4925 %}
4927 // Floating Point Convert D->I
4928 pipe_class fcvtD2I(regF dst, regD src, flagsReg cr) %{
4929 instruction_count(1); multiple_bundles;
4930 dst : X(write)+6;
4931 src : E(read);
4932 FA : R;
4933 %}
4935 // Floating Point Convert D->L
4936 pipe_class fcvtD2L(regD dst, regD src, flagsReg cr) %{
4937 instruction_count(1); multiple_bundles;
4938 dst : X(write)+6;
4939 src : E(read);
4940 FA : R;
4941 %}
4943 // Floating Point Convert F->I
4944 pipe_class fcvtF2I(regF dst, regF src, flagsReg cr) %{
4945 instruction_count(1); multiple_bundles;
4946 dst : X(write)+6;
4947 src : E(read);
4948 FA : R;
4949 %}
4951 // Floating Point Convert F->L
4952 pipe_class fcvtF2L(regD dst, regF src, flagsReg cr) %{
4953 instruction_count(1); multiple_bundles;
4954 dst : X(write)+6;
4955 src : E(read);
4956 FA : R;
4957 %}
4959 // Floating Point Convert I->F
4960 pipe_class fcvtI2F(regF dst, regF src) %{
4961 single_instruction;
4962 dst : X(write);
4963 src : E(read);
4964 FA : R;
4965 %}
4967 // Floating Point Compare
4968 pipe_class faddF_fcc_reg_reg_zero(flagsRegF cr, regF src1, regF src2, immI0 zero) %{
4969 single_instruction;
4970 cr : X(write);
4971 src1 : E(read);
4972 src2 : E(read);
4973 FA : R;
4974 %}
4976 // Floating Point Compare
4977 pipe_class faddD_fcc_reg_reg_zero(flagsRegF cr, regD src1, regD src2, immI0 zero) %{
4978 single_instruction;
4979 cr : X(write);
4980 src1 : E(read);
4981 src2 : E(read);
4982 FA : R;
4983 %}
4985 // Floating Add Nop
4986 pipe_class fadd_nop() %{
4987 single_instruction;
4988 FA : R;
4989 %}
4991 // Integer Store to Memory
4992 pipe_class istore_mem_reg(memory mem, iRegI src) %{
4993 single_instruction;
4994 mem : R(read);
4995 src : C(read);
4996 MS : R;
4997 %}
4999 // Integer Store to Memory
5000 pipe_class istore_mem_spORreg(memory mem, sp_ptr_RegP src) %{
5001 single_instruction;
5002 mem : R(read);
5003 src : C(read);
5004 MS : R;
5005 %}
5007 // Integer Store Zero to Memory
5008 pipe_class istore_mem_zero(memory mem, immI0 src) %{
5009 single_instruction;
5010 mem : R(read);
5011 MS : R;
5012 %}
5014 // Special Stack Slot Store
5015 pipe_class istore_stk_reg(stackSlotI stkSlot, iRegI src) %{
5016 single_instruction;
5017 stkSlot : R(read);
5018 src : C(read);
5019 MS : R;
5020 %}
5022 // Special Stack Slot Store
5023 pipe_class lstoreI_stk_reg(stackSlotL stkSlot, iRegI src) %{
5024 instruction_count(2); multiple_bundles;
5025 stkSlot : R(read);
5026 src : C(read);
5027 MS : R(2);
5028 %}
5030 // Float Store
5031 pipe_class fstoreF_mem_reg(memory mem, RegF src) %{ // NOTE(review): 'RegF'/'RegD' capitalization in the fstore* pipe_class params differs from the 'regF'/'regD' spelling used elsewhere — confirm ADLC accepts both
5032 single_instruction;
5033 mem : R(read);
5034 src : C(read);
5035 MS : R;
5036 %}
5038 // Float Store
5039 pipe_class fstoreF_mem_zero(memory mem, immF0 src) %{
5040 single_instruction;
5041 mem : R(read);
5042 MS : R;
5043 %}
5045 // Double Store
5046 pipe_class fstoreD_mem_reg(memory mem, RegD src) %{
5047 instruction_count(1);
5048 mem : R(read);
5049 src : C(read);
5050 MS : R;
5051 %}
5053 // Double Store
5054 pipe_class fstoreD_mem_zero(memory mem, immD0 src) %{
5055 single_instruction;
5056 mem : R(read);
5057 MS : R;
5058 %}
5060 // Special Stack Slot Float Store
5061 pipe_class fstoreF_stk_reg(stackSlotI stkSlot, RegF src) %{
5062 single_instruction;
5063 stkSlot : R(read);
5064 src : C(read);
5065 MS : R;
5066 %}
5068 // Special Stack Slot Double Store
5069 pipe_class fstoreD_stk_reg(stackSlotI stkSlot, RegD src) %{
5070 single_instruction;
5071 stkSlot : R(read);
5072 src : C(read);
5073 MS : R;
5074 %}
5076 // Integer Load (when sign bit propagation not needed)
5077 pipe_class iload_mem(iRegI dst, memory mem) %{
5078 single_instruction;
5079 mem : R(read);
5080 dst : C(write);
5081 MS : R;
5082 %}
5084 // Integer Load from stack operand
5085 pipe_class iload_stkD(iRegI dst, stackSlotD mem ) %{
5086 single_instruction;
5087 mem : R(read);
5088 dst : C(write);
5089 MS : R;
5090 %}
5092 // Integer Load (when sign bit propagation or masking is needed)
5093 pipe_class iload_mask_mem(iRegI dst, memory mem) %{
5094 single_instruction;
5095 mem : R(read);
5096 dst : M(write);
5097 MS : R;
5098 %}
5100 // Float Load
5101 pipe_class floadF_mem(regF dst, memory mem) %{
5102 single_instruction;
5103 mem : R(read);
5104 dst : M(write);
5105 MS : R;
5106 %}
5108 // Float Load
5109 pipe_class floadD_mem(regD dst, memory mem) %{
5110 instruction_count(1); multiple_bundles; // Again, unaligned argument is only multiple case
5111 mem : R(read);
5112 dst : M(write);
5113 MS : R;
5114 %}
5116 // Float Load
5117 pipe_class floadF_stk(regF dst, stackSlotI stkSlot) %{
5118 single_instruction;
5119 stkSlot : R(read);
5120 dst : M(write);
5121 MS : R;
5122 %}
5124 // Float Load
5125 pipe_class floadD_stk(regD dst, stackSlotI stkSlot) %{
5126 single_instruction;
5127 stkSlot : R(read);
5128 dst : M(write);
5129 MS : R;
5130 %}
5132 // Memory Nop
5133 pipe_class mem_nop() %{
5134 single_instruction;
5135 MS : R;
5136 %}
5138 pipe_class sethi(iRegP dst, immI src) %{
5139 single_instruction;
5140 dst : E(write);
5141 IALU : R;
5142 %}
5144 pipe_class loadPollP(iRegP poll) %{
5145 single_instruction;
5146 poll : R(read);
5147 MS : R;
5148 %}
5150 pipe_class br(Universe br, label labl) %{
5151 single_instruction_with_delay_slot;
5152 BR : R;
5153 %}
5155 pipe_class br_cc(Universe br, cmpOp cmp, flagsReg cr, label labl) %{
5156 single_instruction_with_delay_slot;
5157 cr : E(read);
5158 BR : R;
5159 %}
5161 pipe_class br_reg(Universe br, cmpOp cmp, iRegI op1, label labl) %{
5162 single_instruction_with_delay_slot;
5163 op1 : E(read);
5164 BR : R;
5165 MS : R;
5166 %}
5168 // Compare and branch
5169 pipe_class cmp_br_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl, flagsReg cr) %{
5170 instruction_count(2); has_delay_slot;
5171 cr : E(write);
5172 src1 : R(read);
5173 src2 : R(read);
5174 IALU : R;
5175 BR : R;
5176 %}
5178 // Compare and branch
5179 pipe_class cmp_br_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI13 src2, label labl, flagsReg cr) %{
5180 instruction_count(2); has_delay_slot;
5181 cr : E(write);
5182 src1 : R(read);
5183 IALU : R;
5184 BR : R;
5185 %}
5187 // Compare and branch using cbcond
5188 pipe_class cbcond_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl) %{
5189 single_instruction;
5190 src1 : E(read);
5191 src2 : E(read);
5192 IALU : R;
5193 BR : R;
5194 %}
5196 // Compare and branch using cbcond
5197 pipe_class cbcond_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI5 src2, label labl) %{
5198 single_instruction;
5199 src1 : E(read);
5200 IALU : R;
5201 BR : R;
5202 %}
5204 pipe_class br_fcc(Universe br, cmpOpF cc, flagsReg cr, label labl) %{
5205 single_instruction_with_delay_slot;
5206 cr : E(read);
5207 BR : R;
5208 %}
5210 pipe_class br_nop() %{
5211 single_instruction;
5212 BR : R;
5213 %}
5215 pipe_class simple_call(method meth) %{
5216 instruction_count(2); multiple_bundles; force_serialization;
5217 fixed_latency(100);
5218 BR : R(1);
5219 MS : R(1);
5220 A0 : R(1);
5221 %}
5223 pipe_class compiled_call(method meth) %{
5224 instruction_count(1); multiple_bundles; force_serialization;
5225 fixed_latency(100);
5226 MS : R(1);
5227 %}
5229 pipe_class call(method meth) %{
5230 instruction_count(0); multiple_bundles; force_serialization;
5231 fixed_latency(100);
5232 %}
5234 pipe_class tail_call(Universe ignore, label labl) %{
5235 single_instruction; has_delay_slot;
5236 fixed_latency(100);
5237 BR : R(1);
5238 MS : R(1);
5239 %}
5241 pipe_class ret(Universe ignore) %{
5242 single_instruction; has_delay_slot;
5243 BR : R(1);
5244 MS : R(1);
5245 %}
5247 pipe_class ret_poll(g3RegP poll) %{
5248 instruction_count(3); has_delay_slot;
5249 poll : E(read);
5250 MS : R;
5251 %}
5253 // The real do-nothing guy
5254 pipe_class empty( ) %{
5255 instruction_count(0);
5256 %}
5258 pipe_class long_memory_op() %{
5259 instruction_count(0); multiple_bundles; force_serialization;
5260 fixed_latency(25);
5261 MS : R(1);
5262 %}
5264 // Check-cast
5265 pipe_class partial_subtype_check_pipe(Universe ignore, iRegP array, iRegP match ) %{
5266 array : R(read);
5267 match : R(read);
5268 IALU : R(2);
5269 BR : R(2);
5270 MS : R;
5271 %}
5273 // Convert FPU flags into +1,0,-1
5274 pipe_class floating_cmp( iRegI dst, regF src1, regF src2 ) %{
5275 src1 : E(read);
5276 src2 : E(read);
5277 dst : E(write);
5278 FA : R;
5279 MS : R(2);
5280 BR : R(2);
5281 %}
5283 // Compare for p < q, and conditionally add y
5284 pipe_class cadd_cmpltmask( iRegI p, iRegI q, iRegI y ) %{
5285 p : E(read);
5286 q : E(read);
5287 y : E(read);
5288 IALU : R(3) // NOTE(review): trailing ';' missing here — every other resource-usage line in this file ends with one; ADLC appears to tolerate it, but confirm before relying on it
5289 %}
5291 // Perform a compare, then move conditionally in a branch delay slot.
5292 pipe_class min_max( iRegI src2, iRegI srcdst ) %{
5293 src2 : E(read);
5294 srcdst : E(read);
5295 IALU : R;
5296 BR : R;
5297 %}
5299 // Define the class for the Nop node
5300 define %{
5301 MachNop = ialu_nop;
5302 %}
5304 %}
5306 //----------INSTRUCTIONS-------------------------------------------------------
5308 //------------Special Stack Slot instructions - no match rules-----------------
// Raw move of an int stack slot into a float register via LDF
// (4-byte encoding); no match rule, used only via explicit effects.
5309 instruct stkI_to_regF(regF dst, stackSlotI src) %{
5310 // No match rule to avoid chain rule match.
5311 effect(DEF dst, USE src);
5312 ins_cost(MEMORY_REF_COST);
5313 size(4);
5314 format %{ "LDF $src,$dst\t! stkI to regF" %}
5315 opcode(Assembler::ldf_op3);
5316 ins_encode(simple_form3_mem_reg(src, dst));
5317 ins_pipe(floadF_stk);
5318 %}
// Raw move of a long stack slot into a double register via LDDF
// (4-byte encoding); no match rule, used only via explicit effects.
5320 instruct stkL_to_regD(regD dst, stackSlotL src) %{
5321 // No match rule to avoid chain rule match.
5322 effect(DEF dst, USE src);
5323 ins_cost(MEMORY_REF_COST);
5324 size(4);
5325 format %{ "LDDF $src,$dst\t! stkL to regD" %}
5326 opcode(Assembler::lddf_op3);
5327 ins_encode(simple_form3_mem_reg(src, dst));
5328 ins_pipe(floadD_stk);
5329 %}
// Raw move of a float register into an int stack slot via STF
// (4-byte encoding); no match rule, used only via explicit effects.
5331 instruct regF_to_stkI(stackSlotI dst, regF src) %{
5332 // No match rule to avoid chain rule match.
5333 effect(DEF dst, USE src);
5334 ins_cost(MEMORY_REF_COST);
5335 size(4);
5336 format %{ "STF $src,$dst\t! regF to stkI" %}
5337 opcode(Assembler::stf_op3);
5338 ins_encode(simple_form3_mem_reg(dst, src));
5339 ins_pipe(fstoreF_stk_reg);
5340 %}
// Raw move of a double register into a long stack slot via STDF
// (4-byte encoding); no match rule, used only via explicit effects.
5342 instruct regD_to_stkL(stackSlotL dst, regD src) %{
5343 // No match rule to avoid chain rule match.
5344 effect(DEF dst, USE src);
5345 ins_cost(MEMORY_REF_COST);
5346 size(4);
5347 format %{ "STDF $src,$dst\t! regD to stkL" %}
5348 opcode(Assembler::stdf_op3);
5349 ins_encode(simple_form3_mem_reg(dst, src));
5350 ins_pipe(fstoreD_stk_reg);
5351 %}
// Store an int register into the high word of a long stack slot and
// zero the low word (second STW writes R_G0 at dst+4). Two 4-byte
// stores, hence double cost/size.
5353 instruct regI_to_stkLHi(stackSlotL dst, iRegI src) %{
5354 effect(DEF dst, USE src);
5355 ins_cost(MEMORY_REF_COST*2);
5356 size(8);
5357 format %{ "STW $src,$dst.hi\t! long\n\t"
5358 "STW R_G0,$dst.lo" %}
5359 opcode(Assembler::stw_op3);
5360 ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, R_G0));
5361 ins_pipe(lstoreI_stk_reg);
5362 %}
// Raw move of a long register into a double stack slot via STX
// (4-byte encoding); no match rule, used only via explicit effects.
5364 instruct regL_to_stkD(stackSlotD dst, iRegL src) %{
5365 // No match rule to avoid chain rule match.
5366 effect(DEF dst, USE src);
5367 ins_cost(MEMORY_REF_COST);
5368 size(4);
5369 format %{ "STX $src,$dst\t! regL to stkD" %}
5370 opcode(Assembler::stx_op3);
5371 ins_encode(simple_form3_mem_reg( dst, src ) );
5372 ins_pipe(istore_stk_reg);
5373 %}
5375 //---------- Chain stack slots between similar types --------
5377 // Load integer from stack slot
// Chain rule: load an int from its stack slot into an int register (LDUW).
5378 instruct stkI_to_regI( iRegI dst, stackSlotI src ) %{
5379 match(Set dst src);
5380 ins_cost(MEMORY_REF_COST);
5382 size(4);
5383 format %{ "LDUW $src,$dst\t!stk" %}
5384 opcode(Assembler::lduw_op3);
5385 ins_encode(simple_form3_mem_reg( src, dst ) );
5386 ins_pipe(iload_mem);
5387 %}
5389 // Store integer to stack slot
// Chain rule: store an int register into its stack slot (STW).
5390 instruct regI_to_stkI( stackSlotI dst, iRegI src ) %{
5391 match(Set dst src);
5392 ins_cost(MEMORY_REF_COST);
5394 size(4);
5395 format %{ "STW $src,$dst\t!stk" %}
5396 opcode(Assembler::stw_op3);
5397 ins_encode(simple_form3_mem_reg( dst, src ) );
5398 ins_pipe(istore_mem_reg);
5399 %}
5401 // Load long from stack slot
// Chain rule: load a long from its stack slot into a long register (LDX).
5402 instruct stkL_to_regL( iRegL dst, stackSlotL src ) %{
5403 match(Set dst src);
5405 ins_cost(MEMORY_REF_COST);
5406 size(4);
5407 format %{ "LDX $src,$dst\t! long" %}
5408 opcode(Assembler::ldx_op3);
5409 ins_encode(simple_form3_mem_reg( src, dst ) );
5410 ins_pipe(iload_mem);
5411 %}
5413 // Store long to stack slot
// Chain rule: store a long register into its stack slot (STX).
5414 instruct regL_to_stkL(stackSlotL dst, iRegL src) %{
5415 match(Set dst src);
5417 ins_cost(MEMORY_REF_COST);
5418 size(4);
5419 format %{ "STX $src,$dst\t! long" %}
5420 opcode(Assembler::stx_op3);
5421 ins_encode(simple_form3_mem_reg( dst, src ) );
5422 ins_pipe(istore_mem_reg);
5423 %}
5425 #ifdef _LP64
5426 // Load pointer from stack slot, 64-bit encoding
// Chain rule (64-bit): load a pointer from its stack slot via LDX.
5427 instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
5428 match(Set dst src);
5429 ins_cost(MEMORY_REF_COST);
5430 size(4);
5431 format %{ "LDX $src,$dst\t!ptr" %}
5432 opcode(Assembler::ldx_op3);
5433 ins_encode(simple_form3_mem_reg( src, dst ) );
5434 ins_pipe(iload_mem);
5435 %}
5437 // Store pointer to stack slot
// Chain rule (64-bit): store a pointer register into its stack slot via STX.
5438 instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
5439 match(Set dst src);
5440 ins_cost(MEMORY_REF_COST);
5441 size(4);
5442 format %{ "STX $src,$dst\t!ptr" %}
5443 opcode(Assembler::stx_op3);
5444 ins_encode(simple_form3_mem_reg( dst, src ) );
5445 ins_pipe(istore_mem_reg);
5446 %}
5447 #else // _LP64
5448 // Load pointer from stack slot, 32-bit encoding
// Chain rule (32-bit): load a pointer from its stack slot via LDUW.
// NOTE(review): unlike the LP64 variant above, no size(4) is declared
// here — confirm this is intentional (ADLC then computes the size).
5449 instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
5450 match(Set dst src);
5451 ins_cost(MEMORY_REF_COST);
5452 format %{ "LDUW $src,$dst\t!ptr" %}
5453 opcode(Assembler::lduw_op3, Assembler::ldst_op);
5454 ins_encode(simple_form3_mem_reg( src, dst ) );
5455 ins_pipe(iload_mem);
5456 %}
5458 // Store pointer to stack slot
// Chain rule (32-bit): store a pointer register into its stack slot via STW.
// NOTE(review): no size(4) declared, matching the 32-bit load variant.
5459 instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
5460 match(Set dst src);
5461 ins_cost(MEMORY_REF_COST);
5462 format %{ "STW $src,$dst\t!ptr" %}
5463 opcode(Assembler::stw_op3, Assembler::ldst_op);
5464 ins_encode(simple_form3_mem_reg( dst, src ) );
5465 ins_pipe(istore_mem_reg);
5466 %}
5467 #endif // _LP64
5469 //------------Special Nop instructions for bundling - no match rules-----------
5470 // Nop using the A0 functional unit
// Bundling filler nop occupying the A0 integer ALU (encoded as OR, the
// standard nop form); listed in the pipeline's nops() declaration.
5471 instruct Nop_A0() %{
5472 ins_cost(0);
5474 format %{ "NOP ! Alu Pipeline" %}
5475 opcode(Assembler::or_op3, Assembler::arith_op);
5476 ins_encode( form2_nop() );
5477 ins_pipe(ialu_nop_A0);
5478 %}
5480 // Nop using the A1 functional unit
// Bundling filler nop occupying the A1 integer ALU.
5481 instruct Nop_A1( ) %{
5482 ins_cost(0);
5484 format %{ "NOP ! Alu Pipeline" %}
5485 opcode(Assembler::or_op3, Assembler::arith_op);
5486 ins_encode( form2_nop() );
5487 ins_pipe(ialu_nop_A1);
5488 %}
5490 // Nop using the memory functional unit
// Bundling filler nop occupying the memory (MS) functional unit.
5491 instruct Nop_MS( ) %{
5492 ins_cost(0);
5494 format %{ "NOP ! Memory Pipeline" %}
5495 ins_encode( emit_mem_nop );
5496 ins_pipe(mem_nop);
5497 %}
5499 // Nop using the floating add functional unit
5500 instruct Nop_FA( ) %{
5501 ins_cost(0);
5503 format %{ "NOP ! Floating Add Pipeline" %}
5504 ins_encode( emit_fadd_nop );
5505 ins_pipe(fadd_nop);
5506 %}
5508 // Nop using the branch functional unit
5509 instruct Nop_BR( ) %{
5510 ins_cost(0);
5512 format %{ "NOP ! Branch Pipeline" %}
5513 ins_encode( emit_br_nop );
5514 ins_pipe(br_nop);
5515 %}
5517 //----------Load/Store/Move Instructions---------------------------------------
5518 //----------Load Instructions--------------------------------------------------
5519 // Load Byte (8bit signed)
5520 instruct loadB(iRegI dst, memory mem) %{
5521 match(Set dst (LoadB mem));
5522 ins_cost(MEMORY_REF_COST);
5524 size(4);
5525 format %{ "LDSB $mem,$dst\t! byte" %}
5526 ins_encode %{
5527 __ ldsb($mem$$Address, $dst$$Register);
5528 %}
5529 ins_pipe(iload_mask_mem);
5530 %}
5532 // Load Byte (8bit signed) into a Long Register
5533 instruct loadB2L(iRegL dst, memory mem) %{
5534 match(Set dst (ConvI2L (LoadB mem)));
5535 ins_cost(MEMORY_REF_COST);
5537 size(4);
5538 format %{ "LDSB $mem,$dst\t! byte -> long" %}
5539 ins_encode %{
5540 __ ldsb($mem$$Address, $dst$$Register);
5541 %}
5542 ins_pipe(iload_mask_mem);
5543 %}
5545 // Load Unsigned Byte (8bit UNsigned) into an int reg
5546 instruct loadUB(iRegI dst, memory mem) %{
5547 match(Set dst (LoadUB mem));
5548 ins_cost(MEMORY_REF_COST);
5550 size(4);
5551 format %{ "LDUB $mem,$dst\t! ubyte" %}
5552 ins_encode %{
5553 __ ldub($mem$$Address, $dst$$Register);
5554 %}
5555 ins_pipe(iload_mem);
5556 %}
5558 // Load Unsigned Byte (8bit UNsigned) into a Long Register
5559 instruct loadUB2L(iRegL dst, memory mem) %{
5560 match(Set dst (ConvI2L (LoadUB mem)));
5561 ins_cost(MEMORY_REF_COST);
5563 size(4);
5564 format %{ "LDUB $mem,$dst\t! ubyte -> long" %}
5565 ins_encode %{
5566 __ ldub($mem$$Address, $dst$$Register);
5567 %}
5568 ins_pipe(iload_mem);
5569 %}
5571 // Load Unsigned Byte (8 bit UNsigned) with 8-bit mask into Long Register
5572 instruct loadUB2L_immI8(iRegL dst, memory mem, immI8 mask) %{
5573 match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
5574 ins_cost(MEMORY_REF_COST + DEFAULT_COST);
5576 size(2*4);
5577 format %{ "LDUB $mem,$dst\t# ubyte & 8-bit mask -> long\n\t"
5578 "AND $dst,$mask,$dst" %}
5579 ins_encode %{
5580 __ ldub($mem$$Address, $dst$$Register);
5581 __ and3($dst$$Register, $mask$$constant, $dst$$Register);
5582 %}
5583 ins_pipe(iload_mem);
5584 %}
5586 // Load Short (16bit signed)
5587 instruct loadS(iRegI dst, memory mem) %{
5588 match(Set dst (LoadS mem));
5589 ins_cost(MEMORY_REF_COST);
5591 size(4);
5592 format %{ "LDSH $mem,$dst\t! short" %}
5593 ins_encode %{
5594 __ ldsh($mem$$Address, $dst$$Register);
5595 %}
5596 ins_pipe(iload_mask_mem);
5597 %}
5599 // Load Short (16 bit signed) to Byte (8 bit signed)
5600 instruct loadS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
5601 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
5602 ins_cost(MEMORY_REF_COST);
5604 size(4);
5606 format %{ "LDSB $mem+1,$dst\t! short -> byte" %}
5607 ins_encode %{
5608 __ ldsb($mem$$Address, $dst$$Register, 1);
5609 %}
5610 ins_pipe(iload_mask_mem);
5611 %}
5613 // Load Short (16bit signed) into a Long Register
5614 instruct loadS2L(iRegL dst, memory mem) %{
5615 match(Set dst (ConvI2L (LoadS mem)));
5616 ins_cost(MEMORY_REF_COST);
5618 size(4);
5619 format %{ "LDSH $mem,$dst\t! short -> long" %}
5620 ins_encode %{
5621 __ ldsh($mem$$Address, $dst$$Register);
5622 %}
5623 ins_pipe(iload_mask_mem);
5624 %}
5626 // Load Unsigned Short/Char (16bit UNsigned)
5627 instruct loadUS(iRegI dst, memory mem) %{
5628 match(Set dst (LoadUS mem));
5629 ins_cost(MEMORY_REF_COST);
5631 size(4);
5632 format %{ "LDUH $mem,$dst\t! ushort/char" %}
5633 ins_encode %{
5634 __ lduh($mem$$Address, $dst$$Register);
5635 %}
5636 ins_pipe(iload_mem);
5637 %}
5639 // Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed)
5640 instruct loadUS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
5641 match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour));
5642 ins_cost(MEMORY_REF_COST);
5644 size(4);
5645 format %{ "LDSB $mem+1,$dst\t! ushort -> byte" %}
5646 ins_encode %{
5647 __ ldsb($mem$$Address, $dst$$Register, 1);
5648 %}
5649 ins_pipe(iload_mask_mem);
5650 %}
5652 // Load Unsigned Short/Char (16bit UNsigned) into a Long Register
5653 instruct loadUS2L(iRegL dst, memory mem) %{
5654 match(Set dst (ConvI2L (LoadUS mem)));
5655 ins_cost(MEMORY_REF_COST);
5657 size(4);
5658 format %{ "LDUH $mem,$dst\t! ushort/char -> long" %}
5659 ins_encode %{
5660 __ lduh($mem$$Address, $dst$$Register);
5661 %}
5662 ins_pipe(iload_mem);
5663 %}
5665 // Load Unsigned Short/Char (16bit UNsigned) with mask 0xFF into a Long Register
5666 instruct loadUS2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
5667 match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
5668 ins_cost(MEMORY_REF_COST);
5670 size(4);
5671 format %{ "LDUB $mem+1,$dst\t! ushort/char & 0xFF -> long" %}
5672 ins_encode %{
5673 __ ldub($mem$$Address, $dst$$Register, 1); // LSB is index+1 on BE
5674 %}
5675 ins_pipe(iload_mem);
5676 %}
5678 // Load Unsigned Short/Char (16bit UNsigned) with a 13-bit mask into a Long Register
5679 instruct loadUS2L_immI13(iRegL dst, memory mem, immI13 mask) %{
5680 match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
5681 ins_cost(MEMORY_REF_COST + DEFAULT_COST);
5683 size(2*4);
5684 format %{ "LDUH $mem,$dst\t! ushort/char & 13-bit mask -> long\n\t"
5685 "AND $dst,$mask,$dst" %}
5686 ins_encode %{
5687 Register Rdst = $dst$$Register;
5688 __ lduh($mem$$Address, Rdst);
5689 __ and3(Rdst, $mask$$constant, Rdst);
5690 %}
5691 ins_pipe(iload_mem);
5692 %}
5694 // Load Unsigned Short/Char (16bit UNsigned) with a 16-bit mask into a Long Register
5695 instruct loadUS2L_immI16(iRegL dst, memory mem, immI16 mask, iRegL tmp) %{
5696 match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
5697 effect(TEMP dst, TEMP tmp);
5698 ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
5700 size((3+1)*4); // set may use two instructions.
5701 format %{ "LDUH $mem,$dst\t! ushort/char & 16-bit mask -> long\n\t"
5702 "SET $mask,$tmp\n\t"
5703 "AND $dst,$tmp,$dst" %}
5704 ins_encode %{
5705 Register Rdst = $dst$$Register;
5706 Register Rtmp = $tmp$$Register;
5707 __ lduh($mem$$Address, Rdst);
5708 __ set($mask$$constant, Rtmp);
5709 __ and3(Rdst, Rtmp, Rdst);
5710 %}
5711 ins_pipe(iload_mem);
5712 %}
5714 // Load Integer
5715 instruct loadI(iRegI dst, memory mem) %{
5716 match(Set dst (LoadI mem));
5717 ins_cost(MEMORY_REF_COST);
5719 size(4);
5720 format %{ "LDUW $mem,$dst\t! int" %}
5721 ins_encode %{
5722 __ lduw($mem$$Address, $dst$$Register);
5723 %}
5724 ins_pipe(iload_mem);
5725 %}
5727 // Load Integer to Byte (8 bit signed)
5728 instruct loadI2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
5729 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
5730 ins_cost(MEMORY_REF_COST);
5732 size(4);
5734 format %{ "LDSB $mem+3,$dst\t! int -> byte" %}
5735 ins_encode %{
5736 __ ldsb($mem$$Address, $dst$$Register, 3);
5737 %}
5738 ins_pipe(iload_mask_mem);
5739 %}
5741 // Load Integer to Unsigned Byte (8 bit UNsigned)
5742 instruct loadI2UB(iRegI dst, indOffset13m7 mem, immI_255 mask) %{
5743 match(Set dst (AndI (LoadI mem) mask));
5744 ins_cost(MEMORY_REF_COST);
5746 size(4);
5748 format %{ "LDUB $mem+3,$dst\t! int -> ubyte" %}
5749 ins_encode %{
5750 __ ldub($mem$$Address, $dst$$Register, 3);
5751 %}
5752 ins_pipe(iload_mask_mem);
5753 %}
5755 // Load Integer to Short (16 bit signed)
5756 instruct loadI2S(iRegI dst, indOffset13m7 mem, immI_16 sixteen) %{
5757 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
5758 ins_cost(MEMORY_REF_COST);
5760 size(4);
5762 format %{ "LDSH $mem+2,$dst\t! int -> short" %}
5763 ins_encode %{
5764 __ ldsh($mem$$Address, $dst$$Register, 2);
5765 %}
5766 ins_pipe(iload_mask_mem);
5767 %}
5769 // Load Integer to Unsigned Short (16 bit UNsigned)
5770 instruct loadI2US(iRegI dst, indOffset13m7 mem, immI_65535 mask) %{
5771 match(Set dst (AndI (LoadI mem) mask));
5772 ins_cost(MEMORY_REF_COST);
5774 size(4);
5776 format %{ "LDUH $mem+2,$dst\t! int -> ushort/char" %}
5777 ins_encode %{
5778 __ lduh($mem$$Address, $dst$$Register, 2);
5779 %}
5780 ins_pipe(iload_mask_mem);
5781 %}
5783 // Load Integer into a Long Register
5784 instruct loadI2L(iRegL dst, memory mem) %{
5785 match(Set dst (ConvI2L (LoadI mem)));
5786 ins_cost(MEMORY_REF_COST);
5788 size(4);
5789 format %{ "LDSW $mem,$dst\t! int -> long" %}
5790 ins_encode %{
5791 __ ldsw($mem$$Address, $dst$$Register);
5792 %}
5793 ins_pipe(iload_mask_mem);
5794 %}
5796 // Load Integer with mask 0xFF into a Long Register
5797 instruct loadI2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
5798 match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5799 ins_cost(MEMORY_REF_COST);
5801 size(4);
5802 format %{ "LDUB $mem+3,$dst\t! int & 0xFF -> long" %}
5803 ins_encode %{
5804 __ ldub($mem$$Address, $dst$$Register, 3); // LSB is index+3 on BE
5805 %}
5806 ins_pipe(iload_mem);
5807 %}
5809 // Load Integer with mask 0xFFFF into a Long Register
5810 instruct loadI2L_immI_65535(iRegL dst, indOffset13m7 mem, immI_65535 mask) %{
5811 match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5812 ins_cost(MEMORY_REF_COST);
5814 size(4);
5815 format %{ "LDUH $mem+2,$dst\t! int & 0xFFFF -> long" %}
5816 ins_encode %{
5817 __ lduh($mem$$Address, $dst$$Register, 2); // LSW is index+2 on BE
5818 %}
5819 ins_pipe(iload_mem);
5820 %}
5822 // Load Integer with a 13-bit mask into a Long Register
5823 instruct loadI2L_immI13(iRegL dst, memory mem, immI13 mask) %{
5824 match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5825 ins_cost(MEMORY_REF_COST + DEFAULT_COST);
5827 size(2*4);
5828 format %{ "LDUW $mem,$dst\t! int & 13-bit mask -> long\n\t"
5829 "AND $dst,$mask,$dst" %}
5830 ins_encode %{
5831 Register Rdst = $dst$$Register;
5832 __ lduw($mem$$Address, Rdst);
5833 __ and3(Rdst, $mask$$constant, Rdst);
5834 %}
5835 ins_pipe(iload_mem);
5836 %}
5838 // Load Integer with a 32-bit mask into a Long Register
5839 instruct loadI2L_immI(iRegL dst, memory mem, immI mask, iRegL tmp) %{
5840 match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5841 effect(TEMP dst, TEMP tmp);
5842 ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
5844 size((3+1)*4); // set may use two instructions.
5845 format %{ "LDUW $mem,$dst\t! int & 32-bit mask -> long\n\t"
5846 "SET $mask,$tmp\n\t"
5847 "AND $dst,$tmp,$dst" %}
5848 ins_encode %{
5849 Register Rdst = $dst$$Register;
5850 Register Rtmp = $tmp$$Register;
5851 __ lduw($mem$$Address, Rdst);
5852 __ set($mask$$constant, Rtmp);
5853 __ and3(Rdst, Rtmp, Rdst);
5854 %}
5855 ins_pipe(iload_mem);
5856 %}
5858 // Load Unsigned Integer into a Long Register
5859 instruct loadUI2L(iRegL dst, memory mem) %{
5860 match(Set dst (LoadUI2L mem));
5861 ins_cost(MEMORY_REF_COST);
5863 size(4);
5864 format %{ "LDUW $mem,$dst\t! uint -> long" %}
5865 ins_encode %{
5866 __ lduw($mem$$Address, $dst$$Register);
5867 %}
5868 ins_pipe(iload_mem);
5869 %}
5871 // Load Long - aligned
5872 instruct loadL(iRegL dst, memory mem ) %{
5873 match(Set dst (LoadL mem));
5874 ins_cost(MEMORY_REF_COST);
5876 size(4);
5877 format %{ "LDX $mem,$dst\t! long" %}
5878 ins_encode %{
5879 __ ldx($mem$$Address, $dst$$Register);
5880 %}
5881 ins_pipe(iload_mem);
5882 %}
5884 // Load Long - UNaligned
5885 instruct loadL_unaligned(iRegL dst, memory mem, o7RegI tmp) %{
5886 match(Set dst (LoadL_unaligned mem));
5887 effect(KILL tmp);
5888 ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
5889 size(16);
5890 format %{ "LDUW $mem+4,R_O7\t! misaligned long\n"
5891 "\tLDUW $mem ,$dst\n"
5892 "\tSLLX #32, $dst, $dst\n"
5893 "\tOR $dst, R_O7, $dst" %}
5894 opcode(Assembler::lduw_op3);
5895 ins_encode(form3_mem_reg_long_unaligned_marshal( mem, dst ));
5896 ins_pipe(iload_mem);
5897 %}
5899 // Load Aligned Packed Byte into a Double Register
5900 instruct loadA8B(regD dst, memory mem) %{
5901 match(Set dst (Load8B mem));
5902 ins_cost(MEMORY_REF_COST);
5903 size(4);
5904 format %{ "LDDF $mem,$dst\t! packed8B" %}
5905 opcode(Assembler::lddf_op3);
5906 ins_encode(simple_form3_mem_reg( mem, dst ) );
5907 ins_pipe(floadD_mem);
5908 %}
5910 // Load Aligned Packed Char into a Double Register
5911 instruct loadA4C(regD dst, memory mem) %{
5912 match(Set dst (Load4C mem));
5913 ins_cost(MEMORY_REF_COST);
5914 size(4);
5915 format %{ "LDDF $mem,$dst\t! packed4C" %}
5916 opcode(Assembler::lddf_op3);
5917 ins_encode(simple_form3_mem_reg( mem, dst ) );
5918 ins_pipe(floadD_mem);
5919 %}
5921 // Load Aligned Packed Short into a Double Register
5922 instruct loadA4S(regD dst, memory mem) %{
5923 match(Set dst (Load4S mem));
5924 ins_cost(MEMORY_REF_COST);
5925 size(4);
5926 format %{ "LDDF $mem,$dst\t! packed4S" %}
5927 opcode(Assembler::lddf_op3);
5928 ins_encode(simple_form3_mem_reg( mem, dst ) );
5929 ins_pipe(floadD_mem);
5930 %}
5932 // Load Aligned Packed Int into a Double Register
5933 instruct loadA2I(regD dst, memory mem) %{
5934 match(Set dst (Load2I mem));
5935 ins_cost(MEMORY_REF_COST);
5936 size(4);
5937 format %{ "LDDF $mem,$dst\t! packed2I" %}
5938 opcode(Assembler::lddf_op3);
5939 ins_encode(simple_form3_mem_reg( mem, dst ) );
5940 ins_pipe(floadD_mem);
5941 %}
5943 // Load Range
5944 instruct loadRange(iRegI dst, memory mem) %{
5945 match(Set dst (LoadRange mem));
5946 ins_cost(MEMORY_REF_COST);
5948 size(4);
5949 format %{ "LDUW $mem,$dst\t! range" %}
5950 opcode(Assembler::lduw_op3);
5951 ins_encode(simple_form3_mem_reg( mem, dst ) );
5952 ins_pipe(iload_mem);
5953 %}
5955 // Load Integer into %f register (for fitos/fitod)
5956 instruct loadI_freg(regF dst, memory mem) %{
5957 match(Set dst (LoadI mem));
5958 ins_cost(MEMORY_REF_COST);
5959 size(4);
5961 format %{ "LDF $mem,$dst\t! for fitos/fitod" %}
5962 opcode(Assembler::ldf_op3);
5963 ins_encode(simple_form3_mem_reg( mem, dst ) );
5964 ins_pipe(floadF_mem);
5965 %}
5967 // Load Pointer
5968 instruct loadP(iRegP dst, memory mem) %{
5969 match(Set dst (LoadP mem));
5970 ins_cost(MEMORY_REF_COST);
5971 size(4);
5973 #ifndef _LP64
5974 format %{ "LDUW $mem,$dst\t! ptr" %}
5975 ins_encode %{
5976 __ lduw($mem$$Address, $dst$$Register);
5977 %}
5978 #else
5979 format %{ "LDX $mem,$dst\t! ptr" %}
5980 ins_encode %{
5981 __ ldx($mem$$Address, $dst$$Register);
5982 %}
5983 #endif
5984 ins_pipe(iload_mem);
5985 %}
5987 // Load Compressed Pointer
5988 instruct loadN(iRegN dst, memory mem) %{
5989 match(Set dst (LoadN mem));
5990 ins_cost(MEMORY_REF_COST);
5991 size(4);
5993 format %{ "LDUW $mem,$dst\t! compressed ptr" %}
5994 ins_encode %{
5995 __ lduw($mem$$Address, $dst$$Register);
5996 %}
5997 ins_pipe(iload_mem);
5998 %}
6000 // Load Klass Pointer
6001 instruct loadKlass(iRegP dst, memory mem) %{
6002 match(Set dst (LoadKlass mem));
6003 ins_cost(MEMORY_REF_COST);
6004 size(4);
6006 #ifndef _LP64
6007 format %{ "LDUW $mem,$dst\t! klass ptr" %}
6008 ins_encode %{
6009 __ lduw($mem$$Address, $dst$$Register);
6010 %}
6011 #else
6012 format %{ "LDX $mem,$dst\t! klass ptr" %}
6013 ins_encode %{
6014 __ ldx($mem$$Address, $dst$$Register);
6015 %}
6016 #endif
6017 ins_pipe(iload_mem);
6018 %}
6020 // Load narrow Klass Pointer
6021 instruct loadNKlass(iRegN dst, memory mem) %{
6022 match(Set dst (LoadNKlass mem));
6023 ins_cost(MEMORY_REF_COST);
6024 size(4);
6026 format %{ "LDUW $mem,$dst\t! compressed klass ptr" %}
6027 ins_encode %{
6028 __ lduw($mem$$Address, $dst$$Register);
6029 %}
6030 ins_pipe(iload_mem);
6031 %}
6033 // Load Double
6034 instruct loadD(regD dst, memory mem) %{
6035 match(Set dst (LoadD mem));
6036 ins_cost(MEMORY_REF_COST);
6038 size(4);
6039 format %{ "LDDF $mem,$dst" %}
6040 opcode(Assembler::lddf_op3);
6041 ins_encode(simple_form3_mem_reg( mem, dst ) );
6042 ins_pipe(floadD_mem);
6043 %}
6045 // Load Double - UNaligned
6046 instruct loadD_unaligned(regD_low dst, memory mem ) %{
6047 match(Set dst (LoadD_unaligned mem));
6048 ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
6049 size(8);
6050 format %{ "LDF $mem ,$dst.hi\t! misaligned double\n"
6051 "\tLDF $mem+4,$dst.lo\t!" %}
6052 opcode(Assembler::ldf_op3);
6053 ins_encode( form3_mem_reg_double_unaligned( mem, dst ));
6054 ins_pipe(iload_mem);
6055 %}
6057 // Load Float
6058 instruct loadF(regF dst, memory mem) %{
6059 match(Set dst (LoadF mem));
6060 ins_cost(MEMORY_REF_COST);
6062 size(4);
6063 format %{ "LDF $mem,$dst" %}
6064 opcode(Assembler::ldf_op3);
6065 ins_encode(simple_form3_mem_reg( mem, dst ) );
6066 ins_pipe(floadF_mem);
6067 %}
6069 // Load Constant
6070 instruct loadConI( iRegI dst, immI src ) %{
6071 match(Set dst src);
6072 ins_cost(DEFAULT_COST * 3/2);
6073 format %{ "SET $src,$dst" %}
6074 ins_encode( Set32(src, dst) );
6075 ins_pipe(ialu_hi_lo_reg);
6076 %}
6078 instruct loadConI13( iRegI dst, immI13 src ) %{
6079 match(Set dst src);
6081 size(4);
6082 format %{ "MOV $src,$dst" %}
6083 ins_encode( Set13( src, dst ) );
6084 ins_pipe(ialu_imm);
6085 %}
6087 #ifndef _LP64
6088 instruct loadConP(iRegP dst, immP con) %{
6089 match(Set dst con);
6090 ins_cost(DEFAULT_COST * 3/2);
6091 format %{ "SET $con,$dst\t!ptr" %}
6092 ins_encode %{
6093 // [RGV] This next line should be generated from ADLC
6094 if (_opnds[1]->constant_is_oop()) {
6095 intptr_t val = $con$$constant;
6096 __ set_oop_constant((jobject) val, $dst$$Register);
6097 } else { // non-oop pointers, e.g. card mark base, heap top
6098 __ set($con$$constant, $dst$$Register);
6099 }
6100 %}
6101 ins_pipe(loadConP);
6102 %}
6103 #else
6104 instruct loadConP_set(iRegP dst, immP_set con) %{
6105 match(Set dst con);
6106 ins_cost(DEFAULT_COST * 3/2);
6107 format %{ "SET $con,$dst\t! ptr" %}
6108 ins_encode %{
6109 // [RGV] This next line should be generated from ADLC
6110 if (_opnds[1]->constant_is_oop()) {
6111 intptr_t val = $con$$constant;
6112 __ set_oop_constant((jobject) val, $dst$$Register);
6113 } else { // non-oop pointers, e.g. card mark base, heap top
6114 __ set($con$$constant, $dst$$Register);
6115 }
6116 %}
6117 ins_pipe(loadConP);
6118 %}
6120 instruct loadConP_load(iRegP dst, immP_load con) %{
6121 match(Set dst con);
6122 ins_cost(MEMORY_REF_COST);
6123 format %{ "LD [$constanttablebase + $constantoffset],$dst\t! load from constant table: ptr=$con" %}
6124 ins_encode %{
6125 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
6126 __ ld_ptr($constanttablebase, con_offset, $dst$$Register);
6127 %}
6128 ins_pipe(loadConP);
6129 %}
6131 instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{
6132 match(Set dst con);
6133 ins_cost(DEFAULT_COST * 3/2);
6134 format %{ "SET $con,$dst\t! non-oop ptr" %}
6135 ins_encode %{
6136 __ set($con$$constant, $dst$$Register);
6137 %}
6138 ins_pipe(loadConP);
6139 %}
6140 #endif // _LP64
6142 instruct loadConP0(iRegP dst, immP0 src) %{
6143 match(Set dst src);
6145 size(4);
6146 format %{ "CLR $dst\t!ptr" %}
6147 ins_encode %{
6148 __ clr($dst$$Register);
6149 %}
6150 ins_pipe(ialu_imm);
6151 %}
6153 instruct loadConP_poll(iRegP dst, immP_poll src) %{
6154 match(Set dst src);
6155 ins_cost(DEFAULT_COST);
6156 format %{ "SET $src,$dst\t!ptr" %}
6157 ins_encode %{
6158 AddressLiteral polling_page(os::get_polling_page());
6159 __ sethi(polling_page, reg_to_register_object($dst$$reg));
6160 %}
6161 ins_pipe(loadConP_poll);
6162 %}
6164 instruct loadConN0(iRegN dst, immN0 src) %{
6165 match(Set dst src);
6167 size(4);
6168 format %{ "CLR $dst\t! compressed NULL ptr" %}
6169 ins_encode %{
6170 __ clr($dst$$Register);
6171 %}
6172 ins_pipe(ialu_imm);
6173 %}
6175 instruct loadConN(iRegN dst, immN src) %{
6176 match(Set dst src);
6177 ins_cost(DEFAULT_COST * 3/2);
6178 format %{ "SET $src,$dst\t! compressed ptr" %}
6179 ins_encode %{
6180 Register dst = $dst$$Register;
6181 __ set_narrow_oop((jobject)$src$$constant, dst);
6182 %}
6183 ins_pipe(ialu_hi_lo_reg);
6184 %}
6186 // Materialize long value (predicated by immL_cheap).
6187 instruct loadConL_set64(iRegL dst, immL_cheap con, o7RegL tmp) %{
6188 match(Set dst con);
6189 effect(KILL tmp);
6190 ins_cost(DEFAULT_COST * 3);
6191 format %{ "SET64 $con,$dst KILL $tmp\t! cheap long" %}
6192 ins_encode %{
6193 __ set64($con$$constant, $dst$$Register, $tmp$$Register);
6194 %}
6195 ins_pipe(loadConL);
6196 %}
6198 // Load long value from constant table (predicated by immL_expensive).
6199 instruct loadConL_ldx(iRegL dst, immL_expensive con) %{
6200 match(Set dst con);
6201 ins_cost(MEMORY_REF_COST);
6202 format %{ "LDX [$constanttablebase + $constantoffset],$dst\t! load from constant table: long=$con" %}
6203 ins_encode %{
6204 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
6205 __ ldx($constanttablebase, con_offset, $dst$$Register);
6206 %}
6207 ins_pipe(loadConL);
6208 %}
6210 instruct loadConL0( iRegL dst, immL0 src ) %{
6211 match(Set dst src);
6212 ins_cost(DEFAULT_COST);
6213 size(4);
6214 format %{ "CLR $dst\t! long" %}
6215 ins_encode( Set13( src, dst ) );
6216 ins_pipe(ialu_imm);
6217 %}
6219 instruct loadConL13( iRegL dst, immL13 src ) %{
6220 match(Set dst src);
6221 ins_cost(DEFAULT_COST * 2);
6223 size(4);
6224 format %{ "MOV $src,$dst\t! long" %}
6225 ins_encode( Set13( src, dst ) );
6226 ins_pipe(ialu_imm);
6227 %}
6229 instruct loadConF(regF dst, immF con, o7RegI tmp) %{
6230 match(Set dst con);
6231 effect(KILL tmp);
6232 format %{ "LDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: float=$con" %}
6233 ins_encode %{
6234 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
6235 __ ldf(FloatRegisterImpl::S, $constanttablebase, con_offset, $dst$$FloatRegister);
6236 %}
6237 ins_pipe(loadConFD);
6238 %}
6240 instruct loadConD(regD dst, immD con, o7RegI tmp) %{
6241 match(Set dst con);
6242 effect(KILL tmp);
6243 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: double=$con" %}
6244 ins_encode %{
6245 // XXX This is a quick fix for 6833573.
6246 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset($con), $dst$$FloatRegister);
6247 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
6248 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
6249 %}
6250 ins_pipe(loadConFD);
6251 %}
6253 // Prefetch instructions.
6254 // Must be safe to execute with invalid address (cannot fault).
6256 instruct prefetchr( memory mem ) %{
6257 match( PrefetchRead mem );
6258 ins_cost(MEMORY_REF_COST);
6259 size(4);
6261 format %{ "PREFETCH $mem,0\t! Prefetch read-many" %}
6262 opcode(Assembler::prefetch_op3);
6263 ins_encode( form3_mem_prefetch_read( mem ) );
6264 ins_pipe(iload_mem);
6265 %}
6267 instruct prefetchw( memory mem ) %{
6268 match( PrefetchWrite mem );
6269 ins_cost(MEMORY_REF_COST);
6270 size(4);
6272 format %{ "PREFETCH $mem,2\t! Prefetch write-many (and read)" %}
6273 opcode(Assembler::prefetch_op3);
6274 ins_encode( form3_mem_prefetch_write( mem ) );
6275 ins_pipe(iload_mem);
6276 %}
6278 // Prefetch instructions for allocation.
6280 instruct prefetchAlloc( memory mem ) %{
6281 predicate(AllocatePrefetchInstr == 0);
6282 match( PrefetchAllocation mem );
6283 ins_cost(MEMORY_REF_COST);
6284 size(4);
6286 format %{ "PREFETCH $mem,2\t! Prefetch allocation" %}
6287 opcode(Assembler::prefetch_op3);
6288 ins_encode( form3_mem_prefetch_write( mem ) );
6289 ins_pipe(iload_mem);
6290 %}
6292 // Use BIS instruction to prefetch for allocation.
6293 // Could fault, need space at the end of TLAB.
6294 instruct prefetchAlloc_bis( iRegP dst ) %{
6295 predicate(AllocatePrefetchInstr == 1);
6296 match( PrefetchAllocation dst );
6297 ins_cost(MEMORY_REF_COST);
6298 size(4);
6300 format %{ "STXA [$dst]\t! // Prefetch allocation using BIS" %}
6301 ins_encode %{
6302 __ stxa(G0, $dst$$Register, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
6303 %}
6304 ins_pipe(istore_mem_reg);
6305 %}
6307 // Next code is used for finding next cache line address to prefetch.
6308 #ifndef _LP64
6309 instruct cacheLineAdr( iRegP dst, iRegP src, immI13 mask ) %{
6310 match(Set dst (CastX2P (AndI (CastP2X src) mask)));
6311 ins_cost(DEFAULT_COST);
6312 size(4);
6314 format %{ "AND $src,$mask,$dst\t! next cache line address" %}
6315 ins_encode %{
6316 __ and3($src$$Register, $mask$$constant, $dst$$Register);
6317 %}
6318 ins_pipe(ialu_reg_imm);
6319 %}
6320 #else
6321 instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{
6322 match(Set dst (CastX2P (AndL (CastP2X src) mask)));
6323 ins_cost(DEFAULT_COST);
6324 size(4);
6326 format %{ "AND $src,$mask,$dst\t! next cache line address" %}
6327 ins_encode %{
6328 __ and3($src$$Register, $mask$$constant, $dst$$Register);
6329 %}
6330 ins_pipe(ialu_reg_imm);
6331 %}
6332 #endif
6334 //----------Store Instructions-------------------------------------------------
6335 // Store Byte
6336 instruct storeB(memory mem, iRegI src) %{
6337 match(Set mem (StoreB mem src));
6338 ins_cost(MEMORY_REF_COST);
6340 size(4);
6341 format %{ "STB $src,$mem\t! byte" %}
6342 opcode(Assembler::stb_op3);
6343 ins_encode(simple_form3_mem_reg( mem, src ) );
6344 ins_pipe(istore_mem_reg);
6345 %}
6347 instruct storeB0(memory mem, immI0 src) %{
6348 match(Set mem (StoreB mem src));
6349 ins_cost(MEMORY_REF_COST);
6351 size(4);
6352 format %{ "STB $src,$mem\t! byte" %}
6353 opcode(Assembler::stb_op3);
6354 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6355 ins_pipe(istore_mem_zero);
6356 %}
6358 instruct storeCM0(memory mem, immI0 src) %{
6359 match(Set mem (StoreCM mem src));
6360 ins_cost(MEMORY_REF_COST);
6362 size(4);
6363 format %{ "STB $src,$mem\t! CMS card-mark byte 0" %}
6364 opcode(Assembler::stb_op3);
6365 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6366 ins_pipe(istore_mem_zero);
6367 %}
6369 // Store Char/Short
6370 instruct storeC(memory mem, iRegI src) %{
6371 match(Set mem (StoreC mem src));
6372 ins_cost(MEMORY_REF_COST);
6374 size(4);
6375 format %{ "STH $src,$mem\t! short" %}
6376 opcode(Assembler::sth_op3);
6377 ins_encode(simple_form3_mem_reg( mem, src ) );
6378 ins_pipe(istore_mem_reg);
6379 %}
6381 instruct storeC0(memory mem, immI0 src) %{
6382 match(Set mem (StoreC mem src));
6383 ins_cost(MEMORY_REF_COST);
6385 size(4);
6386 format %{ "STH $src,$mem\t! short" %}
6387 opcode(Assembler::sth_op3);
6388 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6389 ins_pipe(istore_mem_zero);
6390 %}
6392 // Store Integer
6393 instruct storeI(memory mem, iRegI src) %{
6394 match(Set mem (StoreI mem src));
6395 ins_cost(MEMORY_REF_COST);
6397 size(4);
6398 format %{ "STW $src,$mem" %}
6399 opcode(Assembler::stw_op3);
6400 ins_encode(simple_form3_mem_reg( mem, src ) );
6401 ins_pipe(istore_mem_reg);
6402 %}
6404 // Store Long
6405 instruct storeL(memory mem, iRegL src) %{
6406 match(Set mem (StoreL mem src));
6407 ins_cost(MEMORY_REF_COST);
6408 size(4);
6409 format %{ "STX $src,$mem\t! long" %}
6410 opcode(Assembler::stx_op3);
6411 ins_encode(simple_form3_mem_reg( mem, src ) );
6412 ins_pipe(istore_mem_reg);
6413 %}
6415 instruct storeI0(memory mem, immI0 src) %{
6416 match(Set mem (StoreI mem src));
6417 ins_cost(MEMORY_REF_COST);
6419 size(4);
6420 format %{ "STW $src,$mem" %}
6421 opcode(Assembler::stw_op3);
6422 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6423 ins_pipe(istore_mem_zero);
6424 %}
6426 instruct storeL0(memory mem, immL0 src) %{
6427 match(Set mem (StoreL mem src));
6428 ins_cost(MEMORY_REF_COST);
6430 size(4);
6431 format %{ "STX $src,$mem" %}
6432 opcode(Assembler::stx_op3);
6433 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6434 ins_pipe(istore_mem_zero);
6435 %}
6437 // Store Integer from float register (used after fstoi)
6438 instruct storeI_Freg(memory mem, regF src) %{
6439 match(Set mem (StoreI mem src));
6440 ins_cost(MEMORY_REF_COST);
6442 size(4);
6443 format %{ "STF $src,$mem\t! after fstoi/fdtoi" %}
6444 opcode(Assembler::stf_op3);
6445 ins_encode(simple_form3_mem_reg( mem, src ) );
6446 ins_pipe(fstoreF_mem_reg);
6447 %}
6449 // Store Pointer
6450 instruct storeP(memory dst, sp_ptr_RegP src) %{
6451 match(Set dst (StoreP dst src));
6452 ins_cost(MEMORY_REF_COST);
6453 size(4);
6455 #ifndef _LP64
6456 format %{ "STW $src,$dst\t! ptr" %}
6457 opcode(Assembler::stw_op3, 0, REGP_OP);
6458 #else
6459 format %{ "STX $src,$dst\t! ptr" %}
6460 opcode(Assembler::stx_op3, 0, REGP_OP);
6461 #endif
6462 ins_encode( form3_mem_reg( dst, src ) );
6463 ins_pipe(istore_mem_spORreg);
6464 %}
6466 instruct storeP0(memory dst, immP0 src) %{
6467 match(Set dst (StoreP dst src));
6468 ins_cost(MEMORY_REF_COST);
6469 size(4);
6471 #ifndef _LP64
6472 format %{ "STW $src,$dst\t! ptr" %}
6473 opcode(Assembler::stw_op3, 0, REGP_OP);
6474 #else
6475 format %{ "STX $src,$dst\t! ptr" %}
6476 opcode(Assembler::stx_op3, 0, REGP_OP);
6477 #endif
6478 ins_encode( form3_mem_reg( dst, R_G0 ) );
6479 ins_pipe(istore_mem_zero);
6480 %}
6482 // Store Compressed Pointer
6483 instruct storeN(memory dst, iRegN src) %{
6484 match(Set dst (StoreN dst src));
6485 ins_cost(MEMORY_REF_COST);
6486 size(4);
6488 format %{ "STW $src,$dst\t! compressed ptr" %}
6489 ins_encode %{
6490 Register base = as_Register($dst$$base);
6491 Register index = as_Register($dst$$index);
6492 Register src = $src$$Register;
6493 if (index != G0) {
6494 __ stw(src, base, index);
6495 } else {
6496 __ stw(src, base, $dst$$disp);
6497 }
6498 %}
6499 ins_pipe(istore_mem_spORreg);
6500 %}
6502 instruct storeN0(memory dst, immN0 src) %{
6503 match(Set dst (StoreN dst src));
6504 ins_cost(MEMORY_REF_COST);
6505 size(4);
6507 format %{ "STW $src,$dst\t! compressed ptr" %}
6508 ins_encode %{
6509 Register base = as_Register($dst$$base);
6510 Register index = as_Register($dst$$index);
6511 if (index != G0) {
6512 __ stw(0, base, index);
6513 } else {
6514 __ stw(0, base, $dst$$disp);
6515 }
6516 %}
6517 ins_pipe(istore_mem_zero);
6518 %}
6520 // Store Double
6521 instruct storeD( memory mem, regD src) %{
6522 match(Set mem (StoreD mem src));
6523 ins_cost(MEMORY_REF_COST);
6525 size(4);
6526 format %{ "STDF $src,$mem" %}
6527 opcode(Assembler::stdf_op3);
6528 ins_encode(simple_form3_mem_reg( mem, src ) );
6529 ins_pipe(fstoreD_mem_reg);
6530 %}
6532 instruct storeD0( memory mem, immD0 src) %{
6533 match(Set mem (StoreD mem src));
6534 ins_cost(MEMORY_REF_COST);
6536 size(4);
6537 format %{ "STX $src,$mem" %}
6538 opcode(Assembler::stx_op3);
6539 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6540 ins_pipe(fstoreD_mem_zero);
6541 %}
6543 // Store Float
6544 instruct storeF( memory mem, regF src) %{
6545 match(Set mem (StoreF mem src));
6546 ins_cost(MEMORY_REF_COST);
6548 size(4);
6549 format %{ "STF $src,$mem" %}
6550 opcode(Assembler::stf_op3);
6551 ins_encode(simple_form3_mem_reg( mem, src ) );
6552 ins_pipe(fstoreF_mem_reg);
6553 %}
6555 instruct storeF0( memory mem, immF0 src) %{
6556 match(Set mem (StoreF mem src));
6557 ins_cost(MEMORY_REF_COST);
6559 size(4);
6560 format %{ "STW $src,$mem\t! storeF0" %}
6561 opcode(Assembler::stw_op3);
6562 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6563 ins_pipe(fstoreF_mem_zero);
6564 %}
6566 // Store Aligned Packed Bytes in Double register to memory
6567 instruct storeA8B(memory mem, regD src) %{
6568 match(Set mem (Store8B mem src));
6569 ins_cost(MEMORY_REF_COST);
6570 size(4);
6571 format %{ "STDF $src,$mem\t! packed8B" %}
6572 opcode(Assembler::stdf_op3);
6573 ins_encode(simple_form3_mem_reg( mem, src ) );
6574 ins_pipe(fstoreD_mem_reg);
6575 %}
6577 // Convert oop pointer into compressed form
// General encode: predicate excludes the NotNull case so the macro-assembler
// variant that handles a possibly-null oop is selected here.
6578 instruct encodeHeapOop(iRegN dst, iRegP src) %{
6579 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
6580 match(Set dst (EncodeP src));
6581 format %{ "encode_heap_oop $src, $dst" %}
6582 ins_encode %{
6583 __ encode_heap_oop($src$$Register, $dst$$Register);
6584 %}
6585 ins_pipe(ialu_reg);
6586 %}
// Faster encode when the type system proves the oop is never null.
6588 instruct encodeHeapOop_not_null(iRegN dst, iRegP src) %{
6589 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
6590 match(Set dst (EncodeP src));
6591 format %{ "encode_heap_oop_not_null $src, $dst" %}
6592 ins_encode %{
6593 __ encode_heap_oop_not_null($src$$Register, $dst$$Register);
6594 %}
6595 ins_pipe(ialu_reg);
6596 %}
// General decode of a compressed oop; predicate excludes both the NotNull
// and Constant cases, which take the cheaper path below.
6598 instruct decodeHeapOop(iRegP dst, iRegN src) %{
6599 predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
6600 n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant);
6601 match(Set dst (DecodeN src));
6602 format %{ "decode_heap_oop $src, $dst" %}
6603 ins_encode %{
6604 __ decode_heap_oop($src$$Register, $dst$$Register);
6605 %}
6606 ins_pipe(ialu_reg);
6607 %}
// Decode known-non-null (or constant) compressed oop — skips the null check.
6609 instruct decodeHeapOop_not_null(iRegP dst, iRegN src) %{
6610 predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
6611 n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
6612 match(Set dst (DecodeN src));
6613 format %{ "decode_heap_oop_not_null $src, $dst" %}
6614 ins_encode %{
6615 __ decode_heap_oop_not_null($src$$Register, $dst$$Register);
6616 %}
6617 ins_pipe(ialu_reg);
6618 %}
6621 // Store Zero into Aligned Packed Bytes
// Zeroing a packed vector: all lanes zero == all-zero 64-bit word, so a
// single STX of %g0 covers the whole vector.
6622 instruct storeA8B0(memory mem, immI0 zero) %{
6623 match(Set mem (Store8B mem zero));
6624 ins_cost(MEMORY_REF_COST);
6625 size(4);
6626 format %{ "STX $zero,$mem\t! packed8B" %}
6627 opcode(Assembler::stx_op3);
6628 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6629 ins_pipe(fstoreD_mem_zero);
6630 %}
6632 // Store Aligned Packed Chars/Shorts in Double register to memory
6633 instruct storeA4C(memory mem, regD src) %{
6634 match(Set mem (Store4C mem src));
6635 ins_cost(MEMORY_REF_COST);
6636 size(4);
6637 format %{ "STDF $src,$mem\t! packed4C" %}
6638 opcode(Assembler::stdf_op3);
6639 ins_encode(simple_form3_mem_reg( mem, src ) );
6640 ins_pipe(fstoreD_mem_reg);
6641 %}
6643 // Store Zero into Aligned Packed Chars/Shorts
// NOTE(review): this match rule wraps zero in Replicate4C, unlike the 8B/2I
// zero-store rules which match the bare zero — presumably reflects the ideal
// graph shape produced for char vectors; confirm against superword lowering.
6644 instruct storeA4C0(memory mem, immI0 zero) %{
6645 match(Set mem (Store4C mem (Replicate4C zero)));
6646 ins_cost(MEMORY_REF_COST);
6647 size(4);
6648 format %{ "STX $zero,$mem\t! packed4C" %}
6649 opcode(Assembler::stx_op3);
6650 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6651 ins_pipe(fstoreD_mem_zero);
6652 %}
6654 // Store Aligned Packed Ints in Double register to memory
6655 instruct storeA2I(memory mem, regD src) %{
6656 match(Set mem (Store2I mem src));
6657 ins_cost(MEMORY_REF_COST);
6658 size(4);
6659 format %{ "STDF $src,$mem\t! packed2I" %}
6660 opcode(Assembler::stdf_op3);
6661 ins_encode(simple_form3_mem_reg( mem, src ) );
6662 ins_pipe(fstoreD_mem_reg);
6663 %}
6665 // Store Zero into Aligned Packed Ints
6666 instruct storeA2I0(memory mem, immI0 zero) %{
6667 match(Set mem (Store2I mem zero));
6668 ins_cost(MEMORY_REF_COST);
6669 size(4);
6670 format %{ "STX $zero,$mem\t! packed2I" %}
6671 opcode(Assembler::stx_op3);
6672 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6673 ins_pipe(fstoreD_mem_zero);
6674 %}
6677 //----------MemBar Instructions-----------------------------------------------
6678 // Memory barrier flavors
// Acquire barrier; size(0) — actual code, if any, comes from the encode class.
6680 instruct membar_acquire() %{
6681 match(MemBarAcquire);
6682 ins_cost(4*MEMORY_REF_COST);
6684 size(0);
6685 format %{ "MEMBAR-acquire" %}
6686 ins_encode( enc_membar_acquire );
6687 ins_pipe(long_memory_op);
6688 %}
// Lock-acquire barrier is free: the CAS in the preceding FastLock already
// has the required ordering semantics, so the encoding is empty.
6690 instruct membar_acquire_lock() %{
6691 match(MemBarAcquireLock);
6692 ins_cost(0);
6694 size(0);
6695 format %{ "!MEMBAR-acquire (CAS in prior FastLock so empty encoding)" %}
6696 ins_encode( );
6697 ins_pipe(empty);
6698 %}
6700 instruct membar_release() %{
6701 match(MemBarRelease);
6702 ins_cost(4*MEMORY_REF_COST);
6704 size(0);
6705 format %{ "MEMBAR-release" %}
6706 ins_encode( enc_membar_release );
6707 ins_pipe(long_memory_op);
6708 %}
// Lock-release barrier is free for the symmetric reason: the CAS in the
// following FastUnlock provides the ordering.
6710 instruct membar_release_lock() %{
6711 match(MemBarReleaseLock);
6712 ins_cost(0);
6714 size(0);
6715 format %{ "!MEMBAR-release (CAS in succeeding FastUnlock so empty encoding)" %}
6716 ins_encode( );
6717 ins_pipe(empty);
6718 %}
// Full (StoreLoad) barrier — the only one that costs a real instruction here.
6720 instruct membar_volatile() %{
6721 match(MemBarVolatile);
6722 ins_cost(4*MEMORY_REF_COST);
6724 size(4);
6725 format %{ "MEMBAR-volatile" %}
6726 ins_encode( enc_membar_volatile );
6727 ins_pipe(long_memory_op);
6728 %}
// Elided volatile barrier: the matcher proves a store-load barrier already
// exists on this path (Matcher::post_store_load_barrier), so emit nothing.
6730 instruct unnecessary_membar_volatile() %{
6731 match(MemBarVolatile);
6732 predicate(Matcher::post_store_load_barrier(n));
6733 ins_cost(0);
6735 size(0);
6736 format %{ "!MEMBAR-volatile (unnecessary so empty encoding)" %}
6737 ins_encode( );
6738 ins_pipe(empty);
6739 %}
6741 //----------Register Move Instructions-----------------------------------------
// RoundDouble/RoundFloat are no-ops on SPARC: FP results are already in
// normal IEEE format (these nodes exist for x87-style targets).
6742 instruct roundDouble_nop(regD dst) %{
6743 match(Set dst (RoundDouble dst));
6744 ins_cost(0);
6745 // SPARC results are already "rounded" (i.e., normal-format IEEE)
6746 ins_encode( );
6747 ins_pipe(empty);
6748 %}
6751 instruct roundFloat_nop(regF dst) %{
6752 match(Set dst (RoundFloat dst));
6753 ins_cost(0);
6754 // SPARC results are already "rounded" (i.e., normal-format IEEE)
6755 ins_encode( );
6756 ins_pipe(empty);
6757 %}
6760 // Cast Index to Pointer for unsafe natives
// Pointer<->integer casts are plain register moves on SPARC.
6761 instruct castX2P(iRegX src, iRegP dst) %{
6762 match(Set dst (CastX2P src));
6764 format %{ "MOV $src,$dst\t! IntX->Ptr" %}
6765 ins_encode( form3_g0_rs2_rd_move( src, dst ) );
6766 ins_pipe(ialu_reg);
6767 %}
6769 // Cast Pointer to Index for unsafe natives
6770 instruct castP2X(iRegP src, iRegX dst) %{
6771 match(Set dst (CastP2X src));
6773 format %{ "MOV $src,$dst\t! Ptr->IntX" %}
6774 ins_encode( form3_g0_rs2_rd_move( src, dst ) );
6775 ins_pipe(ialu_reg);
6776 %}
// Chain rules moving FP values between registers and stack slots; these are
// spill/fill copies used by the register allocator.
6778 instruct stfSSD(stackSlotD stkSlot, regD src) %{
6779 // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
6780 match(Set stkSlot src); // chain rule
6781 ins_cost(MEMORY_REF_COST);
6782 format %{ "STDF $src,$stkSlot\t!stk" %}
6783 opcode(Assembler::stdf_op3);
6784 ins_encode(simple_form3_mem_reg(stkSlot, src));
6785 ins_pipe(fstoreD_stk_reg);
6786 %}
6788 instruct ldfSSD(regD dst, stackSlotD stkSlot) %{
6789 // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
6790 match(Set dst stkSlot); // chain rule
6791 ins_cost(MEMORY_REF_COST);
6792 format %{ "LDDF $stkSlot,$dst\t!stk" %}
6793 opcode(Assembler::lddf_op3);
6794 ins_encode(simple_form3_mem_reg(stkSlot, dst));
6795 ins_pipe(floadD_stk);
6796 %}
6798 instruct stfSSF(stackSlotF stkSlot, regF src) %{
6799 // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
6800 match(Set stkSlot src); // chain rule
6801 ins_cost(MEMORY_REF_COST);
6802 format %{ "STF $src,$stkSlot\t!stk" %}
6803 opcode(Assembler::stf_op3);
6804 ins_encode(simple_form3_mem_reg(stkSlot, src));
6805 ins_pipe(fstoreF_stk_reg);
6806 %}
6808 //----------Conditional Move---------------------------------------------------
6809 // Conditional move
// Integer conditional moves, keyed by the flags register flavor:
//   *P_* = pointer condition codes, *I*/*Iu* = signed/unsigned icc, *F_* = fcc.
// _imm variants take an immI11 (fits MOVcc's 11-bit simm field) and are one
// cost unit cheaper than the _reg variants.
6810 instruct cmovIP_reg(cmpOpP cmp, flagsRegP pcc, iRegI dst, iRegI src) %{
6811 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
6812 ins_cost(150);
6813 format %{ "MOV$cmp $pcc,$src,$dst" %}
6814 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6815 ins_pipe(ialu_reg);
6816 %}
6818 instruct cmovIP_imm(cmpOpP cmp, flagsRegP pcc, iRegI dst, immI11 src) %{
6819 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
6820 ins_cost(140);
6821 format %{ "MOV$cmp $pcc,$src,$dst" %}
6822 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
6823 ins_pipe(ialu_imm);
6824 %}
6826 instruct cmovII_reg(cmpOp cmp, flagsReg icc, iRegI dst, iRegI src) %{
6827 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6828 ins_cost(150);
6829 size(4);
6830 format %{ "MOV$cmp $icc,$src,$dst" %}
6831 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6832 ins_pipe(ialu_reg);
6833 %}
6835 instruct cmovII_imm(cmpOp cmp, flagsReg icc, iRegI dst, immI11 src) %{
6836 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6837 ins_cost(140);
6838 size(4);
6839 format %{ "MOV$cmp $icc,$src,$dst" %}
6840 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
6841 ins_pipe(ialu_imm);
6842 %}
6844 instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
6845 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6846 ins_cost(150);
6847 size(4);
6848 format %{ "MOV$cmp $icc,$src,$dst" %}
6849 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6850 ins_pipe(ialu_reg);
6851 %}
6853 instruct cmovIIu_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{
6854 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6855 ins_cost(140);
6856 size(4);
6857 format %{ "MOV$cmp $icc,$src,$dst" %}
6858 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
6859 ins_pipe(ialu_imm);
6860 %}
6862 instruct cmovIF_reg(cmpOpF cmp, flagsRegF fcc, iRegI dst, iRegI src) %{
6863 match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src)));
6864 ins_cost(150);
6865 size(4);
6866 format %{ "MOV$cmp $fcc,$src,$dst" %}
6867 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
6868 ins_pipe(ialu_reg);
6869 %}
6871 instruct cmovIF_imm(cmpOpF cmp, flagsRegF fcc, iRegI dst, immI11 src) %{
6872 match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src)));
6873 ins_cost(140);
6874 size(4);
6875 format %{ "MOV$cmp $fcc,$src,$dst" %}
6876 ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) );
6877 ins_pipe(ialu_imm);
6878 %}
6880 // Conditional move for RegN. Only cmov(reg,reg).
// Conditional moves of compressed-oop (narrow) registers; no immediate forms.
6881 instruct cmovNP_reg(cmpOpP cmp, flagsRegP pcc, iRegN dst, iRegN src) %{
6882 match(Set dst (CMoveN (Binary cmp pcc) (Binary dst src)));
6883 ins_cost(150);
6884 format %{ "MOV$cmp $pcc,$src,$dst" %}
6885 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6886 ins_pipe(ialu_reg);
6887 %}
6889 // This instruction also works with CmpN so we don't need cmovNN_reg.
6890 instruct cmovNI_reg(cmpOp cmp, flagsReg icc, iRegN dst, iRegN src) %{
6891 match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
6892 ins_cost(150);
6893 size(4);
6894 format %{ "MOV$cmp $icc,$src,$dst" %}
6895 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6896 ins_pipe(ialu_reg);
6897 %}
6899 // This instruction also works with CmpN so we don't need cmovNN_reg.
6900 instruct cmovNIu_reg(cmpOpU cmp, flagsRegU icc, iRegN dst, iRegN src) %{
6901 match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
6902 ins_cost(150);
6903 size(4);
6904 format %{ "MOV$cmp $icc,$src,$dst" %}
6905 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6906 ins_pipe(ialu_reg);
6907 %}
6909 instruct cmovNF_reg(cmpOpF cmp, flagsRegF fcc, iRegN dst, iRegN src) %{
6910 match(Set dst (CMoveN (Binary cmp fcc) (Binary dst src)));
6911 ins_cost(150);
6912 size(4);
6913 format %{ "MOV$cmp $fcc,$src,$dst" %}
6914 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
6915 ins_pipe(ialu_reg);
6916 %}
6918 // Conditional move
// Conditional moves of full-width pointers; _imm forms accept only immP0
// (null), the one pointer constant expressible as a MOVcc immediate.
6919 instruct cmovPP_reg(cmpOpP cmp, flagsRegP pcc, iRegP dst, iRegP src) %{
6920 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
6921 ins_cost(150);
6922 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %}
6923 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6924 ins_pipe(ialu_reg);
6925 %}
6927 instruct cmovPP_imm(cmpOpP cmp, flagsRegP pcc, iRegP dst, immP0 src) %{
6928 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
6929 ins_cost(140);
6930 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %}
6931 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
6932 ins_pipe(ialu_imm);
6933 %}
6935 // This instruction also works with CmpN so we don't need cmovPN_reg.
6936 instruct cmovPI_reg(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src) %{
6937 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
6938 ins_cost(150);
6940 size(4);
6941 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
6942 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6943 ins_pipe(ialu_reg);
6944 %}
6946 instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{
6947 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
6948 ins_cost(150);
6950 size(4);
6951 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
6952 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6953 ins_pipe(ialu_reg);
6954 %}
6956 instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{
6957 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
6958 ins_cost(140);
6960 size(4);
6961 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
6962 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
6963 ins_pipe(ialu_imm);
6964 %}
6966 instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{
6967 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
6968 ins_cost(140);
6970 size(4);
6971 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
6972 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
6973 ins_pipe(ialu_imm);
6974 %}
// NOTE(review): this reg-reg variant declares ins_pipe(ialu_imm) while its
// siblings use ialu_reg; scheduling-only effect, but looks like a copy/paste
// slip — confirm before changing a machine description.
6976 instruct cmovPF_reg(cmpOpF cmp, flagsRegF fcc, iRegP dst, iRegP src) %{
6977 match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
6978 ins_cost(150);
6979 size(4);
6980 format %{ "MOV$cmp $fcc,$src,$dst" %}
6981 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
6982 ins_pipe(ialu_imm);
6983 %}
6985 instruct cmovPF_imm(cmpOpF cmp, flagsRegF fcc, iRegP dst, immP0 src) %{
6986 match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
6987 ins_cost(140);
6988 size(4);
6989 format %{ "MOV$cmp $fcc,$src,$dst" %}
6990 ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) );
6991 ins_pipe(ialu_imm);
6992 %}
6994 // Conditional move
// Conditional moves of single-precision FP registers (FMOVcc forms).
// NOTE(review): the format string prints "FMOVD" although the operands are
// regF (single) and the opcode is 0x101; display-only text — verify intent.
6995 instruct cmovFP_reg(cmpOpP cmp, flagsRegP pcc, regF dst, regF src) %{
6996 match(Set dst (CMoveF (Binary cmp pcc) (Binary dst src)));
6997 ins_cost(150);
6998 opcode(0x101);
6999 format %{ "FMOVD$cmp $pcc,$src,$dst" %}
7000 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) );
7001 ins_pipe(int_conditional_float_move);
7002 %}
7004 instruct cmovFI_reg(cmpOp cmp, flagsReg icc, regF dst, regF src) %{
7005 match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
7006 ins_cost(150);
7008 size(4);
7009 format %{ "FMOVS$cmp $icc,$src,$dst" %}
7010 opcode(0x101);
7011 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
7012 ins_pipe(int_conditional_float_move);
7013 %}
7015 instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{
7016 match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
7017 ins_cost(150);
7019 size(4);
7020 format %{ "FMOVS$cmp $icc,$src,$dst" %}
7021 opcode(0x101);
7022 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
7023 ins_pipe(int_conditional_float_move);
7024 %}
7026 // Conditional move,
// FP-condition-code variant uses the fcc-based encode class and opcode 0x1.
7027 instruct cmovFF_reg(cmpOpF cmp, flagsRegF fcc, regF dst, regF src) %{
7028 match(Set dst (CMoveF (Binary cmp fcc) (Binary dst src)));
7029 ins_cost(150);
7030 size(4);
7031 format %{ "FMOVF$cmp $fcc,$src,$dst" %}
7032 opcode(0x1);
7033 ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) );
7034 ins_pipe(int_conditional_double_move);
7035 %}
7037 // Conditional move
// Conditional moves of double-precision FP registers (opcode 0x102 = FMOVD).
7038 instruct cmovDP_reg(cmpOpP cmp, flagsRegP pcc, regD dst, regD src) %{
7039 match(Set dst (CMoveD (Binary cmp pcc) (Binary dst src)));
7040 ins_cost(150);
7041 size(4);
7042 opcode(0x102);
7043 format %{ "FMOVD$cmp $pcc,$src,$dst" %}
7044 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) );
7045 ins_pipe(int_conditional_double_move);
7046 %}
7048 instruct cmovDI_reg(cmpOp cmp, flagsReg icc, regD dst, regD src) %{
7049 match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
7050 ins_cost(150);
7052 size(4);
7053 format %{ "FMOVD$cmp $icc,$src,$dst" %}
7054 opcode(0x102);
7055 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
7056 ins_pipe(int_conditional_double_move);
7057 %}
7059 instruct cmovDIu_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src) %{
7060 match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
7061 ins_cost(150);
7063 size(4);
7064 format %{ "FMOVD$cmp $icc,$src,$dst" %}
7065 opcode(0x102);
7066 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
7067 ins_pipe(int_conditional_double_move);
7068 %}
7070 // Conditional move,
7071 instruct cmovDF_reg(cmpOpF cmp, flagsRegF fcc, regD dst, regD src) %{
7072 match(Set dst (CMoveD (Binary cmp fcc) (Binary dst src)));
7073 ins_cost(150);
7074 size(4);
7075 format %{ "FMOVD$cmp $fcc,$src,$dst" %}
7076 opcode(0x2);
7077 ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) );
7078 ins_pipe(int_conditional_double_move);
7079 %}
7081 // Conditional move
// Conditional moves of long registers; immediate form reuses immI11 since
// MOVcc sign-extends its 11-bit immediate to 64 bits.
7082 instruct cmovLP_reg(cmpOpP cmp, flagsRegP pcc, iRegL dst, iRegL src) %{
7083 match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
7084 ins_cost(150);
7085 format %{ "MOV$cmp $pcc,$src,$dst\t! long" %}
7086 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
7087 ins_pipe(ialu_reg);
7088 %}
7090 instruct cmovLP_imm(cmpOpP cmp, flagsRegP pcc, iRegL dst, immI11 src) %{
7091 match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
7092 ins_cost(140);
7093 format %{ "MOV$cmp $pcc,$src,$dst\t! long" %}
7094 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
7095 ins_pipe(ialu_imm);
7096 %}
7098 instruct cmovLI_reg(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src) %{
7099 match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
7100 ins_cost(150);
7102 size(4);
7103 format %{ "MOV$cmp $icc,$src,$dst\t! long" %}
7104 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
7105 ins_pipe(ialu_reg);
7106 %}
7109 instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{
7110 match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
7111 ins_cost(150);
7113 size(4);
7114 format %{ "MOV$cmp $icc,$src,$dst\t! long" %}
7115 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
7116 ins_pipe(ialu_reg);
7117 %}
7120 instruct cmovLF_reg(cmpOpF cmp, flagsRegF fcc, iRegL dst, iRegL src) %{
7121 match(Set dst (CMoveL (Binary cmp fcc) (Binary dst src)));
7122 ins_cost(150);
7124 size(4);
7125 format %{ "MOV$cmp $fcc,$src,$dst\t! long" %}
7126 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
7127 ins_pipe(ialu_reg);
7128 %}
7132 //----------OS and Locking Instructions----------------------------------------
7134 // This name is KNOWN by the ADLC and cannot be changed.
7135 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
7136 // for this guy.
// Thread-local base lives permanently in %g2, so "loading" it costs nothing.
7137 instruct tlsLoadP(g2RegP dst) %{
7138 match(Set dst (ThreadLocal));
7140 size(0);
7141 ins_cost(0);
7142 format %{ "# TLS is in G2" %}
7143 ins_encode( /*empty encoding*/ );
7144 ins_pipe(ialu_none);
7145 %}
// Type-system-only nodes: CheckCastPP/CastPP/CastII change the ideal type of
// a value in place and emit no machine code.
7147 instruct checkCastPP( iRegP dst ) %{
7148 match(Set dst (CheckCastPP dst));
7150 size(0);
7151 format %{ "# checkcastPP of $dst" %}
7152 ins_encode( /*empty encoding*/ );
7153 ins_pipe(empty);
7154 %}
7157 instruct castPP( iRegP dst ) %{
7158 match(Set dst (CastPP dst));
7159 format %{ "# castPP of $dst" %}
7160 ins_encode( /*empty encoding*/ );
7161 ins_pipe(empty);
7162 %}
7164 instruct castII( iRegI dst ) %{
7165 match(Set dst (CastII dst));
7166 format %{ "# castII of $dst" %}
7167 ins_encode( /*empty encoding*/ );
7168 ins_cost(0);
7169 ins_pipe(empty);
7170 %}
7172 //----------Arithmetic Instructions--------------------------------------------
7173 // Addition Instructions
7174 // Register Addition
// Int/pointer/long adds; *_imm13 variants take a 13-bit signed immediate
// (SPARC simm13 field). addI uses the newer %{...%} macro-assembler encode
// style; the rest use named encode classes.
7175 instruct addI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7176 match(Set dst (AddI src1 src2));
7178 size(4);
7179 format %{ "ADD $src1,$src2,$dst" %}
7180 ins_encode %{
7181 __ add($src1$$Register, $src2$$Register, $dst$$Register);
7182 %}
7183 ins_pipe(ialu_reg_reg);
7184 %}
7186 // Immediate Addition
7187 instruct addI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7188 match(Set dst (AddI src1 src2));
7190 size(4);
7191 format %{ "ADD $src1,$src2,$dst" %}
7192 opcode(Assembler::add_op3, Assembler::arith_op);
7193 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7194 ins_pipe(ialu_reg_imm);
7195 %}
7197 // Pointer Register Addition
7198 instruct addP_reg_reg(iRegP dst, iRegP src1, iRegX src2) %{
7199 match(Set dst (AddP src1 src2));
7201 size(4);
7202 format %{ "ADD $src1,$src2,$dst" %}
7203 opcode(Assembler::add_op3, Assembler::arith_op);
7204 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7205 ins_pipe(ialu_reg_reg);
7206 %}
7208 // Pointer Immediate Addition
7209 instruct addP_reg_imm13(iRegP dst, iRegP src1, immX13 src2) %{
7210 match(Set dst (AddP src1 src2));
7212 size(4);
7213 format %{ "ADD $src1,$src2,$dst" %}
7214 opcode(Assembler::add_op3, Assembler::arith_op);
7215 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7216 ins_pipe(ialu_reg_imm);
7217 %}
7219 // Long Addition
7220 instruct addL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7221 match(Set dst (AddL src1 src2));
7223 size(4);
7224 format %{ "ADD $src1,$src2,$dst\t! long" %}
7225 opcode(Assembler::add_op3, Assembler::arith_op);
7226 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7227 ins_pipe(ialu_reg_reg);
7228 %}
7230 instruct addL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
7231 match(Set dst (AddL src1 con));
7233 size(4);
7234 format %{ "ADD $src1,$con,$dst" %}
7235 opcode(Assembler::add_op3, Assembler::arith_op);
7236 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
7237 ins_pipe(ialu_reg_imm);
7238 %}
7240 //----------Conditional_store--------------------------------------------------
7241 // Conditional-store of the updated heap-top.
7242 // Used during allocation of the shared heap.
7243 // Sets flags (EQ) on success. Implemented with a CASA on Sparc.
7245 // LoadP-locked. Same as a regular pointer load when used with a compare-swap
// Pointer load half of a load-locked/store-conditional pair; plain load on
// SPARC since the CASA below supplies the atomicity. 32-bit builds use LDUW,
// 64-bit builds LDX.
7246 instruct loadPLocked(iRegP dst, memory mem) %{
7247 match(Set dst (LoadPLocked mem));
7248 ins_cost(MEMORY_REF_COST);
7250 #ifndef _LP64
7251 size(4);
7252 format %{ "LDUW $mem,$dst\t! ptr" %}
7253 opcode(Assembler::lduw_op3, 0, REGP_OP);
7254 #else
7255 format %{ "LDX $mem,$dst\t! ptr" %}
7256 opcode(Assembler::ldx_op3, 0, REGP_OP);
7257 #endif
7258 ins_encode( form3_mem_reg( mem, dst ) );
7259 ins_pipe(iload_mem);
7260 %}
7262 // LoadL-locked. Same as a regular long load when used with a compare-swap
7263 instruct loadLLocked(iRegL dst, memory mem) %{
7264 match(Set dst (LoadLLocked mem));
7265 ins_cost(MEMORY_REF_COST);
7266 size(4);
7267 format %{ "LDX $mem,$dst\t! long" %}
7268 opcode(Assembler::ldx_op3);
7269 ins_encode(simple_form3_mem_reg( mem, dst ) );
7270 ins_pipe(iload_mem);
7271 %}
// Conditional pointer store via CASA; newval is in %g3 and is clobbered
// (KILL) because CASA writes the observed memory value back into it.
7273 instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{
7274 match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval)));
7275 effect( KILL newval );
7276 format %{ "CASA [$heap_top_ptr],$oldval,R_G3\t! If $oldval==[$heap_top_ptr] Then store R_G3 into [$heap_top_ptr], set R_G3=[$heap_top_ptr] in any case\n\t"
7277 "CMP R_G3,$oldval\t\t! See if we made progress" %}
7278 ins_encode( enc_cas(heap_top_ptr,oldval,newval) );
7279 ins_pipe( long_memory_op );
7280 %}
7282 // Conditional-store of an int value.
7283 instruct storeIConditional( iRegP mem_ptr, iRegI oldval, g3RegI newval, flagsReg icc ) %{
7284 match(Set icc (StoreIConditional mem_ptr (Binary oldval newval)));
7285 effect( KILL newval );
7286 format %{ "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
7287 "CMP $oldval,$newval\t\t! See if we made progress" %}
7288 ins_encode( enc_cas(mem_ptr,oldval,newval) );
7289 ins_pipe( long_memory_op );
7290 %}
7292 // Conditional-store of a long value.
7293 instruct storeLConditional( iRegP mem_ptr, iRegL oldval, g3RegL newval, flagsRegL xcc ) %{
7294 match(Set xcc (StoreLConditional mem_ptr (Binary oldval newval)));
7295 effect( KILL newval );
7296 format %{ "CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
7297 "CMP $oldval,$newval\t\t! See if we made progress" %}
7298 ins_encode( enc_cas(mem_ptr,oldval,newval) );
7299 ins_pipe( long_memory_op );
7300 %}
7302 // No flag versions for CompareAndSwap{P,I,L} because matcher can't match them
// CAS-returning-boolean forms: newval is copied into %o7 (hence KILL tmp1),
// the CAS(X)A runs against it, and the flags are folded into a 0/1 result.
7304 instruct compareAndSwapL_bool(iRegP mem_ptr, iRegL oldval, iRegL newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7305 match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
7306 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7307 format %{
7308 "MOV $newval,O7\n\t"
7309 "CASXA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7310 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7311 "MOV 1,$res\n\t"
7312 "MOVne xcc,R_G0,$res"
7313 %}
7314 ins_encode( enc_casx(mem_ptr, oldval, newval),
7315 enc_lflags_ne_to_boolean(res) );
7316 ins_pipe( long_memory_op );
7317 %}
7320 instruct compareAndSwapI_bool(iRegP mem_ptr, iRegI oldval, iRegI newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7321 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
7322 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7323 format %{
7324 "MOV $newval,O7\n\t"
7325 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7326 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7327 "MOV 1,$res\n\t"
7328 "MOVne icc,R_G0,$res"
7329 %}
7330 ins_encode( enc_casi(mem_ptr, oldval, newval),
7331 enc_iflags_ne_to_boolean(res) );
7332 ins_pipe( long_memory_op );
7333 %}
// Pointer CAS picks the 64-bit (CASXA) or 32-bit (CASA) path at build time.
7335 instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7336 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
7337 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7338 format %{
7339 "MOV $newval,O7\n\t"
7340 "CASA_PTR [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7341 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7342 "MOV 1,$res\n\t"
7343 "MOVne xcc,R_G0,$res"
7344 %}
7345 #ifdef _LP64
7346 ins_encode( enc_casx(mem_ptr, oldval, newval),
7347 enc_lflags_ne_to_boolean(res) );
7348 #else
7349 ins_encode( enc_casi(mem_ptr, oldval, newval),
7350 enc_iflags_ne_to_boolean(res) );
7351 #endif
7352 ins_pipe( long_memory_op );
7353 %}
// Narrow (compressed oop) CAS is always 32-bit.
7355 instruct compareAndSwapN_bool(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7356 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
7357 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7358 format %{
7359 "MOV $newval,O7\n\t"
7360 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7361 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7362 "MOV 1,$res\n\t"
7363 "MOVne icc,R_G0,$res"
7364 %}
7365 ins_encode( enc_casi(mem_ptr, oldval, newval),
7366 enc_iflags_ne_to_boolean(res) );
7367 ins_pipe( long_memory_op );
7368 %}
7370 //---------------------
7371 // Subtraction Instructions
7372 // Register Subtraction
// Int/long subtracts; the zero-minus-x forms print as NEG but still encode
// SUB with %g0 as rs1.
7373 instruct subI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7374 match(Set dst (SubI src1 src2));
7376 size(4);
7377 format %{ "SUB $src1,$src2,$dst" %}
7378 opcode(Assembler::sub_op3, Assembler::arith_op);
7379 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7380 ins_pipe(ialu_reg_reg);
7381 %}
7383 // Immediate Subtraction
7384 instruct subI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7385 match(Set dst (SubI src1 src2));
7387 size(4);
7388 format %{ "SUB $src1,$src2,$dst" %}
7389 opcode(Assembler::sub_op3, Assembler::arith_op);
7390 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7391 ins_pipe(ialu_reg_imm);
7392 %}
7394 instruct subI_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{
7395 match(Set dst (SubI zero src2));
7397 size(4);
7398 format %{ "NEG $src2,$dst" %}
7399 opcode(Assembler::sub_op3, Assembler::arith_op);
7400 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) );
7401 ins_pipe(ialu_zero_reg);
7402 %}
7404 // Long subtraction
7405 instruct subL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7406 match(Set dst (SubL src1 src2));
7408 size(4);
7409 format %{ "SUB $src1,$src2,$dst\t! long" %}
7410 opcode(Assembler::sub_op3, Assembler::arith_op);
7411 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7412 ins_pipe(ialu_reg_reg);
7413 %}
7415 // Immediate Subtraction
7416 instruct subL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
7417 match(Set dst (SubL src1 con));
7419 size(4);
7420 format %{ "SUB $src1,$con,$dst\t! long" %}
7421 opcode(Assembler::sub_op3, Assembler::arith_op);
7422 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
7423 ins_pipe(ialu_reg_imm);
7424 %}
7426 // Long negation
7427 instruct negL_reg_reg(iRegL dst, immL0 zero, iRegL src2) %{
7428 match(Set dst (SubL zero src2));
7430 size(4);
7431 format %{ "NEG $src2,$dst\t! long" %}
7432 opcode(Assembler::sub_op3, Assembler::arith_op);
7433 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) );
7434 ins_pipe(ialu_zero_reg);
7435 %}
7437 // Multiplication Instructions
7438 // Integer Multiplication
7439 // Register Multiplication
// All multiplies use 64-bit MULX; int results rely on callers using only
// the low 32 bits. Long forms carry an explicit 5x cost.
7440 instruct mulI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7441 match(Set dst (MulI src1 src2));
7443 size(4);
7444 format %{ "MULX $src1,$src2,$dst" %}
7445 opcode(Assembler::mulx_op3, Assembler::arith_op);
7446 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7447 ins_pipe(imul_reg_reg);
7448 %}
7450 // Immediate Multiplication
7451 instruct mulI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7452 match(Set dst (MulI src1 src2));
7454 size(4);
7455 format %{ "MULX $src1,$src2,$dst" %}
7456 opcode(Assembler::mulx_op3, Assembler::arith_op);
7457 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7458 ins_pipe(imul_reg_imm);
7459 %}
7461 instruct mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7462 match(Set dst (MulL src1 src2));
7463 ins_cost(DEFAULT_COST * 5);
7464 size(4);
7465 format %{ "MULX $src1,$src2,$dst\t! long" %}
7466 opcode(Assembler::mulx_op3, Assembler::arith_op);
7467 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7468 ins_pipe(mulL_reg_reg);
7469 %}
7471 // Immediate Multiplication
7472 instruct mulL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{
7473 match(Set dst (MulL src1 src2));
7474 ins_cost(DEFAULT_COST * 5);
7475 size(4);
7476 format %{ "MULX $src1,$src2,$dst" %}
7477 opcode(Assembler::mulx_op3, Assembler::arith_op);
7478 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7479 ins_pipe(mulL_reg_imm);
7480 %}
7482 // Integer Division
7483 // Register Division
// Int division via 64-bit SDIVX: operands are first sign-extended from
// 32 bits with SRA reg,0 (hence the iRegIsafe operands, which may be
// clobbered by that in-place extension).
7484 instruct divI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2) %{
7485 match(Set dst (DivI src1 src2));
7486 ins_cost((2+71)*DEFAULT_COST);
7488 format %{ "SRA $src2,0,$src2\n\t"
7489 "SRA $src1,0,$src1\n\t"
7490 "SDIVX $src1,$src2,$dst" %}
7491 ins_encode( idiv_reg( src1, src2, dst ) );
7492 ins_pipe(sdiv_reg_reg);
7493 %}
7495 // Immediate Division
7496 instruct divI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2) %{
7497 match(Set dst (DivI src1 src2));
7498 ins_cost((2+71)*DEFAULT_COST);
7500 format %{ "SRA $src1,0,$src1\n\t"
7501 "SDIVX $src1,$src2,$dst" %}
7502 ins_encode( idiv_imm( src1, src2, dst ) );
7503 ins_pipe(sdiv_reg_imm);
7504 %}
7506 //----------Div-By-10-Expansion------------------------------------------------
7507 // Extract hi bits of a 32x32->64 bit multiply.
7508 // Expand rule only, not matched
// Helper pieces for the divide-by-10 strength reduction below; each is an
// expand-only fragment (DEF/USE effects, no match rule).
7509 instruct mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2 ) %{
7510 effect( DEF dst, USE src1, USE src2 );
7511 format %{ "MULX $src1,$src2,$dst\t! Used in div-by-10\n\t"
7512 "SRLX $dst,#32,$dst\t\t! Extract only hi word of result" %}
7513 ins_encode( enc_mul_hi(dst,src1,src2));
7514 ins_pipe(sdiv_reg_reg);
7515 %}
7517 // Magic constant, reciprocal of 10
// 0x66666667 is the magic multiplier for signed division by 10
// (Granlund-Montgomery / Hacker's Delight style reciprocal).
7518 instruct loadConI_x66666667(iRegIsafe dst) %{
7519 effect( DEF dst );
7521 size(8);
7522 format %{ "SET 0x66666667,$dst\t! Used in div-by-10" %}
7523 ins_encode( Set32(0x66666667, dst) );
7524 ins_pipe(ialu_hi_lo_reg);
7525 %}
7527 // Register Shift Right Arithmetic Long by 32-63
7528 instruct sra_31( iRegI dst, iRegI src ) %{
7529 effect( DEF dst, USE src );
7530 format %{ "SRA $src,31,$dst\t! Used in div-by-10" %}
7531 ins_encode( form3_rs1_rd_copysign_hi(src,dst) );
7532 ins_pipe(ialu_reg_reg);
7533 %}
7535 // Arithmetic Shift Right by 8-bit immediate
7536 instruct sra_reg_2( iRegI dst, iRegI src ) %{
7537 effect( DEF dst, USE src );
7538 format %{ "SRA $src,2,$dst\t! Used in div-by-10" %}
7539 opcode(Assembler::sra_op3, Assembler::arith_op);
7540 ins_encode( form3_rs1_simm13_rd( src, 0x2, dst ) );
7541 ins_pipe(ialu_reg_imm);
7542 %}
7544 // Integer DIV with 10
// dst = src/10 via hi(src*0x66666667)>>2, corrected by the sign bit
// (tmp3 = src>>31) so negative dividends round toward zero.
7545 instruct divI_10( iRegI dst, iRegIsafe src, immI10 div ) %{
7546 match(Set dst (DivI src div));
7547 ins_cost((6+6)*DEFAULT_COST);
7548 expand %{
7549 iRegIsafe tmp1; // Killed temps;
7550 iRegIsafe tmp2; // Killed temps;
7551 iRegI tmp3; // Killed temps;
7552 iRegI tmp4; // Killed temps;
7553 loadConI_x66666667( tmp1 ); // SET 0x66666667 -> tmp1
7554 mul_hi( tmp2, src, tmp1 ); // MUL hibits(src * tmp1) -> tmp2
7555 sra_31( tmp3, src ); // SRA src,31 -> tmp3
7556 sra_reg_2( tmp4, tmp2 ); // SRA tmp2,2 -> tmp4
7557 subI_reg_reg( dst,tmp4,tmp3); // SUB tmp4 - tmp3 -> dst
7558 %}
7559 %}
// ---- 64-bit long division: register/register and register/immediate ----
7561 // Register Long Division
7562 instruct divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7563 match(Set dst (DivL src1 src2));
7564 ins_cost(DEFAULT_COST*71);
7565 size(4);
7566 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7567 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7568 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7569 ins_pipe(divL_reg_reg);
7570 %}
7572 // Immediate Long Division
7573 instruct divL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{
7574 match(Set dst (DivL src1 src2));
7575 ins_cost(DEFAULT_COST*71);
7576 size(4);
7577 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7578 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7579 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7580 ins_pipe(divL_reg_imm);
7581 %}
// ---- 32-bit integer remainder; temp O7 and the condition codes are
// ---- clobbered by the irem encodings.
7583 // Integer Remainder
7584 // Register Remainder
7585 instruct modI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2, o7RegP temp, flagsReg ccr ) %{
7586 match(Set dst (ModI src1 src2));
7587 effect( KILL ccr, KILL temp);
7589 format %{ "SREM $src1,$src2,$dst" %}
7590 ins_encode( irem_reg(src1, src2, dst, temp) );
7591 ins_pipe(sdiv_reg_reg);
7592 %}
7594 // Immediate Remainder
7595 instruct modI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2, o7RegP temp, flagsReg ccr ) %{
7596 match(Set dst (ModI src1 src2));
7597 effect( KILL ccr, KILL temp);
7599 format %{ "SREM $src1,$src2,$dst" %}
7600 ins_encode( irem_imm(src1, src2, dst, temp) );
7601 ins_pipe(sdiv_reg_imm);
7602 %}
// ---- Long remainder: expand-only helper rules (div/mul/sub) followed by
// ---- the matched ModL rules that expand as dst = src1 - (src1/src2)*src2.
7604 // Register Long Remainder
7605 instruct divL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
7606 effect(DEF dst, USE src1, USE src2);
7607 size(4);
7608 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7609 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7610 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7611 ins_pipe(divL_reg_reg);
7612 %}
7614 // Immediate Long Division (expand-only helper)
7615 instruct divL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{
7616 effect(DEF dst, USE src1, USE src2);
7617 size(4);
7618 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7619 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7620 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7621 ins_pipe(divL_reg_imm);
7622 %}
7624 instruct mulL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
7625 effect(DEF dst, USE src1, USE src2);
7626 size(4);
7627 format %{ "MULX $src1,$src2,$dst\t! long" %}
7628 opcode(Assembler::mulx_op3, Assembler::arith_op);
7629 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7630 ins_pipe(mulL_reg_reg);
7631 %}
7633 // Immediate Multiplication
7634 instruct mulL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{
7635 effect(DEF dst, USE src1, USE src2);
7636 size(4);
7637 format %{ "MULX $src1,$src2,$dst" %}
7638 opcode(Assembler::mulx_op3, Assembler::arith_op);
7639 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7640 ins_pipe(mulL_reg_imm);
7641 %}
7643 instruct subL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
7644 effect(DEF dst, USE src1, USE src2);
7645 size(4);
7646 format %{ "SUB $src1,$src2,$dst\t! long" %}
7647 opcode(Assembler::sub_op3, Assembler::arith_op);
7648 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7649 ins_pipe(ialu_reg_reg);
7650 %}
7652 instruct subL_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{
7653 effect(DEF dst, USE src1, USE src2);
7654 size(4);
7655 format %{ "SUB $src1,$src2,$dst\t! long" %}
7656 opcode(Assembler::sub_op3, Assembler::arith_op);
7657 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7658 ins_pipe(ialu_reg_reg);
7659 %}
7661 // Register Long Remainder
7662 instruct modL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7663 match(Set dst (ModL src1 src2));
7664 ins_cost(DEFAULT_COST*(71 + 6 + 1));
7665 expand %{
7666 iRegL tmp1;
7667 iRegL tmp2;
7668 divL_reg_reg_1(tmp1, src1, src2);
7669 mulL_reg_reg_1(tmp2, tmp1, src2);
7670 subL_reg_reg_1(dst, src1, tmp2);
7671 %}
7672 %}
7674 // Immediate Long Remainder
7675 instruct modL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{
7676 match(Set dst (ModL src1 src2));
7677 ins_cost(DEFAULT_COST*(71 + 6 + 1));
7678 expand %{
7679 iRegL tmp1;
7680 iRegL tmp2;
7681 divL_reg_imm13_1(tmp1, src1, src2);
7682 mulL_reg_imm13_1(tmp2, tmp1, src2);
7683 subL_reg_reg_2 (dst, src1, tmp2);
7684 %}
7685 %}
// ---- Shift instructions: left/arith-right/logical-right for int and long,
// ---- register- and immediate-count forms, plus CastP2X right shifts.
7687 // Integer Shift Instructions
7688 // Register Shift Left
7689 instruct shlI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7690 match(Set dst (LShiftI src1 src2));
7692 size(4);
7693 format %{ "SLL $src1,$src2,$dst" %}
7694 opcode(Assembler::sll_op3, Assembler::arith_op);
7695 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7696 ins_pipe(ialu_reg_reg);
7697 %}
7699 // Register Shift Left Immediate
7700 instruct shlI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
7701 match(Set dst (LShiftI src1 src2));
7703 size(4);
7704 format %{ "SLL $src1,$src2,$dst" %}
7705 opcode(Assembler::sll_op3, Assembler::arith_op);
7706 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7707 ins_pipe(ialu_reg_imm);
7708 %}
7710 // Register Shift Left
7711 instruct shlL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
7712 match(Set dst (LShiftL src1 src2));
7714 size(4);
7715 format %{ "SLLX $src1,$src2,$dst" %}
7716 opcode(Assembler::sllx_op3, Assembler::arith_op);
7717 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) );
7718 ins_pipe(ialu_reg_reg);
7719 %}
7721 // Register Shift Left Immediate
7722 instruct shlL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
7723 match(Set dst (LShiftL src1 src2));
7725 size(4);
7726 format %{ "SLLX $src1,$src2,$dst" %}
7727 opcode(Assembler::sllx_op3, Assembler::arith_op);
7728 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7729 ins_pipe(ialu_reg_imm);
7730 %}
7732 // Register Arithmetic Shift Right
7733 instruct sarI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7734 match(Set dst (RShiftI src1 src2));
7735 size(4);
7736 format %{ "SRA $src1,$src2,$dst" %}
7737 opcode(Assembler::sra_op3, Assembler::arith_op);
7738 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7739 ins_pipe(ialu_reg_reg);
7740 %}
7742 // Register Arithmetic Shift Right Immediate
7743 instruct sarI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
7744 match(Set dst (RShiftI src1 src2));
7746 size(4);
7747 format %{ "SRA $src1,$src2,$dst" %}
7748 opcode(Assembler::sra_op3, Assembler::arith_op);
7749 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7750 ins_pipe(ialu_reg_imm);
7751 %}
7753 // Register Shift Right Arithmetic Long
7754 instruct sarL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
7755 match(Set dst (RShiftL src1 src2));
7757 size(4);
7758 format %{ "SRAX $src1,$src2,$dst" %}
7759 opcode(Assembler::srax_op3, Assembler::arith_op);
7760 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) );
7761 ins_pipe(ialu_reg_reg);
7762 %}
7764 // Register Arithmetic Shift Right Long Immediate
7765 instruct sarL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
7766 match(Set dst (RShiftL src1 src2));
7768 size(4);
7769 format %{ "SRAX $src1,$src2,$dst" %}
7770 opcode(Assembler::srax_op3, Assembler::arith_op);
7771 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7772 ins_pipe(ialu_reg_imm);
7773 %}
7775 // Register Shift Right
7776 instruct shrI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7777 match(Set dst (URShiftI src1 src2));
7779 size(4);
7780 format %{ "SRL $src1,$src2,$dst" %}
7781 opcode(Assembler::srl_op3, Assembler::arith_op);
7782 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7783 ins_pipe(ialu_reg_reg);
7784 %}
7786 // Register Shift Right Immediate
7787 instruct shrI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
7788 match(Set dst (URShiftI src1 src2));
7790 size(4);
7791 format %{ "SRL $src1,$src2,$dst" %}
7792 opcode(Assembler::srl_op3, Assembler::arith_op);
7793 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7794 ins_pipe(ialu_reg_imm);
7795 %}
7797 // Register Shift Right
7798 instruct shrL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
7799 match(Set dst (URShiftL src1 src2));
7801 size(4);
7802 format %{ "SRLX $src1,$src2,$dst" %}
7803 opcode(Assembler::srlx_op3, Assembler::arith_op);
7804 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) );
7805 ins_pipe(ialu_reg_reg);
7806 %}
7808 // Register Shift Right Immediate
7809 instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
7810 match(Set dst (URShiftL src1 src2));
7812 size(4);
7813 format %{ "SRLX $src1,$src2,$dst" %}
7814 opcode(Assembler::srlx_op3, Assembler::arith_op);
7815 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7816 ins_pipe(ialu_reg_imm);
7817 %}
7819 // Register Shift Right Immediate with a CastP2X
7820 #ifdef _LP64
7821 instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{
7822 match(Set dst (URShiftL (CastP2X src1) src2));
7823 size(4);
7824 format %{ "SRLX $src1,$src2,$dst\t! Cast ptr $src1 to long and shift" %}
7825 opcode(Assembler::srlx_op3, Assembler::arith_op);
7826 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7827 ins_pipe(ialu_reg_imm);
7828 %}
7829 #else
7830 instruct shrP_reg_imm5(iRegI dst, iRegP src1, immU5 src2) %{
7831 match(Set dst (URShiftI (CastP2X src1) src2));
7832 size(4);
7833 format %{ "SRL $src1,$src2,$dst\t! Cast ptr $src1 to int and shift" %}
7834 opcode(Assembler::srl_op3, Assembler::arith_op);
7835 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7836 ins_pipe(ialu_reg_imm);
7837 %}
7838 #endif
7841 //----------Floating Point Arithmetic Instructions-----------------------------
7843 // Add float single precision
7844 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
7845 match(Set dst (AddF src1 src2));
7847 size(4);
7848 format %{ "FADDS $src1,$src2,$dst" %}
7849 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fadds_opf);
7850 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7851 ins_pipe(faddF_reg_reg);
7852 %}
7854 // Add float double precision
7855 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
7856 match(Set dst (AddD src1 src2));
7858 size(4);
7859 format %{ "FADDD $src1,$src2,$dst" %}
7860 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf);
7861 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7862 ins_pipe(faddD_reg_reg);
7863 %}
7865 // Sub float single precision
7866 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
7867 match(Set dst (SubF src1 src2));
7869 size(4);
7870 format %{ "FSUBS $src1,$src2,$dst" %}
7871 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubs_opf);
7872 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7873 ins_pipe(faddF_reg_reg);
7874 %}
7876 // Sub float double precision
7877 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
7878 match(Set dst (SubD src1 src2));
7880 size(4);
7881 format %{ "FSUBD $src1,$src2,$dst" %}
7882 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf);
7883 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7884 ins_pipe(faddD_reg_reg);
7885 %}
7887 // Mul float single precision
7888 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
7889 match(Set dst (MulF src1 src2));
7891 size(4);
7892 format %{ "FMULS $src1,$src2,$dst" %}
7893 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuls_opf);
7894 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7895 ins_pipe(fmulF_reg_reg);
7896 %}
7898 // Mul float double precision
7899 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
7900 match(Set dst (MulD src1 src2));
7902 size(4);
7903 format %{ "FMULD $src1,$src2,$dst" %}
7904 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf);
7905 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7906 ins_pipe(fmulD_reg_reg);
7907 %}
7909 // Div float single precision
7910 instruct divF_reg_reg(regF dst, regF src1, regF src2) %{
7911 match(Set dst (DivF src1 src2));
7913 size(4);
7914 format %{ "FDIVS $src1,$src2,$dst" %}
7915 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivs_opf);
7916 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7917 ins_pipe(fdivF_reg_reg);
7918 %}
7920 // Div float double precision
7921 instruct divD_reg_reg(regD dst, regD src1, regD src2) %{
7922 match(Set dst (DivD src1 src2));
7924 size(4);
7925 format %{ "FDIVD $src1,$src2,$dst" %}
7926 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivd_opf);
7927 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7928 ins_pipe(fdivD_reg_reg);
7929 %}
7931 // Absolute float double precision
7932 instruct absD_reg(regD dst, regD src) %{
7933 match(Set dst (AbsD src));
7935 format %{ "FABSd $src,$dst" %}
7936 ins_encode(fabsd(dst, src));
7937 ins_pipe(faddD_reg);
7938 %}
7940 // Absolute float single precision
7941 instruct absF_reg(regF dst, regF src) %{
7942 match(Set dst (AbsF src));
7944 format %{ "FABSs $src,$dst" %}
7945 ins_encode(fabss(dst, src));
7946 ins_pipe(faddF_reg);
7947 %}
7949 instruct negF_reg(regF dst, regF src) %{
7950 match(Set dst (NegF src));
7952 size(4);
7953 format %{ "FNEGs $src,$dst" %}
7954 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fnegs_opf);
7955 ins_encode(form3_opf_rs2F_rdF(src, dst));
7956 ins_pipe(faddF_reg);
7957 %}
7959 instruct negD_reg(regD dst, regD src) %{
7960 match(Set dst (NegD src));
7962 format %{ "FNEGd $src,$dst" %}
7963 ins_encode(fnegd(dst, src));
7964 ins_pipe(faddD_reg);
7965 %}
7967 // Sqrt float single precision (matched as double sqrt of a widened float)
7968 instruct sqrtF_reg_reg(regF dst, regF src) %{
7969 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
7971 size(4);
7972 format %{ "FSQRTS $src,$dst" %}
7973 ins_encode(fsqrts(dst, src));
7974 ins_pipe(fdivF_reg_reg);
7975 %}
7977 // Sqrt float double precision
7978 instruct sqrtD_reg_reg(regD dst, regD src) %{
7979 match(Set dst (SqrtD src));
7981 size(4);
7982 format %{ "FSQRTD $src,$dst" %}
7983 ins_encode(fsqrtd(dst, src));
7984 ins_pipe(fdivD_reg_reg);
7985 %}
7987 //----------Logical Instructions-----------------------------------------------
7988 // And Instructions
7989 // Register And
7990 instruct andI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7991 match(Set dst (AndI src1 src2));
7993 size(4);
7994 format %{ "AND $src1,$src2,$dst" %}
7995 opcode(Assembler::and_op3, Assembler::arith_op);
7996 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7997 ins_pipe(ialu_reg_reg);
7998 %}
8000 // Immediate And
8001 instruct andI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
8002 match(Set dst (AndI src1 src2));
8004 size(4);
8005 format %{ "AND $src1,$src2,$dst" %}
8006 opcode(Assembler::and_op3, Assembler::arith_op);
8007 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
8008 ins_pipe(ialu_reg_imm);
8009 %}
8011 // Register And Long
8012 instruct andL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
8013 match(Set dst (AndL src1 src2));
8015 ins_cost(DEFAULT_COST);
8016 size(4);
8017 format %{ "AND $src1,$src2,$dst\t! long" %}
8018 opcode(Assembler::and_op3, Assembler::arith_op);
8019 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8020 ins_pipe(ialu_reg_reg);
8021 %}
8023 instruct andL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
8024 match(Set dst (AndL src1 con));
8026 ins_cost(DEFAULT_COST);
8027 size(4);
8028 format %{ "AND $src1,$con,$dst\t! long" %}
8029 opcode(Assembler::and_op3, Assembler::arith_op);
8030 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
8031 ins_pipe(ialu_reg_imm);
8032 %}
8034 // Or Instructions
8035 // Register Or
8036 instruct orI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
8037 match(Set dst (OrI src1 src2));
8039 size(4);
8040 format %{ "OR $src1,$src2,$dst" %}
8041 opcode(Assembler::or_op3, Assembler::arith_op);
8042 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8043 ins_pipe(ialu_reg_reg);
8044 %}
8046 // Immediate Or
8047 instruct orI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
8048 match(Set dst (OrI src1 src2));
8050 size(4);
8051 format %{ "OR $src1,$src2,$dst" %}
8052 opcode(Assembler::or_op3, Assembler::arith_op);
8053 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
8054 ins_pipe(ialu_reg_imm);
8055 %}
8057 // Register Or Long
8058 instruct orL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
8059 match(Set dst (OrL src1 src2));
8061 ins_cost(DEFAULT_COST);
8062 size(4);
8063 format %{ "OR $src1,$src2,$dst\t! long" %}
8064 opcode(Assembler::or_op3, Assembler::arith_op);
8065 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8066 ins_pipe(ialu_reg_reg);
8067 %}
// Immediate Or Long.
// Fix: the block declared ins_cost twice (DEFAULT_COST*2 then DEFAULT_COST).
// The stale DEFAULT_COST*2 line is removed so the rule carries a single
// cost, consistent with andL_reg_imm13 and xorL_reg_imm13.
8069 instruct orL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
8070 match(Set dst (OrL src1 con));
8073 ins_cost(DEFAULT_COST);
8074 size(4);
8075 format %{ "OR $src1,$con,$dst\t! long" %}
8076 opcode(Assembler::or_op3, Assembler::arith_op);
8077 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
8078 ins_pipe(ialu_reg_imm);
8079 %}
// ---- OrI/OrL with a CastP2X operand (TLS access pattern) and the Xor rules ----
8081 #ifndef _LP64
8083 // Use sp_ptr_RegP to match G2 (TLS register) without spilling.
8084 instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{
8085 match(Set dst (OrI src1 (CastP2X src2)));
8087 size(4);
8088 format %{ "OR $src1,$src2,$dst" %}
8089 opcode(Assembler::or_op3, Assembler::arith_op);
8090 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8091 ins_pipe(ialu_reg_reg);
8092 %}
8094 #else
8096 instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{
8097 match(Set dst (OrL src1 (CastP2X src2)));
8099 ins_cost(DEFAULT_COST);
8100 size(4);
8101 format %{ "OR $src1,$src2,$dst\t! long" %}
8102 opcode(Assembler::or_op3, Assembler::arith_op);
8103 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8104 ins_pipe(ialu_reg_reg);
8105 %}
8107 #endif
8109 // Xor Instructions
8110 // Register Xor
8111 instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
8112 match(Set dst (XorI src1 src2));
8114 size(4);
8115 format %{ "XOR $src1,$src2,$dst" %}
8116 opcode(Assembler::xor_op3, Assembler::arith_op);
8117 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8118 ins_pipe(ialu_reg_reg);
8119 %}
8121 // Immediate Xor
8122 instruct xorI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
8123 match(Set dst (XorI src1 src2));
8125 size(4);
8126 format %{ "XOR $src1,$src2,$dst" %}
8127 opcode(Assembler::xor_op3, Assembler::arith_op);
8128 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
8129 ins_pipe(ialu_reg_imm);
8130 %}
8132 // Register Xor Long
8133 instruct xorL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
8134 match(Set dst (XorL src1 src2));
8136 ins_cost(DEFAULT_COST);
8137 size(4);
8138 format %{ "XOR $src1,$src2,$dst\t! long" %}
8139 opcode(Assembler::xor_op3, Assembler::arith_op);
8140 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8141 ins_pipe(ialu_reg_reg);
8142 %}
8144 instruct xorL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
8145 match(Set dst (XorL src1 con));
8147 ins_cost(DEFAULT_COST);
8148 size(4);
8149 format %{ "XOR $src1,$con,$dst\t! long" %}
8150 opcode(Assembler::xor_op3, Assembler::arith_op);
8151 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
8152 ins_pipe(ialu_reg_imm);
8153 %}
8155 //----------Convert to Boolean-------------------------------------------------
8156 // Nice hack for 32-bit tests but doesn't work for
8157 // 64-bit pointers.
8158 instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{
8159 match(Set dst (Conv2B src));
8160 effect( KILL ccr );
8161 ins_cost(DEFAULT_COST*2);
8162 format %{ "CMP R_G0,$src\n\t"
8163 "ADDX R_G0,0,$dst" %}
8164 ins_encode( enc_to_bool( src, dst ) );
8165 ins_pipe(ialu_reg_ialu);
8166 %}
8168 #ifndef _LP64
8169 instruct convP2B( iRegI dst, iRegP src, flagsReg ccr ) %{
8170 match(Set dst (Conv2B src));
8171 effect( KILL ccr );
8172 ins_cost(DEFAULT_COST*2);
8173 format %{ "CMP R_G0,$src\n\t"
8174 "ADDX R_G0,0,$dst" %}
8175 ins_encode( enc_to_bool( src, dst ) );
8176 ins_pipe(ialu_reg_ialu);
8177 %}
8178 #else
8179 instruct convP2B( iRegI dst, iRegP src ) %{
8180 match(Set dst (Conv2B src));
8181 ins_cost(DEFAULT_COST*2);
8182 format %{ "MOV $src,$dst\n\t"
8183 "MOVRNZ $src,1,$dst" %}
8184 ins_encode( form3_g0_rs2_rd_move( src, dst ), enc_convP2B( dst, src ) );
8185 ins_pipe(ialu_clr_and_mover);
8186 %}
8187 #endif
8189 instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{
8190 match(Set dst (CmpLTMask src zero));
8191 effect(KILL ccr);
8192 size(4);
8193 format %{ "SRA $src,#31,$dst\t# cmpLTMask0" %}
8194 ins_encode %{
8195 __ sra($src$$Register, 31, $dst$$Register);
8196 %}
8197 ins_pipe(ialu_reg_imm);
8198 %}
8200 instruct cmpLTMask_reg_reg( iRegI dst, iRegI p, iRegI q, flagsReg ccr ) %{
8201 match(Set dst (CmpLTMask p q));
8202 effect( KILL ccr );
8203 ins_cost(DEFAULT_COST*4);
8204 format %{ "CMP $p,$q\n\t"
8205 "MOV #0,$dst\n\t"
8206 "BLT,a .+8\n\t"
8207 "MOV #-1,$dst" %}
8208 ins_encode( enc_ltmask(p,q,dst) );
8209 ins_pipe(ialu_reg_reg_ialu);
8210 %}
8212 instruct cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp, flagsReg ccr ) %{
8213 match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)));
8214 effect(KILL ccr, TEMP tmp);
8215 ins_cost(DEFAULT_COST*3);
8217 format %{ "SUBcc $p,$q,$p\t! p' = p-q\n\t"
8218 "ADD $p,$y,$tmp\t! g3=p-q+y\n\t"
8219 "MOVlt $tmp,$p\t! p' < 0 ? p'+y : p'" %}
8220 ins_encode( enc_cadd_cmpLTMask(p, q, y, tmp) );
8221 ins_pipe( cadd_cmpltmask );
8222 %}
8225 //-----------------------------------------------------------------
8226 // Direct raw moves between float and general registers using VIS3.
8228 // ins_pipe(faddF_reg);
8229 instruct MoveF2I_reg_reg(iRegI dst, regF src) %{
8230 predicate(UseVIS >= 3);
8231 match(Set dst (MoveF2I src));
8233 format %{ "MOVSTOUW $src,$dst\t! MoveF2I" %}
8234 ins_encode %{
8235 __ movstouw($src$$FloatRegister, $dst$$Register);
8236 %}
8237 ins_pipe(ialu_reg_reg);
8238 %}
8240 instruct MoveI2F_reg_reg(regF dst, iRegI src) %{
8241 predicate(UseVIS >= 3);
8242 match(Set dst (MoveI2F src));
8244 format %{ "MOVWTOS $src,$dst\t! MoveI2F" %}
8245 ins_encode %{
8246 __ movwtos($src$$Register, $dst$$FloatRegister);
8247 %}
8248 ins_pipe(ialu_reg_reg);
8249 %}
8251 instruct MoveD2L_reg_reg(iRegL dst, regD src) %{
8252 predicate(UseVIS >= 3);
8253 match(Set dst (MoveD2L src));
8255 format %{ "MOVDTOX $src,$dst\t! MoveD2L" %}
8256 ins_encode %{
8257 __ movdtox(as_DoubleFloatRegister($src$$reg), $dst$$Register);
8258 %}
8259 ins_pipe(ialu_reg_reg);
8260 %}
8262 instruct MoveL2D_reg_reg(regD dst, iRegL src) %{
8263 predicate(UseVIS >= 3);
8264 match(Set dst (MoveL2D src));
8266 format %{ "MOVXTOD $src,$dst\t! MoveL2D" %}
8267 ins_encode %{
8268 __ movxtod($src$$Register, as_DoubleFloatRegister($dst$$reg));
8269 %}
8270 ins_pipe(ialu_reg_reg);
8271 %}
// ---- Fallback raw float<->integer moves bounced through a stack slot
// ---- (used when VIS3 direct moves are unavailable).
8274 // Raw moves between float and general registers using stack.
8276 instruct MoveF2I_stack_reg(iRegI dst, stackSlotF src) %{
8277 match(Set dst (MoveF2I src));
8278 effect(DEF dst, USE src);
8279 ins_cost(MEMORY_REF_COST);
8281 size(4);
8282 format %{ "LDUW $src,$dst\t! MoveF2I" %}
8283 opcode(Assembler::lduw_op3);
8284 ins_encode(simple_form3_mem_reg( src, dst ) );
8285 ins_pipe(iload_mem);
8286 %}
8288 instruct MoveI2F_stack_reg(regF dst, stackSlotI src) %{
8289 match(Set dst (MoveI2F src));
8290 effect(DEF dst, USE src);
8291 ins_cost(MEMORY_REF_COST);
8293 size(4);
8294 format %{ "LDF $src,$dst\t! MoveI2F" %}
8295 opcode(Assembler::ldf_op3);
8296 ins_encode(simple_form3_mem_reg(src, dst));
8297 ins_pipe(floadF_stk);
8298 %}
8300 instruct MoveD2L_stack_reg(iRegL dst, stackSlotD src) %{
8301 match(Set dst (MoveD2L src));
8302 effect(DEF dst, USE src);
8303 ins_cost(MEMORY_REF_COST);
8305 size(4);
8306 format %{ "LDX $src,$dst\t! MoveD2L" %}
8307 opcode(Assembler::ldx_op3);
8308 ins_encode(simple_form3_mem_reg( src, dst ) );
8309 ins_pipe(iload_mem);
8310 %}
8312 instruct MoveL2D_stack_reg(regD dst, stackSlotL src) %{
8313 match(Set dst (MoveL2D src));
8314 effect(DEF dst, USE src);
8315 ins_cost(MEMORY_REF_COST);
8317 size(4);
8318 format %{ "LDDF $src,$dst\t! MoveL2D" %}
8319 opcode(Assembler::lddf_op3);
8320 ins_encode(simple_form3_mem_reg(src, dst));
8321 ins_pipe(floadD_stk);
8322 %}
8324 instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{
8325 match(Set dst (MoveF2I src));
8326 effect(DEF dst, USE src);
8327 ins_cost(MEMORY_REF_COST);
8329 size(4);
8330 format %{ "STF $src,$dst\t! MoveF2I" %}
8331 opcode(Assembler::stf_op3);
8332 ins_encode(simple_form3_mem_reg(dst, src));
8333 ins_pipe(fstoreF_stk_reg);
8334 %}
8336 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{
8337 match(Set dst (MoveI2F src));
8338 effect(DEF dst, USE src);
8339 ins_cost(MEMORY_REF_COST);
8341 size(4);
8342 format %{ "STW $src,$dst\t! MoveI2F" %}
8343 opcode(Assembler::stw_op3);
8344 ins_encode(simple_form3_mem_reg( dst, src ) );
8345 ins_pipe(istore_mem_reg);
8346 %}
8348 instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{
8349 match(Set dst (MoveD2L src));
8350 effect(DEF dst, USE src);
8351 ins_cost(MEMORY_REF_COST);
8353 size(4);
8354 format %{ "STDF $src,$dst\t! MoveD2L" %}
8355 opcode(Assembler::stdf_op3);
8356 ins_encode(simple_form3_mem_reg(dst, src));
8357 ins_pipe(fstoreD_stk_reg);
8358 %}
8360 instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{
8361 match(Set dst (MoveL2D src));
8362 effect(DEF dst, USE src);
8363 ins_cost(MEMORY_REF_COST);
8365 size(4);
8366 format %{ "STX $src,$dst\t! MoveL2D" %}
8367 opcode(Assembler::stx_op3);
8368 ins_encode(simple_form3_mem_reg( dst, src ) );
8369 ins_pipe(istore_mem_reg);
8370 %}
8373 //----------Arithmetic Conversion Instructions---------------------------------
8374 // The conversions operations are all Alpha sorted. Please keep it that way!
8376 instruct convD2F_reg(regF dst, regD src) %{
8377 match(Set dst (ConvD2F src));
8378 size(4);
8379 format %{ "FDTOS $src,$dst" %}
8380 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf);
8381 ins_encode(form3_opf_rs2D_rdF(src, dst));
8382 ins_pipe(fcvtD2F);
8383 %}
8386 // Convert a double to an int in a float register.
8387 // If the double is a NAN, stuff a zero in instead.
8388 instruct convD2I_helper(regF dst, regD src, flagsRegF0 fcc0) %{
8389 effect(DEF dst, USE src, KILL fcc0);
8390 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t"
8391 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
8392 "FDTOI $src,$dst\t! convert in delay slot\n\t"
8393 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t"
8394 "FSUBs $dst,$dst,$dst\t! cleared only if nan\n"
8395 "skip:" %}
8396 ins_encode(form_d2i_helper(src,dst));
8397 ins_pipe(fcvtD2I);
8398 %}
8400 instruct convD2I_stk(stackSlotI dst, regD src) %{
8401 match(Set dst (ConvD2I src));
8402 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
8403 expand %{
8404 regF tmp;
8405 convD2I_helper(tmp, src);
8406 regF_to_stkI(dst, tmp);
8407 %}
8408 %}
8410 instruct convD2I_reg(iRegI dst, regD src) %{
8411 predicate(UseVIS >= 3);
8412 match(Set dst (ConvD2I src));
8413 ins_cost(DEFAULT_COST*2 + BRANCH_COST);
8414 expand %{
8415 regF tmp;
8416 convD2I_helper(tmp, src);
8417 MoveF2I_reg_reg(dst, tmp);
8418 %}
8419 %}
8422 // Convert a double to a long in a double register.
8423 // If the double is a NAN, stuff a zero in instead.
8424 instruct convD2L_helper(regD dst, regD src, flagsRegF0 fcc0) %{
8425 effect(DEF dst, USE src, KILL fcc0);
8426 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t"
8427 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
8428 "FDTOX $src,$dst\t! convert in delay slot\n\t"
8429 "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t"
8430 "FSUBd $dst,$dst,$dst\t! cleared only if nan\n"
8431 "skip:" %}
8432 ins_encode(form_d2l_helper(src,dst));
8433 ins_pipe(fcvtD2L);
8434 %}
8436 instruct convD2L_stk(stackSlotL dst, regD src) %{
8437 match(Set dst (ConvD2L src));
8438 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
8439 expand %{
8440 regD tmp;
8441 convD2L_helper(tmp, src);
8442 regD_to_stkL(dst, tmp);
8443 %}
8444 %}
8446 instruct convD2L_reg(iRegL dst, regD src) %{
8447 predicate(UseVIS >= 3);
8448 match(Set dst (ConvD2L src));
8449 ins_cost(DEFAULT_COST*2 + BRANCH_COST);
8450 expand %{
8451 regD tmp;
8452 convD2L_helper(tmp, src);
8453 MoveD2L_reg_reg(dst, tmp);
8454 %}
8455 %}
8458 instruct convF2D_reg(regD dst, regF src) %{
8459 match(Set dst (ConvF2D src));
8460 format %{ "FSTOD $src,$dst" %}
8461 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fstod_opf);
8462 ins_encode(form3_opf_rs2F_rdD(src, dst));
8463 ins_pipe(fcvtF2D);
8464 %}
8467 // Convert a float to an int in a float register.
8468 // If the float is a NAN, stuff a zero in instead.
8469 instruct convF2I_helper(regF dst, regF src, flagsRegF0 fcc0) %{
8470 effect(DEF dst, USE src, KILL fcc0);
8471 format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t"
8472 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
8473 "FSTOI $src,$dst\t! convert in delay slot\n\t"
8474 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t"
8475 "FSUBs $dst,$dst,$dst\t! cleared only if nan\n"
8476 "skip:" %}
8477 ins_encode(form_f2i_helper(src,dst));
8478 ins_pipe(fcvtF2I);
8479 %}
8481 instruct convF2I_stk(stackSlotI dst, regF src) %{
8482 match(Set dst (ConvF2I src));
8483 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
8484 expand %{
8485 regF tmp;
8486 convF2I_helper(tmp, src);
8487 regF_to_stkI(dst, tmp);
8488 %}
8489 %}
8491 instruct convF2I_reg(iRegI dst, regF src) %{
8492 predicate(UseVIS >= 3);
8493 match(Set dst (ConvF2I src));
8494 ins_cost(DEFAULT_COST*2 + BRANCH_COST);
8495 expand %{
8496 regF tmp;
8497 convF2I_helper(tmp, src);
8498 MoveF2I_reg_reg(dst, tmp);
8499 %}
8500 %}
8503 // Convert a float to a long in a float register.
8504 // If the float is a NAN, stuff a zero in instead.
8505 instruct convF2L_helper(regD dst, regF src, flagsRegF0 fcc0) %{
8506 effect(DEF dst, USE src, KILL fcc0);
8507 format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t"
8508 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
8509 "FSTOX $src,$dst\t! convert in delay slot\n\t"
8510 "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t"
8511 "FSUBd $dst,$dst,$dst\t! cleared only if nan\n"
8512 "skip:" %}
8513 ins_encode(form_f2l_helper(src,dst));
8514 ins_pipe(fcvtF2L);
8515 %}
8517 instruct convF2L_stk(stackSlotL dst, regF src) %{
8518 match(Set dst (ConvF2L src));
8519 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
8520 expand %{
8521 regD tmp;
8522 convF2L_helper(tmp, src);
8523 regD_to_stkL(dst, tmp);
8524 %}
8525 %}
8527 instruct convF2L_reg(iRegL dst, regF src) %{
8528 predicate(UseVIS >= 3);
8529 match(Set dst (ConvF2L src));
8530 ins_cost(DEFAULT_COST*2 + BRANCH_COST);
8531 expand %{
8532 regD tmp;
8533 convF2L_helper(tmp, src);
8534 MoveD2L_reg_reg(dst, tmp);
8535 %}
8536 %}
// Convert int to double: FITOD on an int value already sitting in an
// FP register (tmp).
instruct convI2D_helper(regD dst, regF tmp) %{
  effect(USE tmp, DEF dst);
  format %{ "FITOD $tmp,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
  ins_encode(form3_opf_rs2F_rdD(tmp, dst));
  ins_pipe(fcvtI2D);
%}

// Convert int to double when the int has been spilled to the stack:
// load it into an FP register, then FITOD.
instruct convI2D_stk(stackSlotI src, regD dst) %{
  match(Set dst (ConvI2D src));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  expand %{
    regF tmp;
    stkI_to_regF(tmp, src);
    convI2D_helper(dst, tmp);
  %}
%}

// Convert int in an integer register to double; VIS3 supplies the
// direct integer->FP register move (MoveI2F), avoiding a stack spill.
instruct convI2D_reg(regD_low dst, iRegI src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvI2D src));
  expand %{
    regF tmp;
    MoveI2F_reg_reg(tmp, src);
    convI2D_helper(dst, tmp);
  %}
%}

// Convert int loaded straight from memory to double: fuse the LoadI
// into an LDF followed by FITOD (two instructions, 8 bytes).
instruct convI2D_mem(regD_low dst, memory mem) %{
  match(Set dst (ConvI2D (LoadI mem)));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  size(8);
  format %{ "LDF $mem,$dst\n\t"
            "FITOD $dst,$dst" %}
  opcode(Assembler::ldf_op3, Assembler::fitod_opf);
  ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
  ins_pipe(floadF_mem);
%}
// Convert int to float: FITOS on an int value already in an FP register.
instruct convI2F_helper(regF dst, regF tmp) %{
  effect(DEF dst, USE tmp);
  format %{ "FITOS $tmp,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitos_opf);
  ins_encode(form3_opf_rs2F_rdF(tmp, dst));
  ins_pipe(fcvtI2F);
%}

// Convert int spilled to the stack to float: load into an FP register,
// then FITOS.
instruct convI2F_stk(regF dst, stackSlotI src) %{
  match(Set dst (ConvI2F src));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  expand %{
    regF tmp;
    stkI_to_regF(tmp,src);
    convI2F_helper(dst, tmp);
  %}
%}

// Convert int in an integer register to float; VIS3 supplies the
// direct integer->FP register move (MoveI2F).
instruct convI2F_reg(regF dst, iRegI src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvI2F src));
  ins_cost(DEFAULT_COST);
  expand %{
    regF tmp;
    MoveI2F_reg_reg(tmp, src);
    convI2F_helper(dst, tmp);
  %}
%}

// Convert int loaded straight from memory to float: fuse the LoadI
// into an LDF followed by FITOS (two instructions, 8 bytes).
instruct convI2F_mem( regF dst, memory mem ) %{
  match(Set dst (ConvI2F (LoadI mem)));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  size(8);
  format %{ "LDF $mem,$dst\n\t"
            "FITOS $dst,$dst" %}
  opcode(Assembler::ldf_op3, Assembler::fitos_opf);
  ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
  ins_pipe(floadF_mem);
%}
// Sign-extend convert int to long: SRA by 0 sign-extends the low
// 32 bits into the full 64-bit register on V9.
instruct convI2L_reg(iRegL dst, iRegI src) %{
  match(Set dst (ConvI2L src));
  size(4);
  format %{ "SRA $src,0,$dst\t! int->long" %}
  opcode(Assembler::sra_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Zero-extend convert int to long
// (ConvI2L masked with 0xFFFFFFFF collapses to a single SRL by 0).
instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{
  match(Set dst (AndL (ConvI2L src) mask) );
  size(4);
  format %{ "SRL $src,0,$dst\t! zero-extend int to long" %}
  opcode(Assembler::srl_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Zero-extend long
// (AndL with 0xFFFFFFFF clears the high word via SRL by 0).
instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{
  match(Set dst (AndL src mask) );
  size(4);
  format %{ "SRL $src,0,$dst\t! zero-extend long" %}
  opcode(Assembler::srl_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
  ins_pipe(ialu_reg_reg);
%}
//-----------
// Long to Double conversion using V8 opcodes.
// Still useful because cheetah traps and becomes
// amazingly slow for some common numbers.

// Magic constant, 0x43300000
// (high word of the double 2^52, used to bias the low int half).
instruct loadConI_x43300000(iRegI dst) %{
  effect(DEF dst);
  size(4);
  format %{ "SETHI HI(0x43300000),$dst\t! 2^52" %}
  ins_encode(SetHi22(0x43300000, dst));
  ins_pipe(ialu_none);
%}

// Magic constant, 0x41f00000
// (high word of the double 2^32, used to scale the high int half).
instruct loadConI_x41f00000(iRegI dst) %{
  effect(DEF dst);
  size(4);
  format %{ "SETHI HI(0x41f00000),$dst\t! 2^32" %}
  ins_encode(SetHi22(0x41f00000, dst));
  ins_pipe(ialu_none);
%}

// Construct a double from two float halves
// (hi half of src1 paired with lo half of src2, via two FMOVS).
instruct regDHi_regDLo_to_regD(regD_low dst, regD_low src1, regD_low src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(8);
  format %{ "FMOVS $src1.hi,$dst.hi\n\t"
            "FMOVS $src2.lo,$dst.lo" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmovs_opf);
  ins_encode(form3_opf_rs2D_hi_rdD_hi(src1, dst), form3_opf_rs2D_lo_rdD_lo(src2, dst));
  ins_pipe(faddD_reg_reg);
%}

// Convert integer in high half of a double register (in the lower half of
// the double register file) to double
instruct convI2D_regDHi_regD(regD dst, regD_low src) %{
  effect(DEF dst, USE src);
  size(4);
  format %{ "FITOD $src,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
  ins_encode(form3_opf_rs2D_rdD(src, dst));
  ins_pipe(fcvtLHi2D);
%}

// Add float double precision
instruct addD_regD_regD(regD dst, regD src1, regD src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "FADDD $src1,$src2,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf);
  ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
  ins_pipe(faddD_reg_reg);
%}

// Sub float double precision
instruct subD_regD_regD(regD dst, regD src1, regD src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "FSUBD $src1,$src2,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf);
  ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
  ins_pipe(faddD_reg_reg);
%}

// Mul float double precision
instruct mulD_regD_regD(regD dst, regD src1, regD src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "FMULD $src1,$src2,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf);
  ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
  ins_pipe(fmulD_reg_reg);
%}

// Slow ConvL2D: split the long into halves, convert the high half with
// FITOD, bias the low half with the 2^52 trick, then recombine as
// hi*2^32 + lo.  Avoids FXTOD, which traps on some chips (see above).
instruct convL2D_reg_slow_fxtof(regD dst, stackSlotL src) %{
  match(Set dst (ConvL2D src));
  ins_cost(DEFAULT_COST*8 + MEMORY_REF_COST*6);
  expand %{
    regD_low   tmpsrc;
    iRegI      ix43300000;
    iRegI      ix41f00000;
    stackSlotL lx43300000;
    stackSlotL lx41f00000;
    regD_low   dx43300000;
    regD       dx41f00000;
    regD       tmp1;
    regD_low   tmp2;
    regD       tmp3;
    regD       tmp4;

    stkL_to_regD(tmpsrc, src);

    // Materialize the two magic doubles via integer SETHIs and the stack.
    loadConI_x43300000(ix43300000);
    loadConI_x41f00000(ix41f00000);
    regI_to_stkLHi(lx43300000, ix43300000);
    regI_to_stkLHi(lx41f00000, ix41f00000);
    stkL_to_regD(dx43300000, lx43300000);
    stkL_to_regD(dx41f00000, lx41f00000);

    convI2D_regDHi_regD(tmp1, tmpsrc);
    regDHi_regDLo_to_regD(tmp2, dx43300000, tmpsrc);
    subD_regD_regD(tmp3, tmp2, dx43300000);
    mulD_regD_regD(tmp4, tmp1, dx41f00000);
    addD_regD_regD(dst, tmp3, tmp4);
  %}
%}
// Long to Double conversion using fast fxtof
// (single FXTOD on a long already in an FP register).
instruct convL2D_helper(regD dst, regD tmp) %{
  effect(DEF dst, USE tmp);
  size(4);
  format %{ "FXTOD $tmp,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtod_opf);
  ins_encode(form3_opf_rs2D_rdD(tmp, dst));
  ins_pipe(fcvtL2D);
%}

// Fast ConvL2D from a long stack slot; only chosen when the chip's
// FXTOD is actually fast (see the slow V8-style path above otherwise).
instruct convL2D_stk_fast_fxtof(regD dst, stackSlotL src) %{
  predicate(VM_Version::has_fast_fxtof());
  match(Set dst (ConvL2D src));
  ins_cost(DEFAULT_COST + 3 * MEMORY_REF_COST);
  expand %{
    regD tmp;
    stkL_to_regD(tmp, src);
    convL2D_helper(dst, tmp);
  %}
%}

// Fast ConvL2D from an integer register; VIS3 supplies the direct
// long->FP register move (MoveL2D), avoiding the stack round trip.
instruct convL2D_reg(regD dst, iRegL src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvL2D src));
  expand %{
    regD tmp;
    MoveL2D_reg_reg(tmp, src);
    convL2D_helper(dst, tmp);
  %}
%}
// Long to Float conversion using fast fxtof
// (single FXTOS on a long already in an FP register).
instruct convL2F_helper(regF dst, regD tmp) %{
  effect(DEF dst, USE tmp);
  size(4);
  format %{ "FXTOS $tmp,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtos_opf);
  ins_encode(form3_opf_rs2D_rdF(tmp, dst));
  ins_pipe(fcvtL2F);
%}

// ConvL2F from a long stack slot: load into an FP register, then FXTOS.
instruct convL2F_stk_fast_fxtof(regF dst, stackSlotL src) %{
  match(Set dst (ConvL2F src));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  expand %{
    regD tmp;
    stkL_to_regD(tmp, src);
    convL2F_helper(dst, tmp);
  %}
%}

// ConvL2F from an integer register; VIS3 supplies the direct
// long->FP register move (MoveL2D).
instruct convL2F_reg(regF dst, iRegL src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvL2F src));
  ins_cost(DEFAULT_COST);
  expand %{
    regD tmp;
    MoveL2D_reg_reg(tmp, src);
    convL2F_helper(dst, tmp);
  %}
%}
//-----------

// Convert long to int.  On 32-bit VMs just move the low word; on LP64
// sign-extend the low 32 bits in place with SRA.
instruct convL2I_reg(iRegI dst, iRegL src) %{
  match(Set dst (ConvL2I src));
#ifndef _LP64
  format %{ "MOV    $src.lo,$dst\t! long->int" %}
  ins_encode( form3_g0_rs2_rd_move_lo2( src, dst ) );
  ins_pipe(ialu_move_reg_I_to_L);
#else
  size(4);
  format %{ "SRA    $src,R_G0,$dst\t! long->int" %}
  ins_encode( form3_rs1_rd_signextend_lo1( src, dst ) );
  ins_pipe(ialu_reg);
#endif
%}

// Register Shift Right Immediate
// (fused ConvL2I of a long shifted right by 32..63: one SRAX).
instruct shrL_reg_imm6_L2I(iRegI dst, iRegL src, immI_32_63 cnt) %{
  match(Set dst (ConvL2I (RShiftL src cnt)));
  size(4);
  format %{ "SRAX   $src,$cnt,$dst" %}
  opcode(Assembler::srax_op3, Assembler::arith_op);
  ins_encode( form3_sd_rs1_imm6_rd( src, cnt, dst ) );
  ins_pipe(ialu_reg_imm);
%}
// Replicate scalar to packed byte values in Double register
// (shift/or ladder builds 8 copies of the low byte in a long register;
// clobbers O7 as scratch).
instruct Repl8B_reg_helper(iRegL dst, iRegI src) %{
  effect(DEF dst, USE src);
  format %{ "SLLX $src,56,$dst\n\t"
            "SRLX $dst, 8,O7\n\t"
            "OR $dst,O7,$dst\n\t"
            "SRLX $dst,16,O7\n\t"
            "OR $dst,O7,$dst\n\t"
            "SRLX $dst,32,O7\n\t"
            "OR $dst,O7,$dst\t! replicate8B" %}
  ins_encode( enc_repl8b(src, dst));
  ins_pipe(ialu_reg);
%}

// Replicate scalar to packed byte values in Double register
// (result delivered via a double-sized stack slot).
instruct Repl8B_reg(stackSlotD dst, iRegI src) %{
  match(Set dst (Replicate8B src));
  expand %{
    iRegL tmp;
    Repl8B_reg_helper(tmp, src);
    regL_to_stkD(dst, tmp);
  %}
%}

// Replicate scalar constant to packed byte values in Double register
// (pattern precomputed by replicate_immI and loaded from the constant table).
instruct Repl8B_immI(regD dst, immI13 con, o7RegI tmp) %{
  match(Set dst (Replicate8B con));
  effect(KILL tmp);
  format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl8B($con)" %}
  ins_encode %{
    // XXX This is a quick fix for 6833573.
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 8, 1)), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 8, 1)), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}

// Replicate scalar to packed char values into stack slot
// (4 copies of the low 16 bits; same encoding as the short variant).
instruct Repl4C_reg_helper(iRegL dst, iRegI src) %{
  effect(DEF dst, USE src);
  format %{ "SLLX $src,48,$dst\n\t"
            "SRLX $dst,16,O7\n\t"
            "OR $dst,O7,$dst\n\t"
            "SRLX $dst,32,O7\n\t"
            "OR $dst,O7,$dst\t! replicate4C" %}
  ins_encode( enc_repl4s(src, dst) );
  ins_pipe(ialu_reg);
%}

// Replicate scalar to packed char values into stack slot
instruct Repl4C_reg(stackSlotD dst, iRegI src) %{
  match(Set dst (Replicate4C src));
  expand %{
    iRegL tmp;
    Repl4C_reg_helper(tmp, src);
    regL_to_stkD(dst, tmp);
  %}
%}

// Replicate scalar constant to packed char values in Double register
instruct Repl4C_immI(regD dst, immI con, o7RegI tmp) %{
  match(Set dst (Replicate4C con));
  effect(KILL tmp);
  format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl4C($con)" %}
  ins_encode %{
    // XXX This is a quick fix for 6833573.
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 4, 2)), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 4, 2)), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}

// Replicate scalar to packed short values into stack slot
instruct Repl4S_reg_helper(iRegL dst, iRegI src) %{
  effect(DEF dst, USE src);
  format %{ "SLLX $src,48,$dst\n\t"
            "SRLX $dst,16,O7\n\t"
            "OR $dst,O7,$dst\n\t"
            "SRLX $dst,32,O7\n\t"
            "OR $dst,O7,$dst\t! replicate4S" %}
  ins_encode( enc_repl4s(src, dst) );
  ins_pipe(ialu_reg);
%}

// Replicate scalar to packed short values into stack slot
instruct Repl4S_reg(stackSlotD dst, iRegI src) %{
  match(Set dst (Replicate4S src));
  expand %{
    iRegL tmp;
    Repl4S_reg_helper(tmp, src);
    regL_to_stkD(dst, tmp);
  %}
%}

// Replicate scalar constant to packed short values in Double register
instruct Repl4S_immI(regD dst, immI con, o7RegI tmp) %{
  match(Set dst (Replicate4S con));
  effect(KILL tmp);
  format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl4S($con)" %}
  ins_encode %{
    // XXX This is a quick fix for 6833573.
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 4, 2)), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 4, 2)), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}

// Replicate scalar to packed int values in Double register
// (2 copies of the low 32 bits).
instruct Repl2I_reg_helper(iRegL dst, iRegI src) %{
  effect(DEF dst, USE src);
  format %{ "SLLX $src,32,$dst\n\t"
            "SRLX $dst,32,O7\n\t"
            "OR $dst,O7,$dst\t! replicate2I" %}
  ins_encode( enc_repl2i(src, dst));
  ins_pipe(ialu_reg);
%}

// Replicate scalar to packed int values in Double register
instruct Repl2I_reg(stackSlotD dst, iRegI src) %{
  match(Set dst (Replicate2I src));
  expand %{
    iRegL tmp;
    Repl2I_reg_helper(tmp, src);
    regL_to_stkD(dst, tmp);
  %}
%}

// Replicate scalar constant to packed int values in Double register
// (note: matches any immI con, not just zero).
instruct Repl2I_immI(regD dst, immI con, o7RegI tmp) %{
  match(Set dst (Replicate2I con));
  effect(KILL tmp);
  format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2I($con)" %}
  ins_encode %{
    // XXX This is a quick fix for 6833573.
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 2, 4)), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 2, 4)), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}
//----------Control Flow Instructions------------------------------------------
// Compare Instructions

// Compare Integers
// All comparisons encode as SUBcc with G0 as destination: set the
// condition codes, discard the arithmetic result.
instruct compI_iReg(flagsReg icc, iRegI op1, iRegI op2) %{
  match(Set icc (CmpI op1 op2));
  effect( DEF icc, USE op1, USE op2 );
  size(4);
  format %{ "CMP $op1,$op2" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

// Compare unsigned integers (same SUBcc; the branch interprets the flags).
instruct compU_iReg(flagsRegU icc, iRegI op1, iRegI op2) %{
  match(Set icc (CmpU op1 op2));
  size(4);
  format %{ "CMP $op1,$op2\t! unsigned" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

// Compare integer against a 13-bit signed immediate.
instruct compI_iReg_imm13(flagsReg icc, iRegI op1, immI13 op2) %{
  match(Set icc (CmpI op1 op2));
  effect( DEF icc, USE op1 );
  size(4);
  format %{ "CMP $op1,$op2" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_imm);
%}

// Test bits: (op1 & op2) compared with zero folds to a single ANDcc.
instruct testI_reg_reg( flagsReg icc, iRegI op1, iRegI op2, immI0 zero ) %{
  match(Set icc (CmpI (AndI op1 op2) zero));
  size(4);
  format %{ "BTST $op2,$op1" %}
  opcode(Assembler::andcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg_zero);
%}

// Test bits against a 13-bit immediate mask.
instruct testI_reg_imm( flagsReg icc, iRegI op1, immI13 op2, immI0 zero ) %{
  match(Set icc (CmpI (AndI op1 op2) zero));
  size(4);
  format %{ "BTST $op2,$op1" %}
  opcode(Assembler::andcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_imm_zero);
%}

// Compare longs (sets xcc; 64-bit condition codes).
instruct compL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2 ) %{
  match(Set xcc (CmpL op1 op2));
  effect( DEF xcc, USE op1, USE op2 );
  size(4);
  format %{ "CMP $op1,$op2\t\t! long" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

// Compare long against a 13-bit signed immediate.
instruct compL_reg_con(flagsRegL xcc, iRegL op1, immL13 con) %{
  match(Set xcc (CmpL op1 con));
  effect( DEF xcc, USE op1, USE con );
  size(4);
  format %{ "CMP $op1,$con\t\t! long" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

// Test long bits: (op1 & op2) vs zero folds to ANDcc.
instruct testL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2, immL0 zero) %{
  match(Set xcc (CmpL (AndL op1 op2) zero));
  effect( DEF xcc, USE op1, USE op2 );
  size(4);
  format %{ "BTST $op1,$op2\t\t! long" %}
  opcode(Assembler::andcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

// useful for checking the alignment of a pointer:
instruct testL_reg_con(flagsRegL xcc, iRegL op1, immL13 con, immL0 zero) %{
  match(Set xcc (CmpL (AndL op1 con) zero));
  effect( DEF xcc, USE op1, USE con );
  size(4);
  format %{ "BTST $op1,$con\t\t! long" %}
  opcode(Assembler::andcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

// Compare unsigned integer against a 13-bit unsigned immediate.
instruct compU_iReg_imm13(flagsRegU icc, iRegI op1, immU13 op2 ) %{
  match(Set icc (CmpU op1 op2));
  size(4);
  format %{ "CMP $op1,$op2\t! unsigned" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_imm);
%}

// Compare Pointers
instruct compP_iRegP(flagsRegP pcc, iRegP op1, iRegP op2 ) %{
  match(Set pcc (CmpP op1 op2));
  size(4);
  format %{ "CMP $op1,$op2\t! ptr" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

// Compare pointer against a 13-bit immediate (typically NULL).
instruct compP_iRegP_imm13(flagsRegP pcc, iRegP op1, immP13 op2 ) %{
  match(Set pcc (CmpP op1 op2));
  size(4);
  format %{ "CMP $op1,$op2\t! ptr" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_imm);
%}

// Compare Narrow oops
instruct compN_iRegN(flagsReg icc, iRegN op1, iRegN op2 ) %{
  match(Set icc (CmpN op1 op2));
  size(4);
  format %{ "CMP $op1,$op2\t! compressed ptr" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

// Compare narrow oop against the null (zero) encoding.
instruct compN_iRegN_immN0(flagsReg icc, iRegN op1, immN0 op2 ) %{
  match(Set icc (CmpN op1 op2));
  size(4);
  format %{ "CMP $op1,$op2\t! compressed ptr" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_imm);
%}
//----------Max and Min--------------------------------------------------------
// Min Instructions

// Conditional move for min: MOVlt copies op1 into op2 when the
// preceding compare set "less".
instruct cmovI_reg_lt( iRegI op2, iRegI op1, flagsReg icc ) %{
  effect( USE_DEF op2, USE op1, USE icc );
  size(4);
  format %{ "MOVlt icc,$op1,$op2\t! min" %}
  opcode(Assembler::less);
  ins_encode( enc_cmov_reg_minmax(op2,op1) );
  ins_pipe(ialu_reg_flags);
%}

// Min Register with Register.
// Expands to a compare followed by a conditional move; result in op2.
instruct minI_eReg(iRegI op1, iRegI op2) %{
  match(Set op2 (MinI op1 op2));
  ins_cost(DEFAULT_COST*2);
  expand %{
    flagsReg icc;
    compI_iReg(icc,op1,op2);
    cmovI_reg_lt(op2,op1,icc);
  %}
%}
// Max Instructions

// Conditional move for max: MOVgt copies op1 into op2 when the
// preceding compare set "greater".
// Declare size(4): enc_cmov_reg_minmax emits a single MOVcc, exactly
// as in cmovI_reg_lt above, which already declares size(4).
instruct cmovI_reg_gt( iRegI op2, iRegI op1, flagsReg icc ) %{
  effect( USE_DEF op2, USE op1, USE icc );
  size(4);
  format %{ "MOVgt icc,$op1,$op2\t! max" %}
  opcode(Assembler::greater);
  ins_encode( enc_cmov_reg_minmax(op2,op1) );
  ins_pipe(ialu_reg_flags);
%}
// Max Register with Register
// Expands to a compare followed by a conditional move; result in op2.
instruct maxI_eReg(iRegI op1, iRegI op2) %{
  match(Set op2 (MaxI op1 op2));
  ins_cost(DEFAULT_COST*2);
  expand %{
    flagsReg icc;
    compI_iReg(icc,op1,op2);
    cmovI_reg_gt(op2,op1,icc);
  %}
%}
//----------Float Compares----------------------------------------------------
// Compare floating, generate condition code
instruct cmpF_cc(flagsRegF fcc, regF src1, regF src2) %{
  match(Set fcc (CmpF src1 src2));
  size(4);
  format %{ "FCMPs $fcc,$src1,$src2" %}
  opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmps_opf);
  ins_encode( form3_opf_rs1F_rs2F_fcc( src1, src2, fcc ) );
  ins_pipe(faddF_fcc_reg_reg_zero);
%}

// Compare doubles, generate FP condition code.
instruct cmpD_cc(flagsRegF fcc, regD src1, regD src2) %{
  match(Set fcc (CmpD src1 src2));
  size(4);
  format %{ "FCMPd $fcc,$src1,$src2" %}
  opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmpd_opf);
  ins_encode( form3_opf_rs1D_rs2D_fcc( src1, src2, fcc ) );
  ins_pipe(faddD_fcc_reg_reg_zero);
%}

// Compare floating, generate -1,0,1
// (three-way result in an integer register; clobbers fcc0).
instruct cmpF_reg(iRegI dst, regF src1, regF src2, flagsRegF0 fcc0) %{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL fcc0);
  ins_cost(DEFAULT_COST*3+BRANCH_COST*3);
  format %{ "fcmpl $dst,$src1,$src2" %}
  // Primary = float
  opcode( true );
  ins_encode( floating_cmp( dst, src1, src2 ) );
  ins_pipe( floating_cmp );
%}

// Three-way double compare: -1,0,1 into an integer register.
instruct cmpD_reg(iRegI dst, regD src1, regD src2, flagsRegF0 fcc0) %{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL fcc0);
  ins_cost(DEFAULT_COST*3+BRANCH_COST*3);
  format %{ "dcmpl $dst,$src1,$src2" %}
  // Primary = double (not float)
  opcode( false );
  ins_encode( floating_cmp( dst, src1, src2 ) );
  ins_pipe( floating_cmp );
%}
//----------Branches---------------------------------------------------------
// Jump
// (compare 'operand indIndex' and 'instruct addP_reg_reg' above)
// Table jump for switch statements: index into the constant-table-based
// jump table with switch_val and jump through O7.
instruct jumpXtnd(iRegX switch_val, o7RegI table) %{
  match(Jump switch_val);
  ins_cost(350);
  format %{ "ADD $constanttablebase, $constantoffset, O7\n\t"
            "LD [O7 + $switch_val], O7\n\t"
            "JUMP O7"
         %}
  ins_encode %{
    // Calculate table address into a register.
    Register table_reg;
    Register label_reg = O7;
    if (constant_offset() == 0) {
      table_reg = $constanttablebase;
    } else {
      table_reg = O7;
      RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset, O7);
      __ add($constanttablebase, con_offset, table_reg);
    }
    // Jump to base address + switch value
    __ ld_ptr(table_reg, $switch_val$$Register, label_reg);
    __ jmp(label_reg, G0);
    __ delayed()->nop();
  %}
  ins_pipe(ialu_reg_reg);
%}
// Direct Branch.  Use V8 version with longer range.
// (BA plus a delay-slot nop: 8 bytes.)
instruct branch(label labl) %{
  match(Goto);
  effect(USE labl);
  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BA $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    __ ba(*L);
    __ delayed()->nop();
  %}
  ins_pipe(br);
%}

// Direct Branch, short with no delay slot
// (cbcond-style 4-byte BA; two cbconds may not be back to back).
instruct branch_short(label labl) %{
  match(Goto);
  predicate(UseCBCond);
  effect(USE labl);
  size(4);
  ins_cost(BRANCH_COST);
  format %{ "BA $labl\t! short branch" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ ba_short(*L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_imm);
%}
// Conditional Direct Branch
// (BPcc on the integer condition codes plus delay-slot nop).
instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{
  match(If cmp icc);
  effect(USE labl);
  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BP$cmp $icc,$labl" %}
  // Prim = bits 24-22, Secnd = bits 31-30
  ins_encode( enc_bp( labl, cmp, icc ) );
  ins_pipe(br_cc);
%}
// Conditional direct branch on unsigned integer condition codes.
// Declare size(8): enc_bp emits BPcc plus a delay-slot nop, exactly as
// in branchCon/branchLoopEnd/branchLoopEndU, which all declare size(8).
instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{
  match(If cmp icc);
  effect(USE labl);
  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BP$cmp $icc,$labl" %}
  // Prim = bits 24-22, Secnd = bits 31-30
  ins_encode( enc_bp( labl, cmp, icc ) );
  ins_pipe(br_cc);
%}
// Conditional direct branch on pointer condition codes.
instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{
  match(If cmp pcc);
  effect(USE labl);
  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BP$cmp $pcc,$labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(br_cc);
%}

// Conditional direct branch on floating-point condition codes (FBPcc).
instruct branchConF(cmpOpF cmp, flagsRegF fcc, label labl) %{
  match(If cmp fcc);
  effect(USE labl);
  size(8);
  ins_cost(BRANCH_COST);
  format %{ "FBP$cmp $fcc,$labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ fbp( (Assembler::Condition)($cmp$$cmpcode), false, (Assembler::CC)($fcc$$reg), predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(br_fcc);
%}

// Counted-loop back branch on integer condition codes.
instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{
  match(CountedLoopEnd cmp icc);
  effect(USE labl);
  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BP$cmp $icc,$labl\t! Loop end" %}
  // Prim = bits 24-22, Secnd = bits 31-30
  ins_encode( enc_bp( labl, cmp, icc ) );
  ins_pipe(br_cc);
%}

// Counted-loop back branch on unsigned integer condition codes.
instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{
  match(CountedLoopEnd cmp icc);
  effect(USE labl);
  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BP$cmp $icc,$labl\t! Loop end" %}
  // Prim = bits 24-22, Secnd = bits 31-30
  ins_encode( enc_bp( labl, cmp, icc ) );
  ins_pipe(br_cc);
%}
// Compare and branch instructions
// Each fuses CMP + BPcc + delay-slot nop (12 bytes); the compare
// clobbers the named flags register.
instruct cmpI_reg_branch(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl, KILL icc);
  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP $op1,$op2\t! int\n\t"
            "BP$cmp $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, $op2$$Register);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_reg);
%}

// Compare int against a 5-bit immediate and branch.
instruct cmpI_imm_branch(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl, KILL icc);
  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP $op1,$op2\t! int\n\t"
            "BP$cmp $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, $op2$$constant);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_imm);
%}

// Compare unsigned ints and branch.
instruct cmpU_reg_branch(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl, KILL icc);
  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP $op1,$op2\t! unsigned\n\t"
            "BP$cmp $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, $op2$$Register);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_reg);
%}

// Compare unsigned int against a 5-bit immediate and branch.
instruct cmpU_imm_branch(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl, KILL icc);
  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP $op1,$op2\t! unsigned\n\t"
            "BP$cmp $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, $op2$$constant);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_imm);
%}

// Compare longs and branch (uses the 64-bit xcc condition codes).
instruct cmpL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl, KILL xcc);
  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP $op1,$op2\t! long\n\t"
            "BP$cmp $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, $op2$$Register);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_reg);
%}

// Compare long against a 5-bit immediate and branch.
instruct cmpL_imm_branch(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl, KILL xcc);
  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP $op1,$op2\t! long\n\t"
            "BP$cmp $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, $op2$$constant);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_imm);
%}
// Compare Pointers and branch
instruct cmpP_reg_branch(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl, KILL pcc);
  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP $op1,$op2\t! ptr\n\t"
            "B$cmp $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, $op2$$Register);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_reg);
%}

// Compare pointer against NULL (G0) and branch.
instruct cmpP_null_branch(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{
  match(If cmp (CmpP op1 null));
  effect(USE labl, KILL pcc);
  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP $op1,0\t! ptr\n\t"
            "B$cmp $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, G0);
    // bpr() is not used here since it has shorter distance.
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_reg);
%}

// Compare narrow oops and branch.
instruct cmpN_reg_branch(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl, KILL icc);
  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
            "BP$cmp $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, $op2$$Register);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_reg);
%}

// Compare narrow oop against the null (zero) encoding and branch.
instruct cmpN_null_branch(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{
  match(If cmp (CmpN op1 null));
  effect(USE labl, KILL icc);
  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP $op1,0\t! compressed ptr\n\t"
            "BP$cmp $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, G0);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_reg);
%}
9580 // Loop back branch
9581 instruct cmpI_reg_branchLoopEnd(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
9582 match(CountedLoopEnd cmp (CmpI op1 op2));
9583 effect(USE labl, KILL icc);
9585 size(12);
9586 ins_cost(BRANCH_COST);
9587 format %{ "CMP $op1,$op2\t! int\n\t"
9588 "BP$cmp $labl\t! Loop end" %}
9589 ins_encode %{
9590 Label* L = $labl$$label;
9591 Assembler::Predict predict_taken =
9592 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9593 __ cmp($op1$$Register, $op2$$Register);
9594 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9595 __ delayed()->nop();
9596 %}
9597 ins_pipe(cmp_br_reg_reg);
9598 %}
9600 instruct cmpI_imm_branchLoopEnd(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
9601 match(CountedLoopEnd cmp (CmpI op1 op2));
9602 effect(USE labl, KILL icc);
9604 size(12);
9605 ins_cost(BRANCH_COST);
9606 format %{ "CMP $op1,$op2\t! int\n\t"
9607 "BP$cmp $labl\t! Loop end" %}
9608 ins_encode %{
9609 Label* L = $labl$$label;
9610 Assembler::Predict predict_taken =
9611 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9612 __ cmp($op1$$Register, $op2$$constant);
9613 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9614 __ delayed()->nop();
9615 %}
9616 ins_pipe(cmp_br_reg_imm);
9617 %}
9619 // Short compare and branch instructions
// These use the SPARC T4 CBcond (compare-and-branch) instruction: a single
// 4-byte instruction with no delay slot (note size(4) and no delayed()).
// Guarded by the UseCBCond flag. The assert plus ins_avoid_back_to_back(1)
// enforce that two cbcond instructions are not emitted adjacently — the
// scheduler must separate them (presumably a hardware restriction; the
// "Separate short branches" NOPs elsewhere in this file serve the same
// purpose).
9620 instruct cmpI_reg_branch_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
9621 match(If cmp (CmpI op1 op2));
9622 predicate(UseCBCond);
9623 effect(USE labl, KILL icc);
9625 size(4);
9626 ins_cost(BRANCH_COST);
9627 format %{ "CWB$cmp $op1,$op2,$labl\t! int" %}
9628 ins_encode %{
9629 Label* L = $labl$$label;
9630 assert(__ use_cbcond(*L), "back to back cbcond");
9631 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9632 %}
9633 ins_short_branch(1);
9634 ins_avoid_back_to_back(1);
9635 ins_pipe(cbcond_reg_reg);
9636 %}
// Int compare-and-branch against a 5-bit immediate.
9638 instruct cmpI_imm_branch_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
9639 match(If cmp (CmpI op1 op2));
9640 predicate(UseCBCond);
9641 effect(USE labl, KILL icc);
9643 size(4);
9644 ins_cost(BRANCH_COST);
9645 format %{ "CWB$cmp $op1,$op2,$labl\t! int" %}
9646 ins_encode %{
9647 Label* L = $labl$$label;
9648 assert(__ use_cbcond(*L), "back to back cbcond");
9649 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
9650 %}
9651 ins_short_branch(1);
9652 ins_avoid_back_to_back(1);
9653 ins_pipe(cbcond_reg_imm);
9654 %}
// Unsigned int compare-and-branch (cmpOpU supplies the unsigned condition).
9656 instruct cmpU_reg_branch_short(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{
9657 match(If cmp (CmpU op1 op2));
9658 predicate(UseCBCond);
9659 effect(USE labl, KILL icc);
9661 size(4);
9662 ins_cost(BRANCH_COST);
9663 format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
9664 ins_encode %{
9665 Label* L = $labl$$label;
9666 assert(__ use_cbcond(*L), "back to back cbcond");
9667 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9668 %}
9669 ins_short_branch(1);
9670 ins_avoid_back_to_back(1);
9671 ins_pipe(cbcond_reg_reg);
9672 %}
// Unsigned compare-and-branch against a 5-bit immediate.
9674 instruct cmpU_imm_branch_short(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{
9675 match(If cmp (CmpU op1 op2));
9676 predicate(UseCBCond);
9677 effect(USE labl, KILL icc);
9679 size(4);
9680 ins_cost(BRANCH_COST);
9681 format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
9682 ins_encode %{
9683 Label* L = $labl$$label;
9684 assert(__ use_cbcond(*L), "back to back cbcond");
9685 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
9686 %}
9687 ins_short_branch(1);
9688 ins_avoid_back_to_back(1);
9689 ins_pipe(cbcond_reg_imm);
9690 %}
// Long (64-bit) compare-and-branch: uses xcc and the CXB mnemonic.
9692 instruct cmpL_reg_branch_short(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{
9693 match(If cmp (CmpL op1 op2));
9694 predicate(UseCBCond);
9695 effect(USE labl, KILL xcc);
9697 size(4);
9698 ins_cost(BRANCH_COST);
9699 format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
9700 ins_encode %{
9701 Label* L = $labl$$label;
9702 assert(__ use_cbcond(*L), "back to back cbcond");
9703 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$Register, *L);
9704 %}
9705 ins_short_branch(1);
9706 ins_avoid_back_to_back(1);
9707 ins_pipe(cbcond_reg_reg);
9708 %}
// Long compare-and-branch against a 5-bit immediate (immL5).
9710 instruct cmpL_imm_branch_short(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{
9711 match(If cmp (CmpL op1 op2));
9712 predicate(UseCBCond);
9713 effect(USE labl, KILL xcc);
9715 size(4);
9716 ins_cost(BRANCH_COST);
9717 format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
9718 ins_encode %{
9719 Label* L = $labl$$label;
9720 assert(__ use_cbcond(*L), "back to back cbcond");
9721 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L);
9722 %}
9723 ins_short_branch(1);
9724 ins_avoid_back_to_back(1);
9725 ins_pipe(cbcond_reg_imm);
9726 %}
9728 // Compare Pointers and branch
// Short-form pointer compare-and-branch via cbcond. ptr_cc selects xcc on
// 64-bit and icc on 32-bit; the format string mirrors that choice (CXB for
// 64-bit pointers, CWB for 32-bit).
9729 instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{
9730 match(If cmp (CmpP op1 op2));
9731 predicate(UseCBCond);
9732 effect(USE labl, KILL pcc);
9734 size(4);
9735 ins_cost(BRANCH_COST);
9736 #ifdef _LP64
9737 format %{ "CXB$cmp $op1,$op2,$labl\t! ptr" %}
9738 #else
9739 format %{ "CWB$cmp $op1,$op2,$labl\t! ptr" %}
9740 #endif
9741 ins_encode %{
9742 Label* L = $labl$$label;
9743 assert(__ use_cbcond(*L), "back to back cbcond");
9744 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, $op2$$Register, *L);
9745 %}
9746 ins_short_branch(1);
9747 ins_avoid_back_to_back(1);
9748 ins_pipe(cbcond_reg_reg);
9749 %}
// Short-form pointer NULL check: compares against hardwired-zero G0.
9751 instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{
9752 match(If cmp (CmpP op1 null));
9753 predicate(UseCBCond);
9754 effect(USE labl, KILL pcc);
9756 size(4);
9757 ins_cost(BRANCH_COST);
9758 #ifdef _LP64
9759 format %{ "CXB$cmp $op1,0,$labl\t! ptr" %}
9760 #else
9761 format %{ "CWB$cmp $op1,0,$labl\t! ptr" %}
9762 #endif
9763 ins_encode %{
9764 Label* L = $labl$$label;
9765 assert(__ use_cbcond(*L), "back to back cbcond");
9766 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, G0, *L);
9767 %}
9768 ins_short_branch(1);
9769 ins_avoid_back_to_back(1);
9770 ins_pipe(cbcond_reg_reg);
9771 %}
// Short-form compressed-oop compare-and-branch via cbcond (32-bit narrow
// oops use the integer condition codes, hence CWB / Assembler::icc).
// FIX: the format string read "$op1,op2,$labl" — the second operand was
// missing its '$', so the disassembly printed the literal text "op2"
// instead of the register name. Every sibling instruct uses "$op2".
// Format strings are debug output only; the emitted code is unchanged.
9773 instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{
9774 match(If cmp (CmpN op1 op2));
9775 predicate(UseCBCond);
9776 effect(USE labl, KILL icc);
9778 size(4);
9779 ins_cost(BRANCH_COST);
9780 format %{ "CWB$cmp $op1,$op2,$labl\t! compressed ptr" %}
9781 ins_encode %{
9782 Label* L = $labl$$label;
9783 assert(__ use_cbcond(*L), "back to back cbcond");
9784 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9785 %}
9786 ins_short_branch(1);
9787 ins_avoid_back_to_back(1);
9788 ins_pipe(cbcond_reg_reg);
9789 %}
// Short-form compressed-oop NULL check: narrow null encodes as zero, so
// compare against hardwired-zero G0 with a single cbcond.
9791 instruct cmpN_null_branch_short(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{
9792 match(If cmp (CmpN op1 null));
9793 predicate(UseCBCond);
9794 effect(USE labl, KILL icc);
9796 size(4);
9797 ins_cost(BRANCH_COST);
9798 format %{ "CWB$cmp $op1,0,$labl\t! compressed ptr" %}
9799 ins_encode %{
9800 Label* L = $labl$$label;
9801 assert(__ use_cbcond(*L), "back to back cbcond");
9802 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, G0, *L);
9803 %}
9804 ins_short_branch(1);
9805 ins_avoid_back_to_back(1);
9806 ins_pipe(cbcond_reg_reg);
9807 %}
9809 // Loop back branch
// Short-form counted-loop back edge (single cbcond, no delay slot).
9810 instruct cmpI_reg_branchLoopEnd_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
9811 match(CountedLoopEnd cmp (CmpI op1 op2));
9812 predicate(UseCBCond);
9813 effect(USE labl, KILL icc);
9815 size(4);
9816 ins_cost(BRANCH_COST);
9817 format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %}
9818 ins_encode %{
9819 Label* L = $labl$$label;
9820 assert(__ use_cbcond(*L), "back to back cbcond");
9821 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9822 %}
9823 ins_short_branch(1);
9824 ins_avoid_back_to_back(1);
9825 ins_pipe(cbcond_reg_reg);
9826 %}
// Short-form counted-loop back edge against a 5-bit immediate.
9828 instruct cmpI_imm_branchLoopEnd_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
9829 match(CountedLoopEnd cmp (CmpI op1 op2));
9830 predicate(UseCBCond);
9831 effect(USE labl, KILL icc);
9833 size(4);
9834 ins_cost(BRANCH_COST);
9835 format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %}
9836 ins_encode %{
9837 Label* L = $labl$$label;
9838 assert(__ use_cbcond(*L), "back to back cbcond");
9839 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
9840 %}
9841 ins_short_branch(1);
9842 ins_avoid_back_to_back(1);
9843 ins_pipe(cbcond_reg_imm);
9844 %}
9846 // Branch-on-register tests all 64 bits. We assume that values
9847 // in 64-bit registers always remains zero or sign extended
9848 // unless our code munges the high bits. Interrupts can chop
9849 // the high order bits to zero or sign at any time.
// These instructs fold a compare-against-zero into a single BPr
// (branch on register) instruction; can_branch_register() gates
// which comparison/operand shapes qualify. Encoding is shared via enc_bpr.
9850 instruct branchCon_regI(cmpOp_reg cmp, iRegI op1, immI0 zero, label labl) %{
9851 match(If cmp (CmpI op1 zero));
9852 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
9853 effect(USE labl);
9855 size(8);
9856 ins_cost(BRANCH_COST);
9857 format %{ "BR$cmp $op1,$labl" %}
9858 ins_encode( enc_bpr( labl, cmp, op1 ) );
9859 ins_pipe(br_reg);
9860 %}
// Pointer variant: branch on register compared against NULL.
9862 instruct branchCon_regP(cmpOp_reg cmp, iRegP op1, immP0 null, label labl) %{
9863 match(If cmp (CmpP op1 null));
9864 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
9865 effect(USE labl);
9867 size(8);
9868 ins_cost(BRANCH_COST);
9869 format %{ "BR$cmp $op1,$labl" %}
9870 ins_encode( enc_bpr( labl, cmp, op1 ) );
9871 ins_pipe(br_reg);
9872 %}
// Long variant: branch on register compared against zero.
9874 instruct branchCon_regL(cmpOp_reg cmp, iRegL op1, immL0 zero, label labl) %{
9875 match(If cmp (CmpL op1 zero));
9876 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
9877 effect(USE labl);
9879 size(8);
9880 ins_cost(BRANCH_COST);
9881 format %{ "BR$cmp $op1,$labl" %}
9882 ins_encode( enc_bpr( labl, cmp, op1 ) );
9883 ins_pipe(br_reg);
9884 %}
9887 // ============================================================================
9888 // Long Compare
9889 //
9890 // Currently we hold longs in 2 registers. Comparing such values efficiently
9891 // is tricky. The flavor of compare used depends on whether we are testing
9892 // for LT, LE, or EQ. For a simple LT test we can check just the sign bit.
9893 // The GE test is the negated LT test. The LE test can be had by commuting
9894 // the operands (yielding a GE test) and then negating; negate again for the
9895 // GT test. The EQ test is done by ORcc'ing the high and low halves, and the
9896 // NE test is negated from that.
9898 // Due to a shortcoming in the ADLC, it mixes up expressions like:
9899 // (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)). Note the
9900 // difference between 'Y' and '0L'. The tree-matches for the CmpI sections
9901 // are collapsed internally in the ADLC's dfa-gen code. The match for
9902 // (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the
9903 // foo match ends up with the wrong leaf. One fix is to not match both
9904 // reg-reg and reg-zero forms of long-compare. This is unfortunate because
9905 // both forms beat the trinary form of long-compare and both are very useful
9906 // on Intel which has so few registers.
// Branch on an already-computed long (xcc) condition: BPcc + delay-slot NOP.
9908 instruct branchCon_long(cmpOp cmp, flagsRegL xcc, label labl) %{
9909 match(If cmp xcc);
9910 effect(USE labl);
9912 size(8);
9913 ins_cost(BRANCH_COST);
9914 format %{ "BP$cmp $xcc,$labl" %}
9915 ins_encode %{
9916 Label* L = $labl$$label;
9917 Assembler::Predict predict_taken =
9918 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9920 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
9921 __ delayed()->nop();
9922 %}
9923 ins_pipe(br_cc);
9924 %}
9926 // Manifest a CmpL3 result in an integer register. Very painful.
9927 // This is the test to avoid.
// Produces -1/0/+1 from a long comparison (six instructions, size(24));
// the actual sequence lives in the shared cmpl_flag encoding.
9928 instruct cmpL3_reg_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg ccr ) %{
9929 match(Set dst (CmpL3 src1 src2) );
9930 effect( KILL ccr );
9931 ins_cost(6*DEFAULT_COST);
9932 size(24);
9933 format %{ "CMP $src1,$src2\t\t! long\n"
9934 "\tBLT,a,pn done\n"
9935 "\tMOV -1,$dst\t! delay slot\n"
9936 "\tBGT,a,pn done\n"
9937 "\tMOV 1,$dst\t! delay slot\n"
9938 "\tCLR $dst\n"
9939 "done:" %}
9940 ins_encode( cmpl_flag(src1,src2,dst) );
9941 ins_pipe(cmpL_reg);
9942 %}
9944 // Conditional move
// Conditional moves predicated on the long (xcc) condition codes.
// All variants use the shared enc_cmov_reg/enc_cmov_imm/enc_cmovf_reg
// encodings; immediate forms are slightly cheaper (ins_cost 140 vs 150).
9945 instruct cmovLL_reg(cmpOp cmp, flagsRegL xcc, iRegL dst, iRegL src) %{
9946 match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
9947 ins_cost(150);
9948 format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
9949 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9950 ins_pipe(ialu_reg);
9951 %}
// Long cmove with zero immediate source (immL0).
9953 instruct cmovLL_imm(cmpOp cmp, flagsRegL xcc, iRegL dst, immL0 src) %{
9954 match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
9955 ins_cost(140);
9956 format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
9957 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
9958 ins_pipe(ialu_imm);
9959 %}
// Int cmove on xcc, register source.
9961 instruct cmovIL_reg(cmpOp cmp, flagsRegL xcc, iRegI dst, iRegI src) %{
9962 match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
9963 ins_cost(150);
9964 format %{ "MOV$cmp $xcc,$src,$dst" %}
9965 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9966 ins_pipe(ialu_reg);
9967 %}
// Int cmove on xcc, 11-bit immediate source (MOVcc simm11 field).
9969 instruct cmovIL_imm(cmpOp cmp, flagsRegL xcc, iRegI dst, immI11 src) %{
9970 match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
9971 ins_cost(140);
9972 format %{ "MOV$cmp $xcc,$src,$dst" %}
9973 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
9974 ins_pipe(ialu_imm);
9975 %}
// Compressed-oop cmove on xcc.
9977 instruct cmovNL_reg(cmpOp cmp, flagsRegL xcc, iRegN dst, iRegN src) %{
9978 match(Set dst (CMoveN (Binary cmp xcc) (Binary dst src)));
9979 ins_cost(150);
9980 format %{ "MOV$cmp $xcc,$src,$dst" %}
9981 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9982 ins_pipe(ialu_reg);
9983 %}
// Pointer cmove on xcc, register source.
9985 instruct cmovPL_reg(cmpOp cmp, flagsRegL xcc, iRegP dst, iRegP src) %{
9986 match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
9987 ins_cost(150);
9988 format %{ "MOV$cmp $xcc,$src,$dst" %}
9989 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9990 ins_pipe(ialu_reg);
9991 %}
// Pointer cmove with NULL immediate source.
9993 instruct cmovPL_imm(cmpOp cmp, flagsRegL xcc, iRegP dst, immP0 src) %{
9994 match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
9995 ins_cost(140);
9996 format %{ "MOV$cmp $xcc,$src,$dst" %}
9997 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
9998 ins_pipe(ialu_imm);
9999 %}
// Float cmove on xcc (FMOVS); opcode selects the single-precision form.
10001 instruct cmovFL_reg(cmpOp cmp, flagsRegL xcc, regF dst, regF src) %{
10002 match(Set dst (CMoveF (Binary cmp xcc) (Binary dst src)));
10003 ins_cost(150);
10004 opcode(0x101);
10005 format %{ "FMOVS$cmp $xcc,$src,$dst" %}
10006 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
10007 ins_pipe(int_conditional_float_move);
10008 %}
// Double cmove on xcc (FMOVD); opcode selects the double-precision form.
10010 instruct cmovDL_reg(cmpOp cmp, flagsRegL xcc, regD dst, regD src) %{
10011 match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src)));
10012 ins_cost(150);
10013 opcode(0x102);
10014 format %{ "FMOVD$cmp $xcc,$src,$dst" %}
10015 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
10016 ins_pipe(int_conditional_float_move);
10017 %}
10019 // ============================================================================
10020 // Safepoint Instruction
// Safepoint poll: a load from the polling page into G0 (discarded). When the
// VM arms the safepoint, the page is protected and the load traps. The
// relocate() call records the poll site for the signal handler.
10021 instruct safePoint_poll(iRegP poll) %{
10022 match(SafePoint poll);
10023 effect(USE poll);
10025 size(4);
10026 #ifdef _LP64
10027 format %{ "LDX [$poll],R_G0\t! Safepoint: poll for GC" %}
10028 #else
10029 format %{ "LDUW [$poll],R_G0\t! Safepoint: poll for GC" %}
10030 #endif
10031 ins_encode %{
10032 __ relocate(relocInfo::poll_type);
10033 __ ld_ptr($poll$$Register, 0, G0);
10034 %}
10035 ins_pipe(loadPollP);
10036 %}
10038 // ============================================================================
10039 // Call Instructions
10040 // Call Java Static Instruction
// Direct static Java call; excludes method-handle invokes (separate instruct
// below). CALL + delay-slot NOP, hence size(8).
10041 instruct CallStaticJavaDirect( method meth ) %{
10042 match(CallStaticJava);
10043 predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
10044 effect(USE meth);
10046 size(8);
10047 ins_cost(CALL_COST);
10048 format %{ "CALL,static ; NOP ==> " %}
10049 ins_encode( Java_Static_Call( meth ), call_epilog );
10050 ins_pipe(simple_call);
10051 %}
10053 // Call Java Static Instruction (method handle version)
// Method-handle call: preserves/restores SP around the call via L7
// (l7_mh_SP_save is killed), hence the larger size(16).
10054 instruct CallStaticJavaHandle(method meth, l7RegP l7_mh_SP_save) %{
10055 match(CallStaticJava);
10056 predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
10057 effect(USE meth, KILL l7_mh_SP_save);
10059 size(16);
10060 ins_cost(CALL_COST);
10061 format %{ "CALL,static/MethodHandle" %}
10062 ins_encode(preserve_SP, Java_Static_Call(meth), restore_SP, call_epilog);
10063 ins_pipe(simple_call);
10064 %}
10066 // Call Java Dynamic Instruction
// Virtual/interface call: the inline-cache value is materialized into G5
// (per the format string) before the call. No size() — length is variable.
10067 instruct CallDynamicJavaDirect( method meth ) %{
10068 match(CallDynamicJava);
10069 effect(USE meth);
10071 ins_cost(CALL_COST);
10072 format %{ "SET (empty),R_G5\n\t"
10073 "CALL,dynamic ; NOP ==> " %}
10074 ins_encode( Java_Dynamic_Call( meth ), call_epilog );
10075 ins_pipe(call);
10076 %}
10078 // Call Runtime Instruction
// Runtime call (with safepoint semantics); L7 is clobbered by the
// Java_To_Runtime encoding.
10079 instruct CallRuntimeDirect(method meth, l7RegP l7) %{
10080 match(CallRuntime);
10081 effect(USE meth, KILL l7);
10082 ins_cost(CALL_COST);
10083 format %{ "CALL,runtime" %}
10084 ins_encode( Java_To_Runtime( meth ),
10085 call_epilog, adjust_long_from_native_call );
10086 ins_pipe(simple_call);
10087 %}
10089 // Call runtime without safepoint - same as CallRuntime
10090 instruct CallLeafDirect(method meth, l7RegP l7) %{
10091 match(CallLeaf);
10092 effect(USE meth, KILL l7);
10093 ins_cost(CALL_COST);
10094 format %{ "CALL,runtime leaf" %}
10095 ins_encode( Java_To_Runtime( meth ),
10096 call_epilog,
10097 adjust_long_from_native_call );
10098 ins_pipe(simple_call);
10099 %}
10101 // Call runtime without safepoint - same as CallLeaf
// Leaf call that additionally does not touch the FPU (CallLeafNoFP node).
10102 instruct CallLeafNoFPDirect(method meth, l7RegP l7) %{
10103 match(CallLeafNoFP);
10104 effect(USE meth, KILL l7);
10105 ins_cost(CALL_COST);
10106 format %{ "CALL,runtime leaf nofp" %}
10107 ins_encode( Java_To_Runtime( meth ),
10108 call_epilog,
10109 adjust_long_from_native_call );
10110 ins_pipe(simple_call);
10111 %}
10113 // Tail Call; Jump from runtime stub to Java code.
10114 // Also known as an 'interprocedural jump'.
10115 // Target of jump will eventually return to caller.
10116 // TailJump below removes the return address.
// Register-indirect jump through G3; the method oop rides along in the
// inline-cache register per the calling convention (see format string).
10117 instruct TailCalljmpInd(g3RegP jump_target, inline_cache_regP method_oop) %{
10118 match(TailCall jump_target method_oop );
10120 ins_cost(CALL_COST);
10121 format %{ "Jmp $jump_target ; NOP \t! $method_oop holds method oop" %}
10122 ins_encode(form_jmpl(jump_target));
10123 ins_pipe(tail_call);
10124 %}
10127 // Return Instruction
// Emits nothing: the frame epilogue (emitted elsewhere) already performed
// the actual ret/restore, so this node is size(0).
10128 instruct Ret() %{
10129 match(Return);
10131 // The epilogue node did the ret already.
10132 size(0);
10133 format %{ "! return" %}
10134 ins_encode();
10135 ins_pipe(empty);
10136 %}
10139 // Tail Jump; remove the return address; jump to target.
10140 // TailCall above leaves the return address around.
10141 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
10142 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
10143 // "restore" before this instruction (in Epilogue), we need to materialize it
10144 // in %i0.
10145 instruct tailjmpInd(g1RegP jump_target, i0RegP ex_oop) %{
10146 match( TailJump jump_target ex_oop );
10147 ins_cost(CALL_COST);
10148 format %{ "! discard R_O7\n\t"
10149 "Jmp $jump_target ; ADD O7,8,O1 \t! $ex_oop holds exc. oop" %}
10150 ins_encode(form_jmpl_set_exception_pc(jump_target));
10151 // opcode(Assembler::jmpl_op3, Assembler::arith_op);
10152 // The hack duplicates the exception oop into G3, so that CreateEx can use it there.
10153 // ins_encode( form3_rs1_simm13_rd( jump_target, 0x00, R_G0 ), move_return_pc_to_o1() );
10154 ins_pipe(tail_call);
10155 %}
10157 // Create exception oop: created by stack-crawling runtime code.
10158 // Created exception is now available to this handler, and is setup
10159 // just prior to jumping to this handler. No code emitted.
10160 instruct CreateException( o0RegP ex_oop )
10161 %{
10162 match(Set ex_oop (CreateEx));
10163 ins_cost(0);
10165 size(0);
10166 // use the following format syntax
10167 format %{ "! exception oop is in R_O0; no code emitted" %}
10168 ins_encode();
10169 ins_pipe(empty);
10170 %}
10173 // Rethrow exception:
10174 // The exception oop will come in the first argument position.
10175 // Then JUMP (not call) to the rethrow stub code.
10176 instruct RethrowException()
10177 %{
10178 match(Rethrow);
10179 ins_cost(CALL_COST);
10181 // use the following format syntax
10182 format %{ "Jmp rethrow_stub" %}
10183 ins_encode(enc_rethrow);
10184 ins_pipe(tail_call);
10185 %}
10188 // Die now
// Halt node: emits a single illegal-trap instruction (form2_illtrap) so
// execution that reaches an "impossible" point faults immediately.
10189 instruct ShouldNotReachHere( )
10190 %{
10191 match(Halt);
10192 ins_cost(CALL_COST);
10194 size(4);
10195 // Use the following format syntax
10196 format %{ "ILLTRAP ; ShouldNotReachHere" %}
10197 ins_encode( form2_illtrap() );
10198 ins_pipe(tail_call);
10199 %}
10201 // ============================================================================
10202 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
10203 // array for an instance of the superklass. Set a hidden internal cache on a
10204 // hit (cache is checked with exposed code in gen_subtype_check()). Return
10205 // not zero for a miss or zero for a hit. The encoding ALSO sets flags.
// Operands are pinned to the O0-O2/O7 registers required by the stub's
// calling convention (see enc_PartialSubtypeCheck).
10206 instruct partialSubtypeCheck( o0RegP index, o1RegP sub, o2RegP super, flagsRegP pcc, o7RegP o7 ) %{
10207 match(Set index (PartialSubtypeCheck sub super));
10208 effect( KILL pcc, KILL o7 );
10209 ins_cost(DEFAULT_COST*10);
10210 format %{ "CALL PartialSubtypeCheck\n\tNOP" %}
10211 ins_encode( enc_PartialSubtypeCheck() );
10212 ins_pipe(partial_subtype_check_pipe);
10213 %}
// Variant matched when only the condition codes of the subtype check are
// consumed (result compared against zero); the index register is clobbered.
10215 instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, immP0 zero, o0RegP idx, o7RegP o7 ) %{
10216 match(Set pcc (CmpP (PartialSubtypeCheck sub super) zero));
10217 effect( KILL idx, KILL o7 );
10218 ins_cost(DEFAULT_COST*10);
10219 format %{ "CALL PartialSubtypeCheck\n\tNOP\t# (sets condition codes)" %}
10220 ins_encode( enc_PartialSubtypeCheck() );
10221 ins_pipe(partial_subtype_check_pipe);
10222 %}
10225 // ============================================================================
10226 // inlined locking and unlocking
// Inline fast-path monitor enter; slow path is in the Fast_Lock encoding.
// O7 is clobbered and scratch2 is a true temp.
10228 instruct cmpFastLock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, o7RegP scratch ) %{
10229 match(Set pcc (FastLock object box));
10231 effect(KILL scratch, TEMP scratch2);
10232 ins_cost(100);
10234 format %{ "FASTLOCK $object, $box; KILL $scratch, $scratch2, $box" %}
10235 ins_encode( Fast_Lock(object, box, scratch, scratch2) );
10236 ins_pipe(long_memory_op);
10237 %}
// Inline fast-path monitor exit; mirrors cmpFastLock above.
10240 instruct cmpFastUnlock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, o7RegP scratch ) %{
10241 match(Set pcc (FastUnlock object box));
10242 effect(KILL scratch, TEMP scratch2);
10243 ins_cost(100);
10245 format %{ "FASTUNLOCK $object, $box; KILL $scratch, $scratch2, $box" %}
10246 ins_encode( Fast_Unlock(object, box, scratch, scratch2) );
10247 ins_pipe(long_memory_op);
10248 %}
10250 // The encodings are generic.
// ClearArray fallback: STX loop zeroing 8 bytes per iteration, walking
// backwards from cnt to 0. Chosen when block-init-store (BIS) zeroing is
// not profitable/available for this size (see use_block_zeroing predicate;
// n->in(2) is the count input).
10251 instruct clear_array(iRegX cnt, iRegP base, iRegX temp, Universe dummy, flagsReg ccr) %{
10252 predicate(!use_block_zeroing(n->in(2)) );
10253 match(Set dummy (ClearArray cnt base));
10254 effect(TEMP temp, KILL ccr);
10255 ins_cost(300);
10256 format %{ "MOV $cnt,$temp\n"
10257 "loop: SUBcc $temp,8,$temp\t! Count down a dword of bytes\n"
10258 " BRge loop\t\t! Clearing loop\n"
10259 " STX G0,[$base+$temp]\t! delay slot" %}
10261 ins_encode %{
10262 // Compiler ensures base is doubleword aligned and cnt is count of doublewords
10263 Register nof_bytes_arg = $cnt$$Register;
10264 Register nof_bytes_tmp = $temp$$Register;
10265 Register base_pointer_arg = $base$$Register;
10267 Label loop;
10268 __ mov(nof_bytes_arg, nof_bytes_tmp);
10270 // Loop and clear, walking backwards through the array.
10271 // nof_bytes_tmp (if >0) is always the number of bytes to zero
10272 __ bind(loop);
10273 __ deccc(nof_bytes_tmp, 8);
10274 __ br(Assembler::greaterEqual, true, Assembler::pt, loop);
10275 __ delayed()-> stx(G0, base_pointer_arg, nof_bytes_tmp);
10276 // %%%% this mini-loop must not cross a cache boundary!
10277 %}
10278 ins_pipe(long_memory_op);
10279 %}
// ClearArray via BIS (block-initializing store) zeroing; cnt and base are
// destroyed (USE_KILL) since bis_zeroing advances them in place. The leading
// nop keeps short branches from being emitted back to back.
10281 instruct clear_array_bis(g1RegX cnt, o0RegP base, Universe dummy, flagsReg ccr) %{
10282 predicate(use_block_zeroing(n->in(2)));
10283 match(Set dummy (ClearArray cnt base));
10284 effect(USE_KILL cnt, USE_KILL base, KILL ccr);
10285 ins_cost(300);
10286 format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}
10288 ins_encode %{
10290 assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
10291 Register to = $base$$Register;
10292 Register count = $cnt$$Register;
10294 Label Ldone;
10295 __ nop(); // Separate short branches
10296 // Use BIS for zeroing (temp is not used).
10297 __ bis_zeroing(to, count, G0, Ldone);
10298 __ bind(Ldone);
10300 %}
10301 ins_pipe(long_memory_op);
10302 %}
// BIS zeroing variant needing an extra temp: used when BlockZeroingLowLimit
// does not fit a simm13, so the threshold must be materialized in a register.
10304 instruct clear_array_bis_2(g1RegX cnt, o0RegP base, iRegX tmp, Universe dummy, flagsReg ccr) %{
10305 predicate(use_block_zeroing(n->in(2)) && !Assembler::is_simm13((int)BlockZeroingLowLimit));
10306 match(Set dummy (ClearArray cnt base));
10307 effect(TEMP tmp, USE_KILL cnt, USE_KILL base, KILL ccr);
10308 ins_cost(300);
10309 format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}
10311 ins_encode %{
10313 assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
10314 Register to = $base$$Register;
10315 Register count = $cnt$$Register;
10316 Register temp = $tmp$$Register;
10318 Label Ldone;
10319 __ nop(); // Separate short branches
10320 // Use BIS for zeroing
10321 __ bis_zeroing(to, count, temp, Ldone);
10322 __ bind(Ldone);
10324 %}
10325 ins_pipe(long_memory_op);
10326 %}
// String.compareTo intrinsic. All inputs are pinned to the out/global
// registers required by the shared enc_String_Compare stub encoding and
// are destroyed by it (USE_KILL).
10328 instruct string_compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
10329 o7RegI tmp, flagsReg ccr) %{
10330 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
10331 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp);
10332 ins_cost(300);
10333 format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp" %}
10334 ins_encode( enc_String_Compare(str1, str2, cnt1, cnt2, result) );
10335 ins_pipe(long_memory_op);
10336 %}
// String.equals intrinsic (single shared count); same register-pinning
// pattern as string_compare.
10338 instruct string_equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result,
10339 o7RegI tmp, flagsReg ccr) %{
10340 match(Set result (StrEquals (Binary str1 str2) cnt));
10341 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp, KILL ccr);
10342 ins_cost(300);
10343 format %{ "String Equals $str1,$str2,$cnt -> $result // KILL $tmp" %}
10344 ins_encode( enc_String_Equals(str1, str2, cnt, result) );
10345 ins_pipe(long_memory_op);
10346 %}
// Arrays.equals intrinsic over two array oops.
10348 instruct array_equals(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI result,
10349 o7RegI tmp2, flagsReg ccr) %{
10350 match(Set result (AryEq ary1 ary2));
10351 effect(USE_KILL ary1, USE_KILL ary2, KILL tmp1, KILL tmp2, KILL ccr);
10352 ins_cost(300);
10353 format %{ "Array Equals $ary1,$ary2 -> $result // KILL $tmp1,$tmp2" %}
10354 ins_encode( enc_Array_Equals(ary1, ary2, tmp1, result));
10355 ins_pipe(long_memory_op);
10356 %}
10359 //---------- Zeros Count Instructions ------------------------------------------
10361 instruct countLeadingZerosI(iRegI dst, iRegI src, iRegI tmp, flagsReg cr) %{
10362 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
10363 match(Set dst (CountLeadingZerosI src));
10364 effect(TEMP dst, TEMP tmp, KILL cr);
10366 // x |= (x >> 1);
10367 // x |= (x >> 2);
10368 // x |= (x >> 4);
10369 // x |= (x >> 8);
10370 // x |= (x >> 16);
10371 // return (WORDBITS - popc(x));
10372 format %{ "SRL $src,1,$tmp\t! count leading zeros (int)\n\t"
10373 "SRL $src,0,$dst\t! 32-bit zero extend\n\t"
10374 "OR $dst,$tmp,$dst\n\t"
10375 "SRL $dst,2,$tmp\n\t"
10376 "OR $dst,$tmp,$dst\n\t"
10377 "SRL $dst,4,$tmp\n\t"
10378 "OR $dst,$tmp,$dst\n\t"
10379 "SRL $dst,8,$tmp\n\t"
10380 "OR $dst,$tmp,$dst\n\t"
10381 "SRL $dst,16,$tmp\n\t"
10382 "OR $dst,$tmp,$dst\n\t"
10383 "POPC $dst,$dst\n\t"
10384 "MOV 32,$tmp\n\t"
10385 "SUB $tmp,$dst,$dst" %}
10386 ins_encode %{
10387 Register Rdst = $dst$$Register;
10388 Register Rsrc = $src$$Register;
10389 Register Rtmp = $tmp$$Register;
10390 __ srl(Rsrc, 1, Rtmp);
10391 __ srl(Rsrc, 0, Rdst);
10392 __ or3(Rdst, Rtmp, Rdst);
10393 __ srl(Rdst, 2, Rtmp);
10394 __ or3(Rdst, Rtmp, Rdst);
10395 __ srl(Rdst, 4, Rtmp);
10396 __ or3(Rdst, Rtmp, Rdst);
10397 __ srl(Rdst, 8, Rtmp);
10398 __ or3(Rdst, Rtmp, Rdst);
10399 __ srl(Rdst, 16, Rtmp);
10400 __ or3(Rdst, Rtmp, Rdst);
10401 __ popc(Rdst, Rdst);
10402 __ mov(BitsPerInt, Rtmp);
10403 __ sub(Rtmp, Rdst, Rdst);
10404 %}
10405 ins_pipe(ialu_reg);
10406 %}
10408 instruct countLeadingZerosL(iRegIsafe dst, iRegL src, iRegL tmp, flagsReg cr) %{
10409 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
10410 match(Set dst (CountLeadingZerosL src));
10411 effect(TEMP dst, TEMP tmp, KILL cr);
10413 // x |= (x >> 1);
10414 // x |= (x >> 2);
10415 // x |= (x >> 4);
10416 // x |= (x >> 8);
10417 // x |= (x >> 16);
10418 // x |= (x >> 32);
10419 // return (WORDBITS - popc(x));
10420 format %{ "SRLX $src,1,$tmp\t! count leading zeros (long)\n\t"
10421 "OR $src,$tmp,$dst\n\t"
10422 "SRLX $dst,2,$tmp\n\t"
10423 "OR $dst,$tmp,$dst\n\t"
10424 "SRLX $dst,4,$tmp\n\t"
10425 "OR $dst,$tmp,$dst\n\t"
10426 "SRLX $dst,8,$tmp\n\t"
10427 "OR $dst,$tmp,$dst\n\t"
10428 "SRLX $dst,16,$tmp\n\t"
10429 "OR $dst,$tmp,$dst\n\t"
10430 "SRLX $dst,32,$tmp\n\t"
10431 "OR $dst,$tmp,$dst\n\t"
10432 "POPC $dst,$dst\n\t"
10433 "MOV 64,$tmp\n\t"
10434 "SUB $tmp,$dst,$dst" %}
10435 ins_encode %{
10436 Register Rdst = $dst$$Register;
10437 Register Rsrc = $src$$Register;
10438 Register Rtmp = $tmp$$Register;
10439 __ srlx(Rsrc, 1, Rtmp);
10440 __ or3( Rsrc, Rtmp, Rdst);
10441 __ srlx(Rdst, 2, Rtmp);
10442 __ or3( Rdst, Rtmp, Rdst);
10443 __ srlx(Rdst, 4, Rtmp);
10444 __ or3( Rdst, Rtmp, Rdst);
10445 __ srlx(Rdst, 8, Rtmp);
10446 __ or3( Rdst, Rtmp, Rdst);
10447 __ srlx(Rdst, 16, Rtmp);
10448 __ or3( Rdst, Rtmp, Rdst);
10449 __ srlx(Rdst, 32, Rtmp);
10450 __ or3( Rdst, Rtmp, Rdst);
10451 __ popc(Rdst, Rdst);
10452 __ mov(BitsPerLong, Rtmp);
10453 __ sub(Rtmp, Rdst, Rdst);
10454 %}
10455 ins_pipe(ialu_reg);
10456 %}
// Count trailing zeros of an int via popc(~x & (x - 1)): (x - 1) flips
// the lowest set bit and every bit below it, and ANDN with the original
// value keeps only the bits below that lowest set bit; their population
// count is the number of trailing zeros.
instruct countTrailingZerosI(iRegI dst, iRegI src, flagsReg cr) %{
  predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
  match(Set dst (CountTrailingZerosI src));
  effect(TEMP dst, KILL cr);

  // return popc(~x & (x - 1));
  format %{ "SUB $src,1,$dst\t! count trailing zeros (int)\n\t"
            "ANDN $dst,$src,$dst\n\t"
            "SRL $dst,R_G0,$dst\n\t"
            "POPC $dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    __ sub(Rsrc, 1, Rdst);
    __ andn(Rdst, Rsrc, Rdst);
    // 32-bit shift right by G0 (i.e. by 0) zero-extends the low word so
    // that the 64-bit POPC below counts only the low 32 bits.
    __ srl(Rdst, G0, Rdst);
    __ popc(Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}
// Count trailing zeros of a long via popc(~x & (x - 1)); see the int
// variant above for the derivation.  No zero-extension step is needed
// here because the full 64-bit value is being counted.
instruct countTrailingZerosL(iRegI dst, iRegL src, flagsReg cr) %{
  predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
  match(Set dst (CountTrailingZerosL src));
  effect(TEMP dst, KILL cr);

  // return popc(~x & (x - 1));
  format %{ "SUB $src,1,$dst\t! count trailing zeros (long)\n\t"
            "ANDN $dst,$src,$dst\n\t"
            "POPC $dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    __ sub(Rsrc, 1, Rdst);
    __ andn(Rdst, Rsrc, Rdst);
    __ popc(Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}
10499 //---------- Population Count Instructions -------------------------------------
// Population count of an int, using the hardware POPC instruction.
instruct popCountI(iRegI dst, iRegI src) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));

  format %{ "POPC $src, $dst" %}
  ins_encode %{
    // Named locals for readability, matching the style of the
    // count-leading/trailing-zeros instructs above.
    Register Rsrc = $src$$Register;
    Register Rdst = $dst$$Register;
    __ popc(Rsrc, Rdst);
  %}
  ins_pipe(ialu_reg);
%}
// Population count of a long, using the hardware POPC instruction.
// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegI dst, iRegL src) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));

  format %{ "POPC $src, $dst" %}
  ins_encode %{
    // Named locals for readability, matching the style of the
    // count-leading/trailing-zeros instructs above.
    Register Rsrc = $src$$Register;
    Register Rdst = $dst$$Register;
    __ popc(Rsrc, Rdst);
  %}
  ins_pipe(ialu_reg);
%}
10525 // ============================================================================
10526 //------------Bytes reverse--------------------------------------------------
// Reverse the bytes of an int: the stackSlotI operand forces the value
// onto the stack, then it is reloaded through the little-endian ASI,
// which performs the byte swap as part of the load.
instruct bytes_reverse_int(iRegI dst, stackSlotI src) %{
  match(Set dst (ReverseBytesI src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDUWA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    // O7 serves as scratch for the biased stack offset (register+register
    // addressing is required for alternate-space loads).
    __ set($src$$disp + STACK_BIAS, O7);
    __ lduwa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}
// Reverse the bytes of a long: spill to the stack (forced by the
// stackSlotL operand) and reload through the little-endian ASI.
instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{
  match(Set dst (ReverseBytesL src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDXA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    // O7 serves as scratch for the biased stack offset.
    __ set($src$$disp + STACK_BIAS, O7);
    __ ldxa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}
// Reverse the bytes of an unsigned short/char: spill to the stack and
// reload the halfword through the little-endian ASI.
instruct bytes_reverse_unsigned_short(iRegI dst, stackSlotI src) %{
  match(Set dst (ReverseBytesUS src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDUHA $src, $dst\t!asi=primary_little\n\t" %}

  ins_encode %{
    // the value was spilled as an int so bias the load by 2 to address
    // the halfword within the 4-byte slot
    __ set($src$$disp + STACK_BIAS + 2, O7);
    __ lduha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}
// Reverse the bytes of a (signed) short: spill to the stack and reload
// the halfword sign-extended through the little-endian ASI.
instruct bytes_reverse_short(iRegI dst, stackSlotI src) %{
  match(Set dst (ReverseBytesS src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDSHA $src, $dst\t!asi=primary_little\n\t" %}

  ins_encode %{
    // the value was spilled as an int so bias the load by 2 to address
    // the halfword within the 4-byte slot
    __ set($src$$disp + STACK_BIAS + 2, O7);
    __ ldsha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}
// Load Integer reversed byte order
instruct loadI_reversed(iRegI dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesI (LoadI src)));

  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  size(4);
  format %{ "LDUWA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    // A single alternate-space load through the little-endian ASI folds
    // the load and the byte swap into one instruction.
    Register Rbase  = $src$$base$$Register;
    Register Rindex = $src$$index$$Register;
    __ lduwa(Rbase, Rindex, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}
// Load Long - aligned and reversed
instruct loadL_reversed(iRegL dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesL (LoadL src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDXA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    // A single alternate-space load through the little-endian ASI folds
    // the load and the byte swap into one instruction.
    Register Rbase  = $src$$base$$Register;
    Register Rindex = $src$$index$$Register;
    __ ldxa(Rbase, Rindex, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}
// Load unsigned short / char reversed byte order
instruct loadUS_reversed(iRegI dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesUS (LoadUS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    // A single alternate-space load through the little-endian ASI folds
    // the load and the byte swap into one instruction.
    Register Rbase  = $src$$base$$Register;
    Register Rindex = $src$$index$$Register;
    __ lduha(Rbase, Rindex, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}
// Load short reversed byte order
instruct loadS_reversed(iRegI dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesS (LoadS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDSHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    // A single alternate-space load through the little-endian ASI folds
    // the load and the byte swap into one instruction.
    Register Rbase  = $src$$base$$Register;
    Register Rindex = $src$$index$$Register;
    __ ldsha(Rbase, Rindex, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}
// Store Integer reversed byte order
instruct storeI_reversed(indIndexMemory dst, iRegI src) %{
  match(Set dst (StoreI dst (ReverseBytesI src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STWA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    // An alternate-space store through the little-endian ASI writes the
    // bytes swapped, folding store and byte reversal into one instruction.
    Register Rbase  = $dst$$base$$Register;
    Register Rindex = $dst$$index$$Register;
    __ stwa($src$$Register, Rbase, Rindex, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}
// Store Long reversed byte order
instruct storeL_reversed(indIndexMemory dst, iRegL src) %{
  match(Set dst (StoreL dst (ReverseBytesL src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STXA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    // An alternate-space store through the little-endian ASI writes the
    // bytes swapped, folding store and byte reversal into one instruction.
    Register Rbase  = $dst$$base$$Register;
    Register Rindex = $dst$$index$$Register;
    __ stxa($src$$Register, Rbase, Rindex, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}
// Store unsigned short/char reversed byte order: the alternate-space
// store through the little-endian ASI writes the bytes swapped.
instruct storeUS_reversed(indIndexMemory dst, iRegI src) %{
  match(Set dst (StoreC dst (ReverseBytesUS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}
// Store short reversed byte order
instruct storeS_reversed(indIndexMemory dst, iRegI src) %{
  match(Set dst (StoreC dst (ReverseBytesS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    // An alternate-space store through the little-endian ASI writes the
    // bytes swapped, folding store and byte reversal into one instruction.
    Register Rbase  = $dst$$base$$Register;
    Register Rindex = $dst$$index$$Register;
    __ stha($src$$Register, Rbase, Rindex, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}
10706 //----------PEEPHOLE RULES-----------------------------------------------------
10707 // These must follow all instruction definitions as they use the names
10708 // defined in the instructions definitions.
10709 //
10710 // peepmatch ( root_instr_name [preceding_instruction]* );
10711 //
10712 // peepconstraint %{
10713 // (instruction_number.operand_name relational_op instruction_number.operand_name
10714 // [, ...] );
10715 // // instruction numbers are zero-based using left to right order in peepmatch
10716 //
10717 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
10718 // // provide an instruction_number.operand_name for each operand that appears
10719 // // in the replacement instruction's match rule
10720 //
10721 // ---------VM FLAGS---------------------------------------------------------
10722 //
10723 // All peephole optimizations can be turned off using -XX:-OptoPeephole
10724 //
10725 // Each peephole rule is given an identifying number starting with zero and
10726 // increasing by one in the order seen by the parser. An individual peephole
10727 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
10728 // on the command-line.
10729 //
10730 // ---------CURRENT LIMITATIONS----------------------------------------------
10731 //
10732 // Only match adjacent instructions in same basic block
10733 // Only equality constraints
10734 // Only constraints between operands, not (0.dest_reg == EAX_enc)
10735 // Only one replacement instruction
10736 //
10737 // ---------EXAMPLE----------------------------------------------------------
10738 //
10739 // // pertinent parts of existing instructions in architecture description
10740 // instruct movI(eRegI dst, eRegI src) %{
10741 // match(Set dst (CopyI src));
10742 // %}
10743 //
10744 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
10745 // match(Set dst (AddI dst src));
10746 // effect(KILL cr);
10747 // %}
10748 //
10749 // // Change (inc mov) to lea
10750 // peephole %{
// // increment preceded by register-register move
10752 // peepmatch ( incI_eReg movI );
10753 // // require that the destination register of the increment
10754 // // match the destination register of the move
10755 // peepconstraint ( 0.dst == 1.dst );
10756 // // construct a replacement instruction that sets
10757 // // the destination to ( move's source register + one )
10758 // peepreplace ( incI_eReg_immI1( 0.dst 1.src 0.src ) );
10759 // %}
10760 //
10762 // // Change load of spilled value to only a spill
10763 // instruct storeI(memory mem, eRegI src) %{
10764 // match(Set mem (StoreI mem src));
10765 // %}
10766 //
10767 // instruct loadI(eRegI dst, memory mem) %{
10768 // match(Set dst (LoadI mem));
10769 // %}
10770 //
10771 // peephole %{
10772 // peepmatch ( loadI storeI );
10773 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
10774 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
10775 // %}
10777 //----------SMARTSPILL RULES---------------------------------------------------
10778 // These must follow all instruction definitions as they use the names
10779 // defined in the instructions definitions.
10780 //
10781 // SPARC will probably not have any of these rules due to RISC instruction set.
10783 //----------PIPELINE-----------------------------------------------------------
10784 // Rules which define the behavior of the target architectures pipeline.