Tue, 11 Sep 2012 20:20:38 -0400
7196681: NPG: Some JSR 292 tests crash in Windows exception handler
Summary: There was a rogue os::breakpoint() call in log_dependency left over from the jsr292 merge. Also changed verify_oop() calls for metadata to verify_{method,klass}_ptr.
Reviewed-by: kvn, twisti
1 //
2 // Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 //
5 // This code is free software; you can redistribute it and/or modify it
6 // under the terms of the GNU General Public License version 2 only, as
7 // published by the Free Software Foundation.
8 //
9 // This code is distributed in the hope that it will be useful, but WITHOUT
10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 // version 2 for more details (a copy is included in the LICENSE file that
13 // accompanied this code).
14 //
15 // You should have received a copy of the GNU General Public License version
16 // 2 along with this work; if not, write to the Free Software Foundation,
17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 //
19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 // or visit www.oracle.com if you need additional information or have any
21 // questions.
22 //
23 //
25 // SPARC Architecture Description File
27 //----------REGISTER DEFINITION BLOCK------------------------------------------
28 // This information is used by the matcher and the register allocator to
29 // describe individual registers and classes of registers within the target
// architecture.
register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def"  name ( register save type, C convention save type,
//                   ideal register type, encoding, vm name );
// Register Save Types:
//
// NS  = No-Save:       The register allocator assumes that these registers
//                      can be used without saving upon entry to the method, &
//                      that they do not need to be saved at call sites.
//
// SOC = Save-On-Call:  The register allocator assumes that these registers
//                      can be used without saving upon entry to the method,
//                      but that they must be saved at call sites.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, but they do not need to be saved at call
//                      sites.
//
// AS  = Always-Save:   The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
//
// The encoding number is the actual bit-pattern placed into the opcodes.
// (Note: the "H" hi-half integer registers below are encoded from 128 up,
// disjoint from the 0-31 encodings of the low halves.)

// ----------------------------
// Integer/Long Registers
// ----------------------------

// Need to expose the hi/lo aspect of 64-bit registers
// This register set is used for both the 64-bit build and
// the 32-bit build with 1-register longs.

// Global Registers 0-7
reg_def R_G0H( NS,  NS, Op_RegI,128, G0->as_VMReg()->next());
reg_def R_G0 ( NS,  NS, Op_RegI,  0, G0->as_VMReg());
reg_def R_G1H(SOC, SOC, Op_RegI,129, G1->as_VMReg()->next());
reg_def R_G1 (SOC, SOC, Op_RegI,  1, G1->as_VMReg());
reg_def R_G2H( NS,  NS, Op_RegI,130, G2->as_VMReg()->next());
reg_def R_G2 ( NS,  NS, Op_RegI,  2, G2->as_VMReg());
reg_def R_G3H(SOC, SOC, Op_RegI,131, G3->as_VMReg()->next());
reg_def R_G3 (SOC, SOC, Op_RegI,  3, G3->as_VMReg());
reg_def R_G4H(SOC, SOC, Op_RegI,132, G4->as_VMReg()->next());
reg_def R_G4 (SOC, SOC, Op_RegI,  4, G4->as_VMReg());
reg_def R_G5H(SOC, SOC, Op_RegI,133, G5->as_VMReg()->next());
reg_def R_G5 (SOC, SOC, Op_RegI,  5, G5->as_VMReg());
reg_def R_G6H( NS,  NS, Op_RegI,134, G6->as_VMReg()->next());
reg_def R_G6 ( NS,  NS, Op_RegI,  6, G6->as_VMReg());
reg_def R_G7H( NS,  NS, Op_RegI,135, G7->as_VMReg()->next());
reg_def R_G7 ( NS,  NS, Op_RegI,  7, G7->as_VMReg());

// Output Registers 0-7
reg_def R_O0H(SOC, SOC, Op_RegI,136, O0->as_VMReg()->next());
reg_def R_O0 (SOC, SOC, Op_RegI,  8, O0->as_VMReg());
reg_def R_O1H(SOC, SOC, Op_RegI,137, O1->as_VMReg()->next());
reg_def R_O1 (SOC, SOC, Op_RegI,  9, O1->as_VMReg());
reg_def R_O2H(SOC, SOC, Op_RegI,138, O2->as_VMReg()->next());
reg_def R_O2 (SOC, SOC, Op_RegI, 10, O2->as_VMReg());
reg_def R_O3H(SOC, SOC, Op_RegI,139, O3->as_VMReg()->next());
reg_def R_O3 (SOC, SOC, Op_RegI, 11, O3->as_VMReg());
reg_def R_O4H(SOC, SOC, Op_RegI,140, O4->as_VMReg()->next());
reg_def R_O4 (SOC, SOC, Op_RegI, 12, O4->as_VMReg());
reg_def R_O5H(SOC, SOC, Op_RegI,141, O5->as_VMReg()->next());
reg_def R_O5 (SOC, SOC, Op_RegI, 13, O5->as_VMReg());
reg_def R_SPH( NS,  NS, Op_RegI,142, SP->as_VMReg()->next());
reg_def R_SP ( NS,  NS, Op_RegI, 14, SP->as_VMReg());
reg_def R_O7H(SOC, SOC, Op_RegI,143, O7->as_VMReg()->next());
reg_def R_O7 (SOC, SOC, Op_RegI, 15, O7->as_VMReg());

// Local Registers 0-7
reg_def R_L0H( NS,  NS, Op_RegI,144, L0->as_VMReg()->next());
reg_def R_L0 ( NS,  NS, Op_RegI, 16, L0->as_VMReg());
reg_def R_L1H( NS,  NS, Op_RegI,145, L1->as_VMReg()->next());
reg_def R_L1 ( NS,  NS, Op_RegI, 17, L1->as_VMReg());
reg_def R_L2H( NS,  NS, Op_RegI,146, L2->as_VMReg()->next());
reg_def R_L2 ( NS,  NS, Op_RegI, 18, L2->as_VMReg());
reg_def R_L3H( NS,  NS, Op_RegI,147, L3->as_VMReg()->next());
reg_def R_L3 ( NS,  NS, Op_RegI, 19, L3->as_VMReg());
reg_def R_L4H( NS,  NS, Op_RegI,148, L4->as_VMReg()->next());
reg_def R_L4 ( NS,  NS, Op_RegI, 20, L4->as_VMReg());
reg_def R_L5H( NS,  NS, Op_RegI,149, L5->as_VMReg()->next());
reg_def R_L5 ( NS,  NS, Op_RegI, 21, L5->as_VMReg());
reg_def R_L6H( NS,  NS, Op_RegI,150, L6->as_VMReg()->next());
reg_def R_L6 ( NS,  NS, Op_RegI, 22, L6->as_VMReg());
reg_def R_L7H( NS,  NS, Op_RegI,151, L7->as_VMReg()->next());
reg_def R_L7 ( NS,  NS, Op_RegI, 23, L7->as_VMReg());

// Input Registers 0-7
reg_def R_I0H( NS,  NS, Op_RegI,152, I0->as_VMReg()->next());
reg_def R_I0 ( NS,  NS, Op_RegI, 24, I0->as_VMReg());
reg_def R_I1H( NS,  NS, Op_RegI,153, I1->as_VMReg()->next());
reg_def R_I1 ( NS,  NS, Op_RegI, 25, I1->as_VMReg());
reg_def R_I2H( NS,  NS, Op_RegI,154, I2->as_VMReg()->next());
reg_def R_I2 ( NS,  NS, Op_RegI, 26, I2->as_VMReg());
reg_def R_I3H( NS,  NS, Op_RegI,155, I3->as_VMReg()->next());
reg_def R_I3 ( NS,  NS, Op_RegI, 27, I3->as_VMReg());
reg_def R_I4H( NS,  NS, Op_RegI,156, I4->as_VMReg()->next());
reg_def R_I4 ( NS,  NS, Op_RegI, 28, I4->as_VMReg());
reg_def R_I5H( NS,  NS, Op_RegI,157, I5->as_VMReg()->next());
reg_def R_I5 ( NS,  NS, Op_RegI, 29, I5->as_VMReg());
reg_def R_FPH( NS,  NS, Op_RegI,158, FP->as_VMReg()->next());
reg_def R_FP ( NS,  NS, Op_RegI, 30, FP->as_VMReg());
reg_def R_I7H( NS,  NS, Op_RegI,159, I7->as_VMReg()->next());
reg_def R_I7 ( NS,  NS, Op_RegI, 31, I7->as_VMReg());

// ----------------------------
// Float/Double Registers
// ----------------------------

// Float Registers
reg_def R_F0 ( SOC, SOC, Op_RegF,  0, F0->as_VMReg());
reg_def R_F1 ( SOC, SOC, Op_RegF,  1, F1->as_VMReg());
reg_def R_F2 ( SOC, SOC, Op_RegF,  2, F2->as_VMReg());
reg_def R_F3 ( SOC, SOC, Op_RegF,  3, F3->as_VMReg());
reg_def R_F4 ( SOC, SOC, Op_RegF,  4, F4->as_VMReg());
reg_def R_F5 ( SOC, SOC, Op_RegF,  5, F5->as_VMReg());
reg_def R_F6 ( SOC, SOC, Op_RegF,  6, F6->as_VMReg());
reg_def R_F7 ( SOC, SOC, Op_RegF,  7, F7->as_VMReg());
reg_def R_F8 ( SOC, SOC, Op_RegF,  8, F8->as_VMReg());
reg_def R_F9 ( SOC, SOC, Op_RegF,  9, F9->as_VMReg());
reg_def R_F10( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
reg_def R_F11( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
reg_def R_F12( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
reg_def R_F13( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
reg_def R_F14( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
reg_def R_F15( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
reg_def R_F16( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
reg_def R_F17( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
reg_def R_F18( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
reg_def R_F19( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
reg_def R_F20( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
reg_def R_F21( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
reg_def R_F22( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
reg_def R_F23( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
reg_def R_F24( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
reg_def R_F25( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
reg_def R_F26( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
reg_def R_F27( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
reg_def R_F28( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
reg_def R_F29( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
reg_def R_F30( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
reg_def R_F31( SOC, SOC, Op_RegF, 31, F31->as_VMReg());

// Double Registers
// The rules of ADL require that double registers be defined in pairs.
// Each pair must be two 32-bit values, but not necessarily a pair of
// single float registers.  In each pair, ADLC-assigned register numbers
// must be adjacent, with the lower number even.  Finally, when the
// CPU stores such a register pair to memory, the word associated with
// the lower ADLC-assigned number must be stored to the lower address.

// These definitions specify the actual bit encodings of the sparc
// double fp register numbers.  FloatRegisterImpl in register_sparc.hpp
// wants 0-63, so we have to convert every time we want to use fp regs
// with the macroassembler, using reg_to_DoubleFloatRegister_object().
// 255 is a flag meaning "don't go here".
// I believe we can't handle callee-save doubles D32 and up until
// the place in the sparc stack crawler that asserts on the 255 is
// fixed up.
reg_def R_D32 (SOC, SOC, Op_RegD,  1, F32->as_VMReg());
reg_def R_D32x(SOC, SOC, Op_RegD,255, F32->as_VMReg()->next());
reg_def R_D34 (SOC, SOC, Op_RegD,  3, F34->as_VMReg());
reg_def R_D34x(SOC, SOC, Op_RegD,255, F34->as_VMReg()->next());
reg_def R_D36 (SOC, SOC, Op_RegD,  5, F36->as_VMReg());
reg_def R_D36x(SOC, SOC, Op_RegD,255, F36->as_VMReg()->next());
reg_def R_D38 (SOC, SOC, Op_RegD,  7, F38->as_VMReg());
reg_def R_D38x(SOC, SOC, Op_RegD,255, F38->as_VMReg()->next());
reg_def R_D40 (SOC, SOC, Op_RegD,  9, F40->as_VMReg());
reg_def R_D40x(SOC, SOC, Op_RegD,255, F40->as_VMReg()->next());
reg_def R_D42 (SOC, SOC, Op_RegD, 11, F42->as_VMReg());
reg_def R_D42x(SOC, SOC, Op_RegD,255, F42->as_VMReg()->next());
reg_def R_D44 (SOC, SOC, Op_RegD, 13, F44->as_VMReg());
reg_def R_D44x(SOC, SOC, Op_RegD,255, F44->as_VMReg()->next());
reg_def R_D46 (SOC, SOC, Op_RegD, 15, F46->as_VMReg());
reg_def R_D46x(SOC, SOC, Op_RegD,255, F46->as_VMReg()->next());
reg_def R_D48 (SOC, SOC, Op_RegD, 17, F48->as_VMReg());
reg_def R_D48x(SOC, SOC, Op_RegD,255, F48->as_VMReg()->next());
reg_def R_D50 (SOC, SOC, Op_RegD, 19, F50->as_VMReg());
reg_def R_D50x(SOC, SOC, Op_RegD,255, F50->as_VMReg()->next());
reg_def R_D52 (SOC, SOC, Op_RegD, 21, F52->as_VMReg());
reg_def R_D52x(SOC, SOC, Op_RegD,255, F52->as_VMReg()->next());
reg_def R_D54 (SOC, SOC, Op_RegD, 23, F54->as_VMReg());
reg_def R_D54x(SOC, SOC, Op_RegD,255, F54->as_VMReg()->next());
reg_def R_D56 (SOC, SOC, Op_RegD, 25, F56->as_VMReg());
reg_def R_D56x(SOC, SOC, Op_RegD,255, F56->as_VMReg()->next());
reg_def R_D58 (SOC, SOC, Op_RegD, 27, F58->as_VMReg());
reg_def R_D58x(SOC, SOC, Op_RegD,255, F58->as_VMReg()->next());
reg_def R_D60 (SOC, SOC, Op_RegD, 29, F60->as_VMReg());
reg_def R_D60x(SOC, SOC, Op_RegD,255, F60->as_VMReg()->next());
reg_def R_D62 (SOC, SOC, Op_RegD, 31, F62->as_VMReg());
reg_def R_D62x(SOC, SOC, Op_RegD,255, F62->as_VMReg()->next());

// ----------------------------
// Special Registers
// Condition Codes Flag Registers
// I tried to break out ICC and XCC but it's not very pretty.
// Every Sparc instruction which defs/kills one also kills the other.
// Hence every compare instruction which defs one kind of flags ends
// up needing a kill of the other.
reg_def CCR (SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad());

reg_def FCC0(SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad());
reg_def FCC1(SOC, SOC, Op_RegFlags, 1, VMRegImpl::Bad());
reg_def FCC2(SOC, SOC, Op_RegFlags, 2, VMRegImpl::Bad());
reg_def FCC3(SOC, SOC, Op_RegFlags, 3, VMRegImpl::Bad());

// ----------------------------
// Specify the enum values for the registers.  These enums are only used by the
// OptoReg "class".  We can convert these enum values at will to VMReg when needed
// for visibility to the rest of the vm.  The order of this enum influences the
// register allocator so having the freedom to set this order and not be stuck
// with the order that is natural for the rest of the vm is worth it.
alloc_class chunk0(
  R_L0,R_L0H, R_L1,R_L1H, R_L2,R_L2H, R_L3,R_L3H, R_L4,R_L4H, R_L5,R_L5H, R_L6,R_L6H, R_L7,R_L7H,
  R_G0,R_G0H, R_G1,R_G1H, R_G2,R_G2H, R_G3,R_G3H, R_G4,R_G4H, R_G5,R_G5H, R_G6,R_G6H, R_G7,R_G7H,
  R_O7,R_O7H, R_SP,R_SPH, R_O0,R_O0H, R_O1,R_O1H, R_O2,R_O2H, R_O3,R_O3H, R_O4,R_O4H, R_O5,R_O5H,
  R_I0,R_I0H, R_I1,R_I1H, R_I2,R_I2H, R_I3,R_I3H, R_I4,R_I4H, R_I5,R_I5H, R_FP,R_FPH, R_I7,R_I7H);

// Note that a register is not allocatable unless it is also mentioned
// in a widely-used reg_class below.  Thus, R_G7 and R_G0 are outside i_reg.

alloc_class chunk1(
  // The first registers listed here are those most likely to be used
  // as temporaries.  We move F0..F7 away from the front of the list,
  // to reduce the likelihood of interferences with parameters and
  // return values.  Likewise, we avoid using F0/F1 for parameters,
  // since they are used for return values.
  // This FPU fine-tuning is worth about 1% on the SPEC geomean.
  R_F8 ,R_F9 ,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
  R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,
  R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,R_F30,R_F31,
  R_F0 ,R_F1 ,R_F2 ,R_F3 ,R_F4 ,R_F5 ,R_F6 ,R_F7 , // used for arguments and return values
  R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x,
  R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x,
  R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x,
  R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x);

alloc_class chunk2(CCR, FCC0, FCC1, FCC2, FCC3);

//----------Architecture Description Register Classes--------------------------
// Several register classes are automatically defined based upon information in
// this architecture description.
// 1) reg_class inline_cache_reg           ( as defined in frame section )
// 2) reg_class interpreter_method_oop_reg ( as defined in frame section )
// 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//

// G0 is not included in integer class since it has special meaning.
reg_class g0_reg(R_G0);

// ----------------------------
// Integer Register Classes
// ----------------------------
// Exclusions from i_reg:
// R_G0: hardwired zero
// R_G2: reserved by HotSpot to the TLS register (invariant within Java)
// R_G6: reserved by Solaris ABI to tools
// R_G7: reserved by Solaris ABI to libthread
// R_O7: Used as a temp in many encodings
reg_class int_reg(R_G1,R_G3,R_G4,R_G5,R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);

// Class for all integer registers, except the G registers.  This is used for
// encodings which use G registers as temps.  The regular inputs to such
// instructions use a "notemp_" prefix, as a hack to ensure that the allocator
// will not put an input into a temp register.
reg_class notemp_int_reg(R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);

reg_class g1_regI(R_G1);
reg_class g3_regI(R_G3);
reg_class g4_regI(R_G4);
reg_class o0_regI(R_O0);
reg_class o7_regI(R_O7);

// ----------------------------
// Pointer Register Classes
// ----------------------------
#ifdef _LP64
// 64-bit build means 64-bit pointers means hi/lo pairs
reg_class ptr_reg(            R_G1H,R_G1,             R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
                  R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
                  R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
                  R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 );
// Lock encodings use G3 and G4 internally
reg_class lock_ptr_reg(       R_G1H,R_G1,                         R_G5H,R_G5,
                  R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
                  R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
                  R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 );
// Special class for storeP instructions, which can store SP or RPC to TLS.
// It is also used for memory addressing, allowing direct TLS addressing.
reg_class sp_ptr_reg(         R_G1H,R_G1, R_G2H,R_G2, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
                  R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, R_SPH,R_SP,
                  R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
                  R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5, R_FPH,R_FP );
// R_L7 is the lowest-priority callee-save (i.e., NS) register
// We use it to save R_G2 across calls out of Java.
reg_class l7_regP(R_L7H,R_L7);

// Other special pointer regs
reg_class g1_regP(R_G1H,R_G1);
reg_class g2_regP(R_G2H,R_G2);
reg_class g3_regP(R_G3H,R_G3);
reg_class g4_regP(R_G4H,R_G4);
reg_class g5_regP(R_G5H,R_G5);
reg_class i0_regP(R_I0H,R_I0);
reg_class o0_regP(R_O0H,R_O0);
reg_class o1_regP(R_O1H,R_O1);
reg_class o2_regP(R_O2H,R_O2);
reg_class o7_regP(R_O7H,R_O7);

#else // _LP64
// 32-bit build means 32-bit pointers means 1 register.
reg_class ptr_reg(     R_G1,      R_G3,R_G4,R_G5,
                  R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
                  R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
                  R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
// Lock encodings use G3 and G4 internally
reg_class lock_ptr_reg(R_G1,           R_G5,
                  R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
                  R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
                  R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
// Special class for storeP instructions, which can store SP or RPC to TLS.
// It is also used for memory addressing, allowing direct TLS addressing.
reg_class sp_ptr_reg(  R_G1,R_G2,R_G3,R_G4,R_G5,
                  R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_SP,
                  R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
                  R_I0,R_I1,R_I2,R_I3,R_I4,R_I5,R_FP);
// R_L7 is the lowest-priority callee-save (i.e., NS) register
// We use it to save R_G2 across calls out of Java.
reg_class l7_regP(R_L7);

// Other special pointer regs
reg_class g1_regP(R_G1);
reg_class g2_regP(R_G2);
reg_class g3_regP(R_G3);
reg_class g4_regP(R_G4);
reg_class g5_regP(R_G5);
reg_class i0_regP(R_I0);
reg_class o0_regP(R_O0);
reg_class o1_regP(R_O1);
reg_class o2_regP(R_O2);
reg_class o7_regP(R_O7);
#endif // _LP64

// ----------------------------
// Long Register Classes
// ----------------------------
// Longs in 1 register.  Aligned adjacent hi/lo pairs.
// Note:  O7 is never in this class; it is sometimes used as an encoding temp.
reg_class long_reg(             R_G1H,R_G1,             R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5
                   ,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5
#ifdef _LP64
// 64-bit, longs in 1 register: use all 64-bit integer registers
// 32-bit, longs in 1 register: cannot use I's and L's.  Restrict to O's and G's.
                   ,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7
                   ,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5
#endif // _LP64
                  );

reg_class g1_regL(R_G1H,R_G1);
reg_class g3_regL(R_G3H,R_G3);
reg_class o2_regL(R_O2H,R_O2);
reg_class o7_regL(R_O7H,R_O7);

// ----------------------------
// Special Class for Condition Code Flags Register
reg_class int_flags(CCR);
reg_class float_flags(FCC0,FCC1,FCC2,FCC3);
reg_class float_flag0(FCC0);

// ----------------------------
// Float Point Register Classes
// ----------------------------
// Skip F30/F31, they are reserved for mem-mem copies
reg_class sflt_reg(R_F0,R_F1,R_F2,R_F3,R_F4,R_F5,R_F6,R_F7,R_F8,R_F9,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29);

// Paired floating point registers--they show up in the same order as the floats,
// but they are used with the "Op_RegD" type, and always occur in even/odd pairs.
reg_class dflt_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
                   R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,
                   /* Use extra V9 double registers; this AD file does not support V8 */
                   R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x,R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x,
                   R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x,R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x
                   );

// Paired floating point registers--they show up in the same order as the floats,
// but they are used with the "Op_RegD" type, and always occur in even/odd pairs.
// This class is usable for mis-aligned loads as happen in I2C adapters.
reg_class dflt_low_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
                       R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29);
%}
//----------DEFINITION BLOCK---------------------------------------------------
// Define name --> value mappings to inform the ADLC of an integer valued name
// Current support includes integer values in the range [0, 0x7FFFFFFF]
// Format:
//        int_def  <name>         ( <int_value>, <expression>);
// Generated Code in ad_<arch>.hpp
//        #define  <name>   (<expression>)
//        // value == <int_value>
// Generated code in ad_<arch>.cpp adlc_verification()
//        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
//
definitions %{
// The default cost (of an ALU instruction).
  int_def DEFAULT_COST      (    100,     100);
  // NOTE(review): presumably HUGE_COST keeps an instruction from being
  // selected in normal matching -- confirm against matcher usage.
  int_def HUGE_COST         (1000000, 1000000);

// Memory refs are twice as expensive as run-of-the-mill.
  int_def MEMORY_REF_COST   (    200, DEFAULT_COST * 2);

// Branches are even more expensive.
  int_def BRANCH_COST       (    300, DEFAULT_COST * 3);
  int_def CALL_COST         (    300, DEFAULT_COST * 3);
%}
//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description
source_hpp %{
// Must be visible to the DFA in dfa_sparc.cpp
extern bool can_branch_register( Node *bol, Node *cmp );

extern bool use_block_zeroing(Node* count);

// Macros to extract hi & lo halves from a long pair.
// G0 is not part of any long pair, so assert on that.
// Prevents accidentally using G1 instead of G0.
// NOTE(review): the comment above promises an assert, but both macros are
// plain identity maps -- no G0 check is actually performed here.
#define LONG_HI_REG(x) (x)
#define LONG_LO_REG(x) (x)

%}
source %{
// Assembler shorthand used by the emit routines in this source block.
#define __ _masm.

// tertiary op of a LoadP or StoreP encoding
#define REGP_OP true

// Map an ADLC register encoding back to the assembler's register objects.
// These are static, so their definitions appear later in this file.
static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding);
static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding);
static Register reg_to_register_object(int register_encoding);
483 // Used by the DFA in dfa_sparc.cpp.
484 // Check for being able to use a V9 branch-on-register. Requires a
485 // compare-vs-zero, equal/not-equal, of a value which was zero- or sign-
486 // extended. Doesn't work following an integer ADD, for example, because of
487 // overflow (-1 incremented yields 0 plus a carry in the high-order word). On
488 // 32-bit V9 systems, interrupts currently blow away the high-order 32 bits and
489 // replace them with zero, which could become sign-extension in a different OS
490 // release. There's no obvious reason why an interrupt will ever fill these
491 // bits with non-zero junk (the registers are reloaded with standard LD
492 // instructions which either zero-fill or sign-fill).
493 bool can_branch_register( Node *bol, Node *cmp ) {
494 if( !BranchOnRegister ) return false;
495 #ifdef _LP64
496 if( cmp->Opcode() == Op_CmpP )
497 return true; // No problems with pointer compares
498 #endif
499 if( cmp->Opcode() == Op_CmpL )
500 return true; // No problems with long compares
502 if( !SparcV9RegsHiBitsZero ) return false;
503 if( bol->as_Bool()->_test._test != BoolTest::ne &&
504 bol->as_Bool()->_test._test != BoolTest::eq )
505 return false;
507 // Check for comparing against a 'safe' value. Any operation which
508 // clears out the high word is safe. Thus, loads and certain shifts
509 // are safe, as are non-negative constants. Any operation which
510 // preserves zero bits in the high word is safe as long as each of its
511 // inputs are safe. Thus, phis and bitwise booleans are safe if their
512 // inputs are safe. At present, the only important case to recognize
513 // seems to be loads. Constants should fold away, and shifts &
514 // logicals can use the 'cc' forms.
515 Node *x = cmp->in(1);
516 if( x->is_Load() ) return true;
517 if( x->is_Phi() ) {
518 for( uint i = 1; i < x->req(); i++ )
519 if( !x->in(i)->is_Load() )
520 return false;
521 return true;
522 }
523 return false;
524 }
526 bool use_block_zeroing(Node* count) {
527 // Use BIS for zeroing if count is not constant
528 // or it is >= BlockZeroingLowLimit.
529 return UseBlockZeroing && (count->find_intptr_t_con(BlockZeroingLowLimit) >= BlockZeroingLowLimit);
530 }
532 // ****************************************************************************
534 // REQUIRED FUNCTIONALITY
536 // !!!!! Special hack to get all type of calls to specify the byte offset
537 // from the start of the call to the point where the return address
538 // will point.
539 // The "return address" is the address of the call instruction, plus 8.
541 int MachCallStaticJavaNode::ret_addr_offset() {
542 int offset = NativeCall::instruction_size; // call; delay slot
543 if (_method_handle_invoke)
544 offset += 4; // restore SP
545 return offset;
546 }
int MachCallDynamicJavaNode::ret_addr_offset() {
  int vtable_index = this->_vtable_index;
  if (vtable_index < 0) {
    // must be invalid_vtable_index, not nonvirtual_vtable_index
    assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
    // Inline-cache dispatch: 2-instruction constant load, then the call.
    return (NativeMovConstReg::instruction_size +
           NativeCall::instruction_size);  // sethi; setlo; call; delay slot
  } else {
    assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
    // Vtable dispatch: load the receiver's klass, then the Method* out of
    // the vtable, then call.  Sum the instruction bytes of that sequence.
    int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
    int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
    int klass_load_size;
    if (UseCompressedOops && UseCompressedKlassPointers) {
      assert(Universe::heap() != NULL, "java heap should be initialized");
      // Decoding a narrow klass takes 2 instructions with a NULL narrow-oop
      // base and 3 otherwise; this must stay in sync with load_klass().
      if (Universe::narrow_oop_base() == NULL)
        klass_load_size = 2*BytesPerInstWord; // see MacroAssembler::load_klass()
      else
        klass_load_size = 3*BytesPerInstWord;
    } else {
      klass_load_size = 1*BytesPerInstWord;
    }
    if (Assembler::is_simm13(v_off)) {
      // vtable offset fits a 13-bit immediate: two loads plus the call.
      return klass_load_size +
             (2*BytesPerInstWord +           // ld_ptr, ld_ptr
             NativeCall::instruction_size);  // call; delay slot
    } else {
      // Large vtable offset must be materialized first (set_hi, set).
      return klass_load_size +
             (4*BytesPerInstWord +           // set_hi, set, ld_ptr, ld_ptr
             NativeCall::instruction_size);  // call; delay slot
    }
  }
}
581 int MachCallRuntimeNode::ret_addr_offset() {
582 #ifdef _LP64
583 if (MacroAssembler::is_far_target(entry_point())) {
584 return NativeFarCall::instruction_size;
585 } else {
586 return NativeCall::instruction_size;
587 }
588 #else
589 return NativeCall::instruction_size; // call; delay slot
590 #endif
591 }
// Indicate if the safepoint node needs the polling page as an input.
// Since Sparc does not have absolute addressing, it does.
bool SafePointNode::needs_polling_address_input() {
  return true;
}
// emit an interrupt that is caught by the debugger (for debugging compiler)
void emit_break(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  // Trap instruction that stops execution in the debugger.
  __ breakpoint_trap();
}
#ifndef PRODUCT
// Disassembly/printing name of the breakpoint pseudo-instruction.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream *st ) const {
  st->print("TA");
}
#endif

// A breakpoint node emits exactly the debugger trap produced by emit_break.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  emit_break(cbuf);
}

// Size in bytes: use the generic MachNode computation.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Traceable jump
void emit_jmpl(CodeBuffer &cbuf, int jump_target) {
  MacroAssembler _masm(&cbuf);
  // jump_target is an ADLC register encoding; map it back to a Register.
  Register rdest = reg_to_register_object(jump_target);
  __ JMP(rdest, 0);
  __ delayed()->nop();   // fill the branch delay slot
}
// Traceable jump and set exception pc
void emit_jmpl_set_exception_pc(CodeBuffer &cbuf, int jump_target) {
  MacroAssembler _masm(&cbuf);
  Register rdest = reg_to_register_object(jump_target);
  __ JMP(rdest, 0);
  // In the delay slot, derive the issuing pc from the return address in O7.
  __ delayed()->add(O7, frame::pc_return_offset, Oissuing_pc );
}
// Emit a single nop instruction.
void emit_nop(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ nop();
}
// Emit an "illtrap 0" (illegal instruction trap).
void emit_illtrap(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ illtrap(0);
}
// VerifyOops helper: recover the constant offset of a memory operand from
// its base-and-displacement decomposition and cross-check it against disp32.
intptr_t get_offset_from_base(const MachNode* n, const TypePtr* atype, int disp32) {
  assert(n->rule() != loadUB_rule, "");

  intptr_t offset = 0;
  const TypePtr *adr_type = TYPE_PTR_SENTINAL;  // Check for base==RegI, disp==immP
  const Node* addr = n->get_base_and_disp(offset, adr_type);
  assert(adr_type == (const TypePtr*)-1, "VerifyOops: no support for sparc operands with base==RegI, disp==immP");
  assert(addr != NULL && addr != (Node*)-1, "invalid addr");
  assert(addr->bottom_type()->isa_oopptr() == atype, "");
  atype = atype->add_offset(offset);
  assert(disp32 == offset, "wrong disp32");
  return atype->_offset;
}
// Second flavor of offset recovery: walk the address expression (an AddP
// node) directly instead of using get_base_and_disp().
intptr_t get_offset_from_base_2(const MachNode* n, const TypePtr* atype, int disp32) {
  assert(n->rule() != loadUB_rule, "");

  intptr_t offset = 0;
  Node* addr = n->in(2);
  assert(addr->bottom_type()->isa_oopptr() == atype, "");
  if (addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP) {
    // Fold the AddP's constant offset (if any) into the address type.
    Node* a = addr->in(2/*AddPNode::Address*/);
    Node* o = addr->in(3/*AddPNode::Offset*/);
    offset = o->is_Con() ? o->bottom_type()->is_intptr_t()->get_con() : Type::OffsetBot;
    atype = a->bottom_type()->is_ptr()->add_offset(offset);
    assert(atype->isa_oop_ptr(), "still an oop");
  }
  offset = atype->is_ptr()->_offset;
  // OffsetBot means "offset unknown"; don't add disp32 to the sentinel.
  if (offset != Type::OffsetBot) offset += disp32;
  return offset;
}
679 static inline jdouble replicate_immI(int con, int count, int width) {
680 // Load a constant replicated "count" times with width "width"
681 assert(count*width == 8 && width <= 4, "sanity");
682 int bit_width = width * 8;
683 jlong val = con;
684 val &= (((jlong) 1) << bit_width) - 1; // mask off sign bits
685 for (int i = 0; i < count - 1; i++) {
686 val |= (val << bit_width);
687 }
688 jdouble dval = *((jdouble*) &val); // coerce to double type
689 return dval;
690 }
692 static inline jdouble replicate_immF(float con) {
693 // Replicate float con 2 times and pack into vector.
694 int val = *((int*)&con);
695 jlong lval = val;
696 lval = (lval << 32) | (lval & 0xFFFFFFFFl);
697 jdouble dval = *((jdouble*) &lval); // coerce to double type
698 return dval;
699 }
701 // Standard Sparc opcode form2 field breakdown
702 static inline void emit2_19(CodeBuffer &cbuf, int f30, int f29, int f25, int f22, int f20, int f19, int f0 ) {
703 f0 &= (1<<19)-1; // Mask displacement to 19 bits
704 int op = (f30 << 30) |
705 (f29 << 29) |
706 (f25 << 25) |
707 (f22 << 22) |
708 (f20 << 20) |
709 (f19 << 19) |
710 (f0 << 0);
711 cbuf.insts()->emit_int32(op);
712 }
714 // Standard Sparc opcode form2 field breakdown
715 static inline void emit2_22(CodeBuffer &cbuf, int f30, int f25, int f22, int f0 ) {
716 f0 >>= 10; // Drop 10 bits
717 f0 &= (1<<22)-1; // Mask displacement to 22 bits
718 int op = (f30 << 30) |
719 (f25 << 25) |
720 (f22 << 22) |
721 (f0 << 0);
722 cbuf.insts()->emit_int32(op);
723 }
725 // Standard Sparc opcode form3 field breakdown
726 static inline void emit3(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int f5, int f0 ) {
727 int op = (f30 << 30) |
728 (f25 << 25) |
729 (f19 << 19) |
730 (f14 << 14) |
731 (f5 << 5) |
732 (f0 << 0);
733 cbuf.insts()->emit_int32(op);
734 }
736 // Standard Sparc opcode form3 field breakdown
737 static inline void emit3_simm13(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm13 ) {
738 simm13 &= (1<<13)-1; // Mask to 13 bits
739 int op = (f30 << 30) |
740 (f25 << 25) |
741 (f19 << 19) |
742 (f14 << 14) |
743 (1 << 13) | // bit to indicate immediate-mode
744 (simm13<<0);
745 cbuf.insts()->emit_int32(op);
746 }
748 static inline void emit3_simm10(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm10 ) {
749 simm10 &= (1<<10)-1; // Mask to 10 bits
750 emit3_simm13(cbuf,f30,f25,f19,f14,simm10);
751 }
#ifdef ASSERT
// Helper function for VerifyOops in emit_form3_mem_reg:
// print a diagnostic (node dump plus opcode names) when a memory node's
// ideal opcode does not match the classification derived from the
// instruction being emitted.
void verify_oops_warning(const MachNode *n, int ideal_op, int mem_op) {
  warning("VerifyOops encountered unexpected instruction:");
  n->dump(2);
  warning("Instruction has ideal_Opcode==Op_%s and op_ld==Op_%s \n", NodeClassNames[ideal_op], NodeClassNames[mem_op]);
}
#endif
// Emit a SPARC form3 memory instruction (load or store), using the
// reg+reg form when the displacement is zero and the reg+simm13 form
// otherwise.  In ASSERT builds this also implements the +VerifyOops
// classification and check emission described below.
void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int tertiary,
                        int src1_enc, int disp32, int src2_enc, int dst_enc) {

#ifdef ASSERT
  // The following code implements the +VerifyOops feature.
  // It verifies oop values which are loaded into or stored out of
  // the current method activation. +VerifyOops complements techniques
  // like ScavengeALot, because it eagerly inspects oops in transit,
  // as they enter or leave the stack, as opposed to ScavengeALot,
  // which inspects oops "at rest", in the stack or heap, at safepoints.
  // For this reason, +VerifyOops can sometimes detect bugs very close
  // to their point of creation. It can also serve as a cross-check
  // on the validity of oop maps, when used together with ScavengeALot.

  // It would be good to verify oops at other points, especially
  // when an oop is used as a base pointer for a load or store.
  // This is presently difficult, because it is hard to know when
  // a base address is biased or not. (If we had such information,
  // it would be easy and useful to make a two-argument version of
  // verify_oop which unbiases the base, and performs verification.)

  assert((uint)tertiary == 0xFFFFFFFF || tertiary == REGP_OP, "valid tertiary");
  bool is_verified_oop_base  = false;
  bool is_verified_oop_load  = false;
  bool is_verified_oop_store = false;
  int tmp_enc = -1;
  if (VerifyOops && src1_enc != R_SP_enc) {
    // classify the op, mainly for an assert check
    int st_op = 0, ld_op = 0;
    switch (primary) {
    case Assembler::stb_op3:  st_op = Op_StoreB; break;
    case Assembler::sth_op3:  st_op = Op_StoreC; break;
    case Assembler::stx_op3:  // may become StoreP or stay StoreI or StoreD0
    case Assembler::stw_op3:  st_op = Op_StoreI; break;
    case Assembler::std_op3:  st_op = Op_StoreL; break;
    case Assembler::stf_op3:  st_op = Op_StoreF; break;
    case Assembler::stdf_op3: st_op = Op_StoreD; break;

    case Assembler::ldsb_op3: ld_op = Op_LoadB; break;
    case Assembler::ldub_op3: ld_op = Op_LoadUB; break;
    case Assembler::lduh_op3: ld_op = Op_LoadUS; break;
    case Assembler::ldsh_op3: ld_op = Op_LoadS; break;
    case Assembler::ldx_op3:  // may become LoadP or stay LoadI
    case Assembler::ldsw_op3: // may become LoadP or stay LoadI
    case Assembler::lduw_op3: ld_op = Op_LoadI; break;
    case Assembler::ldd_op3:  ld_op = Op_LoadL; break;
    case Assembler::ldf_op3:  ld_op = Op_LoadF; break;
    case Assembler::lddf_op3: ld_op = Op_LoadD; break;
    case Assembler::prefetch_op3: ld_op = Op_LoadI; break;

    default: ShouldNotReachHere();
    }
    if (tertiary == REGP_OP) {
      // Pointer-width access: refine the classification to StoreP/LoadP
      // and decide whether the value moved is a verifiable oop.
      if (st_op == Op_StoreI)     st_op = Op_StoreP;
      else if (ld_op == Op_LoadI) ld_op = Op_LoadP;
      else                        ShouldNotReachHere();
      if (st_op) {
        // a store
        // inputs are (0:control, 1:memory, 2:address, 3:value)
        Node* n2 = n->in(3);
        if (n2 != NULL) {
          const Type* t = n2->bottom_type();
          is_verified_oop_store = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
        }
      } else {
        // a load
        const Type* t = n->bottom_type();
        is_verified_oop_load = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
      }
    }

    if (ld_op) {
      // a Load
      // inputs are (0:control, 1:memory, 2:address)
      if (!(n->ideal_Opcode()==ld_op)       && // Following are special cases
          !(n->ideal_Opcode()==Op_LoadPLocked && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadI     && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_LoadF     && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadRange && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadKlass && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadL     && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadL_unaligned && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadD_unaligned && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2F   && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2D   && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_PrefetchRead  && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_PrefetchWrite && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_PrefetchAllocation && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadVector && ld_op==Op_LoadD) &&
          !(n->rule() == loadUB_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), ld_op);
      }
    } else if (st_op) {
      // a Store
      // inputs are (0:control, 1:memory, 2:address, 3:value)
      if (!(n->ideal_Opcode()==st_op)    && // Following are special cases
          !(n->ideal_Opcode()==Op_StoreCM && st_op==Op_StoreB) &&
          !(n->ideal_Opcode()==Op_StoreI && st_op==Op_StoreF) &&
          !(n->ideal_Opcode()==Op_StoreF && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_StoreL && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_StoreVector && st_op==Op_StoreD) &&
          !(n->ideal_Opcode()==Op_StoreD && st_op==Op_StoreI && n->rule() == storeD0_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), st_op);
      }
    }

    if (src2_enc == R_G0_enc && n->rule() != loadUB_rule && n->ideal_Opcode() != Op_StoreCM ) {
      Node* addr = n->in(2);
      if (!(addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP)) {
        const TypeOopPtr* atype = addr->bottom_type()->isa_instptr(); // %%% oopptr?
        if (atype != NULL) {
          intptr_t offset = get_offset_from_base(n, atype, disp32);
          intptr_t offset_2 = get_offset_from_base_2(n, atype, disp32);
          if (offset != offset_2) {
            // Recompute for easy inspection under a debugger before the assert fires.
            get_offset_from_base(n, atype, disp32);
            get_offset_from_base_2(n, atype, disp32);
          }
          assert(offset == offset_2, "different offsets");
          if (offset == disp32) {
            // we now know that src1 is a true oop pointer
            is_verified_oop_base = true;
            if (ld_op && src1_enc == dst_enc && ld_op != Op_LoadF && ld_op != Op_LoadD) {
              if( primary == Assembler::ldd_op3 ) {
                is_verified_oop_base = false; // Cannot 'ldd' into O7
              } else {
                tmp_enc = dst_enc;
                dst_enc = R_O7_enc; // Load into O7; preserve source oop
                assert(src1_enc != dst_enc, "");
              }
            }
          }
          if (st_op && (( offset == oopDesc::klass_offset_in_bytes())
                         || offset == oopDesc::mark_offset_in_bytes())) {
                      // loading the mark should not be allowed either, but
                      // we don't check this since it conflicts with InlineObjectHash
                      // usage of LoadINode to get the mark. We could keep the
                      // check if we create a new LoadMarkNode
            // but do not verify the object before its header is initialized
            ShouldNotReachHere();
          }
        }
      }
    }
  }
#endif

  // Pack the form3 instruction word: opcode, destination, primary op3,
  // and the base register.
  uint instr;
  instr = (Assembler::ldst_op << 30)
        | (dst_enc        << 25)
        | (primary        << 19)
        | (src1_enc       << 14);

  uint index = src2_enc;
  int disp = disp32;

  if (src1_enc == R_SP_enc || src1_enc == R_FP_enc)
    disp += STACK_BIAS;

  // We should have a compiler bailout here rather than a guarantee.
  // Better yet would be some mechanism to handle variable-size matches correctly.
  guarantee(Assembler::is_simm13(disp), "Do not match large constant offsets" );

  if( disp == 0 ) {
    // use reg-reg form
    // bit 13 is already zero
    instr |= index;
  } else {
    // use reg-imm form
    instr |= 0x00002000; // set bit 13 to one
    instr |= disp & 0x1FFF;
  }

  cbuf.insts()->emit_int32(instr);

#ifdef ASSERT
  {
    MacroAssembler _masm(&cbuf);
    if (is_verified_oop_base) {
      __ verify_oop(reg_to_register_object(src1_enc));
    }
    if (is_verified_oop_store) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
    if (tmp_enc != -1) {
      // Copy the verified value from the scratch register back to its
      // intended destination (see the O7 redirection above).
      __ mov(O7, reg_to_register_object(tmp_enc));
    }
    if (is_verified_oop_load) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
  }
#endif
}
// Emit a call to entry_point with the given relocation type.  The call
// must be the FIRST instruction emitted here (see comment below).  When
// preserve_g2 is set, G2 is saved to L7 in the delay slot and restored
// after the call returns.
void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false) {
  // The method which records debug information at every safepoint
  // expects the call to be the first instruction in the snippet as
  // it creates a PcDesc structure which tracks the offset of a call
  // from the start of the codeBlob. This offset is computed as
  // code_end() - code_begin() of the code which has been emitted
  // so far.
  // In this particular case we have skirted around the problem by
  // putting the "mov" instruction in the delay slot but the problem
  // may bite us again at some other point and a cleaner/generic
  // solution using relocations would be needed.
  MacroAssembler _masm(&cbuf);
  __ set_inst_mark();

  // We flush the current window just so that there is a valid stack copy
  // the fact that the current window becomes active again instantly is
  // not a problem there is nothing live in it.

#ifdef ASSERT
  // NOTE(review): startpos appears unused in this function -- TODO confirm
  // whether it is a leftover from a removed length assertion.
  int startpos = __ offset();
#endif /* ASSERT */

  __ call((address)entry_point, rtype);

  if (preserve_g2)   __ delayed()->mov(G2, L7);
  else __ delayed()->nop();

  if (preserve_g2)   __ mov(L7, G2);

#ifdef ASSERT
  if (preserve_g2 && (VerifyCompiledCode || VerifyOops)) {
#ifdef _LP64
    // Trash argument dump slots.
    __ set(0xb0b8ac0db0b8ac0d, G1);
    __ mov(G1, G5);
    __ stx(G1, SP, STACK_BIAS + 0x80);
    __ stx(G1, SP, STACK_BIAS + 0x88);
    __ stx(G1, SP, STACK_BIAS + 0x90);
    __ stx(G1, SP, STACK_BIAS + 0x98);
    __ stx(G1, SP, STACK_BIAS + 0xA0);
    __ stx(G1, SP, STACK_BIAS + 0xA8);
#else // _LP64
    // this is also a native call, so smash the first 7 stack locations,
    // and the various registers

    // Note: [SP+0x40] is sp[callee_aggregate_return_pointer_sp_offset],
    // while [SP+0x44..0x58] are the argument dump slots.
    __ set((intptr_t)0xbaadf00d, G1);
    __ mov(G1, G5);
    __ sllx(G1, 32, G1);
    __ or3(G1, G5, G1);
    __ mov(G1, G5);
    __ stx(G1, SP, 0x40);
    __ stx(G1, SP, 0x48);
    __ stx(G1, SP, 0x50);
    __ stw(G1, SP, 0x58); // Do not trash [SP+0x5C] which is a usable spill slot
#endif // _LP64
  }
#endif /*ASSERT*/
}
1017 //=============================================================================
// REQUIRED FUNCTIONALITY for encoding
void emit_lo(CodeBuffer &cbuf, int val) { }  // intentionally a no-op on SPARC
void emit_hi(CodeBuffer &cbuf, int val) { }  // intentionally a no-op on SPARC
1023 //=============================================================================
1024 const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask();
1026 int Compile::ConstantTable::calculate_table_base_offset() const {
1027 if (UseRDPCForConstantTableBase) {
1028 // The table base offset might be less but then it fits into
1029 // simm13 anyway and we are good (cf. MachConstantBaseNode::emit).
1030 return Assembler::min_simm13();
1031 } else {
1032 int offset = -(size() / 2);
1033 if (!Assembler::is_simm13(offset)) {
1034 offset = Assembler::min_simm13();
1035 }
1036 return offset;
1037 }
1038 }
// Materialize the constant table base into this node's output register:
// either PC-relative via RDPC (when UseRDPCForConstantTableBase), or by
// SETting the table's absolute address with an internal-word relocation.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register r = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, err_msg("must be: %d == %d", constant_table.size(), consts_size));

  if (UseRDPCForConstantTableBase) {
    // For the following RDPC logic to work correctly the consts
    // section must be allocated right before the insts section. This
    // assert checks for that. The layout and the SECT_* constants
    // are defined in src/share/vm/asm/codeBuffer.hpp.
    assert(CodeBuffer::SECT_CONSTS + 1 == CodeBuffer::SECT_INSTS, "must be");
    int insts_offset = __ offset();

    // Layout:
    //
    // |----------- consts section ------------|----------- insts section -----------...
    // |------ constant table -----|- padding -|------------------x----
    //                                                            \ current PC (RDPC instruction)
    // |<------------- consts_size ----------->|<- insts_offset ->|
    //                                          \ table base
    // The table base offset is later added to the load displacement
    // so it has to be negative.
    int table_base_offset = -(consts_size + insts_offset);
    int disp;

    // If the displacement from the current PC to the constant table
    // base fits into simm13 we set the constant table base to the
    // current PC.
    if (Assembler::is_simm13(table_base_offset)) {
      constant_table.set_table_base_offset(table_base_offset);
      disp = 0;
    } else {
      // Otherwise we set the constant table base offset to the
      // maximum negative displacement of load instructions to keep
      // the disp as small as possible:
      //
      // |<------------- consts_size ----------->|<- insts_offset ->|
      // |<--------- min_simm13 --------->|<-------- disp --------->|
      //                                  \ table base
      table_base_offset = Assembler::min_simm13();
      constant_table.set_table_base_offset(table_base_offset);
      disp = (consts_size + insts_offset) + table_base_offset;
    }

    __ rdpc(r);

    if (disp != 0) {
      // Adjust the RDPC result down to the chosen table base.
      assert(r != O7, "need temporary");
      __ sub(r, __ ensure_simm13_or_reg(disp, O7), r);
    }
  }
  else {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    AddressLiteral base(baseaddr, rspec);
    __ set(base, r);
  }
}
1105 uint MachConstantBaseNode::size(PhaseRegAlloc*) const {
1106 if (UseRDPCForConstantTableBase) {
1107 // This is really the worst case but generally it's only 1 instruction.
1108 return (1 /*rdpc*/ + 1 /*sub*/ + MacroAssembler::worst_case_insts_for_set()) * BytesPerInstWord;
1109 } else {
1110 return MacroAssembler::worst_case_insts_for_set() * BytesPerInstWord;
1111 }
1112 }
1114 #ifndef PRODUCT
1115 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
1116 char reg[128];
1117 ra_->dump_register(this, reg);
1118 if (UseRDPCForConstantTableBase) {
1119 st->print("RDPC %s\t! constant table base", reg);
1120 } else {
1121 st->print("SET &constanttable,%s\t! constant table base", reg);
1122 }
1123 }
1124 #endif
1127 //=============================================================================
1129 #ifndef PRODUCT
1130 void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
1131 Compile* C = ra_->C;
1133 for (int i = 0; i < OptoPrologueNops; i++) {
1134 st->print_cr("NOP"); st->print("\t");
1135 }
1137 if( VerifyThread ) {
1138 st->print_cr("Verify_Thread"); st->print("\t");
1139 }
1141 size_t framesize = C->frame_slots() << LogBytesPerInt;
1143 // Calls to C2R adapters often do not accept exceptional returns.
1144 // We require that their callers must bang for them. But be careful, because
1145 // some VM calls (such as call site linkage) can use several kilobytes of
1146 // stack. But the stack safety zone should account for that.
1147 // See bugs 4446381, 4468289, 4497237.
1148 if (C->need_stack_bang(framesize)) {
1149 st->print_cr("! stack bang"); st->print("\t");
1150 }
1152 if (Assembler::is_simm13(-framesize)) {
1153 st->print ("SAVE R_SP,-%d,R_SP",framesize);
1154 } else {
1155 st->print_cr("SETHI R_SP,hi%%(-%d),R_G3",framesize); st->print("\t");
1156 st->print_cr("ADD R_G3,lo%%(-%d),R_G3",framesize); st->print("\t");
1157 st->print ("SAVE R_SP,R_G3,R_SP");
1158 }
1160 }
1161 #endif
1163 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1164 Compile* C = ra_->C;
1165 MacroAssembler _masm(&cbuf);
1167 for (int i = 0; i < OptoPrologueNops; i++) {
1168 __ nop();
1169 }
1171 __ verify_thread();
1173 size_t framesize = C->frame_slots() << LogBytesPerInt;
1174 assert(framesize >= 16*wordSize, "must have room for reg. save area");
1175 assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
1177 // Calls to C2R adapters often do not accept exceptional returns.
1178 // We require that their callers must bang for them. But be careful, because
1179 // some VM calls (such as call site linkage) can use several kilobytes of
1180 // stack. But the stack safety zone should account for that.
1181 // See bugs 4446381, 4468289, 4497237.
1182 if (C->need_stack_bang(framesize)) {
1183 __ generate_stack_overflow_check(framesize);
1184 }
1186 if (Assembler::is_simm13(-framesize)) {
1187 __ save(SP, -framesize, SP);
1188 } else {
1189 __ sethi(-framesize & ~0x3ff, G3);
1190 __ add(G3, -framesize & 0x3ff, G3);
1191 __ save(SP, G3, SP);
1192 }
1193 C->set_frame_complete( __ offset() );
1195 if (!UseRDPCForConstantTableBase && C->has_mach_constant_base_node()) {
1196 // NOTE: We set the table base offset here because users might be
1197 // emitted before MachConstantBaseNode.
1198 Compile::ConstantTable& constant_table = C->constant_table();
1199 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1200 }
1201 }
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  // Size is determined by the generic MachNode mechanism.
  return MachNode::size(ra_);
}
// Conservative upper bound on the relocation entries the prologue needs.
int MachPrologNode::reloc() const {
  return 10; // a large enough number
}
1211 //=============================================================================
1212 #ifndef PRODUCT
1213 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
1214 Compile* C = ra_->C;
1216 if( do_polling() && ra_->C->is_method_compilation() ) {
1217 st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t");
1218 #ifdef _LP64
1219 st->print("LDX [L0],G0\t!Poll for Safepointing\n\t");
1220 #else
1221 st->print("LDUW [L0],G0\t!Poll for Safepointing\n\t");
1222 #endif
1223 }
1225 if( do_polling() )
1226 st->print("RET\n\t");
1228 st->print("RESTORE");
1229 }
1230 #endif
1232 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1233 MacroAssembler _masm(&cbuf);
1234 Compile* C = ra_->C;
1236 __ verify_thread();
1238 // If this does safepoint polling, then do it here
1239 if( do_polling() && ra_->C->is_method_compilation() ) {
1240 AddressLiteral polling_page(os::get_polling_page());
1241 __ sethi(polling_page, L0);
1242 __ relocate(relocInfo::poll_return_type);
1243 __ ld_ptr( L0, 0, G0 );
1244 }
1246 // If this is a return, then stuff the restore in the delay slot
1247 if( do_polling() ) {
1248 __ ret();
1249 __ delayed()->restore();
1250 } else {
1251 __ restore();
1252 }
1253 }
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Size is determined by the generic MachNode mechanism.
  return MachNode::size(ra_);
}
// Conservative upper bound on the relocation entries the epilogue needs.
int MachEpilogNode::reloc() const {
  return 16; // a large enough number
}
const Pipeline * MachEpilogNode::pipeline() const {
  // Use the default pipeline description for this node.
  return MachNode::pipeline_class();
}
// Offset from the start of the epilogue to the safepoint-poll load,
// i.e. the size of the SETHI sequence emitted before it (see emit()).
int MachEpilogNode::safepoint_offset() const {
  assert( do_polling(), "no return for this epilog node");
  return MacroAssembler::insts_for_sethi(os::get_polling_page()) * BytesPerInstWord;
}
1272 //=============================================================================
1274 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
1275 enum RC { rc_bad, rc_int, rc_float, rc_stack };
1276 static enum RC rc_class( OptoReg::Name reg ) {
1277 if( !OptoReg::is_valid(reg) ) return rc_bad;
1278 if (OptoReg::is_stack(reg)) return rc_stack;
1279 VMReg r = OptoReg::as_VMReg(reg);
1280 if (r->is_Register()) return rc_int;
1281 assert(r->is_FloatRegister(), "must be");
1282 return rc_float;
1283 }
// Emit (when cbuf != NULL) or format (PRODUCT excluded) one load/store
// between a register and a stack slot, as used by spill copies.
// Returns the accumulated code size in bytes (each instruction is 4).
static int impl_helper( const MachNode *mach, CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, bool is_load, int offset, int reg, int opcode, const char *op_str, int size, outputStream* st ) {
  if( cbuf ) {
    // Better yet would be some mechanism to handle variable-size matches correctly
    if (!Assembler::is_simm13(offset + STACK_BIAS)) {
      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
    } else {
      emit_form3_mem_reg(*cbuf, mach, opcode, -1, R_SP_enc, offset, 0, Matcher::_regEncode[reg]);
    }
  }
#ifndef PRODUCT
  else if( !do_size ) {
    if( size != 0 ) st->print("\n\t");
    if( is_load ) st->print("%s [R_SP + #%d],R_%s\t! spill",op_str,offset,OptoReg::regname(reg));
    else st->print("%s R_%s,[R_SP + #%d]\t! spill",op_str,OptoReg::regname(reg),offset);
  }
#endif
  return size+4;
}
// Emit (when cbuf != NULL) or format (PRODUCT excluded) one arith_op
// register-to-register move, as used by spill copies.
// Returns the accumulated code size in bytes (each instruction is 4).
static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int op1, int op2, const char *op_str, int size, outputStream* st ) {
  if( cbuf ) emit3( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst], op1, 0, op2, Matcher::_regEncode[src] );
#ifndef PRODUCT
  else if( !do_size ) {
    if( size != 0 ) st->print("\n\t");
    st->print("%s R_%s,R_%s\t! spill",op_str,OptoReg::regname(src),OptoReg::regname(dst));
  }
#endif
  return size+4;
}
1315 uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
1316 PhaseRegAlloc *ra_,
1317 bool do_size,
1318 outputStream* st ) const {
1319 // Get registers to move
1320 OptoReg::Name src_second = ra_->get_reg_second(in(1));
1321 OptoReg::Name src_first = ra_->get_reg_first(in(1));
1322 OptoReg::Name dst_second = ra_->get_reg_second(this );
1323 OptoReg::Name dst_first = ra_->get_reg_first(this );
1325 enum RC src_second_rc = rc_class(src_second);
1326 enum RC src_first_rc = rc_class(src_first);
1327 enum RC dst_second_rc = rc_class(dst_second);
1328 enum RC dst_first_rc = rc_class(dst_first);
1330 assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );
1332 // Generate spill code!
1333 int size = 0;
1335 if( src_first == dst_first && src_second == dst_second )
1336 return size; // Self copy, no move
1338 // --------------------------------------
1339 // Check for mem-mem move. Load into unused float registers and fall into
1340 // the float-store case.
1341 if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
1342 int offset = ra_->reg2offset(src_first);
1343 // Further check for aligned-adjacent pair, so we can use a double load
1344 if( (src_first&1)==0 && src_first+1 == src_second ) {
1345 src_second = OptoReg::Name(R_F31_num);
1346 src_second_rc = rc_float;
1347 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::lddf_op3,"LDDF",size, st);
1348 } else {
1349 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::ldf_op3 ,"LDF ",size, st);
1350 }
1351 src_first = OptoReg::Name(R_F30_num);
1352 src_first_rc = rc_float;
1353 }
1355 if( src_second_rc == rc_stack && dst_second_rc == rc_stack ) {
1356 int offset = ra_->reg2offset(src_second);
1357 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F31_num,Assembler::ldf_op3,"LDF ",size, st);
1358 src_second = OptoReg::Name(R_F31_num);
1359 src_second_rc = rc_float;
1360 }
1362 // --------------------------------------
1363 // Check for float->int copy; requires a trip through memory
1364 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS < 3) {
1365 int offset = frame::register_save_words*wordSize;
1366 if (cbuf) {
1367 emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::sub_op3, R_SP_enc, 16 );
1368 impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
1369 impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
1370 emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::add_op3, R_SP_enc, 16 );
1371 }
1372 #ifndef PRODUCT
1373 else if (!do_size) {
1374 if (size != 0) st->print("\n\t");
1375 st->print( "SUB R_SP,16,R_SP\n");
1376 impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
1377 impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
1378 st->print("\tADD R_SP,16,R_SP\n");
1379 }
1380 #endif
1381 size += 16;
1382 }
1384 // Check for float->int copy on T4
1385 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS >= 3) {
1386 // Further check for aligned-adjacent pair, so we can use a double move
1387 if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second)
1388 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mdtox_opf,"MOVDTOX",size, st);
1389 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mstouw_opf,"MOVSTOUW",size, st);
1390 }
1391 // Check for int->float copy on T4
1392 if (src_first_rc == rc_int && dst_first_rc == rc_float && UseVIS >= 3) {
1393 // Further check for aligned-adjacent pair, so we can use a double move
1394 if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second)
1395 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mxtod_opf,"MOVXTOD",size, st);
1396 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mwtos_opf,"MOVWTOS",size, st);
1397 }
1399 // --------------------------------------
1400 // In the 32-bit 1-reg-longs build ONLY, I see mis-aligned long destinations.
1401 // In such cases, I have to do the big-endian swap. For aligned targets, the
1402 // hardware does the flop for me. Doubles are always aligned, so no problem
1403 // there. Misaligned sources only come from native-long-returns (handled
1404 // special below).
1405 #ifndef _LP64
1406 if( src_first_rc == rc_int && // source is already big-endian
1407 src_second_rc != rc_bad && // 64-bit move
1408 ((dst_first&1)!=0 || dst_second != dst_first+1) ) { // misaligned dst
1409 assert( (src_first&1)==0 && src_second == src_first+1, "source must be aligned" );
1410 // Do the big-endian flop.
1411 OptoReg::Name tmp = dst_first ; dst_first = dst_second ; dst_second = tmp ;
1412 enum RC tmp_rc = dst_first_rc; dst_first_rc = dst_second_rc; dst_second_rc = tmp_rc;
1413 }
1414 #endif
1416 // --------------------------------------
1417 // Check for integer reg-reg copy
1418 if( src_first_rc == rc_int && dst_first_rc == rc_int ) {
1419 #ifndef _LP64
1420 if( src_first == R_O0_num && src_second == R_O1_num ) { // Check for the evil O0/O1 native long-return case
1421 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value
1422 // as stored in memory. On a big-endian machine like SPARC, this means that the _second
1423 // operand contains the least significant word of the 64-bit value and vice versa.
1424 OptoReg::Name tmp = OptoReg::Name(R_O7_num);
1425 assert( (dst_first&1)==0 && dst_second == dst_first+1, "return a native O0/O1 long to an aligned-adjacent 64-bit reg" );
1426 // Shift O0 left in-place, zero-extend O1, then OR them into the dst
1427 if( cbuf ) {
1428 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tmp], Assembler::sllx_op3, Matcher::_regEncode[src_first], 0x1020 );
1429 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[src_second], Assembler::srl_op3, Matcher::_regEncode[src_second], 0x0000 );
1430 emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler:: or_op3, Matcher::_regEncode[tmp], 0, Matcher::_regEncode[src_second] );
1431 #ifndef PRODUCT
1432 } else if( !do_size ) {
1433 if( size != 0 ) st->print("\n\t");
1434 st->print("SLLX R_%s,32,R_%s\t! Move O0-first to O7-high\n\t", OptoReg::regname(src_first), OptoReg::regname(tmp));
1435 st->print("SRL R_%s, 0,R_%s\t! Zero-extend O1\n\t", OptoReg::regname(src_second), OptoReg::regname(src_second));
1436 st->print("OR R_%s,R_%s,R_%s\t! spill",OptoReg::regname(tmp), OptoReg::regname(src_second), OptoReg::regname(dst_first));
1437 #endif
1438 }
1439 return size+12;
1440 }
1441 else if( dst_first == R_I0_num && dst_second == R_I1_num ) {
1442 // returning a long value in I0/I1
1443 // a SpillCopy must be able to target a return instruction's reg_class
1444 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value
1445 // as stored in memory. On a big-endian machine like SPARC, this means that the _second
1446 // operand contains the least significant word of the 64-bit value and vice versa.
1447 OptoReg::Name tdest = dst_first;
1449 if (src_first == dst_first) {
1450 tdest = OptoReg::Name(R_O7_num);
1451 size += 4;
1452 }
1454 if( cbuf ) {
1455 assert( (src_first&1) == 0 && (src_first+1) == src_second, "return value was in an aligned-adjacent 64-bit reg");
1456 // Shift value in upper 32-bits of src to lower 32-bits of I0; move lower 32-bits to I1
1457 // ShrL_reg_imm6
1458 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tdest], Assembler::srlx_op3, Matcher::_regEncode[src_second], 32 | 0x1000 );
1459 // ShrR_reg_imm6 src, 0, dst
1460 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srl_op3, Matcher::_regEncode[src_first], 0x0000 );
1461 if (tdest != dst_first) {
1462 emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler::or_op3, 0/*G0*/, 0/*op2*/, Matcher::_regEncode[tdest] );
1463 }
1464 }
1465 #ifndef PRODUCT
1466 else if( !do_size ) {
1467 if( size != 0 ) st->print("\n\t"); // %%%%% !!!!!
1468 st->print("SRLX R_%s,32,R_%s\t! Extract MSW\n\t",OptoReg::regname(src_second),OptoReg::regname(tdest));
1469 st->print("SRL R_%s, 0,R_%s\t! Extract LSW\n\t",OptoReg::regname(src_first),OptoReg::regname(dst_second));
1470 if (tdest != dst_first) {
1471 st->print("MOV R_%s,R_%s\t! spill\n\t", OptoReg::regname(tdest), OptoReg::regname(dst_first));
1472 }
1473 }
1474 #endif // PRODUCT
1475 return size+8;
1476 }
1477 #endif // !_LP64
1478 // Else normal reg-reg copy
1479 assert( src_second != dst_first, "smashed second before evacuating it" );
1480 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::or_op3,0,"MOV ",size, st);
1481 assert( (src_first&1) == 0 && (dst_first&1) == 0, "never move second-halves of int registers" );
1482 // This moves an aligned adjacent pair.
1483 // See if we are done.
1484 if( src_first+1 == src_second && dst_first+1 == dst_second )
1485 return size;
1486 }
1488 // Check for integer store
1489 if( src_first_rc == rc_int && dst_first_rc == rc_stack ) {
1490 int offset = ra_->reg2offset(dst_first);
1491 // Further check for aligned-adjacent pair, so we can use a double store
1492 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1493 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stx_op3,"STX ",size, st);
1494 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stw_op3,"STW ",size, st);
1495 }
1497 // Check for integer load
1498 if( dst_first_rc == rc_int && src_first_rc == rc_stack ) {
1499 int offset = ra_->reg2offset(src_first);
1500 // Further check for aligned-adjacent pair, so we can use a double load
1501 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1502 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldx_op3 ,"LDX ",size, st);
1503 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
1504 }
1506 // Check for float reg-reg copy
1507 if( src_first_rc == rc_float && dst_first_rc == rc_float ) {
1508 // Further check for aligned-adjacent pair, so we can use a double move
1509 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1510 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovd_opf,"FMOVD",size, st);
1511 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovs_opf,"FMOVS",size, st);
1512 }
1514 // Check for float store
1515 if( src_first_rc == rc_float && dst_first_rc == rc_stack ) {
1516 int offset = ra_->reg2offset(dst_first);
1517 // Further check for aligned-adjacent pair, so we can use a double store
1518 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1519 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stdf_op3,"STDF",size, st);
1520 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
1521 }
1523 // Check for float load
1524 if( dst_first_rc == rc_float && src_first_rc == rc_stack ) {
1525 int offset = ra_->reg2offset(src_first);
1526 // Further check for aligned-adjacent pair, so we can use a double load
1527 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1528 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lddf_op3,"LDDF",size, st);
1529 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldf_op3 ,"LDF ",size, st);
1530 }
1532 // --------------------------------------------------------------------
1533 // Check for hi bits still needing moving. Only happens for misaligned
1534 // arguments to native calls.
1535 if( src_second == dst_second )
1536 return size; // Self copy; no move
1537 assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" );
1539 #ifndef _LP64
1540 // In the LP64 build, all registers can be moved as aligned/adjacent
1541 // pairs, so there's never any need to move the high bits separately.
1542 // The 32-bit builds have to deal with the 32-bit ABI which can force
1543 // all sorts of silly alignment problems.
1545 // Check for integer reg-reg copy. Hi bits are stuck up in the top
1546 // 32-bits of a 64-bit register, but are needed in low bits of another
1547 // register (else it's a hi-bits-to-hi-bits copy which should have
1548 // happened already as part of a 64-bit move)
1549 if( src_second_rc == rc_int && dst_second_rc == rc_int ) {
1550 assert( (src_second&1)==1, "its the evil O0/O1 native return case" );
1551 assert( (dst_second&1)==0, "should have moved with 1 64-bit move" );
1552 // Shift src_second down to dst_second's low bits.
1553 if( cbuf ) {
1554 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 );
1555 #ifndef PRODUCT
1556 } else if( !do_size ) {
1557 if( size != 0 ) st->print("\n\t");
1558 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(dst_second));
1559 #endif
1560 }
1561 return size+4;
1562 }
1564 // Check for high word integer store. Must down-shift the hi bits
1565 // into a temp register, then fall into the case of storing int bits.
1566 if( src_second_rc == rc_int && dst_second_rc == rc_stack && (src_second&1)==1 ) {
1567 // Shift src_second down to dst_second's low bits.
1568 if( cbuf ) {
1569 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[R_O7_num], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 );
1570 #ifndef PRODUCT
1571 } else if( !do_size ) {
1572 if( size != 0 ) st->print("\n\t");
1573 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(R_O7_num));
1574 #endif
1575 }
1576 size+=4;
1577 src_second = OptoReg::Name(R_O7_num); // Not R_O7H_num!
1578 }
1580 // Check for high word integer load
1581 if( dst_second_rc == rc_int && src_second_rc == rc_stack )
1582 return impl_helper(this,cbuf,ra_,do_size,true ,ra_->reg2offset(src_second),dst_second,Assembler::lduw_op3,"LDUW",size, st);
1584 // Check for high word integer store
1585 if( src_second_rc == rc_int && dst_second_rc == rc_stack )
1586 return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stw_op3 ,"STW ",size, st);
1588 // Check for high word float store
1589 if( src_second_rc == rc_float && dst_second_rc == rc_stack )
1590 return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stf_op3 ,"STF ",size, st);
1592 #endif // !_LP64
1594 Unimplemented();
1595 }
1597 #ifndef PRODUCT
// Debug-only: pretty-print this spill copy by running the shared
// implementation() in format mode (cbuf == NULL, output to st).
1598 void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
1599 implementation( NULL, ra_, false, st );
1600 }
1601 #endif
// Emit the spill-copy instructions into the code buffer; implementation()
// does the real work (do_size == false, no output stream).
1603 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1604 implementation( &cbuf, ra_, false, NULL );
1605 }
// Byte size of this spill copy: run implementation() in size-only mode
// (cbuf == NULL, do_size == true) and return the accumulated count.
1607 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
1608 return implementation( NULL, ra_, true, NULL );
1609 }
1611 //=============================================================================
1612 #ifndef PRODUCT
// Debug-only: print the nop pad; each count is one 4-byte SPARC instruction.
1613 void MachNopNode::format( PhaseRegAlloc *, outputStream *st ) const {
1614 st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
1615 }
1616 #endif
// Emit _count nop instructions (padding for loops and calls).
1618 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1619 MacroAssembler _masm(&cbuf);
1620 for(int i = 0; i < _count; i += 1) {
1621 __ nop();
1622 }
1623 }
// Pad size in bytes: 4 bytes per emitted nop.  ra_ is unused.
1625 uint MachNopNode::size(PhaseRegAlloc *ra_) const {
1626 return 4 * _count;
1627 }
1630 //=============================================================================
1631 #ifndef PRODUCT
// Debug-only: print the stack-slot address computation for this lock box.
1632 void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
1633 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1634 int reg = ra_->get_reg_first(this);
1635 st->print("LEA [R_SP+#%d+BIAS],%s",offset,Matcher::regName[reg]);
1636 }
1637 #endif
// Materialize the (stack-biased) address of the lock box into the assigned
// register.  Offsets that do not fit in a simm13 immediate are first loaded
// into O7 and added with a register-register add.
1639 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1640 MacroAssembler _masm(&cbuf);
1641 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()) + STACK_BIAS;
1642 int reg = ra_->get_encode(this);
1644 if (Assembler::is_simm13(offset)) {
1645 __ add(SP, offset, reg_to_register_object(reg));
1646 } else {
// Offset too large for one add-immediate: set into O7 first.
1647 __ set(offset, O7);
1648 __ add(SP, O7, reg_to_register_object(reg));
1649 }
1650 }
// Variable-length node (1 or more instructions depending on the offset,
// see emit()), so measure by emitting into the scratch buffer.
1652 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
1653 // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_)
1654 assert(ra_ == ra_->C->regalloc(), "sanity");
1655 return ra_->C->scratch_emit_size(this);
1656 }
1658 //=============================================================================
1660 // emit call stub, compiled java to interpreter
// The stub loads a placeholder metadata value into the inline-cache
// register and jumps to a placeholder address; both are patched when the
// call is converted to call interpreted code.
1661 void emit_java_to_interp(CodeBuffer &cbuf ) {
1663 // Stub is fixed up when the corresponding call is converted from calling
1664 // compiled code to calling interpreted code.
1665 // set (empty), G5
1666 // jmp -1
1668 address mark = cbuf.insts_mark(); // get mark within main instrs section
1670 MacroAssembler _masm(&cbuf);
1672 address base =
1673 __ start_a_stub(Compile::MAX_stubs_size);
1674 if (base == NULL) return; // CodeBuffer::expand failed
1676 // static stub relocation stores the instruction address of the call
1677 __ relocate(static_stub_Relocation::spec(mark));
// NULL metadata placeholder, patched later with the callee's Method*.
1679 __ set_metadata(NULL, reg_to_register_object(Matcher::inline_cache_reg_encode()));
1681 __ set_inst_mark();
// Placeholder jump target (-1), also patched later.
1682 AddressLiteral addrlit(-1);
1683 __ JUMP(addrlit, G3, 0);
1685 __ delayed()->nop();
1687 // Update current stubs pointer and restore code_end.
1688 __ end_a_stub();
1689 }
1691 // size of call stub, compiled java to interpreter
1692 uint size_java_to_interp() {
1693 // This doesn't need to be accurate but it must be larger or equal to
1694 // the real size of the stub.
1695 return (NativeMovConstReg::instruction_size + // sethi/setlo;
1696 NativeJump::instruction_size + // sethi; jmp; nop
1697 (TraceJumps ? 20 * BytesPerInstWord : 0) );
1698 }
1699 // relocation entries for call stub, compiled java to interpreter
1700 uint reloc_java_to_interp() {
// NOTE(review): the trailing comment's arithmetic (4 + 1) does not add up
// to the returned 10; the value appears to be a safe overestimate — confirm.
1701 return 10; // 4 in emit_java_to_interp + 1 in Java_Static_Call
1702 }
1705 //=============================================================================
1706 #ifndef PRODUCT
// Debug-only: print the unverified entry point (inline-cache check).
// Mirrors the code emitted by MachUEPNode::emit below.
1707 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
1708 st->print_cr("\nUEP:");
1709 #ifdef _LP64
1710 if (UseCompressedOops) {
1711 assert(Universe::heap() != NULL, "java heap should be initialized");
1712 st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
1713 st->print_cr("\tSLL R_G5,3,R_G5");
1714 if (Universe::narrow_oop_base() != NULL)
1715 st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
1716 } else {
1717 st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
1718 }
1719 st->print_cr("\tCMP R_G5,R_G3" );
1720 st->print ("\tTne xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
1721 #else // _LP64
1722 st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
1723 st->print_cr("\tCMP R_G5,R_G3" );
1724 st->print ("\tTne icc,R_G0+ST_RESERVED_FOR_USER_0+2");
1725 #endif // _LP64
1726 }
1727 #endif
// Unverified entry point: load the receiver's klass, compare it with the
// expected klass in the inline-cache register, and trap to the IC-miss
// handler on mismatch.
1729 void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1730 MacroAssembler _masm(&cbuf);
1731 Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
1732 Register temp_reg = G3;
1733 assert( G5_ic_reg != temp_reg, "conflicting registers" );
1735 // Load klass from receiver
1736 __ load_klass(O0, temp_reg);
1737 // Compare against expected klass
1738 __ cmp(temp_reg, G5_ic_reg);
1739 // Branch to miss code, checks xcc or icc depending
1740 __ trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2);
1741 }
// Fixed-length emission, so the generic MachNode::size computation applies.
1743 uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
1744 return MachNode::size(ra_);
1745 }
1748 //=============================================================================
// Worst-case byte size of the exception-handler stub.
1750 uint size_exception_handler() {
1751 if (TraceJumps) {
1752 return (400); // just a guess
1753 }
1754 return ( NativeJump::instruction_size ); // sethi;jmp;nop
1755 }
// Worst-case byte size of the deoptimization-handler stub.
1757 uint size_deopt_handler() {
1758 if (TraceJumps) {
1759 return (400); // just a guess
1760 }
1761 return ( 4+ NativeJump::instruction_size ); // save;sethi;jmp;restore
1762 }
1764 // Emit exception handler code.
// Emits a far jump to the shared exception blob into a stub section and
// returns the stub's starting offset (0 if CodeBuffer expansion failed).
1765 int emit_exception_handler(CodeBuffer& cbuf) {
1766 Register temp_reg = G3;
1767 AddressLiteral exception_blob(OptoRuntime::exception_blob()->entry_point());
1768 MacroAssembler _masm(&cbuf);
1770 address base =
1771 __ start_a_stub(size_exception_handler());
1772 if (base == NULL) return 0; // CodeBuffer::expand failed
1774 int offset = __ offset();
1776 __ JUMP(exception_blob, temp_reg, 0); // sethi;jmp
1777 __ delayed()->nop();
1779 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
1781 __ end_a_stub();
1783 return offset;
1784 }
// Emit deoptimization handler code: save a frame, then jump to the deopt
// blob's unpack entry.  Returns the stub's starting offset (0 on failure).
1786 int emit_deopt_handler(CodeBuffer& cbuf) {
1787 // Can't use any of the current frame's registers as we may have deopted
1788 // at a poll and everything (including G3) can be live.
1789 Register temp_reg = L0;
1790 AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
1791 MacroAssembler _masm(&cbuf);
1793 address base =
1794 __ start_a_stub(size_deopt_handler());
1795 if (base == NULL) return 0; // CodeBuffer::expand failed
1797 int offset = __ offset();
1798 __ save_frame(0);
1799 __ JUMP(deopt_blob, temp_reg, 0); // sethi;jmp
// Delay slot undoes the save so the blob sees the original frame.
1800 __ delayed()->restore();
1802 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
1804 __ end_a_stub();
1805 return offset;
1807 }
1809 // Given a register encoding, produce an Integer Register object
1810 static Register reg_to_register_object(int register_encoding) {
// Sanity spot-check: encodings and Register numbering must agree.
1811 assert(L5->encoding() == R_L5_enc && G1->encoding() == R_G1_enc, "right coding");
1812 return as_Register(register_encoding);
1813 }
1815 // Given a register encoding, produce a single-precision Float Register object
1816 static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding) {
// Sanity spot-check: single-precision encodings must agree.
1817 assert(F5->encoding(FloatRegisterImpl::S) == R_F5_enc && F12->encoding(FloatRegisterImpl::S) == R_F12_enc, "right coding");
1818 return as_SingleFloatRegister(register_encoding);
1819 }
1821 // Given a register encoding, produce a double-precision Float Register object
1822 static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding) {
// Sanity spot-checks: double-precision encodings must agree, including the
// upper bank (F32 and above).
1823 assert(F4->encoding(FloatRegisterImpl::D) == R_F4_enc, "right coding");
1824 assert(F32->encoding(FloatRegisterImpl::D) == R_D32_enc, "right coding");
1825 return as_DoubleFloatRegister(register_encoding);
1826 }
// Identify match rules not supported in this configuration.  The bit-count
// and leading/trailing-zero rules are all gated on UsePopCountInstruction
// (presumably implemented via the POPC instruction — confirm against the
// instruct rules).
1828 const bool Matcher::match_rule_supported(int opcode) {
1829 if (!has_match_rule(opcode))
1830 return false;
1832 switch (opcode) {
1833 case Op_CountLeadingZerosI:
1834 case Op_CountLeadingZerosL:
1835 case Op_CountTrailingZerosI:
1836 case Op_CountTrailingZerosL:
1837 case Op_PopCountI:
1838 case Op_PopCountL:
1839 if (!UsePopCountInstruction)
1840 return false;
1841 break;
1842 }
1844 return true; // Per default match rules are supported.
1847 int Matcher::regnum_to_fpu_offset(int regnum) {
1848 return regnum - 32; // The FP registers are in the second chunk
1849 }
1851 #ifdef ASSERT
// Debug-build aid: records the most recent Rethrow encoding site.
1852 address last_rethrow = NULL; // debugging aid for Rethrow encoding
1853 #endif
1855 // Vector width in bytes
// Only 8-byte vectors are supported here (asserted against MaxVectorSize).
1856 const int Matcher::vector_width_in_bytes(BasicType bt) {
1857 assert(MaxVectorSize == 8, "");
1858 return 8;
1859 }
1861 // Vector ideal reg
// 8-byte vectors are carried in a double register (Op_RegD).
1862 const int Matcher::vector_ideal_reg(int size) {
1863 assert(MaxVectorSize == 8, "");
1864 return Op_RegD;
1865 }
1867 // Limits on vector size (number of elements) loaded into vector.
1868 const int Matcher::max_vector_size(const BasicType bt) {
1869 assert(is_java_primitive(bt), "only primitive type vectors");
// Element count = vector byte width / element byte size.
1870 return vector_width_in_bytes(bt)/type2aelembytes(bt);
1871 }
// Minimum vector size equals the maximum: only one vector width exists.
1873 const int Matcher::min_vector_size(const BasicType bt) {
1874 return max_vector_size(bt); // Same as max.
1875 }
1877 // SPARC doesn't support misaligned vectors store/load.
1878 const bool Matcher::misaligned_vectors_ok() {
1879 return false;
1880 }
1882 // USII supports fxtof through the whole range of number, USIII doesn't
// Long-to-float conversion is only advertised when the CPU reports fast
// fxtof support.
1883 const bool Matcher::convL2FSupported(void) {
1884 return VM_Version::has_fast_fxtof();
1885 }
1887 // Is this branch offset short enough that a short branch can be used?
1888 //
1889 // NOTE: If the platform does not provide any short branch variants, then
1890 // this method should return false for offset 0.
1891 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
1892 // The passed offset is relative to address of the branch.
1893 // Don't need to adjust the offset.
// Short branches require the CBCOND feature and a simm12 displacement.
1894 return UseCBCond && Assembler::is_simm12(offset);
1895 }
1897 const bool Matcher::isSimpleConstant64(jlong value) {
1898 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
1899 // Depends on optimizations in MacroAssembler::setx.
1900 int hi = (int)(value >> 32);
1901 int lo = (int)(value & ~0);
1902 return (hi == 0) || (hi == -1) || (lo == 0);
1903 }
1905 // No scaling for the parameter of the ClearArray node.
1906 const bool Matcher::init_array_count_is_in_bytes = true;
1908 // Threshold size for cleararray.
1909 const int Matcher::init_array_short_size = 8 * BytesPerLong;
1911 // No additional cost for CMOVL.
1912 const int Matcher::long_cmove_cost() { return 0; }
1914 // CMOVF/CMOVD are expensive on T4 and on SPARC64.
// Returning ConditionalMoveLimit effectively disables float cmove there.
1915 const int Matcher::float_cmove_cost() {
1916 return (VM_Version::is_T4() || VM_Version::is_sparc64()) ? ConditionalMoveLimit : 0;
1917 }
1919 // Should the Matcher clone shifts on addressing modes, expecting them to
1920 // be subsumed into complex addressing expressions or compute them into
1921 // registers? True for Intel but false for most RISCs
1922 const bool Matcher::clone_shift_expressions = false;
1924 // Do we need to mask the count passed to shift instructions or does
1925 // the cpu only look at the lower 5/6 bits anyway?
1926 const bool Matcher::need_masked_shift_count = false;
// Compressed-oop addressing is never folded into complex addresses here.
// Only meaningful on 64-bit builds (ShouldNotCallThis on 32-bit).
1928 bool Matcher::narrow_oop_use_complex_address() {
1929 NOT_LP64(ShouldNotCallThis());
1930 assert(UseCompressedOops, "only for compressed oops code");
1931 return false;
1932 }
1934 // Is it better to copy float constants, or load them directly from memory?
1935 // Intel can load a float constant from a direct address, requiring no
1936 // extra registers. Most RISCs will have to materialize an address into a
1937 // register first, so they would do better to copy the constant from stack.
1938 const bool Matcher::rematerialize_float_constants = false;
1940 // If CPU can load and store mis-aligned doubles directly then no fixup is
1941 // needed. Else we split the double into 2 integer pieces and move it
1942 // piece-by-piece. Only happens when passing doubles into C code as the
1943 // Java calling convention forces doubles to be aligned.
1944 #ifdef _LP64
1945 const bool Matcher::misaligned_doubles_ok = true;
1946 #else
1947 const bool Matcher::misaligned_doubles_ok = false;
1948 #endif
1950 // No-op on SPARC.
1951 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
1952 }
1954 // Advertise here if the CPU requires explicit rounding operations
1955 // to implement the UseStrictFP mode.
1956 const bool Matcher::strict_fp_requires_explicit_rounding = false;
1958 // Are floats converted to double when stored to stack during deoptimization?
1959 // Sparc does not handle callee-save floats.
1960 bool Matcher::float_in_double() { return false; }
1962 // Do ints take an entire long register or just half?
1963 // Note that we if-def off of _LP64.
1964 // The relevant question is how the int is callee-saved. In _LP64
1965 // the whole long is written but de-opt'ing will have to extract
1966 // the relevant 32 bits, in not-_LP64 only the low 32 bits is written.
1967 #ifdef _LP64
1968 const bool Matcher::int_in_long = true;
1969 #else
1970 const bool Matcher::int_in_long = false;
1971 #endif
1973 // Return whether or not this register is ever used as an argument. This
1974 // function is used on startup to build the trampoline stubs in generateOptoStub.
1975 // Registers not mentioned will be killed by the VM call in the trampoline, and
1976 // arguments in those registers will not be available to the callee.
1977 bool Matcher::can_be_java_arg( int reg ) {
1978 // Standard sparc 6 args in registers
1979 if( reg == R_I0_num ||
1980 reg == R_I1_num ||
1981 reg == R_I2_num ||
1982 reg == R_I3_num ||
1983 reg == R_I4_num ||
1984 reg == R_I5_num ) return true;
1985 #ifdef _LP64
1986 // 64-bit builds can pass 64-bit pointers and longs in
1987 // the high I registers
1988 if( reg == R_I0H_num ||
1989 reg == R_I1H_num ||
1990 reg == R_I2H_num ||
1991 reg == R_I3H_num ||
1992 reg == R_I4H_num ||
1993 reg == R_I5H_num ) return true;
// With compressed oops, G6 holds the heap base and may carry an argument.
1995 if ((UseCompressedOops) && (reg == R_G6_num || reg == R_G6H_num)) {
1996 return true;
1997 }
1999 #else
2000 // 32-bit builds with longs-in-one-entry pass longs in G1 & G4.
2001 // Longs cannot be passed in O regs, because O regs become I regs
2002 // after a 'save' and I regs get their high bits chopped off on
2003 // interrupt.
2004 if( reg == R_G1H_num || reg == R_G1_num ) return true;
2005 if( reg == R_G4H_num || reg == R_G4_num ) return true;
2006 #endif
2007 // A few float args in registers
2008 if( reg >= R_F0_num && reg <= R_F7_num ) return true;
2010 return false;
2011 }
// A register is a spillable argument exactly when it can be a Java arg.
2013 bool Matcher::is_spillable_arg( int reg ) {
2014 return can_be_java_arg(reg);
2015 }
// The divisor parameter is intentionally unused: the decision depends only
// on whether the CPU's SDIVX is fast, not on the constant's value.
2017 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
2018 // Use hardware SDIVX instruction when it is
2019 // faster than a code which use multiply.
2020 return VM_Version::has_fast_idiv();
2021 }
2023 // Register for DIVI projection of divmodI
// These projection masks should never be requested in this configuration
// (ShouldNotReachHere); the RegMask return only satisfies the signature.
2024 RegMask Matcher::divI_proj_mask() {
2025 ShouldNotReachHere();
2026 return RegMask();
2027 }
2029 // Register for MODI projection of divmodI
2030 RegMask Matcher::modI_proj_mask() {
2031 ShouldNotReachHere();
2032 return RegMask();
2033 }
2035 // Register for DIVL projection of divmodL
2036 RegMask Matcher::divL_proj_mask() {
2037 ShouldNotReachHere();
2038 return RegMask();
2039 }
2041 // Register for MODL projection of divmodL
2042 RegMask Matcher::modL_proj_mask() {
2043 ShouldNotReachHere();
2044 return RegMask();
2045 }
// Register mask for the SP-save slot used around method-handle invokes (L7).
2047 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
2048 return L7_REGP_mask();
2049 }
2051 %}
2054 // The intptr_t operand types, defined by textual substitution.
2055 // (Cf. opto/type.hpp. This lets us avoid many, many other ifdefs.)
// On 64-bit builds the pointer-sized operands alias the long operand types;
// on 32-bit builds they alias the int operand types.
2056 #ifdef _LP64
2057 #define immX immL
2058 #define immX13 immL13
2059 #define immX13m7 immL13m7
2060 #define iRegX iRegL
2061 #define g1RegX g1RegL
2062 #else
2063 #define immX immI
2064 #define immX13 immI13
2065 #define immX13m7 immI13m7
2066 #define iRegX iRegI
2067 #define g1RegX g1RegI
2068 #endif
2070 //----------ENCODING BLOCK-----------------------------------------------------
2071 // This block specifies the encoding classes used by the compiler to output
2072 // byte streams. Encoding classes are parameterized macros used by
2073 // Machine Instruction Nodes in order to generate the bit encoding of the
2074 // instruction. Operands specify their base encoding interface with the
2075 // interface keyword. There are currently four supported interfaces:
2076 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
2077 // operand to generate a function which returns its register number when
2078 // queried. CONST_INTER causes an operand to generate a function which
2079 // returns the value of the constant when queried. MEMORY_INTER causes an
2080 // operand to generate four functions which return the Base Register, the
2081 // Index Register, the Scale Value, and the Offset Value of the operand when
2082 // queried. COND_INTER causes an operand to generate six functions which
2083 // return the encoding code (ie - encoding bits for the instruction)
2084 // associated with each basic boolean condition for a conditional instruction.
2085 //
2086 // Instructions specify two basic values for encoding. Again, a function
2087 // is available to check if the constant displacement is an oop. They use the
2088 // ins_encode keyword to specify their encoding classes (which must be
2089 // a sequence of enc_class names, and their parameters, specified in
2090 // the encoding block), and they use the
2091 // opcode keyword to specify, in order, their primary, secondary, and
2092 // tertiary opcode. Only the opcode sections which a particular instruction
2093 // needs for encoding need to be specified.
2094 encode %{
// Debug aid: marks an instruction whose encoding has never been exercised.
2095 enc_class enc_untested %{
2096 #ifdef ASSERT
2097 MacroAssembler _masm(&cbuf);
2098 __ untested("encoding");
2099 #endif
2100 %}
// Memory access with a full memory operand; $tertiary selects a sub-opcode.
2102 enc_class form3_mem_reg( memory mem, iRegI dst ) %{
2103 emit_form3_mem_reg(cbuf, this, $primary, $tertiary,
2104 $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
2105 %}
// Same as form3_mem_reg but with no tertiary sub-opcode (-1).
2107 enc_class simple_form3_mem_reg( memory mem, iRegI dst ) %{
2108 emit_form3_mem_reg(cbuf, this, $primary, -1,
2109 $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
2110 %}
// Prefetch encoding: function code 0 selects the many-reads variant.
2112 enc_class form3_mem_prefetch_read( memory mem ) %{
2113 emit_form3_mem_reg(cbuf, this, $primary, -1,
2114 $mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/);
2115 %}
// Prefetch encoding: function code 2 selects the many-writes variant.
2117 enc_class form3_mem_prefetch_write( memory mem ) %{
2118 emit_form3_mem_reg(cbuf, this, $primary, -1,
2119 $mem$$base, $mem$$disp, $mem$$index, 2/*prefetch function many-writes*/);
2120 %}
// Load a 64-bit long from an unaligned address as two 32-bit halves, then
// marshal into one register: low half goes to O7, high half is shifted
// left 32 (SLLX, immediate form 0x1020 = imm-bit | shift 32) and OR'd in.
2122 enc_class form3_mem_reg_long_unaligned_marshal( memory mem, iRegL reg ) %{
2123 assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4");
2124 assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4");
2125 guarantee($mem$$index == R_G0_enc, "double index?");
2126 emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, R_O7_enc );
2127 emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg );
2128 emit3_simm13( cbuf, Assembler::arith_op, $reg$$reg, Assembler::sllx_op3, $reg$$reg, 0x1020 );
2129 emit3( cbuf, Assembler::arith_op, $reg$$reg, Assembler::or_op3, $reg$$reg, 0, R_O7_enc );
2130 %}
// Access an unaligned double as two adjacent single-precision registers.
2132 enc_class form3_mem_reg_double_unaligned( memory mem, RegD_low reg ) %{
2133 assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4");
2134 assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4");
2135 guarantee($mem$$index == R_G0_enc, "double index?");
2136 // Load long with 2 instructions
2137 emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg+0 );
2138 emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, $reg$$reg+1 );
2139 %}
2141 //%%% form3_mem_plus_4_reg is a hack--get rid of it
// Accesses memory at disp+4; requires a non-zero displacement so the +4
// cannot silently turn a reg-reg form into an immediate form.
2142 enc_class form3_mem_plus_4_reg( memory mem, iRegI dst ) %{
2143 guarantee($mem$$disp, "cannot offset a reg-reg operand by 4");
2144 emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp + 4, $mem$$index, $dst$$reg);
2145 %}
// Reg-reg copy via OR rd, %g0, rs2; elided entirely when src == dst.
2147 enc_class form3_g0_rs2_rd_move( iRegI rs2, iRegI rd ) %{
2148 // Encode a reg-reg copy. If it is useless, then empty encoding.
2149 if( $rs2$$reg != $rd$$reg )
2150 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, $rs2$$reg );
2151 %}
2153 // Target lo half of long
2154 enc_class form3_g0_rs2_rd_move_lo( iRegI rs2, iRegL rd ) %{
2155 // Encode a reg-reg copy. If it is useless, then empty encoding.
2156 if( $rs2$$reg != LONG_LO_REG($rd$$reg) )
2157 emit3( cbuf, Assembler::arith_op, LONG_LO_REG($rd$$reg), Assembler::or_op3, 0, 0, $rs2$$reg );
2158 %}
2160 // Source lo half of long
2161 enc_class form3_g0_rs2_rd_move_lo2( iRegL rs2, iRegI rd ) %{
2162 // Encode a reg-reg copy. If it is useless, then empty encoding.
2163 if( LONG_LO_REG($rs2$$reg) != $rd$$reg )
2164 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_LO_REG($rs2$$reg) );
2165 %}
2167 // Target hi half of long
// SRA by 31 replicates the sign bit into the high half.
2168 enc_class form3_rs1_rd_copysign_hi( iRegI rs1, iRegL rd ) %{
2169 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 31 );
2170 %}
2172 // Source lo half of long, and leave it sign extended.
2173 enc_class form3_rs1_rd_signextend_lo1( iRegL rs1, iRegI rd ) %{
2174 // Sign extend low half
2175 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 0, 0 );
2176 %}
2178 // Source hi half of long, and leave it sign extended.
2179 enc_class form3_rs1_rd_copy_hi1( iRegL rs1, iRegI rd ) %{
2180 // Shift high half to low half
2181 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::srlx_op3, $rs1$$reg, 32 );
2182 %}
2184 // Source hi half of long
2185 enc_class form3_g0_rs2_rd_move_hi2( iRegL rs2, iRegI rd ) %{
2186 // Encode a reg-reg copy. If it is useless, then empty encoding.
2187 if( LONG_HI_REG($rs2$$reg) != $rd$$reg )
2188 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_HI_REG($rs2$$reg) );
2189 %}
// Generic 3-register arithmetic form; $primary is op3, $secondary is op.
2191 enc_class form3_rs1_rs2_rd( iRegI rs1, iRegI rs2, iRegI rd ) %{
2192 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0, $rs2$$reg );
2193 %}
// Convert to boolean: SUBcc %g0,src sets carry iff src != 0, then ADDC
// folds the carry into dst (presumably yielding dst = src != 0 — confirm
// against the instruct rule that uses this).
2195 enc_class enc_to_bool( iRegI src, iRegI dst ) %{
2196 emit3 ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, 0, 0, $src$$reg );
2197 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::addc_op3 , 0, 0 );
2198 %}
// Less-than mask: compare p and q, default dst to 0, then an annulled
// branch-if-less writes -1 into dst from its delay slot.
2200 enc_class enc_ltmask( iRegI p, iRegI q, iRegI dst ) %{
2201 emit3 ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $p$$reg, 0, $q$$reg );
2202 // clear if nothing else is happening
2203 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 0 );
2204 // blt,a,pn done
2205 emit2_19 ( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less, Assembler::bp_op2, Assembler::icc, 0/*predict not taken*/, 2 );
2206 // mov dst,-1 in delay slot
2207 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 );
2208 %}
// Shift by a 5-bit immediate (32-bit shifts).
2210 enc_class form3_rs1_imm5_rd( iRegI rs1, immU5 imm5, iRegI rd ) %{
2211 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $imm5$$constant & 0x1F );
2212 %}
// Shift by a 6-bit immediate (64-bit shifts; 0x1000 selects the X form).
2214 enc_class form3_sd_rs1_imm6_rd( iRegL rs1, immU6 imm6, iRegL rd ) %{
2215 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, ($imm6$$constant & 0x3F) | 0x1000 );
2216 %}
// 64-bit shift by register (0x80 selects the X form of the shift).
2218 enc_class form3_sd_rs1_rs2_rd( iRegL rs1, iRegI rs2, iRegL rd ) %{
2219 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0x80, $rs2$$reg );
2220 %}
// Arithmetic with a 13-bit signed immediate.
2222 enc_class form3_rs1_simm13_rd( iRegI rs1, immI13 simm13, iRegI rd ) %{
2223 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $simm13$$constant );
2224 %}
// O1 = O7 + pc_return_offset: materialize the return PC into O1.
2226 enc_class move_return_pc_to_o1() %{
2227 emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::add_op3, R_O7_enc, frame::pc_return_offset );
2228 %}
2230 #ifdef _LP64
2231 /* %%% merge with enc_to_bool */
// Pointer-to-boolean: MOVR rc_nz writes 1 into dst when src is non-zero.
// NOTE(review): dst keeps its prior value when src is zero — presumably the
// instruct rule zeroes dst first; confirm against the rule using this class.
2232 enc_class enc_convP2B( iRegI dst, iRegP src ) %{
2233 MacroAssembler _masm(&cbuf);
2235 Register src_reg = reg_to_register_object($src$$reg);
2236 Register dst_reg = reg_to_register_object($dst$$reg);
2237 __ movr(Assembler::rc_nz, src_reg, 1, dst_reg);
2238 %}
2239 #endif
// Conditional add of y: p = p - q, then p += y only when (p - q) < 0,
// using a MOVcc on the icc flags set by SUBcc.
2241 enc_class enc_cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp ) %{
2242 // (Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)))
2243 MacroAssembler _masm(&cbuf);
2245 Register p_reg = reg_to_register_object($p$$reg);
2246 Register q_reg = reg_to_register_object($q$$reg);
2247 Register y_reg = reg_to_register_object($y$$reg);
2248 Register tmp_reg = reg_to_register_object($tmp$$reg);
2250 __ subcc( p_reg, q_reg, p_reg );
2251 __ add ( p_reg, y_reg, tmp_reg );
2252 __ movcc( Assembler::less, false, Assembler::icc, tmp_reg, p_reg );
2253 %}
// double -> int conversion with NaN handling: when src is NaN the
// not-NaN branch falls through and dst is cleared via fsubs dst,dst.
2255 enc_class form_d2i_helper(regD src, regF dst) %{
2256 // fcmp %fcc0,$src,$src
2257 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
2258 // branch %fcc0 not-nan, predict taken
2259 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
2260 // fdtoi $src,$dst
2261 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtoi_opf, $src$$reg );
2262 // fitos $dst,$dst (if nan)
2263 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg );
2264 // clear $dst (if nan)
2265 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg );
2266 // carry on here...
2267 %}
// double -> long conversion; same NaN-clearing pattern as form_d2i_helper
// but using the 64-bit fdtox/fxtod/fsubd forms.
2269 enc_class form_d2l_helper(regD src, regD dst) %{
2270 // fcmp %fcc0,$src,$src check for NAN
2271 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
2272 // branch %fcc0 not-nan, predict taken
2273 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
2274 // fdtox $src,$dst convert in delay slot
2275 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtox_opf, $src$$reg );
2276 // fxtod $dst,$dst (if nan)
2277 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg );
2278 // clear $dst (if nan)
2279 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg );
2280 // carry on here...
2281 %}
2283 enc_class form_f2i_helper(regF src, regF dst) %{
2284 // fcmps %fcc0,$src,$src
2285 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg );
2286 // branch %fcc0 not-nan, predict taken
2287 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
2288 // fstoi $src,$dst
2289 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstoi_opf, $src$$reg );
2290 // fitos $dst,$dst (if nan)
2291 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg );
2292 // clear $dst (if nan)
2293 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg );
2294 // carry on here...
2295 %}
2297 enc_class form_f2l_helper(regF src, regD dst) %{
2298 // fcmps %fcc0,$src,$src
2299 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg );
2300 // branch %fcc0 not-nan, predict taken
2301 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
2302 // fstox $src,$dst
2303 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstox_opf, $src$$reg );
2304 // fxtod $dst,$dst (if nan)
2305 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg );
2306 // clear $dst (if nan)
2307 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg );
2308 // carry on here...
2309 %}
// FP move/convert encodings: rd <- OP(rs2), format-3 "opf" instructions.
// emit3 args are (op, rd, op3, rs1, opf, rs2): $secondary is the major
// opcode, $primary the op3, $tertiary the opf; rs1 is unused (0).
// The "+1" in the _lo variants addresses the odd (low) register of a
// double-precision register pair.
enc_class form3_opf_rs2F_rdF(regF rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
enc_class form3_opf_rs2F_rdD(regF rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
enc_class form3_opf_rs2D_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
enc_class form3_opf_rs2D_rdD(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}

enc_class form3_opf_rs2D_lo_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg+1); %}

enc_class form3_opf_rs2D_hi_rdD_hi(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
enc_class form3_opf_rs2D_lo_rdD_lo(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg+1,$primary,0,$tertiary,$rs2$$reg+1); %}

// Two-source FP arithmetic: rd = rs1 OP rs2 (single precision).
enc_class form3_opf_rs1F_rs2F_rdF( regF rs1, regF rs2, regF rd ) %{
    emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
%}

// Two-source FP arithmetic: rd = rs1 OP rs2 (double precision).
enc_class form3_opf_rs1D_rs2D_rdD( regD rs1, regD rs2, regD rd ) %{
    emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
%}

// FP compare (single): result goes to the selected %fccN, encoded in the
// rd field position.
enc_class form3_opf_rs1F_rs2F_fcc( regF rs1, regF rs2, flagsRegF fcc ) %{
    emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
%}

// FP compare (double): as above for double-precision operands.
enc_class form3_opf_rs1D_rs2D_fcc( regD rs1, regD rs2, flagsRegF fcc ) %{
    emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
%}

// Int -> FP convert: fpop1 with the specific convert opf in $secondary.
enc_class form3_convI2F(regF rs2, regF rd) %{
    emit3(cbuf,Assembler::arith_op,$rd$$reg,Assembler::fpop1_op3,0,$secondary,$rs2$$reg);
%}
// Encoding class for traceable jumps
enc_class form_jmpl(g3RegP dest) %{
    emit_jmpl(cbuf, $dest$$reg);
%}

// Jump via dest while also capturing the exception PC (helper does both).
enc_class form_jmpl_set_exception_pc(g1RegP dest) %{
    emit_jmpl_set_exception_pc(cbuf, $dest$$reg);
%}

// Emit a single NOP instruction.
enc_class form2_nop() %{
    emit_nop(cbuf);
%}

// Emit an ILLTRAP: executing it raises an illegal-instruction trap
// (used to poison code that must never be reached).
enc_class form2_illtrap() %{
    emit_illtrap(cbuf);
%}
// Compare longs and convert into -1, 0, 1.
// Uses two annulled, predicted-not-taken branches over the xcc result of
// subcc: the "mov -1" and "mov 1" delay-slot instructions only execute when
// their branch is taken (annul bit set); if neither fires, fall through to
// CLR which produces 0.
enc_class cmpl_flag( iRegL src1, iRegL src2, iRegI dst ) %{
    // CMP  $src1,$src2
    emit3( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $src1$$reg, 0, $src2$$reg );
    // blt,a,pn done
    emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less   , Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 5 );
    // mov  dst,-1 in delay slot
    emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 );
    // bgt,a,pn done
    emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::greater, Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 3 );
    // mov  dst,1 in delay slot
    emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 1 );
    // CLR  $dst
    emit3( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3 , 0, 0, 0 );
%}
// Runtime call to the shared partial-subtype-check stub; the delay slot
// of the call is filled with a nop.
enc_class enc_PartialSubtypeCheck() %{
    MacroAssembler _masm(&cbuf);
    __ call(StubRoutines::Sparc::partial_subtype_check(), relocInfo::runtime_call_type);
    __ delayed()->nop();
%}
// Conditional branch on integer condition codes (icc). Backward branches
// (loops) are statically predicted taken, forward branches not taken.
enc_class enc_bp( label labl, cmpOp cmp, flagsReg cc ) %{
    MacroAssembler _masm(&cbuf);
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;

    __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
    __ delayed()->nop();
%}

// Branch on register contents (BPr) against zero, same backward/forward
// prediction heuristic as enc_bp.
enc_class enc_bpr( label labl, cmpOp_reg cmp, iRegI op1 ) %{
    MacroAssembler _masm(&cbuf);
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;

    __ bpr( (Assembler::RCondition)($cmp$$cmpcode), false, predict_taken, as_Register($op1$$reg), *L);
    __ delayed()->nop();
%}
// Conditional move, register source, integer condition codes:
// dst = src if cmp holds on icc/xcc (pcc selects which). The MOVcc word is
// assembled by hand; each line comments the field it fills.
enc_class enc_cmov_reg( cmpOp cmp, iRegI dst, iRegI src, immI pcc) %{
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::movcc_op3 << 19) |
             (1 << 18) |                    // cc2 bit for 'icc'
             ($cmp$$cmpcode << 14) |
             (0 << 13) |                    // select register move
             ($pcc$$constant << 11) |       // cc1, cc0 bits for 'icc' or 'xcc'
             ($src$$reg << 0);
    cbuf.insts()->emit_int32(op);
%}

// Conditional move, 11-bit signed immediate source, integer condition codes.
enc_class enc_cmov_imm( cmpOp cmp, iRegI dst, immI11 src, immI pcc ) %{
    int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::movcc_op3 << 19) |
             (1 << 18) |                    // cc2 bit for 'icc'
             ($cmp$$cmpcode << 14) |
             (1 << 13) |                    // select immediate move
             ($pcc$$constant << 11) |       // cc1, cc0 bits for 'icc'
             (simm11 << 0);
    cbuf.insts()->emit_int32(op);
%}

// Conditional move of an integer register based on a float condition code
// register (fcc0-fcc3 selected via the fcc field).
enc_class enc_cmov_reg_f( cmpOpF cmp, iRegI dst, iRegI src, flagsRegF fcc ) %{
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::movcc_op3 << 19) |
             (0 << 18) |                    // cc2 bit for 'fccX'
             ($cmp$$cmpcode << 14) |
             (0 << 13) |                    // select register move
             ($fcc$$reg << 11) |            // cc1, cc0 bits for fcc0-fcc3
             ($src$$reg << 0);
    cbuf.insts()->emit_int32(op);
%}

// Immediate form of enc_cmov_reg_f.
// NOTE(review): the cmp operand is declared cmpOp (int) rather than cmpOpF
// as in enc_cmov_reg_f; presumably the instructs using this pass a float
// condition whose cmpcode matches -- confirm at the use sites.
enc_class enc_cmov_imm_f( cmpOp cmp, iRegI dst, immI11 src, flagsRegF fcc ) %{
    int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::movcc_op3 << 19) |
             (0 << 18) |                    // cc2 bit for 'fccX'
             ($cmp$$cmpcode << 14) |
             (1 << 13) |                    // select immediate move
             ($fcc$$reg << 11) |            // cc1, cc0 bits for fcc0-fcc3
             (simm11 << 0);
    cbuf.insts()->emit_int32(op);
%}
// Conditional move of an FP register on integer condition codes (FMOVcc).
// $primary (bits 6:5) selects single/double/quad precision.
enc_class enc_cmovf_reg( cmpOp cmp, regD dst, regD src, immI pcc ) %{
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::fpop2_op3 << 19) |
             (0 << 18) |
             ($cmp$$cmpcode << 14) |
             (1 << 13) |                    // select register move
             ($pcc$$constant << 11) |       // cc1-cc0 bits for 'icc' or 'xcc'
             ($primary << 5) |              // select single, double or quad
             ($src$$reg << 0);
    cbuf.insts()->emit_int32(op);
%}

// Conditional move of an FP register on a float condition code register.
enc_class enc_cmovff_reg( cmpOpF cmp, flagsRegF fcc, regD dst, regD src ) %{
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::fpop2_op3 << 19) |
             (0 << 18) |
             ($cmp$$cmpcode << 14) |
             ($fcc$$reg << 11) |            // cc2-cc0 bits for 'fccX'
             ($primary << 5) |              // select single, double or quad
             ($src$$reg << 0);
    cbuf.insts()->emit_int32(op);
%}
// Used by the MIN/MAX encodings.  Same as a CMOV, but
// the condition comes from opcode-field instead of an argument.
enc_class enc_cmov_reg_minmax( iRegI dst, iRegI src ) %{
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::movcc_op3 << 19) |
             (1 << 18) |                    // cc2 bit for 'icc'
             ($primary << 14) |             // condition supplied by the instruct
             (0 << 13) |                    // select register move
             (0 << 11) |                    // cc1, cc0 bits for 'icc'
             ($src$$reg << 0);
    cbuf.insts()->emit_int32(op);
%}

// 64-bit MIN/MAX conditional move, comparing on xcc.
// NOTE(review): (6 << 16) sets bits 18 and 17; together with $primary << 14
// this forms the cc-selection/condition encoding for xcc -- confirm against
// the SPARC V9 MOVcc field layout before touching.
enc_class enc_cmov_reg_minmax_long( iRegL dst, iRegL src ) %{
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::movcc_op3 << 19) |
             (6 << 16) |                    // cc2 bit for 'xcc'
             ($primary << 14) |             // condition supplied by the instruct
             (0 << 13) |                    // select register move
             (0 << 11) |                    // cc1, cc0 bits for 'icc'
             ($src$$reg << 0);
    cbuf.insts()->emit_int32(op);
%}
// Load a 13-bit signed immediate: rd = %g0 | simm13 (OR with register 0).
enc_class Set13( immI13 src, iRegI rd ) %{
    emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, $src$$constant );
%}

// Load the high 22 bits of a constant with SETHI; low 10 bits are zeroed.
enc_class SetHi22( immI src, iRegI rd ) %{
    emit2_22( cbuf, Assembler::branch_op, $rd$$reg, Assembler::sethi_op2, $src$$constant );
%}

// Load an arbitrary 32-bit constant; MacroAssembler::set picks the shortest
// sequence (or / sethi / sethi+or).
enc_class Set32( immI src, iRegI rd ) %{
    MacroAssembler _masm(&cbuf);
    __ set($src$$constant, reg_to_register_object($rd$$reg));
%}
// After a call, optionally verify the frame is intact: SP + framesize must
// still equal FP; trap into the debugger (breakpoint_trap) if not.
// Only emitted under -XX:+VerifyStackAtCalls.
enc_class call_epilog %{
    if( VerifyStackAtCalls ) {
      MacroAssembler _masm(&cbuf);
      int framesize = ra_->C->frame_slots() << LogBytesPerInt;
      Register temp_reg = G3;       // scratch; G3 is caller-killed here
      __ add(SP, framesize, temp_reg);
      __ cmp(temp_reg, FP);
      __ breakpoint_trap(Assembler::notEqual, Assembler::ptr_cc);
    }
%}
// Long values come back from native calls in O0:O1 in the 32-bit VM, copy the value
// to G1 so the register allocator will not have to deal with the misaligned register
// pair.
enc_class adjust_long_from_native_call %{
#ifndef _LP64
    if (returns_long()) {
      //    sllx  O0,32,O0   -- shift the high word into the upper half
      //    (0x1020 = 64-bit-shift flag 0x1000 | shift count 32)
      emit3_simm13( cbuf, Assembler::arith_op, R_O0_enc, Assembler::sllx_op3, R_O0_enc, 0x1020 );
      //    srl   O1,0,O1    -- zero-extend the low word
      emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::srl_op3, R_O1_enc, 0x0000 );
      //    or    O0,O1,G1   -- merge both halves into the aligned G1
      emit3       ( cbuf, Assembler::arith_op, R_G1_enc, Assembler::  or_op3, R_O0_enc, 0, R_O1_enc );
    }
#endif
%}
enc_class Java_To_Runtime (method meth) %{    // CALL Java_To_Runtime
    // CALL directly to the runtime
    // The user of this is responsible for ensuring that R_L7 is empty (killed).
    emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type,
                    /*preserve_g2=*/true);
%}

// Save SP into L7_mh_SP_save around a call; paired with restore_SP below.
// NOTE(review): by its name this supports method-handle calls that may
// shuffle the stack -- confirm against the instructs that use it.
enc_class preserve_SP %{
    MacroAssembler _masm(&cbuf);
    __ mov(SP, L7_mh_SP_save);
%}

// Restore SP from L7_mh_SP_save; inverse of preserve_SP.
enc_class restore_SP %{
    MacroAssembler _masm(&cbuf);
    __ mov(L7_mh_SP_save, SP);
%}
enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    // Relocation type encodes how the call will later be patched:
    //   no _method       -> plain runtime call;
    //   optimized virtual-> opt_virtual_call reloc;
    //   otherwise        -> static_call reloc.
    if ( !_method ) {
      emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type);
    } else if (_optimized_virtual) {
      emit_call_reloc(cbuf, $meth$$method, relocInfo::opt_virtual_call_type);
    } else {
      emit_call_reloc(cbuf, $meth$$method, relocInfo::static_call_type);
    }
    if( _method ) {  // Emit stub for static call
      // The java-to-interpreter stub lets the call fall back to the
      // interpreter before the callee is compiled.
      emit_java_to_interp(cbuf);
    }
%}
// JAVA DYNAMIC CALL: either an inline-cache call (vtable_index < 0) or a
// direct vtable dispatch.  The exact instruction count emitted here must
// stay in sync with MachCallDynamicJavaNode::ret_addr_offset (asserted
// below via klass_load_size).
enc_class Java_Dynamic_Call (method meth) %{    // JAVA DYNAMIC CALL
    MacroAssembler _masm(&cbuf);
    __ set_inst_mark();
    int vtable_index = this->_vtable_index;
    // MachCallDynamicJavaNode::ret_addr_offset uses this same test
    if (vtable_index < 0) {
      // must be invalid_vtable_index, not nonvirtual_vtable_index
      assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
      Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
      assert(G5_ic_reg == G5_inline_cache_reg, "G5_inline_cache_reg used in assemble_ic_buffer_code()");
      assert(G5_ic_reg == G5_megamorphic_method, "G5_megamorphic_method used in megamorphic call stub");
      __ ic_call((address)$meth$$method);
    } else {
      assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
      // Just go thru the vtable
      // get receiver klass (receiver already checked for non-null)
      // If we end up going thru a c2i adapter interpreter expects method in G5
      int off = __ offset();
      __ load_klass(O0, G3_scratch);
      // klass_load_size depends on how load_klass decodes a compressed
      // klass pointer (2 or 3 instructions with compressed oops, 1 plain
      // load otherwise); it is used below to verify the emitted size.
      int klass_load_size;
      if (UseCompressedOops && UseCompressedKlassPointers) {
        assert(Universe::heap() != NULL, "java heap should be initialized");
        if (Universe::narrow_oop_base() == NULL)
          klass_load_size = 2*BytesPerInstWord;
        else
          klass_load_size = 3*BytesPerInstWord;
      } else {
        klass_load_size = 1*BytesPerInstWord;
      }
      int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
      int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
      if (Assembler::is_simm13(v_off)) {
        __ ld_ptr(G3, v_off, G5_method);
      } else {
        // Generate 2 instructions
        __ Assembler::sethi(v_off & ~0x3ff, G5_method);
        __ or3(G5_method, v_off & 0x3ff, G5_method);
        // ld_ptr, set_hi, set
        assert(__ offset() - off == klass_load_size + 2*BytesPerInstWord,
               "Unexpected instruction size(s)");
        __ ld_ptr(G3, G5_method, G5_method);
      }
      // NOTE: for vtable dispatches, the vtable entry will never be null.
      // However it may very well end up in handle_wrong_method if the
      // method is abstract for the particular class.
      __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);
      // jump to target (either compiled code or c2iadapter)
      __ jmpl(G3_scratch, G0, O7);
      __ delayed()->nop();
    }
%}
// JAVA COMPILED CALL: indirect call through the Method*'s from_compiled
// entry (compiled code or a c2i adapter), held in the inline-cache register.
enc_class Java_Compiled_Call (method meth) %{    // JAVA COMPILED CALL
    MacroAssembler _masm(&cbuf);

    Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
    Register temp_reg = G3;   // caller must kill G3!  We cannot reuse G5_ic_reg here because
                              // we might be calling a C2I adapter which needs it.

    assert(temp_reg != G5_ic_reg, "conflicting registers");
    // Load nmethod
    __ ld_ptr(G5_ic_reg, in_bytes(Method::from_compiled_offset()), temp_reg);

    // CALL to compiled java, indirect the contents of G3
    __ set_inst_mark();
    __ callr(temp_reg, G0);
    __ delayed()->nop();
%}
// 32-bit signed divide via 64-bit sdivx: sign-extend both operands in place
// (sra with shift 0), then divide.  Clobbers the inputs, hence iRegIsafe.
enc_class idiv_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst) %{
    MacroAssembler _masm(&cbuf);
    Register Rdividend = reg_to_register_object($src1$$reg);
    Register Rdivisor = reg_to_register_object($src2$$reg);
    Register Rresult = reg_to_register_object($dst$$reg);

    __ sra(Rdivisor, 0, Rdivisor);
    __ sra(Rdividend, 0, Rdividend);
    __ sdivx(Rdividend, Rdivisor, Rresult);
%}

// 32-bit signed divide by a 13-bit immediate; dividend sign-extended first.
enc_class idiv_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst) %{
    MacroAssembler _masm(&cbuf);

    Register Rdividend = reg_to_register_object($src1$$reg);
    int divisor = $imm$$constant;
    Register Rresult = reg_to_register_object($dst$$reg);

    __ sra(Rdividend, 0, Rdividend);
    __ sdivx(Rdividend, divisor, Rresult);
%}

// High 32 bits of a 32x32 signed multiply: sign-extend, 64-bit mulx,
// then shift the product right by 32.  Clobbers the inputs (iRegIsafe).
enc_class enc_mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2) %{
    MacroAssembler _masm(&cbuf);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    Register Rsrc2 = reg_to_register_object($src2$$reg);
    Register Rdst = reg_to_register_object($dst$$reg);

    __ sra( Rsrc1, 0, Rsrc1 );
    __ sra( Rsrc2, 0, Rsrc2 );
    __ mulx( Rsrc1, Rsrc2, Rdst );
    __ srlx( Rdst, 32, Rdst );
%}
// 32-bit signed remainder: rem = dividend - (dividend / divisor) * divisor,
// computed with sdivx/mulx after sign-extending the operands.  The scratch
// register holds the intermediate quotient/product.
enc_class irem_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst, o7RegL scratch) %{
    MacroAssembler _masm(&cbuf);
    Register Rdividend = reg_to_register_object($src1$$reg);
    Register Rdivisor = reg_to_register_object($src2$$reg);
    Register Rresult = reg_to_register_object($dst$$reg);
    Register Rscratch = reg_to_register_object($scratch$$reg);

    assert(Rdividend != Rscratch, "");
    assert(Rdivisor  != Rscratch, "");

    __ sra(Rdividend, 0, Rdividend);
    __ sra(Rdivisor, 0, Rdivisor);
    __ sdivx(Rdividend, Rdivisor, Rscratch);
    __ mulx(Rscratch, Rdivisor, Rscratch);
    __ sub(Rdividend, Rscratch, Rresult);
%}

// Immediate-divisor variant of irem_reg.
enc_class irem_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst, o7RegL scratch) %{
    MacroAssembler _masm(&cbuf);

    Register Rdividend = reg_to_register_object($src1$$reg);
    int divisor = $imm$$constant;
    Register Rresult = reg_to_register_object($dst$$reg);
    Register Rscratch = reg_to_register_object($scratch$$reg);

    assert(Rdividend != Rscratch, "");

    __ sra(Rdividend, 0, Rdividend);
    __ sdivx(Rdividend, divisor, Rscratch);
    __ mulx(Rscratch, divisor, Rscratch);
    __ sub(Rdividend, Rscratch, Rresult);
%}
// dst = |src| (single precision).
enc_class fabss (sflt_reg dst, sflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);

    __ fabs(FloatRegisterImpl::S, Fsrc, Fdst);
%}

// dst = |src| (double precision).
enc_class fabsd (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

    __ fabs(FloatRegisterImpl::D, Fsrc, Fdst);
%}

// dst = -src (double precision).
enc_class fnegd (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

    __ fneg(FloatRegisterImpl::D, Fsrc, Fdst);
%}

// dst = sqrt(src) (single precision).
enc_class fsqrts (sflt_reg dst, sflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);

    __ fsqrt(FloatRegisterImpl::S, Fsrc, Fdst);
%}

// dst = sqrt(src) (double precision).
enc_class fsqrtd (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

    __ fsqrt(FloatRegisterImpl::D, Fsrc, Fdst);
%}

// Single-precision FP register move (register encodings are passed as
// single-float registers despite the dflt_reg operand declarations).
enc_class fmovs (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);

    __ fmov(FloatRegisterImpl::S, Fsrc, Fdst);
%}

// Double-precision FP register move.
enc_class fmovd (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

    __ fmov(FloatRegisterImpl::D, Fsrc, Fdst);
%}
// Inline fast-path monitor enter: delegates to
// MacroAssembler::compiler_lock_object (biased locking honored unless
// C2's own bias inlining is enabled).  scratch2 carries the mark word.
enc_class Fast_Lock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
    MacroAssembler _masm(&cbuf);

    Register Roop  = reg_to_register_object($oop$$reg);
    Register Rbox  = reg_to_register_object($box$$reg);
    Register Rscratch = reg_to_register_object($scratch$$reg);
    Register Rmark =    reg_to_register_object($scratch2$$reg);

    assert(Roop  != Rscratch, "");
    assert(Roop  != Rmark, "");
    assert(Rbox  != Rscratch, "");
    assert(Rbox  != Rmark, "");

    __ compiler_lock_object(Roop, Rmark, Rbox, Rscratch, _counters, UseBiasedLocking && !UseOptoBiasInlining);
%}

// Inline fast-path monitor exit; mirror of Fast_Lock.
enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
    MacroAssembler _masm(&cbuf);

    Register Roop  = reg_to_register_object($oop$$reg);
    Register Rbox  = reg_to_register_object($box$$reg);
    Register Rscratch = reg_to_register_object($scratch$$reg);
    Register Rmark =    reg_to_register_object($scratch2$$reg);

    assert(Roop  != Rscratch, "");
    assert(Roop  != Rmark, "");
    assert(Rbox  != Rscratch, "");
    assert(Rbox  != Rmark, "");

    __ compiler_unlock_object(Roop, Rmark, Rbox, Rscratch, UseBiasedLocking && !UseOptoBiasInlining);
%}
// Pointer compare-and-swap; leaves the condition codes set so a following
// conditional can test success (cmp of expected vs. resulting value).
enc_class enc_cas( iRegP mem, iRegP old, iRegP new ) %{
    MacroAssembler _masm(&cbuf);
    Register Rmem = reg_to_register_object($mem$$reg);
    Register Rold = reg_to_register_object($old$$reg);
    Register Rnew = reg_to_register_object($new$$reg);

    // casn picks the pointer-sized encoding:
    //   For 32-bit pointers you get a 32-bit CAS
    //   For 64-bit pointers you get a 64-bit CASX
    __ casn(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
    __ cmp( Rold, Rnew );
%}

// 64-bit compare-and-swap.  Rnew is copied to O7 first because CASX writes
// the memory's old value back into its swap register, and callers still
// need Rnew intact.
enc_class enc_casx( iRegP mem, iRegL old, iRegL new) %{
    Register Rmem = reg_to_register_object($mem$$reg);
    Register Rold = reg_to_register_object($old$$reg);
    Register Rnew = reg_to_register_object($new$$reg);

    MacroAssembler _masm(&cbuf);
    __ mov(Rnew, O7);
    __ casx(Rmem, Rold, O7);
    __ cmp( Rold, O7 );
%}

// raw int cas, used for compareAndSwap; same O7 shuffle as enc_casx.
enc_class enc_casi( iRegP mem, iRegL old, iRegL new) %{
    Register Rmem = reg_to_register_object($mem$$reg);
    Register Rold = reg_to_register_object($old$$reg);
    Register Rnew = reg_to_register_object($new$$reg);

    MacroAssembler _masm(&cbuf);
    __ mov(Rnew, O7);
    __ cas(Rmem, Rold, O7);
    __ cmp( Rold, O7 );
%}
// res = (xcc != equal) ? 0 : 1 -- set res to 1, then conditionally
// overwrite with %g0 (0) when the long compare was not-equal.
enc_class enc_lflags_ne_to_boolean( iRegI res ) %{
    Register Rres = reg_to_register_object($res$$reg);

    MacroAssembler _masm(&cbuf);
    __ mov(1, Rres);
    __ movcc( Assembler::notEqual, false, Assembler::xcc, G0, Rres );
%}

// Same as above for 32-bit (icc) flags.
enc_class enc_iflags_ne_to_boolean( iRegI res ) %{
    Register Rres = reg_to_register_object($res$$reg);

    MacroAssembler _masm(&cbuf);
    __ mov(1, Rres);
    __ movcc( Assembler::notEqual, false, Assembler::icc, G0, Rres );
%}
// Three-way FP compare producing -1/0/1 in an integer register.
// $primary selects single (non-zero) vs. double precision operands.
enc_class floating_cmp ( iRegP dst, regF src1, regF src2 ) %{
    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    FloatRegister Fsrc1 = $primary ? reg_to_SingleFloatRegister_object($src1$$reg)
                                   : reg_to_DoubleFloatRegister_object($src1$$reg);
    FloatRegister Fsrc2 = $primary ? reg_to_SingleFloatRegister_object($src2$$reg)
                                   : reg_to_DoubleFloatRegister_object($src2$$reg);

    // Convert condition code fcc0 into -1,0,1; unordered reports less-than (-1)
    __ float_cmp( $primary, -1, Fsrc1, Fsrc2, Rdst);
%}
// Lexicographic compare of two char[] strings: result < 0, == 0, or > 0.
// O7 caches the (unscaled) length difference and serves as the tie-breaker
// when one string is a prefix of the other.  The main loop walks both
// strings with a negative index (limit_reg) so a single inccc drives both
// the advance and the end-of-string test.
enc_class enc_String_Compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result) %{
    Label Ldone, Lloop;
    MacroAssembler _masm(&cbuf);

    Register   str1_reg = reg_to_register_object($str1$$reg);
    Register   str2_reg = reg_to_register_object($str2$$reg);
    Register   cnt1_reg = reg_to_register_object($cnt1$$reg);
    Register   cnt2_reg = reg_to_register_object($cnt2$$reg);
    Register result_reg = reg_to_register_object($result$$reg);

    assert(result_reg != str1_reg &&
           result_reg != str2_reg &&
           result_reg != cnt1_reg &&
           result_reg != cnt2_reg ,
           "need different registers");

    // Compute the minimum of the string lengths(str1_reg) and the
    // difference of the string lengths (stack)

    // See if the lengths are different, and calculate min in str1_reg.
    // Stash diff in O7 in case we need it for a tie-breaker.
    Label Lskip;
    __ subcc(cnt1_reg, cnt2_reg, O7);
    __ sll(cnt1_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit
    __ br(Assembler::greater, true, Assembler::pt, Lskip);
    // cnt2 is shorter, so use its count:
    __ delayed()->sll(cnt2_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit
    __ bind(Lskip);

    // reallocate cnt1_reg, cnt2_reg, result_reg
    // Note:  limit_reg holds the string length pre-scaled by 2
    Register limit_reg =   cnt1_reg;
    Register  chr2_reg =   cnt2_reg;
    Register  chr1_reg = result_reg;
    // str{12} are the base pointers

    // Is the minimum length zero?
    __ cmp(limit_reg, (int)(0 * sizeof(jchar))); // use cast to resolve overloading ambiguity
    __ br(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->mov(O7, result_reg);  // result is difference in lengths

    // Load first characters
    __ lduh(str1_reg, 0, chr1_reg);
    __ lduh(str2_reg, 0, chr2_reg);

    // Compare first characters
    __ subcc(chr1_reg, chr2_reg, chr1_reg);
    __ br(Assembler::notZero, false, Assembler::pt,  Ldone);
    assert(chr1_reg == result_reg, "result must be pre-placed");
    __ delayed()->nop();

    {
      // Check after comparing first character to see if strings are equivalent
      Label LSkip2;
      // Check if the strings start at same location
      __ cmp(str1_reg, str2_reg);
      __ brx(Assembler::notEqual, true, Assembler::pt, LSkip2);
      __ delayed()->nop();

      // Check if the length difference is zero (in O7)
      __ cmp(G0, O7);
      __ br(Assembler::equal, true, Assembler::pn, Ldone);
      __ delayed()->mov(G0, result_reg);  // result is zero

      // Strings might not be equal
      __ bind(LSkip2);
    }

    // Only one character left to compare?  Then the result is the length diff.
    __ subcc(limit_reg, 1 * sizeof(jchar), chr1_reg);
    __ br(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->mov(O7, result_reg);  // result is difference in lengths

    // Shift str1_reg and str2_reg to the end of the arrays, negate limit
    __ add(str1_reg, limit_reg, str1_reg);
    __ add(str2_reg, limit_reg, str2_reg);
    __ neg(chr1_reg, limit_reg);  // limit = -(limit-2)

    // Compare the rest of the characters
    __ lduh(str1_reg, limit_reg, chr1_reg);
    __ bind(Lloop);
    // __ lduh(str1_reg, limit_reg, chr1_reg); // hoisted
    __ lduh(str2_reg, limit_reg, chr2_reg);
    __ subcc(chr1_reg, chr2_reg, chr1_reg);
    __ br(Assembler::notZero, false, Assembler::pt, Ldone);
    assert(chr1_reg == result_reg, "result must be pre-placed");
    __ delayed()->inccc(limit_reg, sizeof(jchar));
    // annul LDUH if branch is not taken to prevent access past end of string
    __ br(Assembler::notZero, true, Assembler::pt, Lloop);
    __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted

    // If strings are equal up to min length, return the length difference.
    __ mov(O7, result_reg);

    // Otherwise, return the difference between the first mismatched chars.
    __ bind(Ldone);
%}
// Equality test of two char[] strings of known equal logical length (cnt):
// result = 1 if equal, 0 otherwise.  Fast paths: identical base pointers
// and zero count.  When both pointers are word-aligned the 4-bytes-at-a-time
// helper char_arrays_equals is used; otherwise a char-by-char loop with a
// negative index.
enc_class enc_String_Equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result) %{
    Label Lword_loop, Lpost_word, Lchar, Lchar_loop, Ldone;
    MacroAssembler _masm(&cbuf);

    Register   str1_reg = reg_to_register_object($str1$$reg);
    Register   str2_reg = reg_to_register_object($str2$$reg);
    Register    cnt_reg = reg_to_register_object($cnt$$reg);
    Register   tmp1_reg = O7;
    Register result_reg = reg_to_register_object($result$$reg);

    assert(result_reg != str1_reg &&
           result_reg != str2_reg &&
           result_reg !=  cnt_reg &&
           result_reg != tmp1_reg ,
           "need different registers");

    __ cmp(str1_reg, str2_reg); //same char[] ?
    __ brx(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->add(G0, 1, result_reg);

    __ cmp_zero_and_br(Assembler::zero, cnt_reg, Ldone, true, Assembler::pn);
    __ delayed()->add(G0, 1, result_reg); // count == 0

    //rename registers
    Register limit_reg =    cnt_reg;
    Register  chr1_reg = result_reg;
    Register  chr2_reg =   tmp1_reg;

    //check for alignment and position the pointers to the ends
    __ or3(str1_reg, str2_reg, chr1_reg);
    __ andcc(chr1_reg, 0x3, chr1_reg);
    // notZero means at least one not 4-byte aligned.
    // We could optimize the case when both arrays are not aligned
    // but it is not frequent case and it requires additional checks.
    __ br(Assembler::notZero, false, Assembler::pn, Lchar); // char by char compare
    __ delayed()->sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg); // set byte count

    // Compare char[] arrays aligned to 4 bytes.
    __ char_arrays_equals(str1_reg, str2_reg, limit_reg, result_reg,
                          chr1_reg, chr2_reg, Ldone);
    __ ba(Ldone);
    __ delayed()->add(G0, 1, result_reg);

    // char by char compare
    __ bind(Lchar);
    __ add(str1_reg, limit_reg, str1_reg);
    __ add(str2_reg, limit_reg, str2_reg);
    __ neg(limit_reg); //negate count

    __ lduh(str1_reg, limit_reg, chr1_reg);
    // Lchar_loop
    __ bind(Lchar_loop);
    __ lduh(str2_reg, limit_reg, chr2_reg);
    __ cmp(chr1_reg, chr2_reg);
    __ br(Assembler::notEqual, true, Assembler::pt, Ldone);
    __ delayed()->mov(G0, result_reg); //not equal
    __ inccc(limit_reg, sizeof(jchar));
    // annul LDUH if branch is not taken to prevent access past end of string
    __ br(Assembler::notZero, true, Assembler::pt, Lchar_loop);
    __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted

    __ add(G0, 1, result_reg); //equal

    __ bind(Ldone);
%}
// Equality test of two char[] arrays (length checked here, unlike
// enc_String_Equals): result = 1 if equal, 0 otherwise.  Fast paths:
// identical references, either reference null, differing lengths,
// zero length.  The bulk compare delegates to char_arrays_equals.
enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI result) %{
    Label Lvector, Ldone, Lloop;
    MacroAssembler _masm(&cbuf);

    Register   ary1_reg = reg_to_register_object($ary1$$reg);
    Register   ary2_reg = reg_to_register_object($ary2$$reg);
    Register   tmp1_reg = reg_to_register_object($tmp1$$reg);
    Register   tmp2_reg = O7;
    Register result_reg = reg_to_register_object($result$$reg);

    int length_offset  = arrayOopDesc::length_offset_in_bytes();
    int base_offset    = arrayOopDesc::base_offset_in_bytes(T_CHAR);

    // return true if the same array
    __ cmp(ary1_reg, ary2_reg);
    __ brx(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->add(G0, 1, result_reg); // equal

    __ br_null(ary1_reg, true, Assembler::pn, Ldone);
    __ delayed()->mov(G0, result_reg);    // not equal

    __ br_null(ary2_reg, true, Assembler::pn, Ldone);
    __ delayed()->mov(G0, result_reg);    // not equal

    //load the lengths of arrays
    __ ld(Address(ary1_reg, length_offset), tmp1_reg);
    __ ld(Address(ary2_reg, length_offset), tmp2_reg);

    // return false if the two arrays are not equal length
    __ cmp(tmp1_reg, tmp2_reg);
    __ br(Assembler::notEqual, true, Assembler::pn, Ldone);
    __ delayed()->mov(G0, result_reg);     // not equal

    __ cmp_zero_and_br(Assembler::zero, tmp1_reg, Ldone, true, Assembler::pn);
    __ delayed()->add(G0, 1, result_reg); // zero-length arrays are equal

    // load array addresses
    __ add(ary1_reg, base_offset, ary1_reg);
    __ add(ary2_reg, base_offset, ary2_reg);

    // renaming registers
    Register chr1_reg  = result_reg; // for characters in ary1
    Register chr2_reg  =   tmp2_reg; // for characters in ary2
    Register limit_reg =   tmp1_reg; // length

    // set byte count
    __ sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg);

    // Compare char[] arrays aligned to 4 bytes.
    __ char_arrays_equals(ary1_reg, ary2_reg, limit_reg, result_reg,
                          chr1_reg, chr2_reg, Ldone);
    __ add(G0, 1, result_reg); // equals

    __ bind(Ldone);
%}
// Encoding for rethrowing an exception: tail-jumps to the shared rethrow
// stub. In debug builds it also records the PC of the jump in the global
// last_rethrow, to aid post-mortem debugging.
3089 enc_class enc_rethrow() %{
3090 cbuf.set_insts_mark();
3091 Register temp_reg = G3;
3092 AddressLiteral rethrow_stub(OptoRuntime::rethrow_stub());
// The stub expects the exception oop in I0/O0; the scratch register used
// for the far jump must not clobber it.
3093 assert(temp_reg != reg_to_register_object(R_I0_num), "temp must not break oop_reg");
3094 MacroAssembler _masm(&cbuf);
3095 #ifdef ASSERT
// Debug-only bookkeeping: store the address of the upcoming jump_to
// instruction into last_rethrow, inside a temporary register window.
3096 __ save_frame(0);
3097 AddressLiteral last_rethrow_addrlit(&last_rethrow);
3098 __ sethi(last_rethrow_addrlit, L1);
3099 Address addr(L1, last_rethrow_addrlit.low10());
3100 __ get_pc(L2);
3101 __ inc(L2, 3 * BytesPerInstWord); // skip this & 2 more insns to point at jump_to
3102 __ st_ptr(L2, addr);
3103 __ restore();
3104 #endif
3105 __ JUMP(rethrow_stub, temp_reg, 0); // sethi;jmp
3106 __ delayed()->nop();
3107 %}
// Scheduling filler nops. Each emits one raw 32-bit instruction word that
// occupies a specific functional unit (memory, FP-add, branch) without
// architectural effect, used to pad the pipeline.
3109 enc_class emit_mem_nop() %{
3110 // Generates the instruction LDUXA [o6,g0],#0x82,g0
3111 cbuf.insts()->emit_int32((unsigned int) 0xc0839040);
3112 %}
3114 enc_class emit_fadd_nop() %{
3115 // Generates the instruction FMOVS f31,f31
3116 cbuf.insts()->emit_int32((unsigned int) 0xbfa0003f);
3117 %}
3119 enc_class emit_br_nop() %{
3120 // Generates the instruction BPN,PN .
3121 cbuf.insts()->emit_int32((unsigned int) 0x00400000);
3122 %}
// Memory barrier encodings. The mask bits select which orderings the
// MEMBAR instruction enforces (see SPARC V9 MEMBAR semantics).
3124 enc_class enc_membar_acquire %{
3125 MacroAssembler _masm(&cbuf);
// acquire: no later load/store may pass an earlier load.
3126 __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::LoadLoad) );
3127 %}
3129 enc_class enc_membar_release %{
3130 MacroAssembler _masm(&cbuf);
// release: no earlier load/store may be delayed past a later store.
3131 __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore) );
3132 %}
3134 enc_class enc_membar_volatile %{
3135 MacroAssembler _masm(&cbuf);
// volatile (full fence component): order stores against subsequent loads.
3136 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
3137 %}
3139 %}
3141 //----------FRAME--------------------------------------------------------------
3142 // Definition of frame structure and management information.
3143 //
3144 // S T A C K L A Y O U T Allocators stack-slot number
3145 // | (to get allocators register number
3146 // G Owned by | | v add VMRegImpl::stack0)
3147 // r CALLER | |
3148 // o | +--------+ pad to even-align allocators stack-slot
3149 // w V | pad0 | numbers; owned by CALLER
3150 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3151 // h ^ | in | 5
3152 // | | args | 4 Holes in incoming args owned by SELF
3153 // | | | | 3
3154 // | | +--------+
3155 // V | | old out| Empty on Intel, window on Sparc
3156 // | old |preserve| Must be even aligned.
3157 // | SP-+--------+----> Matcher::_old_SP, 8 (or 16 in LP64)-byte aligned
3158 // | | in | 3 area for Intel ret address
3159 // Owned by |preserve| Empty on Sparc.
3160 // SELF +--------+
3161 // | | pad2 | 2 pad to align old SP
3162 // | +--------+ 1
3163 // | | locks | 0
3164 // | +--------+----> VMRegImpl::stack0, 8 (or 16 in LP64)-byte aligned
3165 // | | pad1 | 11 pad to align new SP
3166 // | +--------+
3167 // | | | 10
3168 // | | spills | 9 spills
3169 // V | | 8 (pad0 slot for callee)
3170 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3171 // ^ | out | 7
3172 // | | args | 6 Holes in outgoing args owned by CALLEE
3173 // Owned by +--------+
3174 // CALLEE | new out| 6 Empty on Intel, window on Sparc
3175 // | new |preserve| Must be even-aligned.
3176 // | SP-+--------+----> Matcher::_new_SP, even aligned
3177 // | | |
3178 //
3179 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3180 // known from SELF's arguments and the Java calling convention.
3181 // Region 6-7 is determined per call site.
3182 // Note 2: If the calling convention leaves holes in the incoming argument
3183 // area, those holes are owned by SELF. Holes in the outgoing area
3184 // are owned by the CALLEE. Holes should not be necessary in the
3185 // incoming area, as the Java calling convention is completely under
3186 // the control of the AD file. Doubles can be sorted and packed to
3187 // avoid holes. Holes in the outgoing arguments may be necessary for
3188 // varargs C calling conventions.
3189 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3190 // even aligned with pad0 as needed.
3191 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3192 // region 6-11 is even aligned; it may be padded out more so that
3193 // the region from SP to FP meets the minimum stack alignment.
// Frame description: tells the matcher and register allocator how compiled
// frames are laid out and how values are passed/returned (see the stack
// layout diagram above).
3195 frame %{
3196 // What direction does stack grow in (assumed to be same for native & Java)
3197 stack_direction(TOWARDS_LOW);
3199 // These two registers define part of the calling convention
3200 // between compiled code and the interpreter.
3201 inline_cache_reg(R_G5); // Inline Cache Register or Method* for I2C
3202 interpreter_method_oop_reg(R_G5); // Method Oop Register when calling interpreter
3204 // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
3205 cisc_spilling_operand_name(indOffset);
3207 // Number of stack slots consumed by a Monitor enter
3208 #ifdef _LP64
3209 sync_stack_slots(2);
3210 #else
3211 sync_stack_slots(1);
3212 #endif
3214 // Compiled code's Frame Pointer
3215 frame_pointer(R_SP);
3217 // Stack alignment requirement
3218 stack_alignment(StackAlignmentInBytes);
3219 // LP64: Alignment size in bytes (128-bit -> 16 bytes)
3220 // !LP64: Alignment size in bytes (64-bit -> 8 bytes)
3222 // Number of stack slots between incoming argument block and the start of
3223 // a new frame. The PROLOG must add this many slots to the stack. The
3224 // EPILOG must remove this many slots.
3225 in_preserve_stack_slots(0);
3227 // Number of outgoing stack slots killed above the out_preserve_stack_slots
3228 // for calls to C. Supports the var-args backing area for register parms.
3229 // ADLC doesn't support parsing expressions, so I folded the math by hand.
3230 #ifdef _LP64
3231 // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (0)) * 2-stack-slots-per-word
3232 varargs_C_out_slots_killed(12);
3233 #else
3234 // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (1)) * 1-stack-slots-per-word
3235 varargs_C_out_slots_killed( 7);
3236 #endif
3238 // The after-PROLOG location of the return address. Location of
3239 // return address specifies a type (REG or STACK) and a number
3240 // representing the register number (i.e. - use a register name) or
3241 // stack slot.
3242 return_addr(REG R_I7); // Ret Addr is in register I7
3244 // Body of function which returns an OptoRegs array locating
3245 // arguments either in registers or in stack slots for calling
3246 // java
3247 calling_convention %{
3248 (void) SharedRuntime::java_calling_convention(sig_bt, regs, length, is_outgoing);
3250 %}
3252 // Body of function which returns an OptoRegs array locating
3253 // arguments either in registers or in stack slots for calling
3254 // C.
3255 c_calling_convention %{
3256 // This is obviously always outgoing
3257 (void) SharedRuntime::c_calling_convention(sig_bt, regs, length);
3258 %}
3260 // Location of native (C/C++) and interpreter return values. This is specified to
3261 // be the same as Java. In the 32-bit VM, long values are actually returned from
3262 // native calls in O0:O1 and returned to the interpreter in I0:I1. The copying
3263 // to and from the register pairs is done by the appropriate call and epilog
3264 // opcodes. This simplifies the register allocator.
3265 c_return_value %{
3266 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
// Tables are indexed by ideal register type (Op_RegI..Op_RegL); lo/hi give
// the low and high halves of the OptoRegPair, out vs. in selects the
// caller's (O-register) or callee's (I-register) view of the window.
3267 #ifdef _LP64
3268 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
3269 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
3270 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
3271 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
3272 #else // !_LP64
3273 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
3274 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
3275 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
3276 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
3277 #endif
3278 return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
3279 (is_outgoing?lo_out:lo_in)[ideal_reg] );
3280 %}
3282 // Location of compiled Java return values. Same as C
3283 return_value %{
3284 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3285 #ifdef _LP64
3286 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
3287 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
3288 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
3289 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
3290 #else // !_LP64
3291 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
3292 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
3293 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
3294 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
3295 #endif
3296 return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
3297 (is_outgoing?lo_out:lo_in)[ideal_reg] );
3298 %}
3300 %}
3303 //----------ATTRIBUTES---------------------------------------------------------
3304 //----------Operand Attributes-------------------------------------------------
3305 op_attrib op_cost(1); // Required cost attribute
3307 //----------Instruction Attributes---------------------------------------------
3308 ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
3309 ins_attrib ins_size(32); // Required size attribute (in bits)
3310 ins_attrib ins_avoid_back_to_back(0); // instruction should not be generated back to back
3311 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3312 // non-matching short branch variant of some
3313 // long branch?
3315 //----------OPERANDS-----------------------------------------------------------
3316 // Operand definitions must precede instruction definitions for correct parsing
3317 // in the ADLC because operands constitute user defined types which are used in
3318 // instruction definitions.
3320 //----------Simple Operands----------------------------------------------------
3321 // Immediate Operands
// Each immediate operand matches a constant ideal node (ConI/ConL/ConP/...)
// restricted by its predicate; CONST_INTER marks it as a constant for the
// matcher. The op_cost steers the matcher toward the cheapest encoding.
3322 // Integer Immediate: 32-bit
3323 operand immI() %{
3324 match(ConI);
3326 op_cost(0);
3327 // formats are generated automatically for constants and base registers
3328 format %{ %}
3329 interface(CONST_INTER);
3330 %}
3332 // Integer Immediate: 8-bit
3333 operand immI8() %{
3334 predicate(Assembler::is_simm8(n->get_int()));
3335 match(ConI);
3336 op_cost(0);
3337 format %{ %}
3338 interface(CONST_INTER);
3339 %}
3341 // Integer Immediate: 13-bit
3342 operand immI13() %{
3343 predicate(Assembler::is_simm13(n->get_int()));
3344 match(ConI);
3345 op_cost(0);
3347 format %{ %}
3348 interface(CONST_INTER);
3349 %}
3351 // Integer Immediate: 13-bit minus 7
// A value that still fits in simm13 after adding 7 (used where an offset
// of up to 7 is folded into the displacement).
3352 operand immI13m7() %{
3353 predicate((-4096 < n->get_int()) && ((n->get_int() + 7) <= 4095));
3354 match(ConI);
3355 op_cost(0);
3357 format %{ %}
3358 interface(CONST_INTER);
3359 %}
3361 // Integer Immediate: 16-bit
3362 operand immI16() %{
3363 predicate(Assembler::is_simm16(n->get_int()));
3364 match(ConI);
3365 op_cost(0);
3366 format %{ %}
3367 interface(CONST_INTER);
3368 %}
3370 // Unsigned (positive) Integer Immediate: 13-bit
3371 operand immU13() %{
3372 predicate((0 <= n->get_int()) && Assembler::is_simm13(n->get_int()));
3373 match(ConI);
3374 op_cost(0);
3376 format %{ %}
3377 interface(CONST_INTER);
3378 %}
3380 // Integer Immediate: 6-bit
3381 operand immU6() %{
3382 predicate(n->get_int() >= 0 && n->get_int() <= 63);
3383 match(ConI);
3384 op_cost(0);
3385 format %{ %}
3386 interface(CONST_INTER);
3387 %}
3389 // Integer Immediate: 11-bit
3390 operand immI11() %{
3391 predicate(Assembler::is_simm11(n->get_int()));
3392 match(ConI);
3393 op_cost(0);
3394 format %{ %}
3395 interface(CONST_INTER);
3396 %}
3398 // Integer Immediate: 5-bit
3399 operand immI5() %{
3400 predicate(Assembler::is_simm5(n->get_int()));
3401 match(ConI);
3402 op_cost(0);
3403 format %{ %}
3404 interface(CONST_INTER);
3405 %}
3407 // Integer Immediate: 0-bit
3408 operand immI0() %{
3409 predicate(n->get_int() == 0);
3410 match(ConI);
3411 op_cost(0);
3413 format %{ %}
3414 interface(CONST_INTER);
3415 %}
3417 // Integer Immediate: the value 10
3418 operand immI10() %{
3419 predicate(n->get_int() == 10);
3420 match(ConI);
3421 op_cost(0);
3423 format %{ %}
3424 interface(CONST_INTER);
3425 %}
3427 // Integer Immediate: the values 0-31
3428 operand immU5() %{
3429 predicate(n->get_int() >= 0 && n->get_int() <= 31);
3430 match(ConI);
3431 op_cost(0);
3433 format %{ %}
3434 interface(CONST_INTER);
3435 %}
3437 // Integer Immediate: the values 1-31
3438 operand immI_1_31() %{
3439 predicate(n->get_int() >= 1 && n->get_int() <= 31);
3440 match(ConI);
3441 op_cost(0);
3443 format %{ %}
3444 interface(CONST_INTER);
3445 %}
3447 // Integer Immediate: the values 32-63
3448 operand immI_32_63() %{
3449 predicate(n->get_int() >= 32 && n->get_int() <= 63);
3450 match(ConI);
3451 op_cost(0);
3453 format %{ %}
3454 interface(CONST_INTER);
3455 %}
3457 // Immediates for special shifts (sign extend)
3459 // Integer Immediate: the value 16
3460 operand immI_16() %{
3461 predicate(n->get_int() == 16);
3462 match(ConI);
3463 op_cost(0);
3465 format %{ %}
3466 interface(CONST_INTER);
3467 %}
3469 // Integer Immediate: the value 24
3470 operand immI_24() %{
3471 predicate(n->get_int() == 24);
3472 match(ConI);
3473 op_cost(0);
3475 format %{ %}
3476 interface(CONST_INTER);
3477 %}
3479 // Integer Immediate: the value 255
3480 operand immI_255() %{
3481 predicate( n->get_int() == 255 );
3482 match(ConI);
3483 op_cost(0);
3485 format %{ %}
3486 interface(CONST_INTER);
3487 %}
3489 // Integer Immediate: the value 65535
3490 operand immI_65535() %{
3491 predicate(n->get_int() == 65535);
3492 match(ConI);
3493 op_cost(0);
3495 format %{ %}
3496 interface(CONST_INTER);
3497 %}
3499 // Long Immediate: the value FF
3500 operand immL_FF() %{
3501 predicate( n->get_long() == 0xFFL );
3502 match(ConL);
3503 op_cost(0);
3505 format %{ %}
3506 interface(CONST_INTER);
3507 %}
3509 // Long Immediate: the value FFFF
3510 operand immL_FFFF() %{
3511 predicate( n->get_long() == 0xFFFFL );
3512 match(ConL);
3513 op_cost(0);
3515 format %{ %}
3516 interface(CONST_INTER);
3517 %}
3519 // Pointer Immediate: 32 or 64-bit
3520 operand immP() %{
3521 match(ConP);
3523 op_cost(5);
3524 // formats are generated automatically for constants and base registers
3525 format %{ %}
3526 interface(CONST_INTER);
3527 %}
3529 #ifdef _LP64
3530 // Pointer Immediate: 64-bit
// Materialize with a set sequence (pre-Niagara2 processors).
3531 operand immP_set() %{
3532 predicate(!VM_Version::is_niagara_plus());
3533 match(ConP);
3535 op_cost(5);
3536 // formats are generated automatically for constants and base registers
3537 format %{ %}
3538 interface(CONST_INTER);
3539 %}
3541 // Pointer Immediate: 64-bit
3542 // From Niagara2 processors on a load should be better than materializing.
3543 operand immP_load() %{
3544 predicate(VM_Version::is_niagara_plus() && (n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set(n->get_ptr()) > 3)));
3545 match(ConP);
3547 op_cost(5);
3548 // formats are generated automatically for constants and base registers
3549 format %{ %}
3550 interface(CONST_INTER);
3551 %}
3553 // Pointer Immediate: 64-bit
// Non-oop pointer that is cheap (<= 3 instructions) to materialize inline.
3554 operand immP_no_oop_cheap() %{
3555 predicate(VM_Version::is_niagara_plus() && !n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set(n->get_ptr()) <= 3));
3556 match(ConP);
3558 op_cost(5);
3559 // formats are generated automatically for constants and base registers
3560 format %{ %}
3561 interface(CONST_INTER);
3562 %}
3563 #endif
// Pointer Immediate: 13-bit signed range (fits a simm13 field)
3565 operand immP13() %{
3566 predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095));
3567 match(ConP);
3568 op_cost(0);
3570 format %{ %}
3571 interface(CONST_INTER);
3572 %}
// NULL Pointer Immediate
3574 operand immP0() %{
3575 predicate(n->get_ptr() == 0);
3576 match(ConP);
3577 op_cost(0);
3579 format %{ %}
3580 interface(CONST_INTER);
3581 %}
// Pointer Immediate: the address of the safepoint polling page
3583 operand immP_poll() %{
3584 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
3585 match(ConP);
3587 // formats are generated automatically for constants and base registers
3588 format %{ %}
3589 interface(CONST_INTER);
3590 %}
3592 // Pointer Immediate
3593 operand immN()
3594 %{
3595 match(ConN);
3597 op_cost(10);
3598 format %{ %}
3599 interface(CONST_INTER);
3600 %}
3602 // NULL Pointer Immediate
3603 operand immN0()
3604 %{
3605 predicate(n->get_narrowcon() == 0);
3606 match(ConN);
3608 op_cost(0);
3609 format %{ %}
3610 interface(CONST_INTER);
3611 %}
// Long Immediate: any 64-bit constant
3613 operand immL() %{
3614 match(ConL);
3615 op_cost(40);
3616 // formats are generated automatically for constants and base registers
3617 format %{ %}
3618 interface(CONST_INTER);
3619 %}
// Long Immediate: zero
3621 operand immL0() %{
3622 predicate(n->get_long() == 0L);
3623 match(ConL);
3624 op_cost(0);
3625 // formats are generated automatically for constants and base registers
3626 format %{ %}
3627 interface(CONST_INTER);
3628 %}
3630 // Integer Immediate: 5-bit
3631 operand immL5() %{
3632 predicate(n->get_long() == (int)n->get_long() && Assembler::is_simm5((int)n->get_long()));
3633 match(ConL);
3634 op_cost(0);
3635 format %{ %}
3636 interface(CONST_INTER);
3637 %}
3639 // Long Immediate: 13-bit
3640 operand immL13() %{
3641 predicate((-4096L < n->get_long()) && (n->get_long() <= 4095L));
3642 match(ConL);
3643 op_cost(0);
3645 format %{ %}
3646 interface(CONST_INTER);
3647 %}
3649 // Long Immediate: 13-bit minus 7
3650 operand immL13m7() %{
3651 predicate((-4096L < n->get_long()) && ((n->get_long() + 7L) <= 4095L));
3652 match(ConL);
3653 op_cost(0);
3655 format %{ %}
3656 interface(CONST_INTER);
3657 %}
3659 // Long Immediate: low 32-bit mask
3660 operand immL_32bits() %{
3661 predicate(n->get_long() == 0xFFFFFFFFL);
3662 match(ConL);
3663 op_cost(0);
3665 format %{ %}
3666 interface(CONST_INTER);
3667 %}
3669 // Long Immediate: cheap (materialize in <= 3 instructions)
3670 operand immL_cheap() %{
3671 predicate(!VM_Version::is_niagara_plus() || MacroAssembler::insts_for_set64(n->get_long()) <= 3);
3672 match(ConL);
3673 op_cost(0);
3675 format %{ %}
3676 interface(CONST_INTER);
3677 %}
3679 // Long Immediate: expensive (materialize in > 3 instructions)
3680 operand immL_expensive() %{
3681 predicate(VM_Version::is_niagara_plus() && MacroAssembler::insts_for_set64(n->get_long()) > 3);
3682 match(ConL);
3683 op_cost(0);
3685 format %{ %}
3686 interface(CONST_INTER);
3687 %}
3689 // Double Immediate
3690 operand immD() %{
3691 match(ConD);
3693 op_cost(40);
3694 format %{ %}
3695 interface(CONST_INTER);
3696 %}
// Double Immediate: +0.0d (positive zero only; bit pattern must be all zeros)
3698 operand immD0() %{
3699 #ifdef _LP64
3700 // on 64-bit architectures this comparison is faster
3701 predicate(jlong_cast(n->getd()) == 0);
3702 #else
3703 predicate((n->getd() == 0) && (fpclass(n->getd()) == FP_PZERO));
3704 #endif
3705 match(ConD);
3707 op_cost(0);
3708 format %{ %}
3709 interface(CONST_INTER);
3710 %}
3712 // Float Immediate
3713 operand immF() %{
3714 match(ConF);
3716 op_cost(20);
3717 format %{ %}
3718 interface(CONST_INTER);
3719 %}
3721 // Float Immediate: 0
3722 operand immF0() %{
3723 predicate((n->getf() == 0) && (fpclass(n->getf()) == FP_PZERO));
3724 match(ConF);
3726 op_cost(0);
3727 format %{ %}
3728 interface(CONST_INTER);
3729 %}
3731 // Integer Register Operands
// Register operands constrain allocation to a register class (ALLOC_IN_RC)
// and list the more-specialized operands they subsume via match(...).
3732 // Integer Register
3733 operand iRegI() %{
3734 constraint(ALLOC_IN_RC(int_reg));
3735 match(RegI);
3737 match(notemp_iRegI);
3738 match(g1RegI);
3739 match(o0RegI);
3740 match(iRegIsafe);
3742 format %{ %}
3743 interface(REG_INTER);
3744 %}
// Integer register excluding the temporaries
3746 operand notemp_iRegI() %{
3747 constraint(ALLOC_IN_RC(notemp_int_reg));
3748 match(RegI);
3750 match(o0RegI);
3752 format %{ %}
3753 interface(REG_INTER);
3754 %}
// Integer register pinned to O0
3756 operand o0RegI() %{
3757 constraint(ALLOC_IN_RC(o0_regI));
3758 match(iRegI);
3760 format %{ %}
3761 interface(REG_INTER);
3762 %}
3764 // Pointer Register
3765 operand iRegP() %{
3766 constraint(ALLOC_IN_RC(ptr_reg));
3767 match(RegP);
3769 match(lock_ptr_RegP);
3770 match(g1RegP);
3771 match(g2RegP);
3772 match(g3RegP);
3773 match(g4RegP);
3774 match(i0RegP);
3775 match(o0RegP);
3776 match(o1RegP);
3777 match(l7RegP);
3779 format %{ %}
3780 interface(REG_INTER);
3781 %}
// Pointer register class that includes the stack/frame pointers
3783 operand sp_ptr_RegP() %{
3784 constraint(ALLOC_IN_RC(sp_ptr_reg));
3785 match(RegP);
3786 match(iRegP);
3788 format %{ %}
3789 interface(REG_INTER);
3790 %}
// Pointer registers usable for lock state
3792 operand lock_ptr_RegP() %{
3793 constraint(ALLOC_IN_RC(lock_ptr_reg));
3794 match(RegP);
3795 match(i0RegP);
3796 match(o0RegP);
3797 match(o1RegP);
3798 match(l7RegP);
3800 format %{ %}
3801 interface(REG_INTER);
3802 %}
// Single-register pointer classes (pinned to one named register each)
3804 operand g1RegP() %{
3805 constraint(ALLOC_IN_RC(g1_regP));
3806 match(iRegP);
3808 format %{ %}
3809 interface(REG_INTER);
3810 %}
3812 operand g2RegP() %{
3813 constraint(ALLOC_IN_RC(g2_regP));
3814 match(iRegP);
3816 format %{ %}
3817 interface(REG_INTER);
3818 %}
3820 operand g3RegP() %{
3821 constraint(ALLOC_IN_RC(g3_regP));
3822 match(iRegP);
3824 format %{ %}
3825 interface(REG_INTER);
3826 %}
3828 operand g1RegI() %{
3829 constraint(ALLOC_IN_RC(g1_regI));
3830 match(iRegI);
3832 format %{ %}
3833 interface(REG_INTER);
3834 %}
3836 operand g3RegI() %{
3837 constraint(ALLOC_IN_RC(g3_regI));
3838 match(iRegI);
3840 format %{ %}
3841 interface(REG_INTER);
3842 %}
3844 operand g4RegI() %{
3845 constraint(ALLOC_IN_RC(g4_regI));
3846 match(iRegI);
3848 format %{ %}
3849 interface(REG_INTER);
3850 %}
3852 operand g4RegP() %{
3853 constraint(ALLOC_IN_RC(g4_regP));
3854 match(iRegP);
3856 format %{ %}
3857 interface(REG_INTER);
3858 %}
3860 operand i0RegP() %{
3861 constraint(ALLOC_IN_RC(i0_regP));
3862 match(iRegP);
3864 format %{ %}
3865 interface(REG_INTER);
3866 %}
3868 operand o0RegP() %{
3869 constraint(ALLOC_IN_RC(o0_regP));
3870 match(iRegP);
3872 format %{ %}
3873 interface(REG_INTER);
3874 %}
3876 operand o1RegP() %{
3877 constraint(ALLOC_IN_RC(o1_regP));
3878 match(iRegP);
3880 format %{ %}
3881 interface(REG_INTER);
3882 %}
3884 operand o2RegP() %{
3885 constraint(ALLOC_IN_RC(o2_regP));
3886 match(iRegP);
3888 format %{ %}
3889 interface(REG_INTER);
3890 %}
3892 operand o7RegP() %{
3893 constraint(ALLOC_IN_RC(o7_regP));
3894 match(iRegP);
3896 format %{ %}
3897 interface(REG_INTER);
3898 %}
3900 operand l7RegP() %{
3901 constraint(ALLOC_IN_RC(l7_regP));
3902 match(iRegP);
3904 format %{ %}
3905 interface(REG_INTER);
3906 %}
3908 operand o7RegI() %{
3909 constraint(ALLOC_IN_RC(o7_regI));
3910 match(iRegI);
3912 format %{ %}
3913 interface(REG_INTER);
3914 %}
// Narrow (compressed) pointer register
3916 operand iRegN() %{
3917 constraint(ALLOC_IN_RC(int_reg));
3918 match(RegN);
3920 format %{ %}
3921 interface(REG_INTER);
3922 %}
3924 // Long Register
3925 operand iRegL() %{
3926 constraint(ALLOC_IN_RC(long_reg));
3927 match(RegL);
3929 format %{ %}
3930 interface(REG_INTER);
3931 %}
3933 operand o2RegL() %{
3934 constraint(ALLOC_IN_RC(o2_regL));
3935 match(iRegL);
3937 format %{ %}
3938 interface(REG_INTER);
3939 %}
3941 operand o7RegL() %{
3942 constraint(ALLOC_IN_RC(o7_regL));
3943 match(iRegL);
3945 format %{ %}
3946 interface(REG_INTER);
3947 %}
3949 operand g1RegL() %{
3950 constraint(ALLOC_IN_RC(g1_regL));
3951 match(iRegL);
3953 format %{ %}
3954 interface(REG_INTER);
3955 %}
3957 operand g3RegL() %{
3958 constraint(ALLOC_IN_RC(g3_regL));
3959 match(iRegL);
3961 format %{ %}
3962 interface(REG_INTER);
3963 %}
3965 // Int Register safe
3966 // This is 64bit safe
3967 operand iRegIsafe() %{
3968 constraint(ALLOC_IN_RC(long_reg));
3970 match(iRegI);
3972 format %{ %}
3973 interface(REG_INTER);
3974 %}
3976 // Condition Code Flag Register
// The flags operands all denote the same physical condition-code register;
// the distinct operand types encode which comparison flavor produced the
// flags (signed / unsigned / pointer / long / float) for the matcher.
3977 operand flagsReg() %{
3978 constraint(ALLOC_IN_RC(int_flags));
3979 match(RegFlags);
3981 format %{ "ccr" %} // both ICC and XCC
3982 interface(REG_INTER);
3983 %}
3985 // Condition Code Register, unsigned comparisons.
3986 operand flagsRegU() %{
3987 constraint(ALLOC_IN_RC(int_flags));
3988 match(RegFlags);
3990 format %{ "icc_U" %}
3991 interface(REG_INTER);
3992 %}
3994 // Condition Code Register, pointer comparisons.
3995 operand flagsRegP() %{
3996 constraint(ALLOC_IN_RC(int_flags));
3997 match(RegFlags);
3999 #ifdef _LP64
4000 format %{ "xcc_P" %}
4001 #else
4002 format %{ "icc_P" %}
4003 #endif
4004 interface(REG_INTER);
4005 %}
4007 // Condition Code Register, long comparisons.
4008 operand flagsRegL() %{
4009 constraint(ALLOC_IN_RC(int_flags));
4010 match(RegFlags);
4012 format %{ "xcc_L" %}
4013 interface(REG_INTER);
4014 %}
4016 // Condition Code Register, floating comparisons, unordered same as "less".
4017 operand flagsRegF() %{
4018 constraint(ALLOC_IN_RC(float_flags));
4019 match(RegFlags);
4020 match(flagsRegF0);
4022 format %{ %}
4023 interface(REG_INTER);
4024 %}
// Float condition code register pinned to fcc0
4026 operand flagsRegF0() %{
4027 constraint(ALLOC_IN_RC(float_flag0));
4028 match(RegFlags);
4030 format %{ %}
4031 interface(REG_INTER);
4032 %}
4035 // Condition Code Flag Register used by long compare
4036 operand flagsReg_long_LTGE() %{
4037 constraint(ALLOC_IN_RC(int_flags));
4038 match(RegFlags);
4039 format %{ "icc_LTGE" %}
4040 interface(REG_INTER);
4041 %}
4042 operand flagsReg_long_EQNE() %{
4043 constraint(ALLOC_IN_RC(int_flags));
4044 match(RegFlags);
4045 format %{ "icc_EQNE" %}
4046 interface(REG_INTER);
4047 %}
4048 operand flagsReg_long_LEGT() %{
4049 constraint(ALLOC_IN_RC(int_flags));
4050 match(RegFlags);
4051 format %{ "icc_LEGT" %}
4052 interface(REG_INTER);
4053 %}
// Double-precision float register (even/odd pair)
4056 operand regD() %{
4057 constraint(ALLOC_IN_RC(dflt_reg));
4058 match(RegD);
4060 match(regD_low);
4062 format %{ %}
4063 interface(REG_INTER);
4064 %}
// Single-precision float register
4066 operand regF() %{
4067 constraint(ALLOC_IN_RC(sflt_reg));
4068 match(RegF);
4070 format %{ %}
4071 interface(REG_INTER);
4072 %}
// Double register restricted to the low half of the FP register file
4074 operand regD_low() %{
4075 constraint(ALLOC_IN_RC(dflt_low_reg));
4076 match(regD);
4078 format %{ %}
4079 interface(REG_INTER);
4080 %}
4082 // Special Registers
4084 // Method Register
// These must agree with inline_cache_reg/interpreter_method_oop_reg
// declared in the frame section (both are G5).
4085 operand inline_cache_regP(iRegP reg) %{
4086 constraint(ALLOC_IN_RC(g5_regP)); // G5=inline_cache_reg but uses 2 bits instead of 1
4087 match(reg);
4088 format %{ %}
4089 interface(REG_INTER);
4090 %}
4092 operand interpreter_method_oop_regP(iRegP reg) %{
4093 constraint(ALLOC_IN_RC(g5_regP)); // G5=interpreter_method_oop_reg but uses 2 bits instead of 1
4094 match(reg);
4095 format %{ %}
4096 interface(REG_INTER);
4097 %}
4100 //----------Complex Operands---------------------------------------------------
4101 // Indirect Memory Reference
// Memory operands describe SPARC addressing modes to the matcher via
// MEMORY_INTER (base register, optional index register, scale, displacement).
4102 operand indirect(sp_ptr_RegP reg) %{
4103 constraint(ALLOC_IN_RC(sp_ptr_reg));
4104 match(reg);
4106 op_cost(100);
4107 format %{ "[$reg]" %}
4108 interface(MEMORY_INTER) %{
4109 base($reg);
4110 index(0x0);
4111 scale(0x0);
4112 disp(0x0);
4113 %}
4114 %}
4116 // Indirect with simm13 Offset
4117 operand indOffset13(sp_ptr_RegP reg, immX13 offset) %{
4118 constraint(ALLOC_IN_RC(sp_ptr_reg));
4119 match(AddP reg offset);
4121 op_cost(100);
4122 format %{ "[$reg + $offset]" %}
4123 interface(MEMORY_INTER) %{
4124 base($reg);
4125 index(0x0);
4126 scale(0x0);
4127 disp($offset);
4128 %}
4129 %}
4131 // Indirect with simm13 Offset minus 7
4132 operand indOffset13m7(sp_ptr_RegP reg, immX13m7 offset) %{
4133 constraint(ALLOC_IN_RC(sp_ptr_reg));
4134 match(AddP reg offset);
4136 op_cost(100);
4137 format %{ "[$reg + $offset]" %}
4138 interface(MEMORY_INTER) %{
4139 base($reg);
4140 index(0x0);
4141 scale(0x0);
4142 disp($offset);
4143 %}
4144 %}
4146 // Note: Intel has a swapped version also, like this:
4147 //operand indOffsetX(iRegI reg, immP offset) %{
4148 // constraint(ALLOC_IN_RC(int_reg));
4149 // match(AddP offset reg);
4150 //
4151 // op_cost(100);
4152 // format %{ "[$reg + $offset]" %}
4153 // interface(MEMORY_INTER) %{
4154 // base($reg);
4155 // index(0x0);
4156 // scale(0x0);
4157 // disp($offset);
4158 // %}
4159 //%}
4160 //// However, it doesn't make sense for SPARC, since
4161 // we have no particularly good way to embed oops in
4162 // single instructions.
4164 // Indirect with Register Index
4165 operand indIndex(iRegP addr, iRegX index) %{
4166 constraint(ALLOC_IN_RC(ptr_reg));
4167 match(AddP addr index);
4169 op_cost(100);
4170 format %{ "[$addr + $index]" %}
4171 interface(MEMORY_INTER) %{
4172 base($addr);
4173 index($index);
4174 scale(0x0);
4175 disp(0x0);
4176 %}
4177 %}
4179 //----------Special Memory Operands--------------------------------------------
4180 // Stack Slot Operand - This operand is used for loading and storing temporary
4181 // values on the stack where a match requires a value to
4182 // flow through memory.
// All stack slots address off R_SP (encoding 0xE) with the slot number as
// the displacement; one variant per value type (I/P/F/D/L).
4183 operand stackSlotI(sRegI reg) %{
4184 constraint(ALLOC_IN_RC(stack_slots));
4185 op_cost(100);
4186 //match(RegI);
4187 format %{ "[$reg]" %}
4188 interface(MEMORY_INTER) %{
4189 base(0xE); // R_SP
4190 index(0x0);
4191 scale(0x0);
4192 disp($reg); // Stack Offset
4193 %}
4194 %}
4196 operand stackSlotP(sRegP reg) %{
4197 constraint(ALLOC_IN_RC(stack_slots));
4198 op_cost(100);
4199 //match(RegP);
4200 format %{ "[$reg]" %}
4201 interface(MEMORY_INTER) %{
4202 base(0xE); // R_SP
4203 index(0x0);
4204 scale(0x0);
4205 disp($reg); // Stack Offset
4206 %}
4207 %}
4209 operand stackSlotF(sRegF reg) %{
4210 constraint(ALLOC_IN_RC(stack_slots));
4211 op_cost(100);
4212 //match(RegF);
4213 format %{ "[$reg]" %}
4214 interface(MEMORY_INTER) %{
4215 base(0xE); // R_SP
4216 index(0x0);
4217 scale(0x0);
4218 disp($reg); // Stack Offset
4219 %}
4220 %}
4221 operand stackSlotD(sRegD reg) %{
4222 constraint(ALLOC_IN_RC(stack_slots));
4223 op_cost(100);
4224 //match(RegD);
4225 format %{ "[$reg]" %}
4226 interface(MEMORY_INTER) %{
4227 base(0xE); // R_SP
4228 index(0x0);
4229 scale(0x0);
4230 disp($reg); // Stack Offset
4231 %}
4232 %}
4233 operand stackSlotL(sRegL reg) %{
4234 constraint(ALLOC_IN_RC(stack_slots));
4235 op_cost(100);
4236 //match(RegL);
4237 format %{ "[$reg]" %}
4238 interface(MEMORY_INTER) %{
4239 base(0xE); // R_SP
4240 index(0x0);
4241 scale(0x0);
4242 disp($reg); // Stack Offset
4243 %}
4244 %}
4246 // Operands for expressing Control Flow
4247 // NOTE: Label is a predefined operand which should not be redefined in
4248 // the AD file. It is generically handled within the ADLC.
4250 //----------Conditional Branch Operands----------------------------------------
4251 // Comparison Op - This is the operation of the comparison, and is limited to
4252 // the following set of codes:
4253 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
4254 //
4255 // Other attributes of the comparison, such as unsignedness, are specified
4256 // by the comparison instruction that sets a condition code flags register.
4257 // That result is represented by a flags operand whose subtype is appropriate
4258 // to the unsignedness (etc.) of the comparison.
4259 //
4260 // Later, the instruction which matches both the Comparison Op (a Bool) and
4261 // the flags (produced by the Cmp) specifies the coding of the comparison op
4262 // by matching a specific subtype of Bool operand below, such as cmpOpU.
// Comparison Op, signed integer.
// The hex values are condition encodings consumed by the instruction
// encodings that match these operands (NOTE(review): presumably the SPARC
// Bicc/MOVcc cond field values — confirm against the encoding classes).
operand cmpOp() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x3);
    greater_equal(0xB);
    less_equal(0x2);
    greater(0xA);
  %}
%}

// Comparison Op, unsigned
operand cmpOpU() %{
  match(Bool);

  format %{ "u" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x5);
    greater_equal(0xD);
    less_equal(0x4);
    greater(0xC);
  %}
%}

// Comparison Op, pointer (same as unsigned)
operand cmpOpP() %{
  match(Bool);

  format %{ "p" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x5);
    greater_equal(0xD);
    less_equal(0x4);
    greater(0xC);
  %}
%}

// Comparison Op, branch-register encoding
operand cmpOp_reg() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal        (0x1);
    not_equal    (0x5);
    less         (0x3);
    greater_equal(0x7);
    less_equal   (0x2);
    greater      (0x6);
  %}
%}

// Comparison Code, floating, unordered same as less
operand cmpOpF() %{
  match(Bool);

  format %{ "fl" %}
  interface(COND_INTER) %{
    equal(0x9);
    not_equal(0x1);
    less(0x3);
    greater_equal(0xB);
    less_equal(0xE);
    greater(0x6);
  %}
%}

// Used by long compare.
// Same encodings as cmpOp but with less/greater (and their complements)
// swapped, i.e. the condition for the commuted operand order.
operand cmpOp_commute() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0xA);
    greater_equal(0x2);
    less_equal(0xB);
    greater(0x3);
  %}
%}
4353 //----------OPERAND CLASSES----------------------------------------------------
4354 // Operand Classes are groups of operands that are used to simplify
4355 // instruction definitions by not requiring the AD writer to specify separate
4356 // instructions for every form of operand when the instruction accepts
4357 // multiple operand types with the same basic encoding and format. The classic
4358 // case of this is memory operands.
// Memory operand classes: one instruction form handles every listed
// addressing-mode operand with the same basic encoding and format.
opclass memory( indirect, indOffset13, indIndex );
opclass indIndexMemory( indIndex );
4362 //----------PIPELINE-----------------------------------------------------------
4363 pipeline %{
4365 //----------ATTRIBUTES---------------------------------------------------------
attributes %{
  fixed_size_instructions;           // Fixed size instructions
  branch_has_delay_slot;             // Branch has delay slot following
  max_instructions_per_bundle = 4;   // Up to 4 instructions per bundle
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 16;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 16 bytes

  // List of nop instructions
  nops( Nop_A0, Nop_A1, Nop_MS, Nop_FA, Nop_BR );
%}
4378 //----------RESOURCES----------------------------------------------------------
4379 // Resources are the functional units available to the machine
// IALU is a compound resource: an integer ALU op may issue on either A0 or A1.
resources(A0, A1, MS, BR, FA, FM, IDIV, FDIV, IALU = A0 | A1);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline.
// The pipe_class definitions below reference these stage names; as used
// there, R is the register-read/issue stage and E the execute stage.

pipe_desc(A, P, F, B, I, J, S, R, E, C, M, W, X, T, D);
4387 //----------PIPELINE CLASSES---------------------------------------------------
4388 // Pipeline Classes describe the stages in which input and output are
4389 // referenced by the hardware pipeline.
// Integer ALU reg-reg operation
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  single_instruction;
  dst  : E(write);
  src1 : R(read);
  src2 : R(read);
  IALU : R;
%}

// Integer ALU reg-reg long operation (two ALU slots consumed)
pipe_class ialu_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{
  instruction_count(2);
  dst  : E(write);
  src1 : R(read);
  src2 : R(read);
  IALU : R;
  IALU : R;
%}

// Integer ALU reg-reg long dependent operation
pipe_class ialu_reg_reg_2_dep(iRegL dst, iRegL src1, iRegL src2, flagsReg cr) %{
  instruction_count(1); multiple_bundles;
  dst  : E(write);
  src1 : R(read);
  src2 : R(read);
  cr   : E(write);
  IALU : R(2);
%}

// Integer ALU reg-imm operation
pipe_class ialu_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
  single_instruction;
  dst  : E(write);
  src1 : R(read);
  IALU : R;
%}

// Integer ALU reg-reg operation with condition code
pipe_class ialu_cc_reg_reg(iRegI dst, iRegI src1, iRegI src2, flagsReg cr) %{
  single_instruction;
  dst  : E(write);
  cr   : E(write);
  src1 : R(read);
  src2 : R(read);
  IALU : R;
%}

// Integer ALU reg-imm operation with condition code
pipe_class ialu_cc_reg_imm(iRegI dst, iRegI src1, immI13 src2, flagsReg cr) %{
  single_instruction;
  dst  : E(write);
  cr   : E(write);
  src1 : R(read);
  IALU : R;
%}

// Integer ALU zero-reg operation
pipe_class ialu_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{
  single_instruction;
  dst  : E(write);
  src2 : R(read);
  IALU : R;
%}

// Integer ALU zero-reg operation with condition code only
pipe_class ialu_cconly_zero_reg(flagsReg cr, iRegI src) %{
  single_instruction;
  cr   : E(write);
  src  : R(read);
  IALU : R;
%}

// Integer ALU reg-reg operation with condition code only
pipe_class ialu_cconly_reg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
  single_instruction;
  cr   : E(write);
  src1 : R(read);
  src2 : R(read);
  IALU : R;
%}

// Integer ALU reg-imm operation with condition code only
pipe_class ialu_cconly_reg_imm(flagsReg cr, iRegI src1, immI13 src2) %{
  single_instruction;
  cr   : E(write);
  src1 : R(read);
  IALU : R;
%}

// Integer ALU reg-reg-zero operation with condition code only
pipe_class ialu_cconly_reg_reg_zero(flagsReg cr, iRegI src1, iRegI src2, immI0 zero) %{
  single_instruction;
  cr   : E(write);
  src1 : R(read);
  src2 : R(read);
  IALU : R;
%}

// Integer ALU reg-imm-zero operation with condition code only
pipe_class ialu_cconly_reg_imm_zero(flagsReg cr, iRegI src1, immI13 src2, immI0 zero) %{
  single_instruction;
  cr   : E(write);
  src1 : R(read);
  IALU : R;
%}

// Integer ALU reg-reg operation with condition code, src1 modified
pipe_class ialu_cc_rwreg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
  single_instruction;
  cr   : E(write);
  src1 : E(write);
  src1 : R(read);
  src2 : R(read);
  IALU : R;
%}

// Integer ALU reg-imm operation with condition code, src1 modified
pipe_class ialu_cc_rwreg_imm(flagsReg cr, iRegI src1, immI13 src2) %{
  single_instruction;
  cr   : E(write);
  src1 : E(write);
  src1 : R(read);
  IALU : R;
%}

// Long compare: multi-bundle, result available 4 cycles after execute
pipe_class cmpL_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg cr ) %{
  multiple_bundles;
  dst  : E(write)+4;
  cr   : E(write);
  src1 : R(read);
  src2 : R(read);
  IALU : R(3);
  BR   : R(2);
%}
// Integer ALU operation
pipe_class ialu_none(iRegI dst) %{
  single_instruction;
  dst  : E(write);
  IALU : R;
%}

// Integer ALU reg operation
pipe_class ialu_reg(iRegI dst, iRegI src) %{
  single_instruction; may_have_no_code;
  dst  : E(write);
  src  : R(read);
  IALU : R;
%}

// Integer ALU reg conditional operation
// This instruction has a 1 cycle stall, and cannot execute
// in the same cycle as the instruction setting the condition
// code. We kludge this by pretending to read the condition code
// 1 cycle earlier, and by marking the functional units as busy
// for 2 cycles with the result available 1 cycle later than
// is really the case.
pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr ) %{
  single_instruction;
  op2_out : C(write);
  op1     : R(read);
  cr      : R(read);  // This is really E, with a 1 cycle stall
  BR      : R(2);
  MS      : R(2);
%}

#ifdef _LP64
// Clear-high then conditional-move pair (64-bit only)
pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{
  instruction_count(1); multiple_bundles;
  dst  : C(write)+1;
  src  : R(read)+1;
  IALU : R(1);
  BR   : E(2);
  MS   : E(2);
%}
#endif

// Integer ALU reg operation
pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{
  single_instruction; may_have_no_code;
  dst  : E(write);
  src  : R(read);
  IALU : R;
%}

pipe_class ialu_move_reg_I_to_L(iRegL dst, iRegI src) %{
  single_instruction; may_have_no_code;
  dst  : E(write);
  src  : R(read);
  IALU : R;
%}

// Two integer ALU reg operations
pipe_class ialu_reg_2(iRegL dst, iRegL src) %{
  instruction_count(2);
  dst : E(write);
  src : R(read);
  A0  : R;
  A1  : R;
%}

// Two integer ALU reg operations
pipe_class ialu_move_reg_L_to_L(iRegL dst, iRegL src) %{
  instruction_count(2); may_have_no_code;
  dst : E(write);
  src : R(read);
  A0  : R;
  A1  : R;
%}

// Integer ALU imm operation
pipe_class ialu_imm(iRegI dst, immI13 src) %{
  single_instruction;
  dst  : E(write);
  IALU : R;
%}

// Integer ALU reg-reg with carry operation
pipe_class ialu_reg_reg_cy(iRegI dst, iRegI src1, iRegI src2, iRegI cy) %{
  single_instruction;
  dst  : E(write);
  src1 : R(read);
  src2 : R(read);
  IALU : R;
%}

// Integer ALU cc operation
pipe_class ialu_cc(iRegI dst, flagsReg cc) %{
  single_instruction;
  dst  : E(write);
  cc   : R(read);
  IALU : R;
%}

// Integer ALU cc / second IALU operation
pipe_class ialu_reg_ialu( iRegI dst, iRegI src ) %{
  instruction_count(1); multiple_bundles;
  dst  : E(write)+1;
  src  : R(read);
  IALU : R;
%}

// Integer ALU cc / second IALU operation
pipe_class ialu_reg_reg_ialu( iRegI dst, iRegI p, iRegI q ) %{
  instruction_count(1); multiple_bundles;
  dst  : E(write)+1;
  p    : R(read);
  q    : R(read);
  IALU : R;
%}

// Integer ALU hi-lo-reg operation (e.g. sethi/or constant materialization)
pipe_class ialu_hi_lo_reg(iRegI dst, immI src) %{
  instruction_count(1); multiple_bundles;
  dst  : E(write)+1;
  IALU : R(2);
%}

// Float ALU hi-lo-reg operation (with temp)
pipe_class ialu_hi_lo_reg_temp(regF dst, immF src, g3RegP tmp) %{
  instruction_count(1); multiple_bundles;
  dst  : E(write)+1;
  IALU : R(2);
%}

// Long Constant
pipe_class loadConL( iRegL dst, immL src ) %{
  instruction_count(2); multiple_bundles;
  dst  : E(write)+1;
  IALU : R(2);
  IALU : R(2);
%}

// Pointer Constant
pipe_class loadConP( iRegP dst, immP src ) %{
  instruction_count(0); multiple_bundles;
  fixed_latency(6);
%}

// Polling Address
pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{
#ifdef _LP64
  instruction_count(0); multiple_bundles;
  fixed_latency(6);
#else
  dst  : E(write);
  IALU : R;
#endif
%}
// Long Constant small
pipe_class loadConLlo( iRegL dst, immL src ) %{
  instruction_count(2);
  dst  : E(write);
  IALU : R;
  IALU : R;
%}

// [PHH] This is wrong for 64-bit. See LdImmF/D.
pipe_class loadConFD(regF dst, immF src, g3RegP tmp) %{
  instruction_count(1); multiple_bundles;
  src : R(read);
  dst : M(write)+1;
  IALU : R;
  MS   : E;
%}

// Integer ALU nop operation
pipe_class ialu_nop() %{
  single_instruction;
  IALU : R;
%}

// Integer ALU nop operation, pinned to unit A0
pipe_class ialu_nop_A0() %{
  single_instruction;
  A0 : R;
%}

// Integer ALU nop operation, pinned to unit A1
pipe_class ialu_nop_A1() %{
  single_instruction;
  A1 : R;
%}

// Integer Multiply reg-reg operation
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  single_instruction;
  dst  : E(write);
  src1 : R(read);
  src2 : R(read);
  MS   : R(5);
%}

// Integer Multiply reg-imm operation
pipe_class imul_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
  single_instruction;
  dst  : E(write);
  src1 : R(read);
  MS   : R(5);
%}

pipe_class mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  single_instruction;
  dst  : E(write)+4;
  src1 : R(read);
  src2 : R(read);
  MS   : R(6);
%}

pipe_class mulL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
  single_instruction;
  dst  : E(write)+4;
  src1 : R(read);
  MS   : R(6);
%}

// Integer Divide reg-reg (long latency: MS unit busy 38 cycles)
pipe_class sdiv_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI temp, flagsReg cr) %{
  instruction_count(1); multiple_bundles;
  dst  : E(write);
  temp : E(write);
  src1 : R(read);
  src2 : R(read);
  temp : R(read);
  MS   : R(38);
%}

// Integer Divide reg-imm
pipe_class sdiv_reg_imm(iRegI dst, iRegI src1, immI13 src2, iRegI temp, flagsReg cr) %{
  instruction_count(1); multiple_bundles;
  dst  : E(write);
  temp : E(write);
  src1 : R(read);
  temp : R(read);
  MS   : R(38);
%}

// Long Divide (result not available until 71 cycles after execute)
pipe_class divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  dst  : E(write)+71;
  src1 : R(read);
  src2 : R(read)+1;
  MS   : R(70);
%}

pipe_class divL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
  dst  : E(write)+71;
  src1 : R(read);
  MS   : R(70);
%}
// Floating Point Add Float
pipe_class faddF_reg_reg(regF dst, regF src1, regF src2) %{
  single_instruction;
  dst  : X(write);
  src1 : E(read);
  src2 : E(read);
  FA   : R;
%}

// Floating Point Add Double
pipe_class faddD_reg_reg(regD dst, regD src1, regD src2) %{
  single_instruction;
  dst  : X(write);
  src1 : E(read);
  src2 : E(read);
  FA   : R;
%}

// Floating Point Conditional Move based on integer flags
pipe_class int_conditional_float_move (cmpOp cmp, flagsReg cr, regF dst, regF src) %{
  single_instruction;
  dst : X(write);
  src : E(read);
  cr  : R(read);
  FA  : R(2);
  BR  : R(2);
%}

// Floating Point Conditional Move based on integer flags
pipe_class int_conditional_double_move (cmpOp cmp, flagsReg cr, regD dst, regD src) %{
  single_instruction;
  dst : X(write);
  src : E(read);
  cr  : R(read);
  FA  : R(2);
  BR  : R(2);
%}

// Floating Point Multiply Float
pipe_class fmulF_reg_reg(regF dst, regF src1, regF src2) %{
  single_instruction;
  dst  : X(write);
  src1 : E(read);
  src2 : E(read);
  FM   : R;
%}

// Floating Point Multiply Double
pipe_class fmulD_reg_reg(regD dst, regD src1, regD src2) %{
  single_instruction;
  dst  : X(write);
  src1 : E(read);
  src2 : E(read);
  FM   : R;
%}

// Floating Point Divide Float (FDIV unit busy 14 cycles)
pipe_class fdivF_reg_reg(regF dst, regF src1, regF src2) %{
  single_instruction;
  dst  : X(write);
  src1 : E(read);
  src2 : E(read);
  FM   : R;
  FDIV : C(14);
%}

// Floating Point Divide Double (FDIV unit busy 17 cycles)
pipe_class fdivD_reg_reg(regD dst, regD src1, regD src2) %{
  single_instruction;
  dst  : X(write);
  src1 : E(read);
  src2 : E(read);
  FM   : R;
  FDIV : C(17);
%}

// Floating Point Move/Negate/Abs Float
pipe_class faddF_reg(regF dst, regF src) %{
  single_instruction;
  dst : W(write);
  src : E(read);
  FA  : R(1);
%}

// Floating Point Move/Negate/Abs Double
pipe_class faddD_reg(regD dst, regD src) %{
  single_instruction;
  dst : W(write);
  src : E(read);
  FA  : R;
%}

// Floating Point Convert F->D
pipe_class fcvtF2D(regD dst, regF src) %{
  single_instruction;
  dst : X(write);
  src : E(read);
  FA  : R;
%}

// Floating Point Convert I->D
pipe_class fcvtI2D(regD dst, regF src) %{
  single_instruction;
  dst : X(write);
  src : E(read);
  FA  : R;
%}

// Floating Point Convert LHi->D
pipe_class fcvtLHi2D(regD dst, regD src) %{
  single_instruction;
  dst : X(write);
  src : E(read);
  FA  : R;
%}

// Floating Point Convert L->D
pipe_class fcvtL2D(regD dst, regF src) %{
  single_instruction;
  dst : X(write);
  src : E(read);
  FA  : R;
%}

// Floating Point Convert L->F
pipe_class fcvtL2F(regD dst, regF src) %{
  single_instruction;
  dst : X(write);
  src : E(read);
  FA  : R;
%}

// Floating Point Convert D->F
pipe_class fcvtD2F(regD dst, regF src) %{
  single_instruction;
  dst : X(write);
  src : E(read);
  FA  : R;
%}

// Floating Point Convert I->L
pipe_class fcvtI2L(regD dst, regF src) %{
  single_instruction;
  dst : X(write);
  src : E(read);
  FA  : R;
%}

// Floating Point Convert D->I  (was mislabeled "D->F" — copy-paste)
pipe_class fcvtD2I(regF dst, regD src, flagsReg cr) %{
  instruction_count(1); multiple_bundles;
  dst : X(write)+6;
  src : E(read);
  FA  : R;
%}

// Floating Point Convert D->L
pipe_class fcvtD2L(regD dst, regD src, flagsReg cr) %{
  instruction_count(1); multiple_bundles;
  dst : X(write)+6;
  src : E(read);
  FA  : R;
%}

// Floating Point Convert F->I
pipe_class fcvtF2I(regF dst, regF src, flagsReg cr) %{
  instruction_count(1); multiple_bundles;
  dst : X(write)+6;
  src : E(read);
  FA  : R;
%}

// Floating Point Convert F->L
pipe_class fcvtF2L(regD dst, regF src, flagsReg cr) %{
  instruction_count(1); multiple_bundles;
  dst : X(write)+6;
  src : E(read);
  FA  : R;
%}

// Floating Point Convert I->F
pipe_class fcvtI2F(regF dst, regF src) %{
  single_instruction;
  dst : X(write);
  src : E(read);
  FA  : R;
%}

// Floating Point Compare
pipe_class faddF_fcc_reg_reg_zero(flagsRegF cr, regF src1, regF src2, immI0 zero) %{
  single_instruction;
  cr   : X(write);
  src1 : E(read);
  src2 : E(read);
  FA   : R;
%}

// Floating Point Compare
pipe_class faddD_fcc_reg_reg_zero(flagsRegF cr, regD src1, regD src2, immI0 zero) %{
  single_instruction;
  cr   : X(write);
  src1 : E(read);
  src2 : E(read);
  FA   : R;
%}

// Floating Add Nop
pipe_class fadd_nop() %{
  single_instruction;
  FA : R;
%}
// Integer Store to Memory
pipe_class istore_mem_reg(memory mem, iRegI src) %{
  single_instruction;
  mem : R(read);
  src : C(read);
  MS  : R;
%}

// Integer Store to Memory (stack-pointer or general register source)
pipe_class istore_mem_spORreg(memory mem, sp_ptr_RegP src) %{
  single_instruction;
  mem : R(read);
  src : C(read);
  MS  : R;
%}

// Integer Store Zero to Memory
pipe_class istore_mem_zero(memory mem, immI0 src) %{
  single_instruction;
  mem : R(read);
  MS  : R;
%}

// Special Stack Slot Store
pipe_class istore_stk_reg(stackSlotI stkSlot, iRegI src) %{
  single_instruction;
  stkSlot : R(read);
  src     : C(read);
  MS      : R;
%}

// Special Stack Slot Store (long, two memory ops)
pipe_class lstoreI_stk_reg(stackSlotL stkSlot, iRegI src) %{
  instruction_count(2); multiple_bundles;
  stkSlot : R(read);
  src     : C(read);
  MS      : R(2);
%}

// Float Store
pipe_class fstoreF_mem_reg(memory mem, RegF src) %{
  single_instruction;
  mem : R(read);
  src : C(read);
  MS  : R;
%}

// Float Store (zero)
pipe_class fstoreF_mem_zero(memory mem, immF0 src) %{
  single_instruction;
  mem : R(read);
  MS  : R;
%}

// Double Store
pipe_class fstoreD_mem_reg(memory mem, RegD src) %{
  instruction_count(1);
  mem : R(read);
  src : C(read);
  MS  : R;
%}

// Double Store (zero)
pipe_class fstoreD_mem_zero(memory mem, immD0 src) %{
  single_instruction;
  mem : R(read);
  MS  : R;
%}

// Special Stack Slot Float Store
pipe_class fstoreF_stk_reg(stackSlotI stkSlot, RegF src) %{
  single_instruction;
  stkSlot : R(read);
  src     : C(read);
  MS      : R;
%}

// Special Stack Slot Double Store
pipe_class fstoreD_stk_reg(stackSlotI stkSlot, RegD src) %{
  single_instruction;
  stkSlot : R(read);
  src     : C(read);
  MS      : R;
%}
// Integer Load (when sign bit propagation not needed)
pipe_class iload_mem(iRegI dst, memory mem) %{
  single_instruction;
  mem : R(read);
  dst : C(write);
  MS  : R;
%}

// Integer Load from stack operand
pipe_class iload_stkD(iRegI dst, stackSlotD mem ) %{
  single_instruction;
  mem : R(read);
  dst : C(write);
  MS  : R;
%}

// Integer Load (when sign bit propagation or masking is needed;
// result lands a stage later, in M rather than C)
pipe_class iload_mask_mem(iRegI dst, memory mem) %{
  single_instruction;
  mem : R(read);
  dst : M(write);
  MS  : R;
%}

// Float Load
pipe_class floadF_mem(regF dst, memory mem) %{
  single_instruction;
  mem : R(read);
  dst : M(write);
  MS  : R;
%}

// Double Load
pipe_class floadD_mem(regD dst, memory mem) %{
  instruction_count(1); multiple_bundles;  // Again, unaligned argument is only multiple case
  mem : R(read);
  dst : M(write);
  MS  : R;
%}

// Float Load from stack slot
pipe_class floadF_stk(regF dst, stackSlotI stkSlot) %{
  single_instruction;
  stkSlot : R(read);
  dst     : M(write);
  MS      : R;
%}

// Double Load from stack slot
pipe_class floadD_stk(regD dst, stackSlotI stkSlot) %{
  single_instruction;
  stkSlot : R(read);
  dst     : M(write);
  MS      : R;
%}

// Memory Nop
pipe_class mem_nop() %{
  single_instruction;
  MS : R;
%}
// SETHI constant-high materialization
pipe_class sethi(iRegP dst, immI src) %{
  single_instruction;
  dst  : E(write);
  IALU : R;
%}

// Safepoint-poll load
pipe_class loadPollP(iRegP poll) %{
  single_instruction;
  poll : R(read);
  MS   : R;
%}

// Unconditional branch (with architectural delay slot)
pipe_class br(Universe br, label labl) %{
  single_instruction_with_delay_slot;
  BR : R;
%}

// Conditional branch on integer condition codes
pipe_class br_cc(Universe br, cmpOp cmp, flagsReg cr, label labl) %{
  single_instruction_with_delay_slot;
  cr : E(read);
  BR : R;
%}

// Branch on register value
pipe_class br_reg(Universe br, cmpOp cmp, iRegI op1, label labl) %{
  single_instruction_with_delay_slot;
  op1 : E(read);
  BR  : R;
  MS  : R;
%}

// Compare and branch
pipe_class cmp_br_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl, flagsReg cr) %{
  instruction_count(2); has_delay_slot;
  cr   : E(write);
  src1 : R(read);
  src2 : R(read);
  IALU : R;
  BR   : R;
%}

// Compare and branch
pipe_class cmp_br_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI13 src2, label labl, flagsReg cr) %{
  instruction_count(2); has_delay_slot;
  cr   : E(write);
  src1 : R(read);
  IALU : R;
  BR   : R;
%}

// Compare and branch using cbcond
pipe_class cbcond_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl) %{
  single_instruction;
  src1 : E(read);
  src2 : E(read);
  IALU : R;
  BR   : R;
%}

// Compare and branch using cbcond
pipe_class cbcond_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI5 src2, label labl) %{
  single_instruction;
  src1 : E(read);
  IALU : R;
  BR   : R;
%}

// Branch on floating-point condition codes
pipe_class br_fcc(Universe br, cmpOpF cc, flagsReg cr, label labl) %{
  single_instruction_with_delay_slot;
  cr : E(read);
  BR : R;
%}

// Branch-unit nop
pipe_class br_nop() %{
  single_instruction;
  BR : R;
%}
// Runtime/stub call: serializes the pipeline, long fixed latency
pipe_class simple_call(method meth) %{
  instruction_count(2); multiple_bundles; force_serialization;
  fixed_latency(100);
  BR : R(1);
  MS : R(1);
  A0 : R(1);
%}

// Call to compiled code
pipe_class compiled_call(method meth) %{
  instruction_count(1); multiple_bundles; force_serialization;
  fixed_latency(100);
  MS : R(1);
%}

// Generic call
pipe_class call(method meth) %{
  instruction_count(0); multiple_bundles; force_serialization;
  fixed_latency(100);
%}

// Tail call / jump
pipe_class tail_call(Universe ignore, label labl) %{
  single_instruction; has_delay_slot;
  fixed_latency(100);
  BR : R(1);
  MS : R(1);
%}

// Return
pipe_class ret(Universe ignore) %{
  single_instruction; has_delay_slot;
  BR : R(1);
  MS : R(1);
%}

// Return with safepoint poll
pipe_class ret_poll(g3RegP poll) %{
  instruction_count(3); has_delay_slot;
  poll : E(read);
  MS   : R;
%}

// The real do-nothing guy
pipe_class empty( ) %{
  instruction_count(0);
%}

// Long-latency, serializing memory operation
pipe_class long_memory_op() %{
  instruction_count(0); multiple_bundles; force_serialization;
  fixed_latency(25);
  MS : R(1);
%}
// Check-cast
pipe_class partial_subtype_check_pipe(Universe ignore, iRegP array, iRegP match ) %{
  array : R(read);
  match : R(read);
  IALU  : R(2);
  BR    : R(2);
  MS    : R;
%}

// Convert FPU flags into +1,0,-1
pipe_class floating_cmp( iRegI dst, regF src1, regF src2 ) %{
  src1 : E(read);
  src2 : E(read);
  dst  : E(write);
  FA   : R;
  MS   : R(2);
  BR   : R(2);
%}

// Compare for p < q, and conditionally add y
pipe_class cadd_cmpltmask( iRegI p, iRegI q, iRegI y ) %{
  p : E(read);
  q : E(read);
  y : E(read);
  // NOTE(review): no ';' after R(3), unlike every other class here —
  // ADLC apparently accepts it before '%}'; confirm before "fixing".
  IALU : R(3)
%}

// Perform a compare, then move conditionally in a branch delay slot.
pipe_class min_max( iRegI src2, iRegI srcdst ) %{
  src2   : E(read);
  srcdst : E(read);
  IALU   : R;
  BR     : R;
%}
// Define the class for the Nop node: the scheduler's generic MachNop
// uses the integer-ALU nop pipeline.
define %{
  MachNop = ialu_nop;
%}
5307 %}
5309 //----------INSTRUCTIONS-------------------------------------------------------
//------------Special Stack Slot instructions - no match rules-----------------
// These have no match rule: they are selected explicitly by the compiler
// to move values between stack slots and FP/long registers without
// creating chain-rule matches.

instruct stkI_to_regF(regF dst, stackSlotI src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDF    $src,$dst\t! stkI to regF" %}
  opcode(Assembler::ldf_op3);
  ins_encode(simple_form3_mem_reg(src, dst));
  ins_pipe(floadF_stk);
%}

instruct stkL_to_regD(regD dst, stackSlotL src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDDF   $src,$dst\t! stkL to regD" %}
  opcode(Assembler::lddf_op3);
  ins_encode(simple_form3_mem_reg(src, dst));
  ins_pipe(floadD_stk);
%}

instruct regF_to_stkI(stackSlotI dst, regF src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STF    $src,$dst\t! regF to stkI" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg(dst, src));
  ins_pipe(fstoreF_stk_reg);
%}

instruct regD_to_stkL(stackSlotL dst, regD src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STDF   $src,$dst\t! regD to stkL" %}
  opcode(Assembler::stdf_op3);
  ins_encode(simple_form3_mem_reg(dst, src));
  ins_pipe(fstoreD_stk_reg);
%}

// Store int to the high word of a long stack slot, zeroing the low word.
instruct regI_to_stkLHi(stackSlotL dst, iRegI src) %{
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST*2);
  size(8);
  format %{ "STW    $src,$dst.hi\t! long\n\t"
            "STW    R_G0,$dst.lo" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, R_G0));
  ins_pipe(lstoreI_stk_reg);
%}

instruct regL_to_stkD(stackSlotD dst, iRegL src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX    $src,$dst\t! regL to stkD" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_stk_reg);
%}
//---------- Chain stack slots between similar types --------

// Load integer from stack slot
instruct stkI_to_regI( iRegI dst, stackSlotI src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUW   $src,$dst\t!stk" %}
  opcode(Assembler::lduw_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store integer to stack slot
instruct regI_to_stkI( stackSlotI dst, iRegI src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STW    $src,$dst\t!stk" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}

// Load long from stack slot
instruct stkL_to_regL( iRegL dst, stackSlotL src ) %{
  match(Set dst src);

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDX    $src,$dst\t! long" %}
  opcode(Assembler::ldx_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store long to stack slot
instruct regL_to_stkL(stackSlotL dst, iRegL src) %{
  match(Set dst src);

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX    $src,$dst\t! long" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}
#ifdef _LP64
// Load pointer from stack slot, 64-bit encoding
instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDX    $src,$dst\t!ptr" %}
  opcode(Assembler::ldx_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store pointer to stack slot
instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX    $src,$dst\t!ptr" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}
#else // _LP64
// Load pointer from stack slot, 32-bit encoding
instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  format %{ "LDUW   $src,$dst\t!ptr" %}
  opcode(Assembler::lduw_op3, Assembler::ldst_op);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store pointer to stack slot
instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  format %{ "STW    $src,$dst\t!ptr" %}
  opcode(Assembler::stw_op3, Assembler::ldst_op);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}
#endif // _LP64
//------------Special Nop instructions for bundling - no match rules-----------
// Each nop is pinned to one functional unit so the bundler can pad a
// specific pipeline; all are zero-cost.

// Nop using the A0 functional unit
instruct Nop_A0() %{
  ins_cost(0);

  format %{ "NOP    ! Alu Pipeline" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form2_nop() );
  ins_pipe(ialu_nop_A0);
%}

// Nop using the A1 functional unit
instruct Nop_A1( ) %{
  ins_cost(0);

  format %{ "NOP    ! Alu Pipeline" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form2_nop() );
  ins_pipe(ialu_nop_A1);
%}

// Nop using the memory functional unit
instruct Nop_MS( ) %{
  ins_cost(0);

  format %{ "NOP    ! Memory Pipeline" %}
  ins_encode( emit_mem_nop );
  ins_pipe(mem_nop);
%}

// Nop using the floating add functional unit
instruct Nop_FA( ) %{
  ins_cost(0);

  format %{ "NOP    ! Floating Add Pipeline" %}
  ins_encode( emit_fadd_nop );
  ins_pipe(fadd_nop);
%}

// Nop using the branch functional unit
instruct Nop_BR( ) %{
  ins_cost(0);

  format %{ "NOP    ! Branch Pipeline" %}
  ins_encode( emit_br_nop );
  ins_pipe(br_nop);
%}
5520 //----------Load/Store/Move Instructions---------------------------------------
5521 //----------Load Instructions--------------------------------------------------
5522 // Load Byte (8bit signed). Sign-extending load, hence the masked-load pipe.
5523 instruct loadB(iRegI dst, memory mem) %{
5524 match(Set dst (LoadB mem));
5525 ins_cost(MEMORY_REF_COST);
5527 size(4);
5528 format %{ "LDSB $mem,$dst\t! byte" %}
5529 ins_encode %{
5530 __ ldsb($mem$$Address, $dst$$Register);
5531 %}
5532 ins_pipe(iload_mask_mem);
5533 %}
5535 // Load Byte (8bit signed) into a Long Register; LDSB sign-extends to 64 bits,
5535 // so the ConvI2L is folded into the single load.
5536 instruct loadB2L(iRegL dst, memory mem) %{
5537 match(Set dst (ConvI2L (LoadB mem)));
5538 ins_cost(MEMORY_REF_COST);
5540 size(4);
5541 format %{ "LDSB $mem,$dst\t! byte -> long" %}
5542 ins_encode %{
5543 __ ldsb($mem$$Address, $dst$$Register);
5544 %}
5545 ins_pipe(iload_mask_mem);
5546 %}
5548 // Load Unsigned Byte (8bit UNsigned) into an int reg
5549 instruct loadUB(iRegI dst, memory mem) %{
5550 match(Set dst (LoadUB mem));
5551 ins_cost(MEMORY_REF_COST);
5553 size(4);
5554 format %{ "LDUB $mem,$dst\t! ubyte" %}
5555 ins_encode %{
5556 __ ldub($mem$$Address, $dst$$Register);
5557 %}
5558 ins_pipe(iload_mem);
5559 %}
5561 // Load Unsigned Byte (8bit UNsigned) into a Long Register; LDUB zero-extends,
5561 // so the ConvI2L is free.
5562 instruct loadUB2L(iRegL dst, memory mem) %{
5563 match(Set dst (ConvI2L (LoadUB mem)));
5564 ins_cost(MEMORY_REF_COST);
5566 size(4);
5567 format %{ "LDUB $mem,$dst\t! ubyte -> long" %}
5568 ins_encode %{
5569 __ ldub($mem$$Address, $dst$$Register);
5570 %}
5571 ins_pipe(iload_mem);
5572 %}
5574 // Load Unsigned Byte (8 bit UNsigned) with 8-bit mask into Long Register;
5574 // the mask fits a simm13 immediate, so load + one AND suffices.
5575 instruct loadUB2L_immI8(iRegL dst, memory mem, immI8 mask) %{
5576 match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
5577 ins_cost(MEMORY_REF_COST + DEFAULT_COST);
5579 size(2*4);
5580 format %{ "LDUB $mem,$dst\t# ubyte & 8-bit mask -> long\n\t"
5581 "AND $dst,$mask,$dst" %}
5582 ins_encode %{
5583 __ ldub($mem$$Address, $dst$$Register);
5584 __ and3($dst$$Register, $mask$$constant, $dst$$Register);
5585 %}
5586 ins_pipe(iload_mem);
5587 %}
5589 // Load Short (16bit signed)
5590 instruct loadS(iRegI dst, memory mem) %{
5591 match(Set dst (LoadS mem));
5592 ins_cost(MEMORY_REF_COST);
5594 size(4);
5595 format %{ "LDSH $mem,$dst\t! short" %}
5596 ins_encode %{
5597 __ ldsh($mem$$Address, $dst$$Register);
5598 %}
5599 ins_pipe(iload_mask_mem);
5600 %}
5602 // Load Short (16 bit signed) to Byte (8 bit signed): the <<24,>>24 narrowing
5602 // is folded into a single LDSB of the low byte (offset +1, big-endian).
5603 instruct loadS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
5604 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
5605 ins_cost(MEMORY_REF_COST);
5607 size(4);
5609 format %{ "LDSB $mem+1,$dst\t! short -> byte" %}
5610 ins_encode %{
5611 __ ldsb($mem$$Address, $dst$$Register, 1);
5612 %}
5613 ins_pipe(iload_mask_mem);
5614 %}
5616 // Load Short (16bit signed) into a Long Register
5617 instruct loadS2L(iRegL dst, memory mem) %{
5618 match(Set dst (ConvI2L (LoadS mem)));
5619 ins_cost(MEMORY_REF_COST);
5621 size(4);
5622 format %{ "LDSH $mem,$dst\t! short -> long" %}
5623 ins_encode %{
5624 __ ldsh($mem$$Address, $dst$$Register);
5625 %}
5626 ins_pipe(iload_mask_mem);
5627 %}
5629 // Load Unsigned Short/Char (16bit UNsigned)
5630 instruct loadUS(iRegI dst, memory mem) %{
5631 match(Set dst (LoadUS mem));
5632 ins_cost(MEMORY_REF_COST);
5634 size(4);
5635 format %{ "LDUH $mem,$dst\t! ushort/char" %}
5636 ins_encode %{
5637 __ lduh($mem$$Address, $dst$$Register);
5638 %}
5639 ins_pipe(iload_mem);
5640 %}
5642 // Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed)
5643 instruct loadUS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
5644 match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour));
5645 ins_cost(MEMORY_REF_COST);
5647 size(4);
5648 format %{ "LDSB $mem+1,$dst\t! ushort -> byte" %}
5649 ins_encode %{
5650 __ ldsb($mem$$Address, $dst$$Register, 1);
5651 %}
5652 ins_pipe(iload_mask_mem);
5653 %}
5655 // Load Unsigned Short/Char (16bit UNsigned) into a Long Register
5656 instruct loadUS2L(iRegL dst, memory mem) %{
5657 match(Set dst (ConvI2L (LoadUS mem)));
5658 ins_cost(MEMORY_REF_COST);
5660 size(4);
5661 format %{ "LDUH $mem,$dst\t! ushort/char -> long" %}
5662 ins_encode %{
5663 __ lduh($mem$$Address, $dst$$Register);
5664 %}
5665 ins_pipe(iload_mem);
5666 %}
5668 // Load Unsigned Short/Char (16bit UNsigned) with mask 0xFF into a Long Register:
5668 // the AND with 0xFF is folded into a byte load of the LSB (offset +1, big-endian).
5669 instruct loadUS2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
5670 match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
5671 ins_cost(MEMORY_REF_COST);
5673 size(4);
5674 format %{ "LDUB $mem+1,$dst\t! ushort/char & 0xFF -> long" %}
5675 ins_encode %{
5676 __ ldub($mem$$Address, $dst$$Register, 1); // LSB is index+1 on BE
5677 %}
5678 ins_pipe(iload_mem);
5679 %}
5681 // Load Unsigned Short/Char (16bit UNsigned) with a 13-bit mask into a Long Register;
5681 // mask fits a simm13 immediate so a single AND finishes the job.
5682 instruct loadUS2L_immI13(iRegL dst, memory mem, immI13 mask) %{
5683 match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
5684 ins_cost(MEMORY_REF_COST + DEFAULT_COST);
5686 size(2*4);
5687 format %{ "LDUH $mem,$dst\t! ushort/char & 13-bit mask -> long\n\t"
5688 "AND $dst,$mask,$dst" %}
5689 ins_encode %{
5690 Register Rdst = $dst$$Register;
5691 __ lduh($mem$$Address, Rdst);
5692 __ and3(Rdst, $mask$$constant, Rdst);
5693 %}
5694 ins_pipe(iload_mem);
5695 %}
5697 // Load Unsigned Short/Char (16bit UNsigned) with a 16-bit mask into a Long Register;
5697 // mask is too wide for an immediate, so it is materialized into tmp with SET first.
5698 instruct loadUS2L_immI16(iRegL dst, memory mem, immI16 mask, iRegL tmp) %{
5699 match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
5700 effect(TEMP dst, TEMP tmp);
5701 ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
5703 size((3+1)*4); // set may use two instructions.
5704 format %{ "LDUH $mem,$dst\t! ushort/char & 16-bit mask -> long\n\t"
5705 "SET $mask,$tmp\n\t"
5706 "AND $dst,$tmp,$dst" %}
5707 ins_encode %{
5708 Register Rdst = $dst$$Register;
5709 Register Rtmp = $tmp$$Register;
5710 __ lduh($mem$$Address, Rdst);
5711 __ set($mask$$constant, Rtmp);
5712 __ and3(Rdst, Rtmp, Rdst);
5713 %}
5714 ins_pipe(iload_mem);
5715 %}
5717 // Load Integer
5718 instruct loadI(iRegI dst, memory mem) %{
5719 match(Set dst (LoadI mem));
5720 ins_cost(MEMORY_REF_COST);
5722 size(4);
5723 format %{ "LDUW $mem,$dst\t! int" %}
5724 ins_encode %{
5725 __ lduw($mem$$Address, $dst$$Register);
5726 %}
5727 ins_pipe(iload_mem);
5728 %}
5730 // Load Integer to Byte (8 bit signed): the <<24,>>24 narrowing is folded into
5730 // a single LDSB of the low byte (offset +3, big-endian).
5731 instruct loadI2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
5732 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
5733 ins_cost(MEMORY_REF_COST);
5735 size(4);
5737 format %{ "LDSB $mem+3,$dst\t! int -> byte" %}
5738 ins_encode %{
5739 __ ldsb($mem$$Address, $dst$$Register, 3);
5740 %}
5741 ins_pipe(iload_mask_mem);
5742 %}
5744 // Load Integer to Unsigned Byte (8 bit UNsigned): AND 0xFF folded into LDUB.
5745 instruct loadI2UB(iRegI dst, indOffset13m7 mem, immI_255 mask) %{
5746 match(Set dst (AndI (LoadI mem) mask));
5747 ins_cost(MEMORY_REF_COST);
5749 size(4);
5751 format %{ "LDUB $mem+3,$dst\t! int -> ubyte" %}
5752 ins_encode %{
5753 __ ldub($mem$$Address, $dst$$Register, 3);
5754 %}
5755 ins_pipe(iload_mask_mem);
5756 %}
5758 // Load Integer to Short (16 bit signed): <<16,>>16 folded into LDSH of the
5758 // low half-word (offset +2, big-endian).
5759 instruct loadI2S(iRegI dst, indOffset13m7 mem, immI_16 sixteen) %{
5760 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
5761 ins_cost(MEMORY_REF_COST);
5763 size(4);
5765 format %{ "LDSH $mem+2,$dst\t! int -> short" %}
5766 ins_encode %{
5767 __ ldsh($mem$$Address, $dst$$Register, 2);
5768 %}
5769 ins_pipe(iload_mask_mem);
5770 %}
5772 // Load Integer to Unsigned Short (16 bit UNsigned): AND 0xFFFF folded into LDUH.
5773 instruct loadI2US(iRegI dst, indOffset13m7 mem, immI_65535 mask) %{
5774 match(Set dst (AndI (LoadI mem) mask));
5775 ins_cost(MEMORY_REF_COST);
5777 size(4);
5779 format %{ "LDUH $mem+2,$dst\t! int -> ushort/char" %}
5780 ins_encode %{
5781 __ lduh($mem$$Address, $dst$$Register, 2);
5782 %}
5783 ins_pipe(iload_mask_mem);
5784 %}
5786 // Load Integer into a Long Register: LDSW sign-extends, folding the ConvI2L.
5787 instruct loadI2L(iRegL dst, memory mem) %{
5788 match(Set dst (ConvI2L (LoadI mem)));
5789 ins_cost(MEMORY_REF_COST);
5791 size(4);
5792 format %{ "LDSW $mem,$dst\t! int -> long" %}
5793 ins_encode %{
5794 __ ldsw($mem$$Address, $dst$$Register);
5795 %}
5796 ins_pipe(iload_mask_mem);
5797 %}
5799 // Load Integer with mask 0xFF into a Long Register: folded into a byte load.
5800 instruct loadI2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
5801 match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5802 ins_cost(MEMORY_REF_COST);
5804 size(4);
5805 format %{ "LDUB $mem+3,$dst\t! int & 0xFF -> long" %}
5806 ins_encode %{
5807 __ ldub($mem$$Address, $dst$$Register, 3); // LSB is index+3 on BE
5808 %}
5809 ins_pipe(iload_mem);
5810 %}
5812 // Load Integer with mask 0xFFFF into a Long Register: folded into a half-word load.
5813 instruct loadI2L_immI_65535(iRegL dst, indOffset13m7 mem, immI_65535 mask) %{
5814 match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5815 ins_cost(MEMORY_REF_COST);
5817 size(4);
5818 format %{ "LDUH $mem+2,$dst\t! int & 0xFFFF -> long" %}
5819 ins_encode %{
5820 __ lduh($mem$$Address, $dst$$Register, 2); // LSW is index+2 on BE
5821 %}
5822 ins_pipe(iload_mem);
5823 %}
5825 // Load Integer with a 13-bit mask into a Long Register; mask fits simm13.
5826 instruct loadI2L_immI13(iRegL dst, memory mem, immI13 mask) %{
5827 match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5828 ins_cost(MEMORY_REF_COST + DEFAULT_COST);
5830 size(2*4);
5831 format %{ "LDUW $mem,$dst\t! int & 13-bit mask -> long\n\t"
5832 "AND $dst,$mask,$dst" %}
5833 ins_encode %{
5834 Register Rdst = $dst$$Register;
5835 __ lduw($mem$$Address, Rdst);
5836 __ and3(Rdst, $mask$$constant, Rdst);
5837 %}
5838 ins_pipe(iload_mem);
5839 %}
5841 // Load Integer with a 32-bit mask into a Long Register; the wide mask must be
5841 // materialized into tmp with SET before the AND.
5842 instruct loadI2L_immI(iRegL dst, memory mem, immI mask, iRegL tmp) %{
5843 match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
5844 effect(TEMP dst, TEMP tmp);
5845 ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
5847 size((3+1)*4); // set may use two instructions.
5848 format %{ "LDUW $mem,$dst\t! int & 32-bit mask -> long\n\t"
5849 "SET $mask,$tmp\n\t"
5850 "AND $dst,$tmp,$dst" %}
5851 ins_encode %{
5852 Register Rdst = $dst$$Register;
5853 Register Rtmp = $tmp$$Register;
5854 __ lduw($mem$$Address, Rdst);
5855 __ set($mask$$constant, Rtmp);
5856 __ and3(Rdst, Rtmp, Rdst);
5857 %}
5858 ins_pipe(iload_mem);
5859 %}
5861 // Load Unsigned Integer into a Long Register (LDUW zero-extends to 64 bits)
5862 instruct loadUI2L(iRegL dst, memory mem) %{
5863 match(Set dst (LoadUI2L mem));
5864 ins_cost(MEMORY_REF_COST);
5866 size(4);
5867 format %{ "LDUW $mem,$dst\t! uint -> long" %}
5868 ins_encode %{
5869 __ lduw($mem$$Address, $dst$$Register);
5870 %}
5871 ins_pipe(iload_mem);
5872 %}
5874 // Load Long - aligned
5875 instruct loadL(iRegL dst, memory mem ) %{
5876 match(Set dst (LoadL mem));
5877 ins_cost(MEMORY_REF_COST);
5879 size(4);
5880 format %{ "LDX $mem,$dst\t! long" %}
5881 ins_encode %{
5882 __ ldx($mem$$Address, $dst$$Register);
5883 %}
5884 ins_pipe(iload_mem);
5885 %}
5887 // Load Long - UNaligned: two word loads, shift/or to marshal into one register;
5887 // clobbers O7 as scratch for the low word.
5888 instruct loadL_unaligned(iRegL dst, memory mem, o7RegI tmp) %{
5889 match(Set dst (LoadL_unaligned mem));
5890 effect(KILL tmp);
5891 ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
5892 size(16);
5893 format %{ "LDUW $mem+4,R_O7\t! misaligned long\n"
5894 "\tLDUW $mem ,$dst\n"
5895 "\tSLLX #32, $dst, $dst\n"
5896 "\tOR $dst, R_O7, $dst" %}
5897 opcode(Assembler::lduw_op3);
5898 ins_encode(form3_mem_reg_long_unaligned_marshal( mem, dst ));
5899 ins_pipe(iload_mem);
5900 %}
5902 // Load Range (array length field)
5903 instruct loadRange(iRegI dst, memory mem) %{
5904 match(Set dst (LoadRange mem));
5905 ins_cost(MEMORY_REF_COST);
5907 size(4);
5908 format %{ "LDUW $mem,$dst\t! range" %}
5909 opcode(Assembler::lduw_op3);
5910 ins_encode(simple_form3_mem_reg( mem, dst ) );
5911 ins_pipe(iload_mem);
5912 %}
5914 // Load Integer into %f register (for fitos/fitod)
5915 instruct loadI_freg(regF dst, memory mem) %{
5916 match(Set dst (LoadI mem));
5917 ins_cost(MEMORY_REF_COST);
5918 size(4);
5920 format %{ "LDF $mem,$dst\t! for fitos/fitod" %}
5921 opcode(Assembler::ldf_op3);
5922 ins_encode(simple_form3_mem_reg( mem, dst ) );
5923 ins_pipe(floadF_mem);
5924 %}
5926 // Load Pointer: 32-bit LDUW on ILP32 builds, 64-bit LDX on _LP64 builds
5927 instruct loadP(iRegP dst, memory mem) %{
5928 match(Set dst (LoadP mem));
5929 ins_cost(MEMORY_REF_COST);
5930 size(4);
5932 #ifndef _LP64
5933 format %{ "LDUW $mem,$dst\t! ptr" %}
5934 ins_encode %{
5935 __ lduw($mem$$Address, $dst$$Register);
5936 %}
5937 #else
5938 format %{ "LDX $mem,$dst\t! ptr" %}
5939 ins_encode %{
5940 __ ldx($mem$$Address, $dst$$Register);
5941 %}
5942 #endif
5943 ins_pipe(iload_mem);
5944 %}
5946 // Load Compressed Pointer (32-bit narrow oop; decode is a separate node)
5947 instruct loadN(iRegN dst, memory mem) %{
5948 match(Set dst (LoadN mem));
5949 ins_cost(MEMORY_REF_COST);
5950 size(4);
5952 format %{ "LDUW $mem,$dst\t! compressed ptr" %}
5953 ins_encode %{
5954 __ lduw($mem$$Address, $dst$$Register);
5955 %}
5956 ins_pipe(iload_mem);
5957 %}
5959 // Load Klass Pointer
5960 instruct loadKlass(iRegP dst, memory mem) %{
5961 match(Set dst (LoadKlass mem));
5962 ins_cost(MEMORY_REF_COST);
5963 size(4);
5965 #ifndef _LP64
5966 format %{ "LDUW $mem,$dst\t! klass ptr" %}
5967 ins_encode %{
5968 __ lduw($mem$$Address, $dst$$Register);
5969 %}
5970 #else
5971 format %{ "LDX $mem,$dst\t! klass ptr" %}
5972 ins_encode %{
5973 __ ldx($mem$$Address, $dst$$Register);
5974 %}
5975 #endif
5976 ins_pipe(iload_mem);
5977 %}
5979 // Load narrow Klass Pointer
5980 instruct loadNKlass(iRegN dst, memory mem) %{
5981 match(Set dst (LoadNKlass mem));
5982 ins_cost(MEMORY_REF_COST);
5983 size(4);
5985 format %{ "LDUW $mem,$dst\t! compressed klass ptr" %}
5986 ins_encode %{
5987 __ lduw($mem$$Address, $dst$$Register);
5988 %}
5989 ins_pipe(iload_mem);
5990 %}
5992 // Load Double (requires 8-byte-aligned address for LDDF)
5993 instruct loadD(regD dst, memory mem) %{
5994 match(Set dst (LoadD mem));
5995 ins_cost(MEMORY_REF_COST);
5997 size(4);
5998 format %{ "LDDF $mem,$dst" %}
5999 opcode(Assembler::lddf_op3);
6000 ins_encode(simple_form3_mem_reg( mem, dst ) );
6001 ins_pipe(floadD_mem);
6002 %}
6004 // Load Double - UNaligned: two single-word LDFs into the high/low halves
6004 // of an even/odd float-register pair (hence regD_low).
6005 instruct loadD_unaligned(regD_low dst, memory mem ) %{
6006 match(Set dst (LoadD_unaligned mem));
6007 ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
6008 size(8);
6009 format %{ "LDF $mem ,$dst.hi\t! misaligned double\n"
6010 "\tLDF $mem+4,$dst.lo\t!" %}
6011 opcode(Assembler::ldf_op3);
6012 ins_encode( form3_mem_reg_double_unaligned( mem, dst ));
6013 ins_pipe(iload_mem);
6014 %}
6016 // Load Float
6017 instruct loadF(regF dst, memory mem) %{
6018 match(Set dst (LoadF mem));
6019 ins_cost(MEMORY_REF_COST);
6021 size(4);
6022 format %{ "LDF $mem,$dst" %}
6023 opcode(Assembler::ldf_op3);
6024 ins_encode(simple_form3_mem_reg( mem, dst ) );
6025 ins_pipe(floadF_mem);
6026 %}
6028 // Load Constant: general 32-bit int, materialized with SETHI/OR (SET pseudo-op)
6029 instruct loadConI( iRegI dst, immI src ) %{
6030 match(Set dst src);
6031 ins_cost(DEFAULT_COST * 3/2);
6032 format %{ "SET $src,$dst" %}
6033 ins_encode( Set32(src, dst) );
6034 ins_pipe(ialu_hi_lo_reg);
6035 %}
6037 // Constant that fits a 13-bit signed immediate: single MOV (OR with %g0).
6038 instruct loadConI13( iRegI dst, immI13 src ) %{
6039 match(Set dst src);
6040 size(4);
6041 format %{ "MOV $src,$dst" %}
6042 ins_encode( Set13( src, dst ) );
6043 ins_pipe(ialu_imm);
6044 %}
6046 #ifndef _LP64
6047 // Pointer constant, 32-bit VM: dispatch on reloc type so oops and metadata
6047 // get the relocation records the GC / class unloading machinery needs.
6048 instruct loadConP(iRegP dst, immP con) %{
6049 match(Set dst con);
6050 ins_cost(DEFAULT_COST * 3/2);
6051 format %{ "SET $con,$dst\t!ptr" %}
6052 ins_encode %{
6053 relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
6054 intptr_t val = $con$$constant;
6055 if (constant_reloc == relocInfo::oop_type) {
6056 __ set_oop_constant((jobject) val, $dst$$Register);
6057 } else if (constant_reloc == relocInfo::metadata_type) {
6058 __ set_metadata_constant((Metadata*)val, $dst$$Register);
6059 } else { // non-oop pointers, e.g. card mark base, heap top
6060 assert(constant_reloc == relocInfo::none, "unexpected reloc type");
6061 __ set(val, $dst$$Register);
6062 }
6063 %}
6064 ins_pipe(loadConP);
6065 %}
6066 #else
6067 // Pointer constant, 64-bit VM, cheap-to-set form (see immP_set predicate).
6068 instruct loadConP_set(iRegP dst, immP_set con) %{
6069 match(Set dst con);
6070 ins_cost(DEFAULT_COST * 3/2);
6071 format %{ "SET $con,$dst\t! ptr" %}
6072 ins_encode %{
6073 relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
6074 intptr_t val = $con$$constant;
6075 if (constant_reloc == relocInfo::oop_type) {
6076 __ set_oop_constant((jobject) val, $dst$$Register);
6077 } else if (constant_reloc == relocInfo::metadata_type) {
6078 __ set_metadata_constant((Metadata*)val, $dst$$Register);
6079 } else { // non-oop pointers, e.g. card mark base, heap top
6080 assert(constant_reloc == relocInfo::none, "unexpected reloc type");
6081 __ set(val, $dst$$Register);
6082 }
6083 %}
6084 ins_pipe(loadConP);
6085 %}
6086 // Pointer constant that is cheaper to fetch from the constant table (immP_load).
6087 instruct loadConP_load(iRegP dst, immP_load con) %{
6088 match(Set dst con);
6089 ins_cost(MEMORY_REF_COST);
6090 format %{ "LD [$constanttablebase + $constantoffset],$dst\t! load from constant table: ptr=$con" %}
6091 ins_encode %{
6092 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
6093 __ ld_ptr($constanttablebase, con_offset, $dst$$Register);
6094 %}
6095 ins_pipe(loadConP);
6096 %}
6097 // Cheap non-oop pointer constant: plain SET, no relocation needed.
6098 instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{
6099 match(Set dst con);
6100 ins_cost(DEFAULT_COST * 3/2);
6101 format %{ "SET $con,$dst\t! non-oop ptr" %}
6102 ins_encode %{
6103 __ set($con$$constant, $dst$$Register);
6104 %}
6105 ins_pipe(loadConP);
6106 %}
6107 #endif // _LP64
6107 // NULL pointer constant: single CLR (OR %g0,%g0 -> dst).
6108 instruct loadConP0(iRegP dst, immP0 src) %{
6109 match(Set dst src);
6110 size(4);
6111 format %{ "CLR $dst\t!ptr" %}
6112 ins_encode %{
6113 __ clr($dst$$Register);
6114 %}
6115 ins_pipe(ialu_imm);
6116 %}
6118 // Polling-page address: only the SETHI is needed because the polling page
6118 // is aligned such that the low 10 bits are zero.
6119 instruct loadConP_poll(iRegP dst, immP_poll src) %{
6120 match(Set dst src);
6121 ins_cost(DEFAULT_COST);
6122 format %{ "SET $src,$dst\t!ptr" %}
6123 ins_encode %{
6124 AddressLiteral polling_page(os::get_polling_page());
6125 __ sethi(polling_page, reg_to_register_object($dst$$reg));
6126 %}
6127 ins_pipe(loadConP_poll);
6128 %}
6129 // Compressed NULL pointer constant.
6130 instruct loadConN0(iRegN dst, immN0 src) %{
6131 match(Set dst src);
6132 size(4);
6133 format %{ "CLR $dst\t! compressed NULL ptr" %}
6134 ins_encode %{
6135 __ clr($dst$$Register);
6136 %}
6137 ins_pipe(ialu_imm);
6138 %}
6140 // Non-null compressed oop constant; emits oop relocation via set_narrow_oop.
6141 instruct loadConN(iRegN dst, immN src) %{
6142 match(Set dst src);
6143 ins_cost(DEFAULT_COST * 3/2);
6144 format %{ "SET $src,$dst\t! compressed ptr" %}
6145 ins_encode %{
6146 Register dst = $dst$$Register;
6147 __ set_narrow_oop((jobject)$src$$constant, dst);
6148 %}
6149 ins_pipe(ialu_hi_lo_reg);
6150 %}
6151 // Materialize long value (predicated by immL_cheap). Uses O7 as scratch.
6152 instruct loadConL_set64(iRegL dst, immL_cheap con, o7RegL tmp) %{
6153 match(Set dst con);
6154 effect(KILL tmp);
6155 ins_cost(DEFAULT_COST * 3);
6156 format %{ "SET64 $con,$dst KILL $tmp\t! cheap long" %}
6157 ins_encode %{
6158 __ set64($con$$constant, $dst$$Register, $tmp$$Register);
6159 %}
6160 ins_pipe(loadConL);
6161 %}
6163 // Load long value from constant table (predicated by immL_expensive).
6164 instruct loadConL_ldx(iRegL dst, immL_expensive con) %{
6165 match(Set dst con);
6166 ins_cost(MEMORY_REF_COST);
6167 format %{ "LDX [$constanttablebase + $constantoffset],$dst\t! load from constant table: long=$con" %}
6168 ins_encode %{
6169 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
6170 __ ldx($constanttablebase, con_offset, $dst$$Register);
6171 %}
6172 ins_pipe(loadConL);
6173 %}
6175 // Long zero constant: single CLR.
6176 instruct loadConL0( iRegL dst, immL0 src ) %{
6177 match(Set dst src);
6178 ins_cost(DEFAULT_COST);
6179 size(4);
6180 format %{ "CLR $dst\t! long" %}
6181 ins_encode( Set13( src, dst ) );
6182 ins_pipe(ialu_imm);
6183 %}
6184 // Long constant that fits a 13-bit signed immediate: single MOV.
6185 instruct loadConL13( iRegL dst, immL13 src ) %{
6186 match(Set dst src);
6187 ins_cost(DEFAULT_COST * 2);
6188 size(4);
6189 format %{ "MOV $src,$dst\t! long" %}
6190 ins_encode( Set13( src, dst ) );
6191 ins_pipe(ialu_imm);
6192 %}
6194 // Float constant from the constant table; O7 scratch holds the table offset.
6195 instruct loadConF(regF dst, immF con, o7RegI tmp) %{
6196 match(Set dst con);
6197 effect(KILL tmp);
6198 format %{ "LDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: float=$con" %}
6199 ins_encode %{
6200 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
6201 __ ldf(FloatRegisterImpl::S, $constanttablebase, con_offset, $dst$$FloatRegister);
6202 %}
6203 ins_pipe(loadConFD);
6204 %}
6205 // Double constant from the constant table; O7 scratch holds the table offset.
6206 instruct loadConD(regD dst, immD con, o7RegI tmp) %{
6207 match(Set dst con);
6208 effect(KILL tmp);
6209 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: double=$con" %}
6210 ins_encode %{
6211 // XXX This is a quick fix for 6833573.
6212 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset($con), $dst$$FloatRegister);
6213 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
6214 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
6215 %}
6216 ins_pipe(loadConFD);
6217 %}
6218 // Prefetch instructions.
6219 // Must be safe to execute with invalid address (cannot fault).
6221 instruct prefetchr( memory mem ) %{
6222 match( PrefetchRead mem );
6223 ins_cost(MEMORY_REF_COST);
6224 size(4);
6226 format %{ "PREFETCH $mem,0\t! Prefetch read-many" %}
6227 opcode(Assembler::prefetch_op3);
6228 ins_encode( form3_mem_prefetch_read( mem ) );
6229 ins_pipe(iload_mem);
6230 %}
6232 instruct prefetchw( memory mem ) %{
6233 match( PrefetchWrite mem );
6234 ins_cost(MEMORY_REF_COST);
6235 size(4);
6237 format %{ "PREFETCH $mem,2\t! Prefetch write-many (and read)" %}
6238 opcode(Assembler::prefetch_op3);
6239 ins_encode( form3_mem_prefetch_write( mem ) );
6240 ins_pipe(iload_mem);
6241 %}
6243 // Prefetch instructions for allocation.
6244 // Variant selected by the AllocatePrefetchInstr flag (0 = PREFETCH, 1 = BIS).
6245 instruct prefetchAlloc( memory mem ) %{
6246 predicate(AllocatePrefetchInstr == 0);
6247 match( PrefetchAllocation mem );
6248 ins_cost(MEMORY_REF_COST);
6249 size(4);
6251 format %{ "PREFETCH $mem,2\t! Prefetch allocation" %}
6252 opcode(Assembler::prefetch_op3);
6253 ins_encode( form3_mem_prefetch_write( mem ) );
6254 ins_pipe(iload_mem);
6255 %}
6257 // Use BIS instruction to prefetch for allocation.
6258 // Could fault, need space at the end of TLAB.
6259 instruct prefetchAlloc_bis( iRegP dst ) %{
6260 predicate(AllocatePrefetchInstr == 1);
6261 match( PrefetchAllocation dst );
6262 ins_cost(MEMORY_REF_COST);
6263 size(4);
6265 format %{ "STXA [$dst]\t! // Prefetch allocation using BIS" %}
6266 ins_encode %{
6267 __ stxa(G0, $dst$$Register, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
6268 %}
6269 ins_pipe(istore_mem_reg);
6270 %}
6272 // Next code is used for finding next cache line address to prefetch.
6273 #ifndef _LP64
6274 instruct cacheLineAdr( iRegP dst, iRegP src, immI13 mask ) %{
6275 match(Set dst (CastX2P (AndI (CastP2X src) mask)));
6276 ins_cost(DEFAULT_COST);
6277 size(4);
6279 format %{ "AND $src,$mask,$dst\t! next cache line address" %}
6280 ins_encode %{
6281 __ and3($src$$Register, $mask$$constant, $dst$$Register);
6282 %}
6283 ins_pipe(ialu_reg_imm);
6284 %}
6285 #else
6286 instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{
6287 match(Set dst (CastX2P (AndL (CastP2X src) mask)));
6288 ins_cost(DEFAULT_COST);
6289 size(4);
6291 format %{ "AND $src,$mask,$dst\t! next cache line address" %}
6292 ins_encode %{
6293 __ and3($src$$Register, $mask$$constant, $dst$$Register);
6294 %}
6295 ins_pipe(ialu_reg_imm);
6296 %}
6297 #endif
6299 //----------Store Instructions-------------------------------------------------
6300 // Store Byte
6301 instruct storeB(memory mem, iRegI src) %{
6302 match(Set mem (StoreB mem src));
6303 ins_cost(MEMORY_REF_COST);
6305 size(4);
6306 format %{ "STB $src,$mem\t! byte" %}
6307 opcode(Assembler::stb_op3);
6308 ins_encode(simple_form3_mem_reg( mem, src ) );
6309 ins_pipe(istore_mem_reg);
6310 %}
6312 // Store Byte zero: source register is hard-wired to %g0.
6313 instruct storeB0(memory mem, immI0 src) %{
6314 match(Set mem (StoreB mem src));
6315 ins_cost(MEMORY_REF_COST);
6316 size(4);
6317 format %{ "STB $src,$mem\t! byte" %}
6318 opcode(Assembler::stb_op3);
6319 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6320 ins_pipe(istore_mem_zero);
6321 %}
6323 // Card-mark store: writes a zero byte into the card table (STB %g0).
6324 instruct storeCM0(memory mem, immI0 src) %{
6325 match(Set mem (StoreCM mem src));
6326 ins_cost(MEMORY_REF_COST);
6327 size(4);
6328 format %{ "STB $src,$mem\t! CMS card-mark byte 0" %}
6329 opcode(Assembler::stb_op3);
6330 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6331 ins_pipe(istore_mem_zero);
6332 %}
6334 // Store Char/Short
6335 instruct storeC(memory mem, iRegI src) %{
6336 match(Set mem (StoreC mem src));
6337 ins_cost(MEMORY_REF_COST);
6339 size(4);
6340 format %{ "STH $src,$mem\t! short" %}
6341 opcode(Assembler::sth_op3);
6342 ins_encode(simple_form3_mem_reg( mem, src ) );
6343 ins_pipe(istore_mem_reg);
6344 %}
6346 // Store Char/Short zero via %g0.
6347 instruct storeC0(memory mem, immI0 src) %{
6348 match(Set mem (StoreC mem src));
6349 ins_cost(MEMORY_REF_COST);
6350 size(4);
6351 format %{ "STH $src,$mem\t! short" %}
6352 opcode(Assembler::sth_op3);
6353 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6354 ins_pipe(istore_mem_zero);
6355 %}
6357 // Store Integer
6358 instruct storeI(memory mem, iRegI src) %{
6359 match(Set mem (StoreI mem src));
6360 ins_cost(MEMORY_REF_COST);
6362 size(4);
6363 format %{ "STW $src,$mem" %}
6364 opcode(Assembler::stw_op3);
6365 ins_encode(simple_form3_mem_reg( mem, src ) );
6366 ins_pipe(istore_mem_reg);
6367 %}
6369 // Store Long
6370 instruct storeL(memory mem, iRegL src) %{
6371 match(Set mem (StoreL mem src));
6372 ins_cost(MEMORY_REF_COST);
6373 size(4);
6374 format %{ "STX $src,$mem\t! long" %}
6375 opcode(Assembler::stx_op3);
6376 ins_encode(simple_form3_mem_reg( mem, src ) );
6377 ins_pipe(istore_mem_reg);
6378 %}
6380 // Store Integer zero via %g0.
6381 instruct storeI0(memory mem, immI0 src) %{
6382 match(Set mem (StoreI mem src));
6383 ins_cost(MEMORY_REF_COST);
6384 size(4);
6385 format %{ "STW $src,$mem" %}
6386 opcode(Assembler::stw_op3);
6387 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6388 ins_pipe(istore_mem_zero);
6389 %}
6391 // Store Long zero via %g0.
6392 instruct storeL0(memory mem, immL0 src) %{
6393 match(Set mem (StoreL mem src));
6394 ins_cost(MEMORY_REF_COST);
6395 size(4);
6396 format %{ "STX $src,$mem" %}
6397 opcode(Assembler::stx_op3);
6398 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6399 ins_pipe(istore_mem_zero);
6400 %}
6402 // Store Integer from float register (used after fstoi)
6403 instruct storeI_Freg(memory mem, regF src) %{
6404 match(Set mem (StoreI mem src));
6405 ins_cost(MEMORY_REF_COST);
6407 size(4);
6408 format %{ "STF $src,$mem\t! after fstoi/fdtoi" %}
6409 opcode(Assembler::stf_op3);
6410 ins_encode(simple_form3_mem_reg( mem, src ) );
6411 ins_pipe(fstoreF_mem_reg);
6412 %}
6414 // Store Pointer: STW on 32-bit builds, STX on _LP64 builds.
6415 instruct storeP(memory dst, sp_ptr_RegP src) %{
6416 match(Set dst (StoreP dst src));
6417 ins_cost(MEMORY_REF_COST);
6418 size(4);
6420 #ifndef _LP64
6421 format %{ "STW $src,$dst\t! ptr" %}
6422 opcode(Assembler::stw_op3, 0, REGP_OP);
6423 #else
6424 format %{ "STX $src,$dst\t! ptr" %}
6425 opcode(Assembler::stx_op3, 0, REGP_OP);
6426 #endif
6427 ins_encode( form3_mem_reg( dst, src ) );
6428 ins_pipe(istore_mem_spORreg);
6429 %}
6431 // Store NULL pointer via %g0.
6432 instruct storeP0(memory dst, immP0 src) %{
6433 match(Set dst (StoreP dst src));
6434 ins_cost(MEMORY_REF_COST);
6435 size(4);
6436 #ifndef _LP64
6437 format %{ "STW $src,$dst\t! ptr" %}
6438 opcode(Assembler::stw_op3, 0, REGP_OP);
6439 #else
6440 format %{ "STX $src,$dst\t! ptr" %}
6441 opcode(Assembler::stx_op3, 0, REGP_OP);
6442 #endif
6443 ins_encode( form3_mem_reg( dst, R_G0 ) );
6444 ins_pipe(istore_mem_zero);
6445 %}
6447 // Store Compressed Pointer (32-bit narrow oop). Register-index and
6447 // register+displacement addressing are handled separately because STW
6447 // takes either a register or a simm13 displacement, not both.
6448 instruct storeN(memory dst, iRegN src) %{
6449 match(Set dst (StoreN dst src));
6450 ins_cost(MEMORY_REF_COST);
6451 size(4);
6453 format %{ "STW $src,$dst\t! compressed ptr" %}
6454 ins_encode %{
6455 Register base = as_Register($dst$$base);
6456 Register index = as_Register($dst$$index);
6457 Register src = $src$$Register;
6458 if (index != G0) {
6459 __ stw(src, base, index);
6460 } else {
6461 __ stw(src, base, $dst$$disp);
6462 }
6463 %}
6464 ins_pipe(istore_mem_spORreg);
6465 %}
6467 // Store compressed NULL pointer (zero word).
6468 instruct storeN0(memory dst, immN0 src) %{
6469 match(Set dst (StoreN dst src));
6470 ins_cost(MEMORY_REF_COST);
6471 size(4);
6472 format %{ "STW $src,$dst\t! compressed ptr" %}
6473 ins_encode %{
6474 Register base = as_Register($dst$$base);
6475 Register index = as_Register($dst$$index);
6476 if (index != G0) {
6477 __ stw(0, base, index);
6478 } else {
6479 __ stw(0, base, $dst$$disp);
6480 }
6481 %}
6482 ins_pipe(istore_mem_zero);
6483 %}
6485 // Store Double (STDF requires an 8-byte-aligned address)
6486 instruct storeD( memory mem, regD src) %{
6487 match(Set mem (StoreD mem src));
6488 ins_cost(MEMORY_REF_COST);
6490 size(4);
6491 format %{ "STDF $src,$mem" %}
6492 opcode(Assembler::stdf_op3);
6493 ins_encode(simple_form3_mem_reg( mem, src ) );
6494 ins_pipe(fstoreD_mem_reg);
6495 %}
6497 // Store Double zero: one 64-bit integer store of %g0 instead of an FP store.
6498 instruct storeD0( memory mem, immD0 src) %{
6499 match(Set mem (StoreD mem src));
6500 ins_cost(MEMORY_REF_COST);
6501 size(4);
6502 format %{ "STX $src,$mem" %}
6503 opcode(Assembler::stx_op3);
6504 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6505 ins_pipe(fstoreD_mem_zero);
6506 %}
6508 // Store Float
6509 instruct storeF( memory mem, regF src) %{
6510 match(Set mem (StoreF mem src));
6511 ins_cost(MEMORY_REF_COST);
6513 size(4);
6514 format %{ "STF $src,$mem" %}
6515 opcode(Assembler::stf_op3);
6516 ins_encode(simple_form3_mem_reg( mem, src ) );
6517 ins_pipe(fstoreF_mem_reg);
6518 %}
6520 // Store Float zero: one 32-bit integer store of %g0 instead of an FP store.
6521 instruct storeF0( memory mem, immF0 src) %{
6522 match(Set mem (StoreF mem src));
6523 ins_cost(MEMORY_REF_COST);
6524 size(4);
6525 format %{ "STW $src,$mem\t! storeF0" %}
6526 opcode(Assembler::stw_op3);
6527 ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
6528 ins_pipe(fstoreF_mem_zero);
6529 %}
6531 // Convert oop pointer into compressed form. The _not_null variants are
6531 // selected by type-based predicates and skip the null check in the encoder.
6532 instruct encodeHeapOop(iRegN dst, iRegP src) %{
6533 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
6534 match(Set dst (EncodeP src));
6535 format %{ "encode_heap_oop $src, $dst" %}
6536 ins_encode %{
6537 __ encode_heap_oop($src$$Register, $dst$$Register);
6538 %}
6539 ins_pipe(ialu_reg);
6540 %}
6542 instruct encodeHeapOop_not_null(iRegN dst, iRegP src) %{
6543 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
6544 match(Set dst (EncodeP src));
6545 format %{ "encode_heap_oop_not_null $src, $dst" %}
6546 ins_encode %{
6547 __ encode_heap_oop_not_null($src$$Register, $dst$$Register);
6548 %}
6549 ins_pipe(ialu_reg);
6550 %}
6552 instruct decodeHeapOop(iRegP dst, iRegN src) %{
6553 predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
6554 n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant);
6555 match(Set dst (DecodeN src));
6556 format %{ "decode_heap_oop $src, $dst" %}
6557 ins_encode %{
6558 __ decode_heap_oop($src$$Register, $dst$$Register);
6559 %}
6560 ins_pipe(ialu_reg);
6561 %}
6563 instruct decodeHeapOop_not_null(iRegP dst, iRegN src) %{
6564 predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
6565 n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
6566 match(Set dst (DecodeN src));
6567 format %{ "decode_heap_oop_not_null $src, $dst" %}
6568 ins_encode %{
6569 __ decode_heap_oop_not_null($src$$Register, $dst$$Register);
6570 %}
6571 ins_pipe(ialu_reg);
6572 %}
6575 //----------MemBar Instructions-----------------------------------------------
6576 // Memory barrier flavors. Several are size(0): on TSO SPARC the ordering is
6576 // already guaranteed, so the node only constrains the scheduler.
6578 instruct membar_acquire() %{
6579 match(MemBarAcquire);
6580 ins_cost(4*MEMORY_REF_COST);
6582 size(0);
6583 format %{ "MEMBAR-acquire" %}
6584 ins_encode( enc_membar_acquire );
6585 ins_pipe(long_memory_op);
6586 %}
6588 instruct membar_acquire_lock() %{
6589 match(MemBarAcquireLock);
6590 ins_cost(0);
6592 size(0);
6593 format %{ "!MEMBAR-acquire (CAS in prior FastLock so empty encoding)" %}
6594 ins_encode( );
6595 ins_pipe(empty);
6596 %}
6598 instruct membar_release() %{
6599 match(MemBarRelease);
6600 ins_cost(4*MEMORY_REF_COST);
6602 size(0);
6603 format %{ "MEMBAR-release" %}
6604 ins_encode( enc_membar_release );
6605 ins_pipe(long_memory_op);
6606 %}
6608 instruct membar_release_lock() %{
6609 match(MemBarReleaseLock);
6610 ins_cost(0);
6612 size(0);
6613 format %{ "!MEMBAR-release (CAS in succeeding FastUnlock so empty encoding)" %}
6614 ins_encode( );
6615 ins_pipe(empty);
6616 %}
6618 instruct membar_volatile() %{
6619 match(MemBarVolatile);
6620 ins_cost(4*MEMORY_REF_COST);
6622 size(4);
6623 format %{ "MEMBAR-volatile" %}
6624 ins_encode( enc_membar_volatile );
6625 ins_pipe(long_memory_op);
6626 %}
6628 // Volatile barrier elided when a following CAS/store already orders it.
6629 instruct unnecessary_membar_volatile() %{
6630 match(MemBarVolatile);
6631 predicate(Matcher::post_store_load_barrier(n));
6632 ins_cost(0);
6633 size(0);
6634 format %{ "!MEMBAR-volatile (unnecessary so empty encoding)" %}
6635 ins_encode( );
6636 ins_pipe(empty);
6637 %}
6639 instruct membar_storestore() %{
6640 match(MemBarStoreStore);
6641 ins_cost(0);
6643 size(0);
6644 format %{ "!MEMBAR-storestore (empty encoding)" %}
6645 ins_encode( );
6646 ins_pipe(empty);
6647 %}
6649 //----------Register Move Instructions-----------------------------------------
// RoundDouble/RoundFloat are x87-style rounding nodes; SPARC FP registers
// already hold normal IEEE format, so both rules match the node to an empty,
// zero-cost encoding.
6650 instruct roundDouble_nop(regD dst) %{
6651 match(Set dst (RoundDouble dst));
6652 ins_cost(0);
6653 // SPARC results are already "rounded" (i.e., normal-format IEEE)
6654 ins_encode( );
6655 ins_pipe(empty);
6656 %}
6659 instruct roundFloat_nop(regF dst) %{
6660 match(Set dst (RoundFloat dst));
6661 ins_cost(0);
6662 // SPARC results are already "rounded" (i.e., normal-format IEEE)
6663 ins_encode( );
6664 ins_pipe(empty);
6665 %}
6668 // Cast Index to Pointer for unsafe natives
// Both directions are plain register moves (OR with %g0) since machine-word
// integers and pointers share the integer register file on SPARC.
6669 instruct castX2P(iRegX src, iRegP dst) %{
6670 match(Set dst (CastX2P src));
6672 format %{ "MOV $src,$dst\t! IntX->Ptr" %}
6673 ins_encode( form3_g0_rs2_rd_move( src, dst ) );
6674 ins_pipe(ialu_reg);
6675 %}
6677 // Cast Pointer to Index for unsafe natives
6678 instruct castP2X(iRegP src, iRegX dst) %{
6679 match(Set dst (CastP2X src));
6681 format %{ "MOV $src,$dst\t! Ptr->IntX" %}
6682 ins_encode( form3_g0_rs2_rd_move( src, dst ) );
6683 ins_pipe(ialu_reg);
6684 %}
// Chain rules moving FP values between registers and stack slots
// (double store, double load, float store).  These are spill/copy helpers;
// the TODO notes they should be advertised to the register coalescer.
6686 instruct stfSSD(stackSlotD stkSlot, regD src) %{
6687 // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
6688 match(Set stkSlot src); // chain rule
6689 ins_cost(MEMORY_REF_COST);
6690 format %{ "STDF $src,$stkSlot\t!stk" %}
6691 opcode(Assembler::stdf_op3);
6692 ins_encode(simple_form3_mem_reg(stkSlot, src));
6693 ins_pipe(fstoreD_stk_reg);
6694 %}
6696 instruct ldfSSD(regD dst, stackSlotD stkSlot) %{
6697 // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
6698 match(Set dst stkSlot); // chain rule
6699 ins_cost(MEMORY_REF_COST);
6700 format %{ "LDDF $stkSlot,$dst\t!stk" %}
6701 opcode(Assembler::lddf_op3);
6702 ins_encode(simple_form3_mem_reg(stkSlot, dst));
6703 ins_pipe(floadD_stk);
6704 %}
6706 instruct stfSSF(stackSlotF stkSlot, regF src) %{
6707 // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
6708 match(Set stkSlot src); // chain rule
6709 ins_cost(MEMORY_REF_COST);
6710 format %{ "STF $src,$stkSlot\t!stk" %}
6711 opcode(Assembler::stf_op3);
6712 ins_encode(simple_form3_mem_reg(stkSlot, src));
6713 ins_pipe(fstoreF_stk_reg);
6714 %}
6716 //----------Conditional Move---------------------------------------------------
6717 // Conditional move
// CMoveI variants: suffix encodes the flag source (P = pointer cc, I =
// signed icc, Iu = unsigned icc, F = fcc) and the operand kind (_reg /
// _imm, the immediate form taking a signed 11-bit constant).  Register
// forms cost 150, immediate forms 140, biasing selection toward immediates.
6718 instruct cmovIP_reg(cmpOpP cmp, flagsRegP pcc, iRegI dst, iRegI src) %{
6719 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
6720 ins_cost(150);
6721 format %{ "MOV$cmp $pcc,$src,$dst" %}
6722 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6723 ins_pipe(ialu_reg);
6724 %}
6726 instruct cmovIP_imm(cmpOpP cmp, flagsRegP pcc, iRegI dst, immI11 src) %{
6727 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
6728 ins_cost(140);
6729 format %{ "MOV$cmp $pcc,$src,$dst" %}
6730 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
6731 ins_pipe(ialu_imm);
6732 %}
6734 instruct cmovII_reg(cmpOp cmp, flagsReg icc, iRegI dst, iRegI src) %{
6735 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6736 ins_cost(150);
6737 size(4);
6738 format %{ "MOV$cmp $icc,$src,$dst" %}
6739 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6740 ins_pipe(ialu_reg);
6741 %}
6743 instruct cmovII_imm(cmpOp cmp, flagsReg icc, iRegI dst, immI11 src) %{
6744 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6745 ins_cost(140);
6746 size(4);
6747 format %{ "MOV$cmp $icc,$src,$dst" %}
6748 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
6749 ins_pipe(ialu_imm);
6750 %}
// Unsigned-compare flavors: distinguished only by the cmpOpU/flagsRegU
// operand types; the emitted MOVcc on icc is the same encoding.
6752 instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
6753 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6754 ins_cost(150);
6755 size(4);
6756 format %{ "MOV$cmp $icc,$src,$dst" %}
6757 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6758 ins_pipe(ialu_reg);
6759 %}
6761 instruct cmovIIu_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{
6762 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6763 ins_cost(140);
6764 size(4);
6765 format %{ "MOV$cmp $icc,$src,$dst" %}
6766 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
6767 ins_pipe(ialu_imm);
6768 %}
// Float-condition flavors: the fcc register number must be encoded, so
// these use the dedicated enc_cmov_*_f encodings.
6770 instruct cmovIF_reg(cmpOpF cmp, flagsRegF fcc, iRegI dst, iRegI src) %{
6771 match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src)));
6772 ins_cost(150);
6773 size(4);
6774 format %{ "MOV$cmp $fcc,$src,$dst" %}
6775 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
6776 ins_pipe(ialu_reg);
6777 %}
6779 instruct cmovIF_imm(cmpOpF cmp, flagsRegF fcc, iRegI dst, immI11 src) %{
6780 match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src)));
6781 ins_cost(140);
6782 size(4);
6783 format %{ "MOV$cmp $fcc,$src,$dst" %}
6784 ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) );
6785 ins_pipe(ialu_imm);
6786 %}
6788 // Conditional move for RegN. Only cmov(reg,reg).
// CMoveN (compressed-oop) conditional moves: register-register only, one
// rule per flag source (pointer cc, signed icc, unsigned icc, fcc).
6789 instruct cmovNP_reg(cmpOpP cmp, flagsRegP pcc, iRegN dst, iRegN src) %{
6790 match(Set dst (CMoveN (Binary cmp pcc) (Binary dst src)));
6791 ins_cost(150);
6792 format %{ "MOV$cmp $pcc,$src,$dst" %}
6793 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6794 ins_pipe(ialu_reg);
6795 %}
6797 // This instruction also works with CmpN so we don't need cmovNN_reg.
6798 instruct cmovNI_reg(cmpOp cmp, flagsReg icc, iRegN dst, iRegN src) %{
6799 match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
6800 ins_cost(150);
6801 size(4);
6802 format %{ "MOV$cmp $icc,$src,$dst" %}
6803 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6804 ins_pipe(ialu_reg);
6805 %}
6807 // This instruction also works with CmpN so we don't need cmovNN_reg.
6808 instruct cmovNIu_reg(cmpOpU cmp, flagsRegU icc, iRegN dst, iRegN src) %{
6809 match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
6810 ins_cost(150);
6811 size(4);
6812 format %{ "MOV$cmp $icc,$src,$dst" %}
6813 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6814 ins_pipe(ialu_reg);
6815 %}
6817 instruct cmovNF_reg(cmpOpF cmp, flagsRegF fcc, iRegN dst, iRegN src) %{
6818 match(Set dst (CMoveN (Binary cmp fcc) (Binary dst src)));
6819 ins_cost(150);
6820 size(4);
6821 format %{ "MOV$cmp $fcc,$src,$dst" %}
6822 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
6823 ins_pipe(ialu_reg);
6824 %}
6826 // Conditional move
// CMoveP (pointer) conditional moves.  Immediate forms accept only immP0
// (the null pointer constant) since a general pointer cannot fit a MOVcc
// immediate field.
6827 instruct cmovPP_reg(cmpOpP cmp, flagsRegP pcc, iRegP dst, iRegP src) %{
6828 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
6829 ins_cost(150);
6830 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %}
6831 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6832 ins_pipe(ialu_reg);
6833 %}
6835 instruct cmovPP_imm(cmpOpP cmp, flagsRegP pcc, iRegP dst, immP0 src) %{
6836 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
6837 ins_cost(140);
6838 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %}
6839 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
6840 ins_pipe(ialu_imm);
6841 %}
6843 // This instruction also works with CmpN so we don't need cmovPN_reg.
6844 instruct cmovPI_reg(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src) %{
6845 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
6846 ins_cost(150);
6848 size(4);
6849 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
6850 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6851 ins_pipe(ialu_reg);
6852 %}
6854 instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{
6855 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
6856 ins_cost(150);
6858 size(4);
6859 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
6860 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6861 ins_pipe(ialu_reg);
6862 %}
6864 instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{
6865 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
6866 ins_cost(140);
6868 size(4);
6869 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
6870 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
6871 ins_pipe(ialu_imm);
6872 %}
6874 instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{
6875 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
6876 ins_cost(140);
6878 size(4);
6879 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
6880 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
6881 ins_pipe(ialu_imm);
6882 %}
// NOTE(review): this reg-reg variant declares ins_pipe(ialu_imm); the other
// *_reg rules use ialu_reg.  Possibly a copy-paste from the _imm variant —
// affects only scheduling, not correctness, but worth confirming.
6884 instruct cmovPF_reg(cmpOpF cmp, flagsRegF fcc, iRegP dst, iRegP src) %{
6885 match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
6886 ins_cost(150);
6887 size(4);
6888 format %{ "MOV$cmp $fcc,$src,$dst" %}
6889 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
6890 ins_pipe(ialu_imm);
6891 %}
6893 instruct cmovPF_imm(cmpOpF cmp, flagsRegF fcc, iRegP dst, immP0 src) %{
6894 match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
6895 ins_cost(140);
6896 size(4);
6897 format %{ "MOV$cmp $fcc,$src,$dst" %}
6898 ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) );
6899 ins_pipe(ialu_imm);
6900 %}
6902 // Conditional move
// CMoveF / CMoveD: FP-register conditional moves (FMOVcc).  The opcode()
// values (0x101/0x102 for single/double on integer conditions, 0x1/0x2 on
// float conditions) feed the enc_cmovf*_reg encodings.
6903 instruct cmovFP_reg(cmpOpP cmp, flagsRegP pcc, regF dst, regF src) %{
6904 match(Set dst (CMoveF (Binary cmp pcc) (Binary dst src)));
6905 ins_cost(150);
6906 opcode(0x101);
6907 format %{ "FMOVD$cmp $pcc,$src,$dst" %}
6908 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6909 ins_pipe(int_conditional_float_move);
6910 %}
6912 instruct cmovFI_reg(cmpOp cmp, flagsReg icc, regF dst, regF src) %{
6913 match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
6914 ins_cost(150);
6916 size(4);
6917 format %{ "FMOVS$cmp $icc,$src,$dst" %}
6918 opcode(0x101);
6919 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
6920 ins_pipe(int_conditional_float_move);
6921 %}
6923 instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{
6924 match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
6925 ins_cost(150);
6927 size(4);
6928 format %{ "FMOVS$cmp $icc,$src,$dst" %}
6929 opcode(0x101);
6930 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
6931 ins_pipe(int_conditional_float_move);
6932 %}
6934 // Conditional move,
// Float value selected on a float condition code register.
6935 instruct cmovFF_reg(cmpOpF cmp, flagsRegF fcc, regF dst, regF src) %{
6936 match(Set dst (CMoveF (Binary cmp fcc) (Binary dst src)));
6937 ins_cost(150);
6938 size(4);
6939 format %{ "FMOVF$cmp $fcc,$src,$dst" %}
6940 opcode(0x1);
6941 ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) );
6942 ins_pipe(int_conditional_double_move);
6943 %}
6945 // Conditional move
// Double flavors: identical structure, double opcode (0x102 / 0x2).
6946 instruct cmovDP_reg(cmpOpP cmp, flagsRegP pcc, regD dst, regD src) %{
6947 match(Set dst (CMoveD (Binary cmp pcc) (Binary dst src)));
6948 ins_cost(150);
6949 size(4);
6950 opcode(0x102);
6951 format %{ "FMOVD$cmp $pcc,$src,$dst" %}
6952 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6953 ins_pipe(int_conditional_double_move);
6954 %}
6956 instruct cmovDI_reg(cmpOp cmp, flagsReg icc, regD dst, regD src) %{
6957 match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
6958 ins_cost(150);
6960 size(4);
6961 format %{ "FMOVD$cmp $icc,$src,$dst" %}
6962 opcode(0x102);
6963 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
6964 ins_pipe(int_conditional_double_move);
6965 %}
6967 instruct cmovDIu_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src) %{
6968 match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
6969 ins_cost(150);
6971 size(4);
6972 format %{ "FMOVD$cmp $icc,$src,$dst" %}
6973 opcode(0x102);
6974 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
6975 ins_pipe(int_conditional_double_move);
6976 %}
6978 // Conditional move,
6979 instruct cmovDF_reg(cmpOpF cmp, flagsRegF fcc, regD dst, regD src) %{
6980 match(Set dst (CMoveD (Binary cmp fcc) (Binary dst src)));
6981 ins_cost(150);
6982 size(4);
6983 format %{ "FMOVD$cmp $fcc,$src,$dst" %}
6984 opcode(0x2);
6985 ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) );
6986 ins_pipe(int_conditional_double_move);
6987 %}
6989 // Conditional move
// CMoveL: long conditional moves; same flag-source taxonomy as the integer
// family above.  Only the pointer-cc variant has an immediate form here.
6990 instruct cmovLP_reg(cmpOpP cmp, flagsRegP pcc, iRegL dst, iRegL src) %{
6991 match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
6992 ins_cost(150);
6993 format %{ "MOV$cmp $pcc,$src,$dst\t! long" %}
6994 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6995 ins_pipe(ialu_reg);
6996 %}
6998 instruct cmovLP_imm(cmpOpP cmp, flagsRegP pcc, iRegL dst, immI11 src) %{
6999 match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
7000 ins_cost(140);
7001 format %{ "MOV$cmp $pcc,$src,$dst\t! long" %}
7002 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
7003 ins_pipe(ialu_imm);
7004 %}
7006 instruct cmovLI_reg(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src) %{
7007 match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
7008 ins_cost(150);
7010 size(4);
7011 format %{ "MOV$cmp $icc,$src,$dst\t! long" %}
7012 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
7013 ins_pipe(ialu_reg);
7014 %}
7017 instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{
7018 match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
7019 ins_cost(150);
7021 size(4);
7022 format %{ "MOV$cmp $icc,$src,$dst\t! long" %}
7023 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
7024 ins_pipe(ialu_reg);
7025 %}
7028 instruct cmovLF_reg(cmpOpF cmp, flagsRegF fcc, iRegL dst, iRegL src) %{
7029 match(Set dst (CMoveL (Binary cmp fcc) (Binary dst src)));
7030 ins_cost(150);
7032 size(4);
7033 format %{ "MOV$cmp $fcc,$src,$dst\t! long" %}
7034 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
7035 ins_pipe(ialu_reg);
7036 %}
7040 //----------OS and Locking Instructions----------------------------------------
7042 // This name is KNOWN by the ADLC and cannot be changed.
7043 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
7044 // for this guy.
// Thread-local pointer: %g2 is dedicated to the current JavaThread on this
// port, so the match produces no code — only a register binding.
7045 instruct tlsLoadP(g2RegP dst) %{
7046 match(Set dst (ThreadLocal));
7048 size(0);
7049 ins_cost(0);
7050 format %{ "# TLS is in G2" %}
7051 ins_encode( /*empty encoding*/ );
7052 ins_pipe(ialu_none);
7053 %}
// Type-system casts: CheckCastPP / CastPP / CastII exist only to carry type
// information in the ideal graph; they bind input and output to the same
// register and emit no machine code.
7055 instruct checkCastPP( iRegP dst ) %{
7056 match(Set dst (CheckCastPP dst));
7058 size(0);
7059 format %{ "# checkcastPP of $dst" %}
7060 ins_encode( /*empty encoding*/ );
7061 ins_pipe(empty);
7062 %}
7065 instruct castPP( iRegP dst ) %{
7066 match(Set dst (CastPP dst));
7067 format %{ "# castPP of $dst" %}
7068 ins_encode( /*empty encoding*/ );
7069 ins_pipe(empty);
7070 %}
7072 instruct castII( iRegI dst ) %{
7073 match(Set dst (CastII dst));
7074 format %{ "# castII of $dst" %}
7075 ins_encode( /*empty encoding*/ );
7076 ins_cost(0);
7077 ins_pipe(empty);
7078 %}
7080 //----------Arithmetic Instructions--------------------------------------------
7081 // Addition Instructions
7082 // Register Addition
// Integer ADD, register-register form.  Uses the newer C++-style ins_encode
// body; the remaining add rules use the older opcode/encoding-class style.
7083 instruct addI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7084 match(Set dst (AddI src1 src2));
7086 size(4);
7087 format %{ "ADD $src1,$src2,$dst" %}
7088 ins_encode %{
7089 __ add($src1$$Register, $src2$$Register, $dst$$Register);
7090 %}
7091 ins_pipe(ialu_reg_reg);
7092 %}
7094 // Immediate Addition
// ADD with a signed 13-bit immediate (SPARC simm13 field).
7095 instruct addI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7096 match(Set dst (AddI src1 src2));
7098 size(4);
7099 format %{ "ADD $src1,$src2,$dst" %}
7100 opcode(Assembler::add_op3, Assembler::arith_op);
7101 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7102 ins_pipe(ialu_reg_imm);
7103 %}
7105 // Pointer Register Addition
7106 instruct addP_reg_reg(iRegP dst, iRegP src1, iRegX src2) %{
7107 match(Set dst (AddP src1 src2));
7109 size(4);
7110 format %{ "ADD $src1,$src2,$dst" %}
7111 opcode(Assembler::add_op3, Assembler::arith_op);
7112 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7113 ins_pipe(ialu_reg_reg);
7114 %}
7116 // Pointer Immediate Addition
7117 instruct addP_reg_imm13(iRegP dst, iRegP src1, immX13 src2) %{
7118 match(Set dst (AddP src1 src2));
7120 size(4);
7121 format %{ "ADD $src1,$src2,$dst" %}
7122 opcode(Assembler::add_op3, Assembler::arith_op);
7123 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7124 ins_pipe(ialu_reg_imm);
7125 %}
7127 // Long Addition
// 64-bit ADD: same machine instruction as the 32-bit case on a 64-bit
// register file; only the matched ideal opcode differs.
7128 instruct addL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7129 match(Set dst (AddL src1 src2));
7131 size(4);
7132 format %{ "ADD $src1,$src2,$dst\t! long" %}
7133 opcode(Assembler::add_op3, Assembler::arith_op);
7134 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7135 ins_pipe(ialu_reg_reg);
7136 %}
7138 instruct addL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
7139 match(Set dst (AddL src1 con));
7141 size(4);
7142 format %{ "ADD $src1,$con,$dst" %}
7143 opcode(Assembler::add_op3, Assembler::arith_op);
7144 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
7145 ins_pipe(ialu_reg_imm);
7146 %}
7148 //----------Conditional_store--------------------------------------------------
7149 // Conditional-store of the updated heap-top.
7150 // Used during allocation of the shared heap.
7151 // Sets flags (EQ) on success. Implemented with a CASA on Sparc.
7153 // LoadP-locked. Same as a regular pointer load when used with a compare-swap
// The 32/64-bit split selects LDUW vs LDX for the pointer width.
7154 instruct loadPLocked(iRegP dst, memory mem) %{
7155 match(Set dst (LoadPLocked mem));
7156 ins_cost(MEMORY_REF_COST);
7158 #ifndef _LP64
7159 size(4);
7160 format %{ "LDUW $mem,$dst\t! ptr" %}
7161 opcode(Assembler::lduw_op3, 0, REGP_OP);
7162 #else
7163 format %{ "LDX $mem,$dst\t! ptr" %}
7164 opcode(Assembler::ldx_op3, 0, REGP_OP);
7165 #endif
7166 ins_encode( form3_mem_reg( mem, dst ) );
7167 ins_pipe(iload_mem);
7168 %}
// CASA on the heap-top word; newval is forced into %g3 (g3RegP) and is
// clobbered (KILL) because CASA overwrites it with the memory contents.
// The result is the condition codes from the trailing CMP.
7170 instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{
7171 match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval)));
7172 effect( KILL newval );
7173 format %{ "CASA [$heap_top_ptr],$oldval,R_G3\t! If $oldval==[$heap_top_ptr] Then store R_G3 into [$heap_top_ptr], set R_G3=[$heap_top_ptr] in any case\n\t"
7174 "CMP R_G3,$oldval\t\t! See if we made progress" %}
7175 ins_encode( enc_cas(heap_top_ptr,oldval,newval) );
7176 ins_pipe( long_memory_op );
7177 %}
7179 // Conditional-store of an int value.
7180 instruct storeIConditional( iRegP mem_ptr, iRegI oldval, g3RegI newval, flagsReg icc ) %{
7181 match(Set icc (StoreIConditional mem_ptr (Binary oldval newval)));
7182 effect( KILL newval );
7183 format %{ "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
7184 "CMP $oldval,$newval\t\t! See if we made progress" %}
7185 ins_encode( enc_cas(mem_ptr,oldval,newval) );
7186 ins_pipe( long_memory_op );
7187 %}
7189 // Conditional-store of a long value.
7190 instruct storeLConditional( iRegP mem_ptr, iRegL oldval, g3RegL newval, flagsRegL xcc ) %{
7191 match(Set xcc (StoreLConditional mem_ptr (Binary oldval newval)));
7192 effect( KILL newval );
7193 format %{ "CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
7194 "CMP $oldval,$newval\t\t! See if we made progress" %}
7195 ins_encode( enc_cas(mem_ptr,oldval,newval) );
7196 ins_pipe( long_memory_op );
7197 %}
7199 // No flag versions for CompareAndSwap{P,I,L} because matcher can't match them
// Boolean-result CAS family: each copies newval into %o7 (the o7 temp is
// KILLed), performs CAS(X)A, compares, then materializes 0/1 in res via the
// enc_*flags_ne_to_boolean helper.  ccr is KILLed by the CMP.
7201 instruct compareAndSwapL_bool(iRegP mem_ptr, iRegL oldval, iRegL newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7202 match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
7203 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7204 format %{
7205 "MOV $newval,O7\n\t"
7206 "CASXA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7207 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7208 "MOV 1,$res\n\t"
7209 "MOVne xcc,R_G0,$res"
7210 %}
7211 ins_encode( enc_casx(mem_ptr, oldval, newval),
7212 enc_lflags_ne_to_boolean(res) );
7213 ins_pipe( long_memory_op );
7214 %}
7217 instruct compareAndSwapI_bool(iRegP mem_ptr, iRegI oldval, iRegI newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7218 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
7219 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7220 format %{
7221 "MOV $newval,O7\n\t"
7222 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7223 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7224 "MOV 1,$res\n\t"
7225 "MOVne icc,R_G0,$res"
7226 %}
7227 ins_encode( enc_casi(mem_ptr, oldval, newval),
7228 enc_iflags_ne_to_boolean(res) );
7229 ins_pipe( long_memory_op );
7230 %}
// Pointer CAS picks 64- or 32-bit encoding at compile time via _LP64.
7232 instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7233 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
7234 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7235 format %{
7236 "MOV $newval,O7\n\t"
7237 "CASA_PTR [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7238 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7239 "MOV 1,$res\n\t"
7240 "MOVne xcc,R_G0,$res"
7241 %}
7242 #ifdef _LP64
7243 ins_encode( enc_casx(mem_ptr, oldval, newval),
7244 enc_lflags_ne_to_boolean(res) );
7245 #else
7246 ins_encode( enc_casi(mem_ptr, oldval, newval),
7247 enc_iflags_ne_to_boolean(res) );
7248 #endif
7249 ins_pipe( long_memory_op );
7250 %}
// Narrow-oop CAS: compressed oops are 32-bit, so the int encoding is used.
7252 instruct compareAndSwapN_bool(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7253 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
7254 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7255 format %{
7256 "MOV $newval,O7\n\t"
7257 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7258 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7259 "MOV 1,$res\n\t"
7260 "MOVne icc,R_G0,$res"
7261 %}
7262 ins_encode( enc_casi(mem_ptr, oldval, newval),
7263 enc_iflags_ne_to_boolean(res) );
7264 ins_pipe( long_memory_op );
7265 %}
7267 //---------------------
7268 // Subtraction Instructions
7269 // Register Subtraction
7270 instruct subI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7271 match(Set dst (SubI src1 src2));
7273 size(4);
7274 format %{ "SUB $src1,$src2,$dst" %}
7275 opcode(Assembler::sub_op3, Assembler::arith_op);
7276 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7277 ins_pipe(ialu_reg_reg);
7278 %}
7280 // Immediate Subtraction
7281 instruct subI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7282 match(Set dst (SubI src1 src2));
7284 size(4);
7285 format %{ "SUB $src1,$src2,$dst" %}
7286 opcode(Assembler::sub_op3, Assembler::arith_op);
7287 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7288 ins_pipe(ialu_reg_imm);
7289 %}
// Negation: 0 - x matched specially so %g0 supplies the zero operand.
7291 instruct subI_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{
7292 match(Set dst (SubI zero src2));
7294 size(4);
7295 format %{ "NEG $src2,$dst" %}
7296 opcode(Assembler::sub_op3, Assembler::arith_op);
7297 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) );
7298 ins_pipe(ialu_zero_reg);
7299 %}
7301 // Long subtraction
7302 instruct subL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7303 match(Set dst (SubL src1 src2));
7305 size(4);
7306 format %{ "SUB $src1,$src2,$dst\t! long" %}
7307 opcode(Assembler::sub_op3, Assembler::arith_op);
7308 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7309 ins_pipe(ialu_reg_reg);
7310 %}
7312 // Immediate Subtraction
7313 instruct subL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
7314 match(Set dst (SubL src1 con));
7316 size(4);
7317 format %{ "SUB $src1,$con,$dst\t! long" %}
7318 opcode(Assembler::sub_op3, Assembler::arith_op);
7319 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
7320 ins_pipe(ialu_reg_imm);
7321 %}
7323 // Long negation
7324 instruct negL_reg_reg(iRegL dst, immL0 zero, iRegL src2) %{
7325 match(Set dst (SubL zero src2));
7327 size(4);
7328 format %{ "NEG $src2,$dst\t! long" %}
7329 opcode(Assembler::sub_op3, Assembler::arith_op);
7330 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) );
7331 ins_pipe(ialu_zero_reg);
7332 %}
7334 // Multiplication Instructions
7335 // Integer Multiplication
7336 // Register Multiplication
// 32-bit multiply is implemented with the 64-bit MULX; the low 32 bits of
// the product are the correct int result.
7337 instruct mulI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7338 match(Set dst (MulI src1 src2));
7340 size(4);
7341 format %{ "MULX $src1,$src2,$dst" %}
7342 opcode(Assembler::mulx_op3, Assembler::arith_op);
7343 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7344 ins_pipe(imul_reg_reg);
7345 %}
7347 // Immediate Multiplication
7348 instruct mulI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7349 match(Set dst (MulI src1 src2));
7351 size(4);
7352 format %{ "MULX $src1,$src2,$dst" %}
7353 opcode(Assembler::mulx_op3, Assembler::arith_op);
7354 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7355 ins_pipe(imul_reg_imm);
7356 %}
// Long multiply: higher latency reflected in the DEFAULT_COST*5 cost.
7358 instruct mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7359 match(Set dst (MulL src1 src2));
7360 ins_cost(DEFAULT_COST * 5);
7361 size(4);
7362 format %{ "MULX $src1,$src2,$dst\t! long" %}
7363 opcode(Assembler::mulx_op3, Assembler::arith_op);
7364 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7365 ins_pipe(mulL_reg_reg);
7366 %}
7368 // Immediate Multiplication
7369 instruct mulL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{
7370 match(Set dst (MulL src1 src2));
7371 ins_cost(DEFAULT_COST * 5);
7372 size(4);
7373 format %{ "MULX $src1,$src2,$dst" %}
7374 opcode(Assembler::mulx_op3, Assembler::arith_op);
7375 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7376 ins_pipe(mulL_reg_imm);
7377 %}
7379 // Integer Division
7380 // Register Division
// int division via 64-bit SDIVX: operands are first sign-extended from
// 32 bits with SRA reg,0 (hence iRegIsafe operands, which may be mutated).
7381 instruct divI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2) %{
7382 match(Set dst (DivI src1 src2));
7383 ins_cost((2+71)*DEFAULT_COST);
7385 format %{ "SRA $src2,0,$src2\n\t"
7386 "SRA $src1,0,$src1\n\t"
7387 "SDIVX $src1,$src2,$dst" %}
7388 ins_encode( idiv_reg( src1, src2, dst ) );
7389 ins_pipe(sdiv_reg_reg);
7390 %}
7392 // Immediate Division
// Only src1 needs sign-extension; the simm13 divisor is already exact.
7393 instruct divI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2) %{
7394 match(Set dst (DivI src1 src2));
7395 ins_cost((2+71)*DEFAULT_COST);
7397 format %{ "SRA $src1,0,$src1\n\t"
7398 "SDIVX $src1,$src2,$dst" %}
7399 ins_encode( idiv_imm( src1, src2, dst ) );
7400 ins_pipe(sdiv_reg_imm);
7401 %}
7403 //----------Div-By-10-Expansion------------------------------------------------
7404 // Extract hi bits of a 32x32->64 bit multiply.
7405 // Expand rule only, not matched
// The following four instructs are building blocks used solely by the
// divI_10 expand rule below; they have effects but no match rules.
7406 instruct mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2 ) %{
7407 effect( DEF dst, USE src1, USE src2 );
7408 format %{ "MULX $src1,$src2,$dst\t! Used in div-by-10\n\t"
7409 "SRLX $dst,#32,$dst\t\t! Extract only hi word of result" %}
7410 ins_encode( enc_mul_hi(dst,src1,src2));
7411 ins_pipe(sdiv_reg_reg);
7412 %}
7414 // Magic constant, reciprocal of 10
// 0x66666667 = ceil(2^34 / 10): the fixed-point reciprocal used by the
// classic multiply-shift signed divide-by-10 sequence.
7415 instruct loadConI_x66666667(iRegIsafe dst) %{
7416 effect( DEF dst );
7418 size(8);
7419 format %{ "SET 0x66666667,$dst\t! Used in div-by-10" %}
7420 ins_encode( Set32(0x66666667, dst) );
7421 ins_pipe(ialu_hi_lo_reg);
7422 %}
7424 // Register Shift Right Arithmetic Long by 32-63
// Broadcasts the sign bit (SRA by 31) — the correction term for negative
// dividends.
7425 instruct sra_31( iRegI dst, iRegI src ) %{
7426 effect( DEF dst, USE src );
7427 format %{ "SRA $src,31,$dst\t! Used in div-by-10" %}
7428 ins_encode( form3_rs1_rd_copysign_hi(src,dst) );
7429 ins_pipe(ialu_reg_reg);
7430 %}
7432 // Arithmetic Shift Right by 8-bit immediate
7433 instruct sra_reg_2( iRegI dst, iRegI src ) %{
7434 effect( DEF dst, USE src );
7435 format %{ "SRA $src,2,$dst\t! Used in div-by-10" %}
7436 opcode(Assembler::sra_op3, Assembler::arith_op);
7437 ins_encode( form3_rs1_simm13_rd( src, 0x2, dst ) );
7438 ins_pipe(ialu_reg_imm);
7439 %}
7441 // Integer DIV with 10
// dst = src/10 via reciprocal multiply:
//   dst = hi32(src * 0x66666667) >> 2  -  (src >> 31)
7442 instruct divI_10( iRegI dst, iRegIsafe src, immI10 div ) %{
7443 match(Set dst (DivI src div));
7444 ins_cost((6+6)*DEFAULT_COST);
7445 expand %{
7446 iRegIsafe tmp1; // Killed temps;
7447 iRegIsafe tmp2; // Killed temps;
7448 iRegI tmp3; // Killed temps;
7449 iRegI tmp4; // Killed temps;
7450 loadConI_x66666667( tmp1 ); // SET 0x66666667 -> tmp1
7451 mul_hi( tmp2, src, tmp1 ); // MUL hibits(src * tmp1) -> tmp2
7452 sra_31( tmp3, src ); // SRA src,31 -> tmp3
7453 sra_reg_2( tmp4, tmp2 ); // SRA tmp2,2 -> tmp4
7454 subI_reg_reg( dst,tmp4,tmp3); // SUB tmp4 - tmp3 -> dst
7455 %}
7456 %}
7458 // Register Long Division
// 64-bit signed divide, register and simm13 forms; cost models the ~71
// cycle SDIVX latency.
7459 instruct divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7460 match(Set dst (DivL src1 src2));
7461 ins_cost(DEFAULT_COST*71);
7462 size(4);
7463 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7464 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7465 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7466 ins_pipe(divL_reg_reg);
7467 %}
7469 // Register Long Division
7470 instruct divL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{
7471 match(Set dst (DivL src1 src2));
7472 ins_cost(DEFAULT_COST*71);
7473 size(4);
7474 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7475 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7476 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7477 ins_pipe(divL_reg_imm);
7478 %}
7480 // Integer Remainder
7481 // Register Remainder
// int remainder via the irem_reg/irem_imm encoding sequences; uses %o7 as a
// scratch register (KILL temp) and clobbers condition codes (KILL ccr).
7482 instruct modI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2, o7RegP temp, flagsReg ccr ) %{
7483 match(Set dst (ModI src1 src2));
7484 effect( KILL ccr, KILL temp);
7486 format %{ "SREM $src1,$src2,$dst" %}
7487 ins_encode( irem_reg(src1, src2, dst, temp) );
7488 ins_pipe(sdiv_reg_reg);
7489 %}
7491 // Immediate Remainder
7492 instruct modI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2, o7RegP temp, flagsReg ccr ) %{
7493 match(Set dst (ModI src1 src2));
7494 effect( KILL ccr, KILL temp);
7496 format %{ "SREM $src1,$src2,$dst" %}
7497 ins_encode( irem_imm(src1, src2, dst, temp) );
7498 ins_pipe(sdiv_reg_imm);
7499 %}
7501 // Register Long Remainder
// The _1/_2 instructs below are effect-only clones of divL/mulL/subL used
// exclusively by the modL expand rules; duplicates exist so each expansion
// step is a distinct instruct the register allocator can schedule.
7502 instruct divL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
7503 effect(DEF dst, USE src1, USE src2);
7504 size(4);
7505 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7506 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7507 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7508 ins_pipe(divL_reg_reg);
7509 %}
7511 // Register Long Division
7512 instruct divL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{
7513 effect(DEF dst, USE src1, USE src2);
7514 size(4);
7515 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7516 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7517 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7518 ins_pipe(divL_reg_imm);
7519 %}
7521 instruct mulL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
7522 effect(DEF dst, USE src1, USE src2);
7523 size(4);
7524 format %{ "MULX $src1,$src2,$dst\t! long" %}
7525 opcode(Assembler::mulx_op3, Assembler::arith_op);
7526 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7527 ins_pipe(mulL_reg_reg);
7528 %}
7530 // Immediate Multiplication
7531 instruct mulL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{
7532 effect(DEF dst, USE src1, USE src2);
7533 size(4);
7534 format %{ "MULX $src1,$src2,$dst" %}
7535 opcode(Assembler::mulx_op3, Assembler::arith_op);
7536 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7537 ins_pipe(mulL_reg_imm);
7538 %}
7540 instruct subL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
7541 effect(DEF dst, USE src1, USE src2);
7542 size(4);
7543 format %{ "SUB $src1,$src2,$dst\t! long" %}
7544 opcode(Assembler::sub_op3, Assembler::arith_op);
7545 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7546 ins_pipe(ialu_reg_reg);
7547 %}
7549 instruct subL_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{
7550 effect(DEF dst, USE src1, USE src2);
7551 size(4);
7552 format %{ "SUB $src1,$src2,$dst\t! long" %}
7553 opcode(Assembler::sub_op3, Assembler::arith_op);
7554 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7555 ins_pipe(ialu_reg_reg);
7556 %}
7558 // Register Long Remainder
// Expands ModL into three match-less helper instructions:
//   tmp1 = src1 / src2   (SDIVX, divL_reg_reg_1)
//   tmp2 = tmp1 * src2   (MULX,  mulL_reg_reg_1)
//   dst  = src1 - tmp2   (SUB,   subL_reg_reg_1)  =>  dst = src1 % src2
7559 instruct modL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7560 match(Set dst (ModL src1 src2));
// (71 + 6 + 1) presumably reflects the cycle counts of the divide plus the
// multiply and subtract on this pipeline -- TODO confirm against the
// divL/mulL/ialu pipeline classes.
7561 ins_cost(DEFAULT_COST*(71 + 6 + 1));
7562 expand %{
7563 iRegL tmp1;
7564 iRegL tmp2;
7565 divL_reg_reg_1(tmp1, src1, src2);
7566 mulL_reg_reg_1(tmp2, tmp1, src2);
7567 subL_reg_reg_1(dst, src1, tmp2);
7568 %}
7569 %}
7571 // Register Long Remainder, 13-bit immediate divisor.
// Same div/mul/sub expansion as modL_reg_reg, using the immediate-form
// divide and multiply helpers:
//   tmp1 = src1 / src2;  tmp2 = tmp1 * src2;  dst = src1 - tmp2
7572 instruct modL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{
7573 match(Set dst (ModL src1 src2));
7574 ins_cost(DEFAULT_COST*(71 + 6 + 1));
7575 expand %{
7576 iRegL tmp1;
7577 iRegL tmp2;
7578 divL_reg_imm13_1(tmp1, src1, src2);
7579 mulL_reg_imm13_1(tmp2, tmp1, src2);
// subL_reg_reg_2 is an identical clone of subL_reg_reg_1 -- apparently so
// each expand block references its own helper instruct; TODO confirm ADLC
// requires distinct helpers here.
7580 subL_reg_reg_2 (dst, src1, tmp2);
7581 %}
7582 %}
7584 // Integer Shift Instructions
7585 // Register Shift Left
7586 instruct shlI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7587 match(Set dst (LShiftI src1 src2));
7589 size(4);
7590 format %{ "SLL $src1,$src2,$dst" %}
7591 opcode(Assembler::sll_op3, Assembler::arith_op);
7592 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7593 ins_pipe(ialu_reg_reg);
7594 %}
7596 // Register Shift Left Immediate
7597 instruct shlI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
7598 match(Set dst (LShiftI src1 src2));
7600 size(4);
7601 format %{ "SLL $src1,$src2,$dst" %}
7602 opcode(Assembler::sll_op3, Assembler::arith_op);
7603 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7604 ins_pipe(ialu_reg_imm);
7605 %}
7607 // Register Shift Left
7608 instruct shlL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
7609 match(Set dst (LShiftL src1 src2));
7611 size(4);
7612 format %{ "SLLX $src1,$src2,$dst" %}
7613 opcode(Assembler::sllx_op3, Assembler::arith_op);
7614 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) );
7615 ins_pipe(ialu_reg_reg);
7616 %}
7618 // Register Shift Left Immediate
7619 instruct shlL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
7620 match(Set dst (LShiftL src1 src2));
7622 size(4);
7623 format %{ "SLLX $src1,$src2,$dst" %}
7624 opcode(Assembler::sllx_op3, Assembler::arith_op);
7625 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7626 ins_pipe(ialu_reg_imm);
7627 %}
7629 // Register Arithmetic Shift Right
7630 instruct sarI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7631 match(Set dst (RShiftI src1 src2));
7632 size(4);
7633 format %{ "SRA $src1,$src2,$dst" %}
7634 opcode(Assembler::sra_op3, Assembler::arith_op);
7635 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7636 ins_pipe(ialu_reg_reg);
7637 %}
7639 // Register Arithmetic Shift Right Immediate
7640 instruct sarI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
7641 match(Set dst (RShiftI src1 src2));
7643 size(4);
7644 format %{ "SRA $src1,$src2,$dst" %}
7645 opcode(Assembler::sra_op3, Assembler::arith_op);
7646 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7647 ins_pipe(ialu_reg_imm);
7648 %}
7650 // Register Shift Right Arithmetic Long
7651 instruct sarL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
7652 match(Set dst (RShiftL src1 src2));
7654 size(4);
7655 format %{ "SRAX $src1,$src2,$dst" %}
7656 opcode(Assembler::srax_op3, Assembler::arith_op);
7657 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) );
7658 ins_pipe(ialu_reg_reg);
7659 %}
7661 // Register Shift Right Arithmetic Immediate Long
7662 instruct sarL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
7663 match(Set dst (RShiftL src1 src2));
7665 size(4);
7666 format %{ "SRAX $src1,$src2,$dst" %}
7667 opcode(Assembler::srax_op3, Assembler::arith_op);
7668 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7669 ins_pipe(ialu_reg_imm);
7670 %}
7672 // Register Shift Right
7673 instruct shrI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7674 match(Set dst (URShiftI src1 src2));
7676 size(4);
7677 format %{ "SRL $src1,$src2,$dst" %}
7678 opcode(Assembler::srl_op3, Assembler::arith_op);
7679 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7680 ins_pipe(ialu_reg_reg);
7681 %}
7683 // Register Shift Right Immediate
7684 instruct shrI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
7685 match(Set dst (URShiftI src1 src2));
7687 size(4);
7688 format %{ "SRL $src1,$src2,$dst" %}
7689 opcode(Assembler::srl_op3, Assembler::arith_op);
7690 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7691 ins_pipe(ialu_reg_imm);
7692 %}
7694 // Register Shift Right
7695 instruct shrL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
7696 match(Set dst (URShiftL src1 src2));
7698 size(4);
7699 format %{ "SRLX $src1,$src2,$dst" %}
7700 opcode(Assembler::srlx_op3, Assembler::arith_op);
7701 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) );
7702 ins_pipe(ialu_reg_reg);
7703 %}
7705 // Register Shift Right Immediate
7706 instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
7707 match(Set dst (URShiftL src1 src2));
7709 size(4);
7710 format %{ "SRLX $src1,$src2,$dst" %}
7711 opcode(Assembler::srlx_op3, Assembler::arith_op);
7712 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7713 ins_pipe(ialu_reg_imm);
7714 %}
7716 // Register Shift Right Immediate with a CastP2X
7717 #ifdef _LP64
7718 instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{
7719 match(Set dst (URShiftL (CastP2X src1) src2));
7720 size(4);
7721 format %{ "SRLX $src1,$src2,$dst\t! Cast ptr $src1 to long and shift" %}
7722 opcode(Assembler::srlx_op3, Assembler::arith_op);
7723 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7724 ins_pipe(ialu_reg_imm);
7725 %}
7726 #else
7727 instruct shrP_reg_imm5(iRegI dst, iRegP src1, immU5 src2) %{
7728 match(Set dst (URShiftI (CastP2X src1) src2));
7729 size(4);
7730 format %{ "SRL $src1,$src2,$dst\t! Cast ptr $src1 to int and shift" %}
7731 opcode(Assembler::srl_op3, Assembler::arith_op);
7732 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7733 ins_pipe(ialu_reg_imm);
7734 %}
7735 #endif
7738 //----------Floating Point Arithmetic Instructions-----------------------------
7740 // Add float single precision
7741 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
7742 match(Set dst (AddF src1 src2));
7744 size(4);
7745 format %{ "FADDS $src1,$src2,$dst" %}
7746 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fadds_opf);
7747 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7748 ins_pipe(faddF_reg_reg);
7749 %}
7751 // Add float double precision
7752 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
7753 match(Set dst (AddD src1 src2));
7755 size(4);
7756 format %{ "FADDD $src1,$src2,$dst" %}
7757 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf);
7758 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7759 ins_pipe(faddD_reg_reg);
7760 %}
7762 // Sub float single precision
7763 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
7764 match(Set dst (SubF src1 src2));
7766 size(4);
7767 format %{ "FSUBS $src1,$src2,$dst" %}
7768 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubs_opf);
7769 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7770 ins_pipe(faddF_reg_reg);
7771 %}
7773 // Sub float double precision
7774 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
7775 match(Set dst (SubD src1 src2));
7777 size(4);
7778 format %{ "FSUBD $src1,$src2,$dst" %}
7779 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf);
7780 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7781 ins_pipe(faddD_reg_reg);
7782 %}
7784 // Mul float single precision
7785 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
7786 match(Set dst (MulF src1 src2));
7788 size(4);
7789 format %{ "FMULS $src1,$src2,$dst" %}
7790 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuls_opf);
7791 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7792 ins_pipe(fmulF_reg_reg);
7793 %}
7795 // Mul float double precision
7796 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
7797 match(Set dst (MulD src1 src2));
7799 size(4);
7800 format %{ "FMULD $src1,$src2,$dst" %}
7801 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf);
7802 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7803 ins_pipe(fmulD_reg_reg);
7804 %}
7806 // Div float single precision
7807 instruct divF_reg_reg(regF dst, regF src1, regF src2) %{
7808 match(Set dst (DivF src1 src2));
7810 size(4);
7811 format %{ "FDIVS $src1,$src2,$dst" %}
7812 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivs_opf);
7813 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7814 ins_pipe(fdivF_reg_reg);
7815 %}
7817 // Div float double precision
7818 instruct divD_reg_reg(regD dst, regD src1, regD src2) %{
7819 match(Set dst (DivD src1 src2));
7821 size(4);
7822 format %{ "FDIVD $src1,$src2,$dst" %}
7823 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivd_opf);
7824 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7825 ins_pipe(fdivD_reg_reg);
7826 %}
7828 // Absolute float double precision
7829 instruct absD_reg(regD dst, regD src) %{
7830 match(Set dst (AbsD src));
7832 format %{ "FABSd $src,$dst" %}
7833 ins_encode(fabsd(dst, src));
7834 ins_pipe(faddD_reg);
7835 %}
7837 // Absolute float single precision
7838 instruct absF_reg(regF dst, regF src) %{
7839 match(Set dst (AbsF src));
7841 format %{ "FABSs $src,$dst" %}
7842 ins_encode(fabss(dst, src));
7843 ins_pipe(faddF_reg);
7844 %}
7846 instruct negF_reg(regF dst, regF src) %{
7847 match(Set dst (NegF src));
7849 size(4);
7850 format %{ "FNEGs $src,$dst" %}
7851 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fnegs_opf);
7852 ins_encode(form3_opf_rs2F_rdF(src, dst));
7853 ins_pipe(faddF_reg);
7854 %}
7856 instruct negD_reg(regD dst, regD src) %{
7857 match(Set dst (NegD src));
7859 format %{ "FNEGd $src,$dst" %}
7860 ins_encode(fnegd(dst, src));
7861 ins_pipe(faddD_reg);
7862 %}
7864 // Sqrt float single precision
7865 instruct sqrtF_reg_reg(regF dst, regF src) %{
7866 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
7868 size(4);
7869 format %{ "FSQRTS $src,$dst" %}
7870 ins_encode(fsqrts(dst, src));
7871 ins_pipe(fdivF_reg_reg);
7872 %}
7874 // Sqrt float double precision
7875 instruct sqrtD_reg_reg(regD dst, regD src) %{
7876 match(Set dst (SqrtD src));
7878 size(4);
7879 format %{ "FSQRTD $src,$dst" %}
7880 ins_encode(fsqrtd(dst, src));
7881 ins_pipe(fdivD_reg_reg);
7882 %}
7884 //----------Logical Instructions-----------------------------------------------
7885 // And Instructions
7886 // Register And
7887 instruct andI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7888 match(Set dst (AndI src1 src2));
7890 size(4);
7891 format %{ "AND $src1,$src2,$dst" %}
7892 opcode(Assembler::and_op3, Assembler::arith_op);
7893 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7894 ins_pipe(ialu_reg_reg);
7895 %}
7897 // Immediate And
7898 instruct andI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7899 match(Set dst (AndI src1 src2));
7901 size(4);
7902 format %{ "AND $src1,$src2,$dst" %}
7903 opcode(Assembler::and_op3, Assembler::arith_op);
7904 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7905 ins_pipe(ialu_reg_imm);
7906 %}
7908 // Register And Long
7909 instruct andL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7910 match(Set dst (AndL src1 src2));
7912 ins_cost(DEFAULT_COST);
7913 size(4);
7914 format %{ "AND $src1,$src2,$dst\t! long" %}
7915 opcode(Assembler::and_op3, Assembler::arith_op);
7916 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7917 ins_pipe(ialu_reg_reg);
7918 %}
7920 instruct andL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
7921 match(Set dst (AndL src1 con));
7923 ins_cost(DEFAULT_COST);
7924 size(4);
7925 format %{ "AND $src1,$con,$dst\t! long" %}
7926 opcode(Assembler::and_op3, Assembler::arith_op);
7927 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
7928 ins_pipe(ialu_reg_imm);
7929 %}
7931 // Or Instructions
7932 // Register Or
7933 instruct orI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7934 match(Set dst (OrI src1 src2));
7936 size(4);
7937 format %{ "OR $src1,$src2,$dst" %}
7938 opcode(Assembler::or_op3, Assembler::arith_op);
7939 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7940 ins_pipe(ialu_reg_reg);
7941 %}
7943 // Immediate Or
7944 instruct orI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7945 match(Set dst (OrI src1 src2));
7947 size(4);
7948 format %{ "OR $src1,$src2,$dst" %}
7949 opcode(Assembler::or_op3, Assembler::arith_op);
7950 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7951 ins_pipe(ialu_reg_imm);
7952 %}
7954 // Register Or Long
7955 instruct orL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7956 match(Set dst (OrL src1 src2));
7958 ins_cost(DEFAULT_COST);
7959 size(4);
7960 format %{ "OR $src1,$src2,$dst\t! long" %}
7961 opcode(Assembler::or_op3, Assembler::arith_op);
7962 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7963 ins_pipe(ialu_reg_reg);
7964 %}
7966 instruct orL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
7967 match(Set dst (OrL src1 con));
// Single OR-immediate instruction: one DEFAULT_COST, matching the sibling
// andL_reg_imm13 and xorL_reg_imm13 entries. A stray duplicate
// ins_cost(DEFAULT_COST*2) declaration was removed -- it conflicted with the
// line below and overstated the cost of this 4-byte, single-cycle pattern.
7970 ins_cost(DEFAULT_COST);
7971 size(4);
7972 format %{ "OR $src1,$con,$dst\t! long" %}
7973 opcode(Assembler::or_op3, Assembler::arith_op);
7974 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
7975 ins_pipe(ialu_reg_imm);
7976 %}
7978 #ifndef _LP64
7980 // Use sp_ptr_RegP to match G2 (TLS register) without spilling.
7981 instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{
7982 match(Set dst (OrI src1 (CastP2X src2)));
7984 size(4);
7985 format %{ "OR $src1,$src2,$dst" %}
7986 opcode(Assembler::or_op3, Assembler::arith_op);
7987 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7988 ins_pipe(ialu_reg_reg);
7989 %}
7991 #else
7993 instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{
7994 match(Set dst (OrL src1 (CastP2X src2)));
7996 ins_cost(DEFAULT_COST);
7997 size(4);
7998 format %{ "OR $src1,$src2,$dst\t! long" %}
7999 opcode(Assembler::or_op3, Assembler::arith_op);
8000 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8001 ins_pipe(ialu_reg_reg);
8002 %}
8004 #endif
8006 // Xor Instructions
8007 // Register Xor
8008 instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
8009 match(Set dst (XorI src1 src2));
8011 size(4);
8012 format %{ "XOR $src1,$src2,$dst" %}
8013 opcode(Assembler::xor_op3, Assembler::arith_op);
8014 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8015 ins_pipe(ialu_reg_reg);
8016 %}
8018 // Immediate Xor
8019 instruct xorI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
8020 match(Set dst (XorI src1 src2));
8022 size(4);
8023 format %{ "XOR $src1,$src2,$dst" %}
8024 opcode(Assembler::xor_op3, Assembler::arith_op);
8025 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
8026 ins_pipe(ialu_reg_imm);
8027 %}
8029 // Register Xor Long
8030 instruct xorL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
8031 match(Set dst (XorL src1 src2));
8033 ins_cost(DEFAULT_COST);
8034 size(4);
8035 format %{ "XOR $src1,$src2,$dst\t! long" %}
8036 opcode(Assembler::xor_op3, Assembler::arith_op);
8037 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8038 ins_pipe(ialu_reg_reg);
8039 %}
8041 instruct xorL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
8042 match(Set dst (XorL src1 con));
8044 ins_cost(DEFAULT_COST);
8045 size(4);
8046 format %{ "XOR $src1,$con,$dst\t! long" %}
8047 opcode(Assembler::xor_op3, Assembler::arith_op);
8048 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
8049 ins_pipe(ialu_reg_imm);
8050 %}
8052 //----------Convert to Boolean-------------------------------------------------
8053 // Nice hack for 32-bit tests but doesn't work for
8054 // 64-bit pointers.
// dst = (src != 0) ? 1 : 0.  The CMP (SUBcc %g0,src) / ADDX pair relies on
// the subtract producing a carry exactly when src is nonzero, which ADDX
// then adds into a zero base -- NOTE(review): carry semantics per SPARC
// SUBcc; confirm against enc_to_bool's emitted sequence.
8055 instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{
8056 match(Set dst (Conv2B src));
8057 effect( KILL ccr );
8058 ins_cost(DEFAULT_COST*2);
8059 format %{ "CMP R_G0,$src\n\t"
8060 "ADDX R_G0,0,$dst" %}
8061 ins_encode( enc_to_bool( src, dst ) );
8062 ins_pipe(ialu_reg_ialu);
8063 %}
8065 #ifndef _LP64
8066 instruct convP2B( iRegI dst, iRegP src, flagsReg ccr ) %{
8067 match(Set dst (Conv2B src));
8068 effect( KILL ccr );
8069 ins_cost(DEFAULT_COST*2);
8070 format %{ "CMP R_G0,$src\n\t"
8071 "ADDX R_G0,0,$dst" %}
8072 ins_encode( enc_to_bool( src, dst ) );
8073 ins_pipe(ialu_reg_ialu);
8074 %}
8075 #else
8076 instruct convP2B( iRegI dst, iRegP src ) %{
8077 match(Set dst (Conv2B src));
8078 ins_cost(DEFAULT_COST*2);
8079 format %{ "MOV $src,$dst\n\t"
8080 "MOVRNZ $src,1,$dst" %}
8081 ins_encode( form3_g0_rs2_rd_move( src, dst ), enc_convP2B( dst, src ) );
8082 ins_pipe(ialu_clr_and_mover);
8083 %}
8084 #endif
// CmpLTMask against constant zero: dst = (src < 0) ? -1 : 0, done by
// arithmetic-shifting the sign bit across all 32 bits (SRA by 31).
8086 instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{
8087 match(Set dst (CmpLTMask src zero));
// SRA itself does not write the condition codes; the ccr kill looks
// conservative here -- TODO confirm whether it can be dropped.
8088 effect(KILL ccr);
8089 size(4);
8090 format %{ "SRA $src,#31,$dst\t# cmpLTMask0" %}
8091 ins_encode %{
8092 __ sra($src$$Register, 31, $dst$$Register);
8093 %}
8094 ins_pipe(ialu_reg_imm);
8095 %}
8097 instruct cmpLTMask_reg_reg( iRegI dst, iRegI p, iRegI q, flagsReg ccr ) %{
8098 match(Set dst (CmpLTMask p q));
8099 effect( KILL ccr );
8100 ins_cost(DEFAULT_COST*4);
8101 format %{ "CMP $p,$q\n\t"
8102 "MOV #0,$dst\n\t"
8103 "BLT,a .+8\n\t"
8104 "MOV #-1,$dst" %}
8105 ins_encode( enc_ltmask(p,q,dst) );
8106 ins_pipe(ialu_reg_reg_ialu);
8107 %}
8109 instruct cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp, flagsReg ccr ) %{
8110 match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)));
8111 effect(KILL ccr, TEMP tmp);
8112 ins_cost(DEFAULT_COST*3);
8114 format %{ "SUBcc $p,$q,$p\t! p' = p-q\n\t"
8115 "ADD $p,$y,$tmp\t! g3=p-q+y\n\t"
8116 "MOVlt $tmp,$p\t! p' < 0 ? p'+y : p'" %}
8117 ins_encode( enc_cadd_cmpLTMask(p, q, y, tmp) );
8118 ins_pipe( cadd_cmpltmask );
8119 %}
8122 //-----------------------------------------------------------------
8123 // Direct raw moves between float and general registers using VIS3.
8125 // ins_pipe(faddF_reg);
8126 instruct MoveF2I_reg_reg(iRegI dst, regF src) %{
8127 predicate(UseVIS >= 3);
8128 match(Set dst (MoveF2I src));
8130 format %{ "MOVSTOUW $src,$dst\t! MoveF2I" %}
8131 ins_encode %{
8132 __ movstouw($src$$FloatRegister, $dst$$Register);
8133 %}
8134 ins_pipe(ialu_reg_reg);
8135 %}
8137 instruct MoveI2F_reg_reg(regF dst, iRegI src) %{
8138 predicate(UseVIS >= 3);
8139 match(Set dst (MoveI2F src));
8141 format %{ "MOVWTOS $src,$dst\t! MoveI2F" %}
8142 ins_encode %{
8143 __ movwtos($src$$Register, $dst$$FloatRegister);
8144 %}
8145 ins_pipe(ialu_reg_reg);
8146 %}
8148 instruct MoveD2L_reg_reg(iRegL dst, regD src) %{
8149 predicate(UseVIS >= 3);
8150 match(Set dst (MoveD2L src));
8152 format %{ "MOVDTOX $src,$dst\t! MoveD2L" %}
8153 ins_encode %{
8154 __ movdtox(as_DoubleFloatRegister($src$$reg), $dst$$Register);
8155 %}
8156 ins_pipe(ialu_reg_reg);
8157 %}
8159 instruct MoveL2D_reg_reg(regD dst, iRegL src) %{
8160 predicate(UseVIS >= 3);
8161 match(Set dst (MoveL2D src));
8163 format %{ "MOVXTOD $src,$dst\t! MoveL2D" %}
8164 ins_encode %{
8165 __ movxtod($src$$Register, as_DoubleFloatRegister($dst$$reg));
8166 %}
8167 ins_pipe(ialu_reg_reg);
8168 %}
8171 // Raw moves between float and general registers using stack.
8173 instruct MoveF2I_stack_reg(iRegI dst, stackSlotF src) %{
8174 match(Set dst (MoveF2I src));
8175 effect(DEF dst, USE src);
8176 ins_cost(MEMORY_REF_COST);
8178 size(4);
8179 format %{ "LDUW $src,$dst\t! MoveF2I" %}
8180 opcode(Assembler::lduw_op3);
8181 ins_encode(simple_form3_mem_reg( src, dst ) );
8182 ins_pipe(iload_mem);
8183 %}
8185 instruct MoveI2F_stack_reg(regF dst, stackSlotI src) %{
8186 match(Set dst (MoveI2F src));
8187 effect(DEF dst, USE src);
8188 ins_cost(MEMORY_REF_COST);
8190 size(4);
8191 format %{ "LDF $src,$dst\t! MoveI2F" %}
8192 opcode(Assembler::ldf_op3);
8193 ins_encode(simple_form3_mem_reg(src, dst));
8194 ins_pipe(floadF_stk);
8195 %}
8197 instruct MoveD2L_stack_reg(iRegL dst, stackSlotD src) %{
8198 match(Set dst (MoveD2L src));
8199 effect(DEF dst, USE src);
8200 ins_cost(MEMORY_REF_COST);
8202 size(4);
8203 format %{ "LDX $src,$dst\t! MoveD2L" %}
8204 opcode(Assembler::ldx_op3);
8205 ins_encode(simple_form3_mem_reg( src, dst ) );
8206 ins_pipe(iload_mem);
8207 %}
8209 instruct MoveL2D_stack_reg(regD dst, stackSlotL src) %{
8210 match(Set dst (MoveL2D src));
8211 effect(DEF dst, USE src);
8212 ins_cost(MEMORY_REF_COST);
8214 size(4);
8215 format %{ "LDDF $src,$dst\t! MoveL2D" %}
8216 opcode(Assembler::lddf_op3);
8217 ins_encode(simple_form3_mem_reg(src, dst));
8218 ins_pipe(floadD_stk);
8219 %}
8221 instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{
8222 match(Set dst (MoveF2I src));
8223 effect(DEF dst, USE src);
8224 ins_cost(MEMORY_REF_COST);
8226 size(4);
8227 format %{ "STF $src,$dst\t! MoveF2I" %}
8228 opcode(Assembler::stf_op3);
8229 ins_encode(simple_form3_mem_reg(dst, src));
8230 ins_pipe(fstoreF_stk_reg);
8231 %}
8233 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{
8234 match(Set dst (MoveI2F src));
8235 effect(DEF dst, USE src);
8236 ins_cost(MEMORY_REF_COST);
8238 size(4);
8239 format %{ "STW $src,$dst\t! MoveI2F" %}
8240 opcode(Assembler::stw_op3);
8241 ins_encode(simple_form3_mem_reg( dst, src ) );
8242 ins_pipe(istore_mem_reg);
8243 %}
8245 instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{
8246 match(Set dst (MoveD2L src));
8247 effect(DEF dst, USE src);
8248 ins_cost(MEMORY_REF_COST);
8250 size(4);
8251 format %{ "STDF $src,$dst\t! MoveD2L" %}
8252 opcode(Assembler::stdf_op3);
8253 ins_encode(simple_form3_mem_reg(dst, src));
8254 ins_pipe(fstoreD_stk_reg);
8255 %}
8257 instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{
8258 match(Set dst (MoveL2D src));
8259 effect(DEF dst, USE src);
8260 ins_cost(MEMORY_REF_COST);
8262 size(4);
8263 format %{ "STX $src,$dst\t! MoveL2D" %}
8264 opcode(Assembler::stx_op3);
8265 ins_encode(simple_form3_mem_reg( dst, src ) );
8266 ins_pipe(istore_mem_reg);
8267 %}
8270 //----------Arithmetic Conversion Instructions---------------------------------
8271 // The conversion operations are all Alpha sorted. Please keep it that way!
8273 instruct convD2F_reg(regF dst, regD src) %{
8274 match(Set dst (ConvD2F src));
8275 size(4);
8276 format %{ "FDTOS $src,$dst" %}
8277 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf);
8278 ins_encode(form3_opf_rs2D_rdF(src, dst));
8279 ins_pipe(fcvtD2F);
8280 %}
8283 // Convert a double to an int in a float register.
8284 // If the double is a NAN, stuff a zero in instead.
8285 instruct convD2I_helper(regF dst, regD src, flagsRegF0 fcc0) %{
8286 effect(DEF dst, USE src, KILL fcc0);
8287 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t"
8288 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
8289 "FDTOI $src,$dst\t! convert in delay slot\n\t"
8290 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t"
8291 "FSUBs $dst,$dst,$dst\t! cleared only if nan\n"
8292 "skip:" %}
8293 ins_encode(form_d2i_helper(src,dst));
8294 ins_pipe(fcvtD2I);
8295 %}
8297 instruct convD2I_stk(stackSlotI dst, regD src) %{
8298 match(Set dst (ConvD2I src));
8299 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
8300 expand %{
8301 regF tmp;
8302 convD2I_helper(tmp, src);
8303 regF_to_stkI(dst, tmp);
8304 %}
8305 %}
8307 instruct convD2I_reg(iRegI dst, regD src) %{
8308 predicate(UseVIS >= 3);
8309 match(Set dst (ConvD2I src));
8310 ins_cost(DEFAULT_COST*2 + BRANCH_COST);
8311 expand %{
8312 regF tmp;
8313 convD2I_helper(tmp, src);
8314 MoveF2I_reg_reg(dst, tmp);
8315 %}
8316 %}
8319 // Convert a double to a long in a double register.
8320 // If the double is a NAN, stuff a zero in instead.
8321 instruct convD2L_helper(regD dst, regD src, flagsRegF0 fcc0) %{
8322 effect(DEF dst, USE src, KILL fcc0);
8323 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t"
8324 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
8325 "FDTOX $src,$dst\t! convert in delay slot\n\t"
8326 "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t"
8327 "FSUBd $dst,$dst,$dst\t! cleared only if nan\n"
8328 "skip:" %}
8329 ins_encode(form_d2l_helper(src,dst));
8330 ins_pipe(fcvtD2L);
8331 %}
8333 instruct convD2L_stk(stackSlotL dst, regD src) %{
8334 match(Set dst (ConvD2L src));
8335 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
8336 expand %{
8337 regD tmp;
8338 convD2L_helper(tmp, src);
8339 regD_to_stkL(dst, tmp);
8340 %}
8341 %}
8343 instruct convD2L_reg(iRegL dst, regD src) %{
8344 predicate(UseVIS >= 3);
8345 match(Set dst (ConvD2L src));
8346 ins_cost(DEFAULT_COST*2 + BRANCH_COST);
8347 expand %{
8348 regD tmp;
8349 convD2L_helper(tmp, src);
8350 MoveD2L_reg_reg(dst, tmp);
8351 %}
8352 %}
8355 instruct convF2D_reg(regD dst, regF src) %{
8356 match(Set dst (ConvF2D src));
8357 format %{ "FSTOD $src,$dst" %}
8358 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fstod_opf);
8359 ins_encode(form3_opf_rs2F_rdD(src, dst));
8360 ins_pipe(fcvtF2D);
8361 %}
8364 // Convert a float to an int in a float register.
8365 // If the float is a NAN, stuff a zero in instead.
8366 instruct convF2I_helper(regF dst, regF src, flagsRegF0 fcc0) %{
8367 effect(DEF dst, USE src, KILL fcc0);
8368 format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t"
8369 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
8370 "FSTOI $src,$dst\t! convert in delay slot\n\t"
8371 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t"
8372 "FSUBs $dst,$dst,$dst\t! cleared only if nan\n"
8373 "skip:" %}
8374 ins_encode(form_f2i_helper(src,dst));
8375 ins_pipe(fcvtF2I);
8376 %}
8378 instruct convF2I_stk(stackSlotI dst, regF src) %{
8379 match(Set dst (ConvF2I src));
8380 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
8381 expand %{
8382 regF tmp;
8383 convF2I_helper(tmp, src);
8384 regF_to_stkI(dst, tmp);
8385 %}
8386 %}
8388 instruct convF2I_reg(iRegI dst, regF src) %{
8389 predicate(UseVIS >= 3);
8390 match(Set dst (ConvF2I src));
8391 ins_cost(DEFAULT_COST*2 + BRANCH_COST);
8392 expand %{
8393 regF tmp;
8394 convF2I_helper(tmp, src);
8395 MoveF2I_reg_reg(dst, tmp);
8396 %}
8397 %}
8400 // Convert a float to a long in a float register.
8401 // If the float is a NAN, stuff a zero in instead.
8402 instruct convF2L_helper(regD dst, regF src, flagsRegF0 fcc0) %{
8403 effect(DEF dst, USE src, KILL fcc0);
8404 format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t"
8405 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
8406 "FSTOX $src,$dst\t! convert in delay slot\n\t"
8407 "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t"
8408 "FSUBd $dst,$dst,$dst\t! cleared only if nan\n"
8409 "skip:" %}
8410 ins_encode(form_f2l_helper(src,dst));
8411 ins_pipe(fcvtF2L);
8412 %}
8414 instruct convF2L_stk(stackSlotL dst, regF src) %{
8415 match(Set dst (ConvF2L src));
8416 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
8417 expand %{
8418 regD tmp;
8419 convF2L_helper(tmp, src);
8420 regD_to_stkL(dst, tmp);
8421 %}
8422 %}
8424 instruct convF2L_reg(iRegL dst, regF src) %{
8425 predicate(UseVIS >= 3);
8426 match(Set dst (ConvF2L src));
8427 ins_cost(DEFAULT_COST*2 + BRANCH_COST);
8428 expand %{
8429 regD tmp;
8430 convF2L_helper(tmp, src);
8431 MoveD2L_reg_reg(dst, tmp);
8432 %}
8433 %}
8436 instruct convI2D_helper(regD dst, regF tmp) %{
8437 effect(USE tmp, DEF dst);
8438 format %{ "FITOD $tmp,$dst" %}
8439 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
8440 ins_encode(form3_opf_rs2F_rdD(tmp, dst));
8441 ins_pipe(fcvtI2D);
8442 %}
8444 instruct convI2D_stk(stackSlotI src, regD dst) %{
8445 match(Set dst (ConvI2D src));
8446 ins_cost(DEFAULT_COST + MEMORY_REF_COST);
8447 expand %{
8448 regF tmp;
8449 stkI_to_regF(tmp, src);
8450 convI2D_helper(dst, tmp);
8451 %}
8452 %}
8454 instruct convI2D_reg(regD_low dst, iRegI src) %{
8455 predicate(UseVIS >= 3);
8456 match(Set dst (ConvI2D src));
8457 expand %{
8458 regF tmp;
8459 MoveI2F_reg_reg(tmp, src);
8460 convI2D_helper(dst, tmp);
8461 %}
8462 %}
8464 instruct convI2D_mem(regD_low dst, memory mem) %{
8465 match(Set dst (ConvI2D (LoadI mem)));
8466 ins_cost(DEFAULT_COST + MEMORY_REF_COST);
8467 size(8);
8468 format %{ "LDF $mem,$dst\n\t"
8469 "FITOD $dst,$dst" %}
8470 opcode(Assembler::ldf_op3, Assembler::fitod_opf);
8471 ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
8472 ins_pipe(floadF_mem);
8473 %}
8476 instruct convI2F_helper(regF dst, regF tmp) %{
8477 effect(DEF dst, USE tmp);
8478 format %{ "FITOS $tmp,$dst" %}
8479 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitos_opf);
8480 ins_encode(form3_opf_rs2F_rdF(tmp, dst));
8481 ins_pipe(fcvtI2F);
8482 %}
8484 instruct convI2F_stk(regF dst, stackSlotI src) %{
8485 match(Set dst (ConvI2F src));
8486 ins_cost(DEFAULT_COST + MEMORY_REF_COST);
8487 expand %{
8488 regF tmp;
8489 stkI_to_regF(tmp,src);
8490 convI2F_helper(dst, tmp);
8491 %}
8492 %}
8494 instruct convI2F_reg(regF dst, iRegI src) %{
8495 predicate(UseVIS >= 3);
8496 match(Set dst (ConvI2F src));
8497 ins_cost(DEFAULT_COST);
8498 expand %{
8499 regF tmp;
8500 MoveI2F_reg_reg(tmp, src);
8501 convI2F_helper(dst, tmp);
8502 %}
8503 %}
8505 instruct convI2F_mem( regF dst, memory mem ) %{
8506 match(Set dst (ConvI2F (LoadI mem)));
8507 ins_cost(DEFAULT_COST + MEMORY_REF_COST);
8508 size(8);
8509 format %{ "LDF $mem,$dst\n\t"
8510 "FITOS $dst,$dst" %}
8511 opcode(Assembler::ldf_op3, Assembler::fitos_opf);
8512 ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
8513 ins_pipe(floadF_mem);
8514 %}
8517 instruct convI2L_reg(iRegL dst, iRegI src) %{
8518 match(Set dst (ConvI2L src));
8519 size(4);
8520 format %{ "SRA $src,0,$dst\t! int->long" %}
8521 opcode(Assembler::sra_op3, Assembler::arith_op);
8522 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
8523 ins_pipe(ialu_reg_reg);
8524 %}
8526 // Zero-extend convert int to long
8527 instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{
8528 match(Set dst (AndL (ConvI2L src) mask) );
8529 size(4);
8530 format %{ "SRL $src,0,$dst\t! zero-extend int to long" %}
8531 opcode(Assembler::srl_op3, Assembler::arith_op);
8532 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
8533 ins_pipe(ialu_reg_reg);
8534 %}
8536 // Zero-extend long
8537 instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{
8538 match(Set dst (AndL src mask) );
8539 size(4);
8540 format %{ "SRL $src,0,$dst\t! zero-extend long" %}
8541 opcode(Assembler::srl_op3, Assembler::arith_op);
8542 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
8543 ins_pipe(ialu_reg_reg);
8544 %}
8547 //-----------
8548 // Long to Double conversion using V8 opcodes.
8549 // Still useful because cheetah traps and becomes
8550 // amazingly slow for some common numbers.
8552 // Magic constant, 0x43300000
8553 instruct loadConI_x43300000(iRegI dst) %{
8554 effect(DEF dst);
8555 size(4);
8556 format %{ "SETHI HI(0x43300000),$dst\t! 2^52" %}
8557 ins_encode(SetHi22(0x43300000, dst));
8558 ins_pipe(ialu_none);
8559 %}
8561 // Magic constant, 0x41f00000
8562 instruct loadConI_x41f00000(iRegI dst) %{
8563 effect(DEF dst);
8564 size(4);
8565 format %{ "SETHI HI(0x41f00000),$dst\t! 2^32" %}
8566 ins_encode(SetHi22(0x41f00000, dst));
8567 ins_pipe(ialu_none);
8568 %}
8570 // Construct a double from two float halves
8571 instruct regDHi_regDLo_to_regD(regD_low dst, regD_low src1, regD_low src2) %{
8572 effect(DEF dst, USE src1, USE src2);
8573 size(8);
8574 format %{ "FMOVS $src1.hi,$dst.hi\n\t"
8575 "FMOVS $src2.lo,$dst.lo" %}
8576 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmovs_opf);
8577 ins_encode(form3_opf_rs2D_hi_rdD_hi(src1, dst), form3_opf_rs2D_lo_rdD_lo(src2, dst));
8578 ins_pipe(faddD_reg_reg);
8579 %}
8581 // Convert integer in high half of a double register (in the lower half of
8582 // the double register file) to double
8583 instruct convI2D_regDHi_regD(regD dst, regD_low src) %{
8584 effect(DEF dst, USE src);
8585 size(4);
8586 format %{ "FITOD $src,$dst" %}
8587 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
8588 ins_encode(form3_opf_rs2D_rdD(src, dst));
8589 ins_pipe(fcvtLHi2D);
8590 %}
8592 // Add float double precision
8593 instruct addD_regD_regD(regD dst, regD src1, regD src2) %{
8594 effect(DEF dst, USE src1, USE src2);
8595 size(4);
8596 format %{ "FADDD $src1,$src2,$dst" %}
8597 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf);
8598 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
8599 ins_pipe(faddD_reg_reg);
8600 %}
8602 // Sub float double precision
8603 instruct subD_regD_regD(regD dst, regD src1, regD src2) %{
8604 effect(DEF dst, USE src1, USE src2);
8605 size(4);
8606 format %{ "FSUBD $src1,$src2,$dst" %}
8607 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf);
8608 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
8609 ins_pipe(faddD_reg_reg);
8610 %}
8612 // Mul float double precision
8613 instruct mulD_regD_regD(regD dst, regD src1, regD src2) %{
8614 effect(DEF dst, USE src1, USE src2);
8615 size(4);
8616 format %{ "FMULD $src1,$src2,$dst" %}
8617 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf);
8618 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
8619 ins_pipe(fmulD_reg_reg);
8620 %}
8622 instruct convL2D_reg_slow_fxtof(regD dst, stackSlotL src) %{
8623 match(Set dst (ConvL2D src));
8624 ins_cost(DEFAULT_COST*8 + MEMORY_REF_COST*6);
8626 expand %{
8627 regD_low tmpsrc;
8628 iRegI ix43300000;
8629 iRegI ix41f00000;
8630 stackSlotL lx43300000;
8631 stackSlotL lx41f00000;
8632 regD_low dx43300000;
8633 regD dx41f00000;
8634 regD tmp1;
8635 regD_low tmp2;
8636 regD tmp3;
8637 regD tmp4;
8639 stkL_to_regD(tmpsrc, src);
8641 loadConI_x43300000(ix43300000);
8642 loadConI_x41f00000(ix41f00000);
8643 regI_to_stkLHi(lx43300000, ix43300000);
8644 regI_to_stkLHi(lx41f00000, ix41f00000);
8645 stkL_to_regD(dx43300000, lx43300000);
8646 stkL_to_regD(dx41f00000, lx41f00000);
8648 convI2D_regDHi_regD(tmp1, tmpsrc);
8649 regDHi_regDLo_to_regD(tmp2, dx43300000, tmpsrc);
8650 subD_regD_regD(tmp3, tmp2, dx43300000);
8651 mulD_regD_regD(tmp4, tmp1, dx41f00000);
8652 addD_regD_regD(dst, tmp3, tmp4);
8653 %}
8654 %}
8656 // Long to Double conversion using fast fxtof
8657 instruct convL2D_helper(regD dst, regD tmp) %{
8658 effect(DEF dst, USE tmp);
8659 size(4);
8660 format %{ "FXTOD $tmp,$dst" %}
8661 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtod_opf);
8662 ins_encode(form3_opf_rs2D_rdD(tmp, dst));
8663 ins_pipe(fcvtL2D);
8664 %}
8666 instruct convL2D_stk_fast_fxtof(regD dst, stackSlotL src) %{
8667 predicate(VM_Version::has_fast_fxtof());
8668 match(Set dst (ConvL2D src));
8669 ins_cost(DEFAULT_COST + 3 * MEMORY_REF_COST);
8670 expand %{
8671 regD tmp;
8672 stkL_to_regD(tmp, src);
8673 convL2D_helper(dst, tmp);
8674 %}
8675 %}
8677 instruct convL2D_reg(regD dst, iRegL src) %{
8678 predicate(UseVIS >= 3);
8679 match(Set dst (ConvL2D src));
8680 expand %{
8681 regD tmp;
8682 MoveL2D_reg_reg(tmp, src);
8683 convL2D_helper(dst, tmp);
8684 %}
8685 %}
8687 // Long to Float conversion using fast fxtof
8688 instruct convL2F_helper(regF dst, regD tmp) %{
8689 effect(DEF dst, USE tmp);
8690 size(4);
8691 format %{ "FXTOS $tmp,$dst" %}
8692 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtos_opf);
8693 ins_encode(form3_opf_rs2D_rdF(tmp, dst));
8694 ins_pipe(fcvtL2F);
8695 %}
8697 instruct convL2F_stk_fast_fxtof(regF dst, stackSlotL src) %{
8698 match(Set dst (ConvL2F src));
8699 ins_cost(DEFAULT_COST + MEMORY_REF_COST);
8700 expand %{
8701 regD tmp;
8702 stkL_to_regD(tmp, src);
8703 convL2F_helper(dst, tmp);
8704 %}
8705 %}
8707 instruct convL2F_reg(regF dst, iRegL src) %{
8708 predicate(UseVIS >= 3);
8709 match(Set dst (ConvL2F src));
8710 ins_cost(DEFAULT_COST);
8711 expand %{
8712 regD tmp;
8713 MoveL2D_reg_reg(tmp, src);
8714 convL2F_helper(dst, tmp);
8715 %}
8716 %}
8718 //-----------
8720 instruct convL2I_reg(iRegI dst, iRegL src) %{
8721 match(Set dst (ConvL2I src));
8722 #ifndef _LP64
8723 format %{ "MOV $src.lo,$dst\t! long->int" %}
8724 ins_encode( form3_g0_rs2_rd_move_lo2( src, dst ) );
8725 ins_pipe(ialu_move_reg_I_to_L);
8726 #else
8727 size(4);
8728 format %{ "SRA $src,R_G0,$dst\t! long->int" %}
8729 ins_encode( form3_rs1_rd_signextend_lo1( src, dst ) );
8730 ins_pipe(ialu_reg);
8731 #endif
8732 %}
8734 // Register Shift Right Immediate
8735 instruct shrL_reg_imm6_L2I(iRegI dst, iRegL src, immI_32_63 cnt) %{
8736 match(Set dst (ConvL2I (RShiftL src cnt)));
8738 size(4);
8739 format %{ "SRAX $src,$cnt,$dst" %}
8740 opcode(Assembler::srax_op3, Assembler::arith_op);
8741 ins_encode( form3_sd_rs1_imm6_rd( src, cnt, dst ) );
8742 ins_pipe(ialu_reg_imm);
8743 %}
8745 //----------Control Flow Instructions------------------------------------------
8746 // Compare Instructions
8747 // Compare Integers
8748 instruct compI_iReg(flagsReg icc, iRegI op1, iRegI op2) %{
8749 match(Set icc (CmpI op1 op2));
8750 effect( DEF icc, USE op1, USE op2 );
8752 size(4);
8753 format %{ "CMP $op1,$op2" %}
8754 opcode(Assembler::subcc_op3, Assembler::arith_op);
8755 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
8756 ins_pipe(ialu_cconly_reg_reg);
8757 %}
8759 instruct compU_iReg(flagsRegU icc, iRegI op1, iRegI op2) %{
8760 match(Set icc (CmpU op1 op2));
8762 size(4);
8763 format %{ "CMP $op1,$op2\t! unsigned" %}
8764 opcode(Assembler::subcc_op3, Assembler::arith_op);
8765 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
8766 ins_pipe(ialu_cconly_reg_reg);
8767 %}
8769 instruct compI_iReg_imm13(flagsReg icc, iRegI op1, immI13 op2) %{
8770 match(Set icc (CmpI op1 op2));
8771 effect( DEF icc, USE op1 );
8773 size(4);
8774 format %{ "CMP $op1,$op2" %}
8775 opcode(Assembler::subcc_op3, Assembler::arith_op);
8776 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
8777 ins_pipe(ialu_cconly_reg_imm);
8778 %}
8780 instruct testI_reg_reg( flagsReg icc, iRegI op1, iRegI op2, immI0 zero ) %{
8781 match(Set icc (CmpI (AndI op1 op2) zero));
8783 size(4);
8784 format %{ "BTST $op2,$op1" %}
8785 opcode(Assembler::andcc_op3, Assembler::arith_op);
8786 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
8787 ins_pipe(ialu_cconly_reg_reg_zero);
8788 %}
8790 instruct testI_reg_imm( flagsReg icc, iRegI op1, immI13 op2, immI0 zero ) %{
8791 match(Set icc (CmpI (AndI op1 op2) zero));
8793 size(4);
8794 format %{ "BTST $op2,$op1" %}
8795 opcode(Assembler::andcc_op3, Assembler::arith_op);
8796 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
8797 ins_pipe(ialu_cconly_reg_imm_zero);
8798 %}
8800 instruct compL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2 ) %{
8801 match(Set xcc (CmpL op1 op2));
8802 effect( DEF xcc, USE op1, USE op2 );
8804 size(4);
8805 format %{ "CMP $op1,$op2\t\t! long" %}
8806 opcode(Assembler::subcc_op3, Assembler::arith_op);
8807 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
8808 ins_pipe(ialu_cconly_reg_reg);
8809 %}
8811 instruct compL_reg_con(flagsRegL xcc, iRegL op1, immL13 con) %{
8812 match(Set xcc (CmpL op1 con));
8813 effect( DEF xcc, USE op1, USE con );
8815 size(4);
8816 format %{ "CMP $op1,$con\t\t! long" %}
8817 opcode(Assembler::subcc_op3, Assembler::arith_op);
8818 ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) );
8819 ins_pipe(ialu_cconly_reg_reg);
8820 %}
8822 instruct testL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2, immL0 zero) %{
8823 match(Set xcc (CmpL (AndL op1 op2) zero));
8824 effect( DEF xcc, USE op1, USE op2 );
8826 size(4);
8827 format %{ "BTST $op1,$op2\t\t! long" %}
8828 opcode(Assembler::andcc_op3, Assembler::arith_op);
8829 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
8830 ins_pipe(ialu_cconly_reg_reg);
8831 %}
8833 // useful for checking the alignment of a pointer:
8834 instruct testL_reg_con(flagsRegL xcc, iRegL op1, immL13 con, immL0 zero) %{
8835 match(Set xcc (CmpL (AndL op1 con) zero));
8836 effect( DEF xcc, USE op1, USE con );
8838 size(4);
8839 format %{ "BTST $op1,$con\t\t! long" %}
8840 opcode(Assembler::andcc_op3, Assembler::arith_op);
8841 ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) );
8842 ins_pipe(ialu_cconly_reg_reg);
8843 %}
8845 instruct compU_iReg_imm13(flagsRegU icc, iRegI op1, immU13 op2 ) %{
8846 match(Set icc (CmpU op1 op2));
8848 size(4);
8849 format %{ "CMP $op1,$op2\t! unsigned" %}
8850 opcode(Assembler::subcc_op3, Assembler::arith_op);
8851 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
8852 ins_pipe(ialu_cconly_reg_imm);
8853 %}
8855 // Compare Pointers
8856 instruct compP_iRegP(flagsRegP pcc, iRegP op1, iRegP op2 ) %{
8857 match(Set pcc (CmpP op1 op2));
8859 size(4);
8860 format %{ "CMP $op1,$op2\t! ptr" %}
8861 opcode(Assembler::subcc_op3, Assembler::arith_op);
8862 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
8863 ins_pipe(ialu_cconly_reg_reg);
8864 %}
8866 instruct compP_iRegP_imm13(flagsRegP pcc, iRegP op1, immP13 op2 ) %{
8867 match(Set pcc (CmpP op1 op2));
8869 size(4);
8870 format %{ "CMP $op1,$op2\t! ptr" %}
8871 opcode(Assembler::subcc_op3, Assembler::arith_op);
8872 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
8873 ins_pipe(ialu_cconly_reg_imm);
8874 %}
8876 // Compare Narrow oops
8877 instruct compN_iRegN(flagsReg icc, iRegN op1, iRegN op2 ) %{
8878 match(Set icc (CmpN op1 op2));
8880 size(4);
8881 format %{ "CMP $op1,$op2\t! compressed ptr" %}
8882 opcode(Assembler::subcc_op3, Assembler::arith_op);
8883 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
8884 ins_pipe(ialu_cconly_reg_reg);
8885 %}
8887 instruct compN_iRegN_immN0(flagsReg icc, iRegN op1, immN0 op2 ) %{
8888 match(Set icc (CmpN op1 op2));
8890 size(4);
8891 format %{ "CMP $op1,$op2\t! compressed ptr" %}
8892 opcode(Assembler::subcc_op3, Assembler::arith_op);
8893 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
8894 ins_pipe(ialu_cconly_reg_imm);
8895 %}
8897 //----------Max and Min--------------------------------------------------------
8898 // Min Instructions
8899 // Conditional move for min
8900 instruct cmovI_reg_lt( iRegI op2, iRegI op1, flagsReg icc ) %{
8901 effect( USE_DEF op2, USE op1, USE icc );
8903 size(4);
8904 format %{ "MOVlt icc,$op1,$op2\t! min" %}
8905 opcode(Assembler::less);
8906 ins_encode( enc_cmov_reg_minmax(op2,op1) );
8907 ins_pipe(ialu_reg_flags);
8908 %}
8910 // Min Register with Register.
8911 instruct minI_eReg(iRegI op1, iRegI op2) %{
8912 match(Set op2 (MinI op1 op2));
8913 ins_cost(DEFAULT_COST*2);
8914 expand %{
8915 flagsReg icc;
8916 compI_iReg(icc,op1,op2);
8917 cmovI_reg_lt(op2,op1,icc);
8918 %}
8919 %}
8921 // Max Instructions
8922 // Conditional move for max
8923 instruct cmovI_reg_gt( iRegI op2, iRegI op1, flagsReg icc ) %{
8924 effect( USE_DEF op2, USE op1, USE icc );
8925 format %{ "MOVgt icc,$op1,$op2\t! max" %}
8926 opcode(Assembler::greater);
8927 ins_encode( enc_cmov_reg_minmax(op2,op1) );
8928 ins_pipe(ialu_reg_flags);
8929 %}
8931 // Max Register with Register
8932 instruct maxI_eReg(iRegI op1, iRegI op2) %{
8933 match(Set op2 (MaxI op1 op2));
8934 ins_cost(DEFAULT_COST*2);
8935 expand %{
8936 flagsReg icc;
8937 compI_iReg(icc,op1,op2);
8938 cmovI_reg_gt(op2,op1,icc);
8939 %}
8940 %}
8943 //----------Float Compares----------------------------------------------------
8944 // Compare floating, generate condition code
8945 instruct cmpF_cc(flagsRegF fcc, regF src1, regF src2) %{
8946 match(Set fcc (CmpF src1 src2));
8948 size(4);
8949 format %{ "FCMPs $fcc,$src1,$src2" %}
8950 opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmps_opf);
8951 ins_encode( form3_opf_rs1F_rs2F_fcc( src1, src2, fcc ) );
8952 ins_pipe(faddF_fcc_reg_reg_zero);
8953 %}
8955 instruct cmpD_cc(flagsRegF fcc, regD src1, regD src2) %{
8956 match(Set fcc (CmpD src1 src2));
8958 size(4);
8959 format %{ "FCMPd $fcc,$src1,$src2" %}
8960 opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmpd_opf);
8961 ins_encode( form3_opf_rs1D_rs2D_fcc( src1, src2, fcc ) );
8962 ins_pipe(faddD_fcc_reg_reg_zero);
8963 %}
8966 // Compare floating, generate -1,0,1
8967 instruct cmpF_reg(iRegI dst, regF src1, regF src2, flagsRegF0 fcc0) %{
8968 match(Set dst (CmpF3 src1 src2));
8969 effect(KILL fcc0);
8970 ins_cost(DEFAULT_COST*3+BRANCH_COST*3);
8971 format %{ "fcmpl $dst,$src1,$src2" %}
8972 // Primary = float
8973 opcode( true );
8974 ins_encode( floating_cmp( dst, src1, src2 ) );
8975 ins_pipe( floating_cmp );
8976 %}
8978 instruct cmpD_reg(iRegI dst, regD src1, regD src2, flagsRegF0 fcc0) %{
8979 match(Set dst (CmpD3 src1 src2));
8980 effect(KILL fcc0);
8981 ins_cost(DEFAULT_COST*3+BRANCH_COST*3);
8982 format %{ "dcmpl $dst,$src1,$src2" %}
8983 // Primary = double (not float)
8984 opcode( false );
8985 ins_encode( floating_cmp( dst, src1, src2 ) );
8986 ins_pipe( floating_cmp );
8987 %}
8989 //----------Branches---------------------------------------------------------
8990 // Jump
8991 // (compare 'operand indIndex' and 'instruct addP_reg_reg' above)
8992 instruct jumpXtnd(iRegX switch_val, o7RegI table) %{
8993 match(Jump switch_val);
8994 effect(TEMP table);
8996 ins_cost(350);
8998 format %{ "ADD $constanttablebase, $constantoffset, O7\n\t"
8999 "LD [O7 + $switch_val], O7\n\t"
9000 "JUMP O7" %}
9001 ins_encode %{
9002 // Calculate table address into a register.
9003 Register table_reg;
9004 Register label_reg = O7;
9005 // If we are calculating the size of this instruction don't trust
9006 // zero offsets because they might change when
9007 // MachConstantBaseNode decides to optimize the constant table
9008 // base.
9009 if ((constant_offset() == 0) && !Compile::current()->in_scratch_emit_size()) {
9010 table_reg = $constanttablebase;
9011 } else {
9012 table_reg = O7;
9013 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset, O7);
9014 __ add($constanttablebase, con_offset, table_reg);
9015 }
9017 // Jump to base address + switch value
9018 __ ld_ptr(table_reg, $switch_val$$Register, label_reg);
9019 __ jmp(label_reg, G0);
9020 __ delayed()->nop();
9021 %}
9022 ins_pipe(ialu_reg_reg);
9023 %}
9025 // Direct Branch. Use V8 version with longer range.
9026 instruct branch(label labl) %{
9027 match(Goto);
9028 effect(USE labl);
9030 size(8);
9031 ins_cost(BRANCH_COST);
9032 format %{ "BA $labl" %}
9033 ins_encode %{
9034 Label* L = $labl$$label;
9035 __ ba(*L);
9036 __ delayed()->nop();
9037 %}
9038 ins_pipe(br);
9039 %}
9041 // Direct Branch, short with no delay slot
9042 instruct branch_short(label labl) %{
9043 match(Goto);
9044 predicate(UseCBCond);
9045 effect(USE labl);
9047 size(4);
9048 ins_cost(BRANCH_COST);
9049 format %{ "BA $labl\t! short branch" %}
9050 ins_encode %{
9051 Label* L = $labl$$label;
9052 assert(__ use_cbcond(*L), "back to back cbcond");
9053 __ ba_short(*L);
9054 %}
9055 ins_short_branch(1);
9056 ins_avoid_back_to_back(1);
9057 ins_pipe(cbcond_reg_imm);
9058 %}
9060 // Conditional Direct Branch
9061 instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{
9062 match(If cmp icc);
9063 effect(USE labl);
9065 size(8);
9066 ins_cost(BRANCH_COST);
9067 format %{ "BP$cmp $icc,$labl" %}
9068 // Prim = bits 24-22, Secnd = bits 31-30
9069 ins_encode( enc_bp( labl, cmp, icc ) );
9070 ins_pipe(br_cc);
9071 %}
9073 instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{
9074 match(If cmp icc);
9075 effect(USE labl);
9077 ins_cost(BRANCH_COST);
9078 format %{ "BP$cmp $icc,$labl" %}
9079 // Prim = bits 24-22, Secnd = bits 31-30
9080 ins_encode( enc_bp( labl, cmp, icc ) );
9081 ins_pipe(br_cc);
9082 %}
9084 instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{
9085 match(If cmp pcc);
9086 effect(USE labl);
9088 size(8);
9089 ins_cost(BRANCH_COST);
9090 format %{ "BP$cmp $pcc,$labl" %}
9091 ins_encode %{
9092 Label* L = $labl$$label;
9093 Assembler::Predict predict_taken =
9094 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9096 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
9097 __ delayed()->nop();
9098 %}
9099 ins_pipe(br_cc);
9100 %}
9102 instruct branchConF(cmpOpF cmp, flagsRegF fcc, label labl) %{
9103 match(If cmp fcc);
9104 effect(USE labl);
9106 size(8);
9107 ins_cost(BRANCH_COST);
9108 format %{ "FBP$cmp $fcc,$labl" %}
9109 ins_encode %{
9110 Label* L = $labl$$label;
9111 Assembler::Predict predict_taken =
9112 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9114 __ fbp( (Assembler::Condition)($cmp$$cmpcode), false, (Assembler::CC)($fcc$$reg), predict_taken, *L);
9115 __ delayed()->nop();
9116 %}
9117 ins_pipe(br_fcc);
9118 %}
9120 instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{
9121 match(CountedLoopEnd cmp icc);
9122 effect(USE labl);
9124 size(8);
9125 ins_cost(BRANCH_COST);
9126 format %{ "BP$cmp $icc,$labl\t! Loop end" %}
9127 // Prim = bits 24-22, Secnd = bits 31-30
9128 ins_encode( enc_bp( labl, cmp, icc ) );
9129 ins_pipe(br_cc);
9130 %}
9132 instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{
9133 match(CountedLoopEnd cmp icc);
9134 effect(USE labl);
9136 size(8);
9137 ins_cost(BRANCH_COST);
9138 format %{ "BP$cmp $icc,$labl\t! Loop end" %}
9139 // Prim = bits 24-22, Secnd = bits 31-30
9140 ins_encode( enc_bp( labl, cmp, icc ) );
9141 ins_pipe(br_cc);
9142 %}
9144 // Compare and branch instructions
9145 instruct cmpI_reg_branch(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
9146 match(If cmp (CmpI op1 op2));
9147 effect(USE labl, KILL icc);
9149 size(12);
9150 ins_cost(BRANCH_COST);
9151 format %{ "CMP $op1,$op2\t! int\n\t"
9152 "BP$cmp $labl" %}
9153 ins_encode %{
9154 Label* L = $labl$$label;
9155 Assembler::Predict predict_taken =
9156 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9157 __ cmp($op1$$Register, $op2$$Register);
9158 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9159 __ delayed()->nop();
9160 %}
9161 ins_pipe(cmp_br_reg_reg);
9162 %}
9164 instruct cmpI_imm_branch(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
9165 match(If cmp (CmpI op1 op2));
9166 effect(USE labl, KILL icc);
9168 size(12);
9169 ins_cost(BRANCH_COST);
9170 format %{ "CMP $op1,$op2\t! int\n\t"
9171 "BP$cmp $labl" %}
9172 ins_encode %{
9173 Label* L = $labl$$label;
9174 Assembler::Predict predict_taken =
9175 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9176 __ cmp($op1$$Register, $op2$$constant);
9177 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9178 __ delayed()->nop();
9179 %}
9180 ins_pipe(cmp_br_reg_imm);
9181 %}
9183 instruct cmpU_reg_branch(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{
9184 match(If cmp (CmpU op1 op2));
9185 effect(USE labl, KILL icc);
9187 size(12);
9188 ins_cost(BRANCH_COST);
9189 format %{ "CMP $op1,$op2\t! unsigned\n\t"
9190 "BP$cmp $labl" %}
9191 ins_encode %{
9192 Label* L = $labl$$label;
9193 Assembler::Predict predict_taken =
9194 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9195 __ cmp($op1$$Register, $op2$$Register);
9196 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9197 __ delayed()->nop();
9198 %}
9199 ins_pipe(cmp_br_reg_reg);
9200 %}
9202 instruct cmpU_imm_branch(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{
9203 match(If cmp (CmpU op1 op2));
9204 effect(USE labl, KILL icc);
9206 size(12);
9207 ins_cost(BRANCH_COST);
9208 format %{ "CMP $op1,$op2\t! unsigned\n\t"
9209 "BP$cmp $labl" %}
9210 ins_encode %{
9211 Label* L = $labl$$label;
9212 Assembler::Predict predict_taken =
9213 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9214 __ cmp($op1$$Register, $op2$$constant);
9215 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9216 __ delayed()->nop();
9217 %}
9218 ins_pipe(cmp_br_reg_imm);
9219 %}
9221 instruct cmpL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{
9222 match(If cmp (CmpL op1 op2));
9223 effect(USE labl, KILL xcc);
9225 size(12);
9226 ins_cost(BRANCH_COST);
9227 format %{ "CMP $op1,$op2\t! long\n\t"
9228 "BP$cmp $labl" %}
9229 ins_encode %{
9230 Label* L = $labl$$label;
9231 Assembler::Predict predict_taken =
9232 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9233 __ cmp($op1$$Register, $op2$$Register);
9234 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
9235 __ delayed()->nop();
9236 %}
9237 ins_pipe(cmp_br_reg_reg);
9238 %}
9240 instruct cmpL_imm_branch(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{
9241 match(If cmp (CmpL op1 op2));
9242 effect(USE labl, KILL xcc);
9244 size(12);
9245 ins_cost(BRANCH_COST);
9246 format %{ "CMP $op1,$op2\t! long\n\t"
9247 "BP$cmp $labl" %}
9248 ins_encode %{
9249 Label* L = $labl$$label;
9250 Assembler::Predict predict_taken =
9251 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9252 __ cmp($op1$$Register, $op2$$constant);
9253 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
9254 __ delayed()->nop();
9255 %}
9256 ins_pipe(cmp_br_reg_imm);
9257 %}
9259 // Compare Pointers and branch
9260 instruct cmpP_reg_branch(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{
9261 match(If cmp (CmpP op1 op2));
9262 effect(USE labl, KILL pcc);
9264 size(12);
9265 ins_cost(BRANCH_COST);
9266 format %{ "CMP $op1,$op2\t! ptr\n\t"
9267 "B$cmp $labl" %}
9268 ins_encode %{
9269 Label* L = $labl$$label;
9270 Assembler::Predict predict_taken =
9271 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9272 __ cmp($op1$$Register, $op2$$Register);
9273 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
9274 __ delayed()->nop();
9275 %}
9276 ins_pipe(cmp_br_reg_reg);
9277 %}
9279 instruct cmpP_null_branch(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{
9280 match(If cmp (CmpP op1 null));
9281 effect(USE labl, KILL pcc);
9283 size(12);
9284 ins_cost(BRANCH_COST);
9285 format %{ "CMP $op1,0\t! ptr\n\t"
9286 "B$cmp $labl" %}
9287 ins_encode %{
9288 Label* L = $labl$$label;
9289 Assembler::Predict predict_taken =
9290 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9291 __ cmp($op1$$Register, G0);
9292 // bpr() is not used here since it has shorter distance.
9293 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
9294 __ delayed()->nop();
9295 %}
9296 ins_pipe(cmp_br_reg_reg);
9297 %}
9299 instruct cmpN_reg_branch(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{
9300 match(If cmp (CmpN op1 op2));
9301 effect(USE labl, KILL icc);
9303 size(12);
9304 ins_cost(BRANCH_COST);
9305 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
9306 "BP$cmp $labl" %}
9307 ins_encode %{
9308 Label* L = $labl$$label;
9309 Assembler::Predict predict_taken =
9310 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9311 __ cmp($op1$$Register, $op2$$Register);
9312 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9313 __ delayed()->nop();
9314 %}
9315 ins_pipe(cmp_br_reg_reg);
9316 %}
9318 instruct cmpN_null_branch(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{
9319 match(If cmp (CmpN op1 null));
9320 effect(USE labl, KILL icc);
9322 size(12);
9323 ins_cost(BRANCH_COST);
9324 format %{ "CMP $op1,0\t! compressed ptr\n\t"
9325 "BP$cmp $labl" %}
9326 ins_encode %{
9327 Label* L = $labl$$label;
9328 Assembler::Predict predict_taken =
9329 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9330 __ cmp($op1$$Register, G0);
9331 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9332 __ delayed()->nop();
9333 %}
9334 ins_pipe(cmp_br_reg_reg);
9335 %}
9337 // Loop back branch
9338 instruct cmpI_reg_branchLoopEnd(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
9339 match(CountedLoopEnd cmp (CmpI op1 op2));
9340 effect(USE labl, KILL icc);
9342 size(12);
9343 ins_cost(BRANCH_COST);
9344 format %{ "CMP $op1,$op2\t! int\n\t"
9345 "BP$cmp $labl\t! Loop end" %}
9346 ins_encode %{
9347 Label* L = $labl$$label;
9348 Assembler::Predict predict_taken =
9349 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9350 __ cmp($op1$$Register, $op2$$Register);
9351 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9352 __ delayed()->nop();
9353 %}
9354 ins_pipe(cmp_br_reg_reg);
9355 %}
9357 instruct cmpI_imm_branchLoopEnd(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
9358 match(CountedLoopEnd cmp (CmpI op1 op2));
9359 effect(USE labl, KILL icc);
9361 size(12);
9362 ins_cost(BRANCH_COST);
9363 format %{ "CMP $op1,$op2\t! int\n\t"
9364 "BP$cmp $labl\t! Loop end" %}
9365 ins_encode %{
9366 Label* L = $labl$$label;
9367 Assembler::Predict predict_taken =
9368 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9369 __ cmp($op1$$Register, $op2$$constant);
9370 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9371 __ delayed()->nop();
9372 %}
9373 ins_pipe(cmp_br_reg_imm);
9374 %}
9376 // Short compare and branch instructions
9377 instruct cmpI_reg_branch_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
9378 match(If cmp (CmpI op1 op2));
9379 predicate(UseCBCond);
9380 effect(USE labl, KILL icc);
9382 size(4);
9383 ins_cost(BRANCH_COST);
9384 format %{ "CWB$cmp $op1,$op2,$labl\t! int" %}
9385 ins_encode %{
9386 Label* L = $labl$$label;
9387 assert(__ use_cbcond(*L), "back to back cbcond");
9388 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9389 %}
9390 ins_short_branch(1);
9391 ins_avoid_back_to_back(1);
9392 ins_pipe(cbcond_reg_reg);
9393 %}
9395 instruct cmpI_imm_branch_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
9396 match(If cmp (CmpI op1 op2));
9397 predicate(UseCBCond);
9398 effect(USE labl, KILL icc);
9400 size(4);
9401 ins_cost(BRANCH_COST);
9402 format %{ "CWB$cmp $op1,$op2,$labl\t! int" %}
9403 ins_encode %{
9404 Label* L = $labl$$label;
9405 assert(__ use_cbcond(*L), "back to back cbcond");
9406 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
9407 %}
9408 ins_short_branch(1);
9409 ins_avoid_back_to_back(1);
9410 ins_pipe(cbcond_reg_imm);
9411 %}
9413 instruct cmpU_reg_branch_short(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{
9414 match(If cmp (CmpU op1 op2));
9415 predicate(UseCBCond);
9416 effect(USE labl, KILL icc);
9418 size(4);
9419 ins_cost(BRANCH_COST);
9420 format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
9421 ins_encode %{
9422 Label* L = $labl$$label;
9423 assert(__ use_cbcond(*L), "back to back cbcond");
9424 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9425 %}
9426 ins_short_branch(1);
9427 ins_avoid_back_to_back(1);
9428 ins_pipe(cbcond_reg_reg);
9429 %}
9431 instruct cmpU_imm_branch_short(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{
9432 match(If cmp (CmpU op1 op2));
9433 predicate(UseCBCond);
9434 effect(USE labl, KILL icc);
9436 size(4);
9437 ins_cost(BRANCH_COST);
9438 format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
9439 ins_encode %{
9440 Label* L = $labl$$label;
9441 assert(__ use_cbcond(*L), "back to back cbcond");
9442 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
9443 %}
9444 ins_short_branch(1);
9445 ins_avoid_back_to_back(1);
9446 ins_pipe(cbcond_reg_imm);
9447 %}
9449 instruct cmpL_reg_branch_short(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{
9450 match(If cmp (CmpL op1 op2));
9451 predicate(UseCBCond);
9452 effect(USE labl, KILL xcc);
9454 size(4);
9455 ins_cost(BRANCH_COST);
9456 format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
9457 ins_encode %{
9458 Label* L = $labl$$label;
9459 assert(__ use_cbcond(*L), "back to back cbcond");
9460 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$Register, *L);
9461 %}
9462 ins_short_branch(1);
9463 ins_avoid_back_to_back(1);
9464 ins_pipe(cbcond_reg_reg);
9465 %}
9467 instruct cmpL_imm_branch_short(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{
9468 match(If cmp (CmpL op1 op2));
9469 predicate(UseCBCond);
9470 effect(USE labl, KILL xcc);
9472 size(4);
9473 ins_cost(BRANCH_COST);
9474 format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
9475 ins_encode %{
9476 Label* L = $labl$$label;
9477 assert(__ use_cbcond(*L), "back to back cbcond");
9478 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L);
9479 %}
9480 ins_short_branch(1);
9481 ins_avoid_back_to_back(1);
9482 ins_pipe(cbcond_reg_imm);
9483 %}
9485 // Compare Pointers and branch
9486 instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{
9487 match(If cmp (CmpP op1 op2));
9488 predicate(UseCBCond);
9489 effect(USE labl, KILL pcc);
9491 size(4);
9492 ins_cost(BRANCH_COST);
9493 #ifdef _LP64
9494 format %{ "CXB$cmp $op1,$op2,$labl\t! ptr" %}
9495 #else
9496 format %{ "CWB$cmp $op1,$op2,$labl\t! ptr" %}
9497 #endif
9498 ins_encode %{
9499 Label* L = $labl$$label;
9500 assert(__ use_cbcond(*L), "back to back cbcond");
9501 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, $op2$$Register, *L);
9502 %}
9503 ins_short_branch(1);
9504 ins_avoid_back_to_back(1);
9505 ins_pipe(cbcond_reg_reg);
9506 %}
9508 instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{
9509 match(If cmp (CmpP op1 null));
9510 predicate(UseCBCond);
9511 effect(USE labl, KILL pcc);
9513 size(4);
9514 ins_cost(BRANCH_COST);
9515 #ifdef _LP64
9516 format %{ "CXB$cmp $op1,0,$labl\t! ptr" %}
9517 #else
9518 format %{ "CWB$cmp $op1,0,$labl\t! ptr" %}
9519 #endif
9520 ins_encode %{
9521 Label* L = $labl$$label;
9522 assert(__ use_cbcond(*L), "back to back cbcond");
9523 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, G0, *L);
9524 %}
9525 ins_short_branch(1);
9526 ins_avoid_back_to_back(1);
9527 ins_pipe(cbcond_reg_reg);
9528 %}
9530 instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{
9531 match(If cmp (CmpN op1 op2));
9532 predicate(UseCBCond);
9533 effect(USE labl, KILL icc);
9535 size(4);
9536 ins_cost(BRANCH_COST);
9537 format %{ "CWB$cmp $op1,op2,$labl\t! compressed ptr" %}
9538 ins_encode %{
9539 Label* L = $labl$$label;
9540 assert(__ use_cbcond(*L), "back to back cbcond");
9541 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9542 %}
9543 ins_short_branch(1);
9544 ins_avoid_back_to_back(1);
9545 ins_pipe(cbcond_reg_reg);
9546 %}
9548 instruct cmpN_null_branch_short(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{
9549 match(If cmp (CmpN op1 null));
9550 predicate(UseCBCond);
9551 effect(USE labl, KILL icc);
9553 size(4);
9554 ins_cost(BRANCH_COST);
9555 format %{ "CWB$cmp $op1,0,$labl\t! compressed ptr" %}
9556 ins_encode %{
9557 Label* L = $labl$$label;
9558 assert(__ use_cbcond(*L), "back to back cbcond");
9559 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, G0, *L);
9560 %}
9561 ins_short_branch(1);
9562 ins_avoid_back_to_back(1);
9563 ins_pipe(cbcond_reg_reg);
9564 %}
9566 // Loop back branch
// Counted-loop back edges emitted as single-instruction cbcond short
// branches; reg/reg and reg/imm5 (cbcond's 5-bit signed immediate) forms.
9567 instruct cmpI_reg_branchLoopEnd_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
9568 match(CountedLoopEnd cmp (CmpI op1 op2));
9569 predicate(UseCBCond);
9570 effect(USE labl, KILL icc);
9572 size(4);
9573 ins_cost(BRANCH_COST);
9574 format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %}
9575 ins_encode %{
9576 Label* L = $labl$$label;
9577 assert(__ use_cbcond(*L), "back to back cbcond");
9578 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9579 %}
9580 ins_short_branch(1);
9581 ins_avoid_back_to_back(1);
9582 ins_pipe(cbcond_reg_reg);
9583 %}
// Same as above but the loop-limit side is a small immediate (immI5).
9585 instruct cmpI_imm_branchLoopEnd_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
9586 match(CountedLoopEnd cmp (CmpI op1 op2));
9587 predicate(UseCBCond);
9588 effect(USE labl, KILL icc);
9590 size(4);
9591 ins_cost(BRANCH_COST);
9592 format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %}
9593 ins_encode %{
9594 Label* L = $labl$$label;
9595 assert(__ use_cbcond(*L), "back to back cbcond");
9596 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
9597 %}
9598 ins_short_branch(1);
9599 ins_avoid_back_to_back(1);
9600 ins_pipe(cbcond_reg_imm);
9601 %}
9603 // Branch-on-register tests all 64 bits. We assume that values
9604 // in 64-bit registers always remains zero or sign extended
9605 // unless our code munges the high bits. Interrupts can chop
9606 // the high order bits to zero or sign at any time.
// BPr-style branches: compare a register directly against zero without
// setting condition codes (hence no KILL of a flags register).
// Each is 8 bytes: the branch plus its delay slot.
9607 instruct branchCon_regI(cmpOp_reg cmp, iRegI op1, immI0 zero, label labl) %{
9608 match(If cmp (CmpI op1 zero));
9609 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
9610 effect(USE labl);
9612 size(8);
9613 ins_cost(BRANCH_COST);
9614 format %{ "BR$cmp $op1,$labl" %}
9615 ins_encode( enc_bpr( labl, cmp, op1 ) );
9616 ins_pipe(br_reg);
9617 %}
// Pointer-vs-null variant of the register branch.
9619 instruct branchCon_regP(cmpOp_reg cmp, iRegP op1, immP0 null, label labl) %{
9620 match(If cmp (CmpP op1 null));
9621 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
9622 effect(USE labl);
9624 size(8);
9625 ins_cost(BRANCH_COST);
9626 format %{ "BR$cmp $op1,$labl" %}
9627 ins_encode( enc_bpr( labl, cmp, op1 ) );
9628 ins_pipe(br_reg);
9629 %}
// Long-vs-zero variant of the register branch.
9631 instruct branchCon_regL(cmpOp_reg cmp, iRegL op1, immL0 zero, label labl) %{
9632 match(If cmp (CmpL op1 zero));
9633 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
9634 effect(USE labl);
9636 size(8);
9637 ins_cost(BRANCH_COST);
9638 format %{ "BR$cmp $op1,$labl" %}
9639 ins_encode( enc_bpr( labl, cmp, op1 ) );
9640 ins_pipe(br_reg);
9641 %}
9644 // ============================================================================
9645 // Long Compare
9646 //
9647 // Currently we hold longs in 2 registers. Comparing such values efficiently
9648 // is tricky. The flavor of compare used depends on whether we are testing
9649 // for LT, LE, or EQ. For a simple LT test we can check just the sign bit.
9650 // The GE test is the negated LT test. The LE test can be had by commuting
9651 // the operands (yielding a GE test) and then negating; negate again for the
9652 // GT test. The EQ test is done by ORcc'ing the high and low halves, and the
9653 // NE test is negated from that.
9655 // Due to a shortcoming in the ADLC, it mixes up expressions like:
9656 // (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)). Note the
9657 // difference between 'Y' and '0L'. The tree-matches for the CmpI sections
9658 // are collapsed internally in the ADLC's dfa-gen code. The match for
9659 // (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the
9660 // foo match ends up with the wrong leaf. One fix is to not match both
9661 // reg-reg and reg-zero forms of long-compare. This is unfortunate because
9662 // both forms beat the trinary form of long-compare and both are very useful
9663 // on Intel which has so few registers.
// Branch on the xcc (64-bit) condition codes produced by a long compare.
// Uses BPcc with static branch prediction: predict taken for backward
// branches (loops), not-taken for forward branches. Delay slot is a NOP.
9665 instruct branchCon_long(cmpOp cmp, flagsRegL xcc, label labl) %{
9666 match(If cmp xcc);
9667 effect(USE labl);
9669 size(8);
9670 ins_cost(BRANCH_COST);
9671 format %{ "BP$cmp $xcc,$labl" %}
9672 ins_encode %{
9673 Label* L = $labl$$label;
9674 Assembler::Predict predict_taken =
9675 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9677 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
9678 __ delayed()->nop();
9679 %}
9680 ins_pipe(br_cc);
9681 %}
9683 // Manifest a CmpL3 result in an integer register. Very painful.
9684 // This is the test to avoid.
// Produces -1/0/+1 in $dst from a three-way long compare; 6 instructions
// (24 bytes) using annulled branches with the result MOVs in delay slots.
9685 instruct cmpL3_reg_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg ccr ) %{
9686 match(Set dst (CmpL3 src1 src2) );
9687 effect( KILL ccr );
9688 ins_cost(6*DEFAULT_COST);
9689 size(24);
9690 format %{ "CMP $src1,$src2\t\t! long\n"
9691 "\tBLT,a,pn done\n"
9692 "\tMOV -1,$dst\t! delay slot\n"
9693 "\tBGT,a,pn done\n"
9694 "\tMOV 1,$dst\t! delay slot\n"
9695 "\tCLR $dst\n"
9696 "done:" %}
9697 ins_encode( cmpl_flag(src1,src2,dst) );
9698 ins_pipe(cmpL_reg);
9699 %}
9701 // Conditional move
// Conditional moves predicated on the xcc (64-bit) condition codes.
// Register forms cost 150, immediate forms 140 so the allocator slightly
// prefers the immediate encoding when a suitable constant is available.
9702 instruct cmovLL_reg(cmpOp cmp, flagsRegL xcc, iRegL dst, iRegL src) %{
9703 match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
9704 ins_cost(150);
9705 format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
9706 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9707 ins_pipe(ialu_reg);
9708 %}
// Long conditional move of the zero immediate.
9710 instruct cmovLL_imm(cmpOp cmp, flagsRegL xcc, iRegL dst, immL0 src) %{
9711 match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
9712 ins_cost(140);
9713 format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
9714 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
9715 ins_pipe(ialu_imm);
9716 %}
// Int conditional move selected by a long compare's flags.
9718 instruct cmovIL_reg(cmpOp cmp, flagsRegL xcc, iRegI dst, iRegI src) %{
9719 match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
9720 ins_cost(150);
9721 format %{ "MOV$cmp $xcc,$src,$dst" %}
9722 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9723 ins_pipe(ialu_reg);
9724 %}
// Int conditional move of an 11-bit immediate (MOVcc simm11 field).
9726 instruct cmovIL_imm(cmpOp cmp, flagsRegL xcc, iRegI dst, immI11 src) %{
9727 match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
9728 ins_cost(140);
9729 format %{ "MOV$cmp $xcc,$src,$dst" %}
9730 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
9731 ins_pipe(ialu_imm);
9732 %}
// Compressed-oop conditional move.
9734 instruct cmovNL_reg(cmpOp cmp, flagsRegL xcc, iRegN dst, iRegN src) %{
9735 match(Set dst (CMoveN (Binary cmp xcc) (Binary dst src)));
9736 ins_cost(150);
9737 format %{ "MOV$cmp $xcc,$src,$dst" %}
9738 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9739 ins_pipe(ialu_reg);
9740 %}
// Pointer conditional move.
9742 instruct cmovPL_reg(cmpOp cmp, flagsRegL xcc, iRegP dst, iRegP src) %{
9743 match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
9744 ins_cost(150);
9745 format %{ "MOV$cmp $xcc,$src,$dst" %}
9746 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9747 ins_pipe(ialu_reg);
9748 %}
// Pointer conditional move of null.
9750 instruct cmovPL_imm(cmpOp cmp, flagsRegL xcc, iRegP dst, immP0 src) %{
9751 match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
9752 ins_cost(140);
9753 format %{ "MOV$cmp $xcc,$src,$dst" %}
9754 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
9755 ins_pipe(ialu_imm);
9756 %}
// Float conditional move (FMOVScc) on xcc.
9758 instruct cmovFL_reg(cmpOp cmp, flagsRegL xcc, regF dst, regF src) %{
9759 match(Set dst (CMoveF (Binary cmp xcc) (Binary dst src)));
9760 ins_cost(150);
9761 opcode(0x101);
9762 format %{ "FMOVS$cmp $xcc,$src,$dst" %}
9763 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
9764 ins_pipe(int_conditional_float_move);
9765 %}
// Double conditional move (FMOVDcc) on xcc.
9767 instruct cmovDL_reg(cmpOp cmp, flagsRegL xcc, regD dst, regD src) %{
9768 match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src)));
9769 ins_cost(150);
9770 opcode(0x102);
9771 format %{ "FMOVD$cmp $xcc,$src,$dst" %}
9772 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
9773 ins_pipe(int_conditional_float_move);
9774 %}
9776 // ============================================================================
9777 // Safepoint Instruction
// GC safepoint poll: a load from the polling page into G0 (discarded).
// When the VM arms a safepoint it protects the page, so the load traps
// and the signal handler takes over. The poll_type relocation lets the
// VM find these poll sites in generated code.
9778 instruct safePoint_poll(iRegP poll) %{
9779 match(SafePoint poll);
9780 effect(USE poll);
9782 size(4);
9783 #ifdef _LP64
9784 format %{ "LDX [$poll],R_G0\t! Safepoint: poll for GC" %}
9785 #else
9786 format %{ "LDUW [$poll],R_G0\t! Safepoint: poll for GC" %}
9787 #endif
9788 ins_encode %{
9789 __ relocate(relocInfo::poll_type);
9790 __ ld_ptr($poll$$Register, 0, G0);
9791 %}
9792 ins_pipe(loadPollP);
9793 %}
9795 // ============================================================================
9796 // Call Instructions
9797 // Call Java Static Instruction
// Direct static Java call; the method-handle case is excluded here and
// matched by CallStaticJavaHandle below (the two predicates partition
// CallStaticJava between them).
9798 instruct CallStaticJavaDirect( method meth ) %{
9799 match(CallStaticJava);
9800 predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
9801 effect(USE meth);
9803 size(8);
9804 ins_cost(CALL_COST);
9805 format %{ "CALL,static ; NOP ==> " %}
9806 ins_encode( Java_Static_Call( meth ), call_epilog );
9807 ins_pipe(simple_call);
9808 %}
9810 // Call Java Static Instruction (method handle version)
// Saves/restores SP around the call via L7 (l7_mh_SP_save is killed),
// since a method-handle invoke may run with an adjusted stack pointer.
9811 instruct CallStaticJavaHandle(method meth, l7RegP l7_mh_SP_save) %{
9812 match(CallStaticJava);
9813 predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
9814 effect(USE meth, KILL l7_mh_SP_save);
9816 size(16);
9817 ins_cost(CALL_COST);
9818 format %{ "CALL,static/MethodHandle" %}
9819 ins_encode(preserve_SP, Java_Static_Call(meth), restore_SP, call_epilog);
9820 ins_pipe(simple_call);
9821 %}
9823 // Call Java Dynamic Instruction
// Dynamic (inline-cache) call; G5 is loaded with the IC data per the
// format comment. No fixed size() since the encoding length can vary.
9824 instruct CallDynamicJavaDirect( method meth ) %{
9825 match(CallDynamicJava);
9826 effect(USE meth);
9828 ins_cost(CALL_COST);
9829 format %{ "SET (empty),R_G5\n\t"
9830 "CALL,dynamic ; NOP ==> " %}
9831 ins_encode( Java_Dynamic_Call( meth ), call_epilog );
9832 ins_pipe(call);
9833 %}
9835 // Call Runtime Instruction
// Calls into the VM runtime; L7 is clobbered by the Java_To_Runtime
// encoding. adjust_long_from_native_call fixes up long return values
// coming back from native calling conventions.
9836 instruct CallRuntimeDirect(method meth, l7RegP l7) %{
9837 match(CallRuntime);
9838 effect(USE meth, KILL l7);
9839 ins_cost(CALL_COST);
9840 format %{ "CALL,runtime" %}
9841 ins_encode( Java_To_Runtime( meth ),
9842 call_epilog, adjust_long_from_native_call );
9843 ins_pipe(simple_call);
9844 %}
9846 // Call runtime without safepoint - same as CallRuntime
9847 instruct CallLeafDirect(method meth, l7RegP l7) %{
9848 match(CallLeaf);
9849 effect(USE meth, KILL l7);
9850 ins_cost(CALL_COST);
9851 format %{ "CALL,runtime leaf" %}
9852 ins_encode( Java_To_Runtime( meth ),
9853 call_epilog,
9854 adjust_long_from_native_call );
9855 ins_pipe(simple_call);
9856 %}
9858 // Call runtime without safepoint - same as CallLeaf
9859 instruct CallLeafNoFPDirect(method meth, l7RegP l7) %{
9860 match(CallLeafNoFP);
9861 effect(USE meth, KILL l7);
9862 ins_cost(CALL_COST);
9863 format %{ "CALL,runtime leaf nofp" %}
9864 ins_encode( Java_To_Runtime( meth ),
9865 call_epilog,
9866 adjust_long_from_native_call );
9867 ins_pipe(simple_call);
9868 %}
9870 // Tail Call; Jump from runtime stub to Java code.
9871 // Also known as an 'interprocedural jump'.
9872 // Target of jump will eventually return to caller.
9873 // TailJump below removes the return address.
// Indirect jump through G3; the inline-cache register carries the method.
9874 instruct TailCalljmpInd(g3RegP jump_target, inline_cache_regP method_oop) %{
9875 match(TailCall jump_target method_oop );
9877 ins_cost(CALL_COST);
9878 format %{ "Jmp $jump_target ; NOP \t! $method_oop holds method oop" %}
9879 ins_encode(form_jmpl(jump_target));
9880 ins_pipe(tail_call);
9881 %}
9884 // Return Instruction
// Zero-size: the actual ret/restore is emitted by the epilogue node.
9885 instruct Ret() %{
9886 match(Return);
9888 // The epilogue node did the ret already.
9889 size(0);
9890 format %{ "! return" %}
9891 ins_encode();
9892 ins_pipe(empty);
9893 %}
9896 // Tail Jump; remove the return address; jump to target.
9897 // TailCall above leaves the return address around.
9898 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
9899 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
9900 // "restore" before this instruction (in Epilogue), we need to materialize it
9901 // in %i0.
9902 instruct tailjmpInd(g1RegP jump_target, i0RegP ex_oop) %{
9903 match( TailJump jump_target ex_oop );
9904 ins_cost(CALL_COST);
9905 format %{ "! discard R_O7\n\t"
9906 "Jmp $jump_target ; ADD O7,8,O1 \t! $ex_oop holds exc. oop" %}
9907 ins_encode(form_jmpl_set_exception_pc(jump_target));
9908 // opcode(Assembler::jmpl_op3, Assembler::arith_op);
9909 // The hack duplicates the exception oop into G3, so that CreateEx can use it there.
9910 // ins_encode( form3_rs1_simm13_rd( jump_target, 0x00, R_G0 ), move_return_pc_to_o1() );
9911 ins_pipe(tail_call);
9912 %}
9914 // Create exception oop: created by stack-crawling runtime code.
9915 // Created exception is now available to this handler, and is setup
9916 // just prior to jumping to this handler. No code emitted.
// Purely a register-allocation artifact: binds the incoming exception
// oop to O0 without emitting any instructions.
9917 instruct CreateException( o0RegP ex_oop )
9918 %{
9919 match(Set ex_oop (CreateEx));
9920 ins_cost(0);
9922 size(0);
9923 // use the following format syntax
9924 format %{ "! exception oop is in R_O0; no code emitted" %}
9925 ins_encode();
9926 ins_pipe(empty);
9927 %}
9930 // Rethrow exception:
9931 // The exception oop will come in the first argument position.
9932 // Then JUMP (not call) to the rethrow stub code.
9933 instruct RethrowException()
9934 %{
9935 match(Rethrow);
9936 ins_cost(CALL_COST);
9938 // use the following format syntax
9939 format %{ "Jmp rethrow_stub" %}
9940 ins_encode(enc_rethrow);
9941 ins_pipe(tail_call);
9942 %}
9945 // Die now
// Halt node: emits an ILLTRAP so reaching this point traps immediately
// (used for paths the compiler has proven unreachable).
9946 instruct ShouldNotReachHere( )
9947 %{
9948 match(Halt);
9949 ins_cost(CALL_COST);
9951 size(4);
9952 // Use the following format syntax
9953 format %{ "ILLTRAP ; ShouldNotReachHere" %}
9954 ins_encode( form2_illtrap() );
9955 ins_pipe(tail_call);
9956 %}
9958 // ============================================================================
9959 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
9960 // array for an instance of the superklass. Set a hidden internal cache on a
9961 // hit (cache is checked with exposed code in gen_subtype_check()). Return
9962 // not zero for a miss or zero for a hit. The encoding ALSO sets flags.
// Out-of-line helper call; operands are pinned to the O-registers the
// stub expects (sub in O1, super in O2, result index in O0).
9963 instruct partialSubtypeCheck( o0RegP index, o1RegP sub, o2RegP super, flagsRegP pcc, o7RegP o7 ) %{
9964 match(Set index (PartialSubtypeCheck sub super));
9965 effect( KILL pcc, KILL o7 );
9966 ins_cost(DEFAULT_COST*10);
9967 format %{ "CALL PartialSubtypeCheck\n\tNOP" %}
9968 ins_encode( enc_PartialSubtypeCheck() );
9969 ins_pipe(partial_subtype_check_pipe);
9970 %}
// Variant matched when only the condition codes of the check are used;
// the integer result register (idx) is killed rather than defined.
9972 instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, immP0 zero, o0RegP idx, o7RegP o7 ) %{
9973 match(Set pcc (CmpP (PartialSubtypeCheck sub super) zero));
9974 effect( KILL idx, KILL o7 );
9975 ins_cost(DEFAULT_COST*10);
9976 format %{ "CALL PartialSubtypeCheck\n\tNOP\t# (sets condition codes)" %}
9977 ins_encode( enc_PartialSubtypeCheck() );
9978 ins_pipe(partial_subtype_check_pipe);
9979 %}
9982 // ============================================================================
9983 // inlined locking and unlocking
// Inlined monitor enter: sets pcc for the caller to branch on the
// slow-path/fast-path outcome. box (O1) and scratch (O7) are destroyed;
// scratch2 is a temp chosen by the allocator.
9985 instruct cmpFastLock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
9986 match(Set pcc (FastLock object box));
9988 effect(TEMP scratch2, USE_KILL box, KILL scratch);
9989 ins_cost(100);
9991 format %{ "FASTLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
9992 ins_encode( Fast_Lock(object, box, scratch, scratch2) );
9993 ins_pipe(long_memory_op);
9994 %}
// Inlined monitor exit, mirror of cmpFastLock above.
9997 instruct cmpFastUnlock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
9998 match(Set pcc (FastUnlock object box));
9999 effect(TEMP scratch2, USE_KILL box, KILL scratch);
10000 ins_cost(100);
10002 format %{ "FASTUNLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
10003 ins_encode( Fast_Unlock(object, box, scratch, scratch2) );
10004 ins_pipe(long_memory_op);
10005 %}
10007 // The encodings are generic.
// Generic ClearArray: backward doubleword-store loop. Selected when
// block-zeroing (BIS) is not profitable for this size (see predicate).
10008 instruct clear_array(iRegX cnt, iRegP base, iRegX temp, Universe dummy, flagsReg ccr) %{
10009 predicate(!use_block_zeroing(n->in(2)) );
10010 match(Set dummy (ClearArray cnt base));
10011 effect(TEMP temp, KILL ccr);
10012 ins_cost(300);
10013 format %{ "MOV $cnt,$temp\n"
10014 "loop: SUBcc $temp,8,$temp\t! Count down a dword of bytes\n"
10015 " BRge loop\t\t! Clearing loop\n"
10016 " STX G0,[$base+$temp]\t! delay slot" %}
10018 ins_encode %{
10019 // Compiler ensures base is doubleword aligned and cnt is count of doublewords
10020 Register nof_bytes_arg = $cnt$$Register;
10021 Register nof_bytes_tmp = $temp$$Register;
10022 Register base_pointer_arg = $base$$Register;
10024 Label loop;
10025 __ mov(nof_bytes_arg, nof_bytes_tmp);
10027 // Loop and clear, walking backwards through the array.
10028 // nof_bytes_tmp (if >0) is always the number of bytes to zero
10029 __ bind(loop);
10030 __ deccc(nof_bytes_tmp, 8);
10031 __ br(Assembler::greaterEqual, true, Assembler::pt, loop);
10032 __ delayed()-> stx(G0, base_pointer_arg, nof_bytes_tmp);
10033 // %%%% this mini-loop must not cross a cache boundary!
10034 %}
10035 ins_pipe(long_memory_op);
10036 %}
// ClearArray via BIS (Block Init Store) zeroing; cnt and base are pinned
// to G1/O0 and consumed by the bis_zeroing helper (temp unused -> G0).
10038 instruct clear_array_bis(g1RegX cnt, o0RegP base, Universe dummy, flagsReg ccr) %{
10039 predicate(use_block_zeroing(n->in(2)));
10040 match(Set dummy (ClearArray cnt base));
10041 effect(USE_KILL cnt, USE_KILL base, KILL ccr);
10042 ins_cost(300);
10043 format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}
10045 ins_encode %{
10047 assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
10048 Register to = $base$$Register;
10049 Register count = $cnt$$Register;
10051 Label Ldone;
10052 __ nop(); // Separate short branches
10053 // Use BIS for zeroing (temp is not used).
10054 __ bis_zeroing(to, count, G0, Ldone);
10055 __ bind(Ldone);
10057 %}
10058 ins_pipe(long_memory_op);
10059 %}
// BIS variant that needs an extra temp register when BlockZeroingLowLimit
// does not fit in a simm13 (see predicate).
10061 instruct clear_array_bis_2(g1RegX cnt, o0RegP base, iRegX tmp, Universe dummy, flagsReg ccr) %{
10062 predicate(use_block_zeroing(n->in(2)) && !Assembler::is_simm13((int)BlockZeroingLowLimit));
10063 match(Set dummy (ClearArray cnt base));
10064 effect(TEMP tmp, USE_KILL cnt, USE_KILL base, KILL ccr);
10065 ins_cost(300);
10066 format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}
10068 ins_encode %{
10070 assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
10071 Register to = $base$$Register;
10072 Register count = $cnt$$Register;
10073 Register temp = $tmp$$Register;
10075 Label Ldone;
10076 __ nop(); // Separate short branches
10077 // Use BIS for zeroing
10078 __ bis_zeroing(to, count, temp, Ldone);
10079 __ bind(Ldone);
10081 %}
10082 ins_pipe(long_memory_op);
10083 %}
// String.compareTo intrinsic; all inputs are pinned to fixed registers
// (O0/O1 pointers, G3/G4 counts) and consumed, O7 and ccr are clobbered.
10085 instruct string_compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
10086 o7RegI tmp, flagsReg ccr) %{
10087 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
10088 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp);
10089 ins_cost(300);
10090 format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp" %}
10091 ins_encode( enc_String_Compare(str1, str2, cnt1, cnt2, result) );
10092 ins_pipe(long_memory_op);
10093 %}
// String.equals intrinsic (single shared count, equal lengths).
10095 instruct string_equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result,
10096 o7RegI tmp, flagsReg ccr) %{
10097 match(Set result (StrEquals (Binary str1 str2) cnt));
10098 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp, KILL ccr);
10099 ins_cost(300);
10100 format %{ "String Equals $str1,$str2,$cnt -> $result // KILL $tmp" %}
10101 ins_encode( enc_String_Equals(str1, str2, cnt, result) );
10102 ins_pipe(long_memory_op);
10103 %}
// Arrays.equals intrinsic for char arrays.
10105 instruct array_equals(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI result,
10106 o7RegI tmp2, flagsReg ccr) %{
10107 match(Set result (AryEq ary1 ary2));
10108 effect(USE_KILL ary1, USE_KILL ary2, KILL tmp1, KILL tmp2, KILL ccr);
10109 ins_cost(300);
10110 format %{ "Array Equals $ary1,$ary2 -> $result // KILL $tmp1,$tmp2" %}
10111 ins_encode( enc_Array_Equals(ary1, ary2, tmp1, result));
10112 ins_pipe(long_memory_op);
10113 %}
10116 //---------- Zeros Count Instructions ------------------------------------------
// nlz via the classic smear-right-then-popcount trick: OR in all shifted
// copies so every bit below the highest set bit becomes 1, POPC the
// result, and subtract from the word width. Needs POPC (predicate).
10118 instruct countLeadingZerosI(iRegI dst, iRegI src, iRegI tmp, flagsReg cr) %{
10119 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
10120 match(Set dst (CountLeadingZerosI src));
10121 effect(TEMP dst, TEMP tmp, KILL cr);
10123 // x |= (x >> 1);
10124 // x |= (x >> 2);
10125 // x |= (x >> 4);
10126 // x |= (x >> 8);
10127 // x |= (x >> 16);
10128 // return (WORDBITS - popc(x));
10129 format %{ "SRL $src,1,$tmp\t! count leading zeros (int)\n\t"
10130 "SRL $src,0,$dst\t! 32-bit zero extend\n\t"
10131 "OR $dst,$tmp,$dst\n\t"
10132 "SRL $dst,2,$tmp\n\t"
10133 "OR $dst,$tmp,$dst\n\t"
10134 "SRL $dst,4,$tmp\n\t"
10135 "OR $dst,$tmp,$dst\n\t"
10136 "SRL $dst,8,$tmp\n\t"
10137 "OR $dst,$tmp,$dst\n\t"
10138 "SRL $dst,16,$tmp\n\t"
10139 "OR $dst,$tmp,$dst\n\t"
10140 "POPC $dst,$dst\n\t"
10141 "MOV 32,$tmp\n\t"
10142 "SUB $tmp,$dst,$dst" %}
10143 ins_encode %{
10144 Register Rdst = $dst$$Register;
10145 Register Rsrc = $src$$Register;
10146 Register Rtmp = $tmp$$Register;
10147 __ srl(Rsrc, 1, Rtmp);
// srl by 0 zero-extends src to 32 bits so the 64-bit POPC below only
// counts bits of the int value.
10148 __ srl(Rsrc, 0, Rdst);
10149 __ or3(Rdst, Rtmp, Rdst);
10150 __ srl(Rdst, 2, Rtmp);
10151 __ or3(Rdst, Rtmp, Rdst);
10152 __ srl(Rdst, 4, Rtmp);
10153 __ or3(Rdst, Rtmp, Rdst);
10154 __ srl(Rdst, 8, Rtmp);
10155 __ or3(Rdst, Rtmp, Rdst);
10156 __ srl(Rdst, 16, Rtmp);
10157 __ or3(Rdst, Rtmp, Rdst);
10158 __ popc(Rdst, Rdst);
10159 __ mov(BitsPerInt, Rtmp);
10160 __ sub(Rtmp, Rdst, Rdst);
10161 %}
10162 ins_pipe(ialu_reg);
10163 %}
// 64-bit leading-zero count: same smear-and-popcount scheme as the int
// form, with an extra >>32 smear step and SRLX shifts throughout.
10165 instruct countLeadingZerosL(iRegIsafe dst, iRegL src, iRegL tmp, flagsReg cr) %{
10166 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
10167 match(Set dst (CountLeadingZerosL src));
10168 effect(TEMP dst, TEMP tmp, KILL cr);
10170 // x |= (x >> 1);
10171 // x |= (x >> 2);
10172 // x |= (x >> 4);
10173 // x |= (x >> 8);
10174 // x |= (x >> 16);
10175 // x |= (x >> 32);
10176 // return (WORDBITS - popc(x));
10177 format %{ "SRLX $src,1,$tmp\t! count leading zeros (long)\n\t"
10178 "OR $src,$tmp,$dst\n\t"
10179 "SRLX $dst,2,$tmp\n\t"
10180 "OR $dst,$tmp,$dst\n\t"
10181 "SRLX $dst,4,$tmp\n\t"
10182 "OR $dst,$tmp,$dst\n\t"
10183 "SRLX $dst,8,$tmp\n\t"
10184 "OR $dst,$tmp,$dst\n\t"
10185 "SRLX $dst,16,$tmp\n\t"
10186 "OR $dst,$tmp,$dst\n\t"
10187 "SRLX $dst,32,$tmp\n\t"
10188 "OR $dst,$tmp,$dst\n\t"
10189 "POPC $dst,$dst\n\t"
10190 "MOV 64,$tmp\n\t"
10191 "SUB $tmp,$dst,$dst" %}
10192 ins_encode %{
10193 Register Rdst = $dst$$Register;
10194 Register Rsrc = $src$$Register;
10195 Register Rtmp = $tmp$$Register;
10196 __ srlx(Rsrc, 1, Rtmp);
10197 __ or3( Rsrc, Rtmp, Rdst);
10198 __ srlx(Rdst, 2, Rtmp);
10199 __ or3( Rdst, Rtmp, Rdst);
10200 __ srlx(Rdst, 4, Rtmp);
10201 __ or3( Rdst, Rtmp, Rdst);
10202 __ srlx(Rdst, 8, Rtmp);
10203 __ or3( Rdst, Rtmp, Rdst);
10204 __ srlx(Rdst, 16, Rtmp);
10205 __ or3( Rdst, Rtmp, Rdst);
10206 __ srlx(Rdst, 32, Rtmp);
10207 __ or3( Rdst, Rtmp, Rdst);
10208 __ popc(Rdst, Rdst);
10209 __ mov(BitsPerLong, Rtmp);
10210 __ sub(Rtmp, Rdst, Rdst);
10211 %}
10212 ins_pipe(ialu_reg);
10213 %}
// ntz(x) = popc(~x & (x - 1)): the AND-NOT isolates the trailing zeros
// as a run of ones, which POPC then counts.
10215 instruct countTrailingZerosI(iRegI dst, iRegI src, flagsReg cr) %{
10216 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
10217 match(Set dst (CountTrailingZerosI src));
10218 effect(TEMP dst, KILL cr);
10220 // return popc(~x & (x - 1));
10221 format %{ "SUB $src,1,$dst\t! count trailing zeros (int)\n\t"
10222 "ANDN $dst,$src,$dst\n\t"
10223 "SRL $dst,R_G0,$dst\n\t"
10224 "POPC $dst,$dst" %}
10225 ins_encode %{
10226 Register Rdst = $dst$$Register;
10227 Register Rsrc = $src$$Register;
10228 __ sub(Rsrc, 1, Rdst);
10229 __ andn(Rdst, Rsrc, Rdst);
// srl by G0 (0) zero-extends to 32 bits before the 64-bit POPC.
10230 __ srl(Rdst, G0, Rdst);
10231 __ popc(Rdst, Rdst);
10232 %}
10233 ins_pipe(ialu_reg);
10234 %}
// 64-bit trailing-zero count; no zero-extension step needed.
10236 instruct countTrailingZerosL(iRegIsafe dst, iRegL src, flagsReg cr) %{
10237 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
10238 match(Set dst (CountTrailingZerosL src));
10239 effect(TEMP dst, KILL cr);
10241 // return popc(~x & (x - 1));
10242 format %{ "SUB $src,1,$dst\t! count trailing zeros (long)\n\t"
10243 "ANDN $dst,$src,$dst\n\t"
10244 "POPC $dst,$dst" %}
10245 ins_encode %{
10246 Register Rdst = $dst$$Register;
10247 Register Rsrc = $src$$Register;
10248 __ sub(Rsrc, 1, Rdst);
10249 __ andn(Rdst, Rsrc, Rdst);
10250 __ popc(Rdst, Rdst);
10251 %}
10252 ins_pipe(ialu_reg);
10253 %}
10256 //---------- Population Count Instructions -------------------------------------
// Integer.bitCount: single POPC instruction when available.
10258 instruct popCountI(iRegI dst, iRegI src) %{
10259 predicate(UsePopCountInstruction);
10260 match(Set dst (PopCountI src));
10262 format %{ "POPC $src, $dst" %}
10263 ins_encode %{
10264 __ popc($src$$Register, $dst$$Register);
10265 %}
10266 ins_pipe(ialu_reg);
10267 %}
10269 // Note: Long.bitCount(long) returns an int.
10270 instruct popCountL(iRegI dst, iRegL src) %{
10271 predicate(UsePopCountInstruction);
10272 match(Set dst (PopCountL src));
10274 format %{ "POPC $src, $dst" %}
10275 ins_encode %{
10276 __ popc($src$$Register, $dst$$Register);
10277 %}
10278 ins_pipe(ialu_reg);
10279 %}
10282 // ============================================================================
10283 //------------Bytes reverse--------------------------------------------------
// Byte-swap by round-tripping the value through a stack slot and
// re-loading it with a little-endian ASI — no in-register bswap on SPARC.
10285 instruct bytes_reverse_int(iRegI dst, stackSlotI src) %{
10286 match(Set dst (ReverseBytesI src));
10288 // Op cost is artificially doubled to make sure that load or store
10289 // instructions are preferred over this one which requires a spill
10290 // onto a stack slot.
10291 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
10292 format %{ "LDUWA $src, $dst\t!asi=primary_little" %}
10294 ins_encode %{
10295 __ set($src$$disp + STACK_BIAS, O7);
10296 __ lduwa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10297 %}
10298 ins_pipe( iload_mem );
10299 %}
// 64-bit byte reverse via little-endian LDXA from the spill slot.
10301 instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{
10302 match(Set dst (ReverseBytesL src));
10304 // Op cost is artificially doubled to make sure that load or store
10305 // instructions are preferred over this one which requires a spill
10306 // onto a stack slot.
10307 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
10308 format %{ "LDXA $src, $dst\t!asi=primary_little" %}
10310 ins_encode %{
10311 __ set($src$$disp + STACK_BIAS, O7);
10312 __ ldxa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10313 %}
10314 ins_pipe( iload_mem );
10315 %}
// Unsigned 16-bit byte reverse; +2 bias skips the upper half of the
// int-sized spill slot to reach the short.
10317 instruct bytes_reverse_unsigned_short(iRegI dst, stackSlotI src) %{
10318 match(Set dst (ReverseBytesUS src));
10320 // Op cost is artificially doubled to make sure that load or store
10321 // instructions are preferred over this one which requires a spill
10322 // onto a stack slot.
10323 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
10324 format %{ "LDUHA $src, $dst\t!asi=primary_little\n\t" %}
10326 ins_encode %{
10327 // the value was spilled as an int so bias the load
10328 __ set($src$$disp + STACK_BIAS + 2, O7);
10329 __ lduha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10330 %}
10331 ins_pipe( iload_mem );
10332 %}
// Signed 16-bit byte reverse (LDSHA sign-extends the reloaded value).
10334 instruct bytes_reverse_short(iRegI dst, stackSlotI src) %{
10335 match(Set dst (ReverseBytesS src));
10337 // Op cost is artificially doubled to make sure that load or store
10338 // instructions are preferred over this one which requires a spill
10339 // onto a stack slot.
10340 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
10341 format %{ "LDSHA $src, $dst\t!asi=primary_little\n\t" %}
10343 ins_encode %{
10344 // the value was spilled as an int so bias the load
10345 __ set($src$$disp + STACK_BIAS + 2, O7);
10346 __ ldsha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10347 %}
10348 ins_pipe( iload_mem );
10349 %}
10351 // Load Integer reversed byte order
// Fused load+byteswap patterns: when the reversed value comes straight
// from memory, a single alternate-ASI little-endian load does both.
10352 instruct loadI_reversed(iRegI dst, indIndexMemory src) %{
10353 match(Set dst (ReverseBytesI (LoadI src)));
10355 ins_cost(DEFAULT_COST + MEMORY_REF_COST);
10356 size(4);
10357 format %{ "LDUWA $src, $dst\t!asi=primary_little" %}
10359 ins_encode %{
10360 __ lduwa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10361 %}
10362 ins_pipe(iload_mem);
10363 %}
10365 // Load Long - aligned and reversed
10366 instruct loadL_reversed(iRegL dst, indIndexMemory src) %{
10367 match(Set dst (ReverseBytesL (LoadL src)));
10369 ins_cost(MEMORY_REF_COST);
10370 size(4);
10371 format %{ "LDXA $src, $dst\t!asi=primary_little" %}
10373 ins_encode %{
10374 __ ldxa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10375 %}
10376 ins_pipe(iload_mem);
10377 %}
10379 // Load unsigned short / char reversed byte order
10380 instruct loadUS_reversed(iRegI dst, indIndexMemory src) %{
10381 match(Set dst (ReverseBytesUS (LoadUS src)));
10383 ins_cost(MEMORY_REF_COST);
10384 size(4);
10385 format %{ "LDUHA $src, $dst\t!asi=primary_little" %}
10387 ins_encode %{
10388 __ lduha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10389 %}
10390 ins_pipe(iload_mem);
10391 %}
10393 // Load short reversed byte order
10394 instruct loadS_reversed(iRegI dst, indIndexMemory src) %{
10395 match(Set dst (ReverseBytesS (LoadS src)));
10397 ins_cost(MEMORY_REF_COST);
10398 size(4);
10399 format %{ "LDSHA $src, $dst\t!asi=primary_little" %}
10401 ins_encode %{
10402 __ ldsha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10403 %}
10404 ins_pipe(iload_mem);
10405 %}
10407 // Store Integer reversed byte order
// Fused byteswap+store patterns, mirror of the reversed loads above:
// the alternate-ASI little-endian store performs the swap on the way out.
10408 instruct storeI_reversed(indIndexMemory dst, iRegI src) %{
10409 match(Set dst (StoreI dst (ReverseBytesI src)));
10411 ins_cost(MEMORY_REF_COST);
10412 size(4);
10413 format %{ "STWA $src, $dst\t!asi=primary_little" %}
10415 ins_encode %{
10416 __ stwa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
10417 %}
10418 ins_pipe(istore_mem_reg);
10419 %}
10421 // Store Long reversed byte order
10422 instruct storeL_reversed(indIndexMemory dst, iRegL src) %{
10423 match(Set dst (StoreL dst (ReverseBytesL src)));
10425 ins_cost(MEMORY_REF_COST);
10426 size(4);
10427 format %{ "STXA $src, $dst\t!asi=primary_little" %}
10429 ins_encode %{
10430 __ stxa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
10431 %}
10432 ins_pipe(istore_mem_reg);
10433 %}
10435 // Store unsighed short/char reversed byte order
10436 instruct storeUS_reversed(indIndexMemory dst, iRegI src) %{
10437 match(Set dst (StoreC dst (ReverseBytesUS src)));
10439 ins_cost(MEMORY_REF_COST);
10440 size(4);
10441 format %{ "STHA $src, $dst\t!asi=primary_little" %}
10443 ins_encode %{
10444 __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
10445 %}
10446 ins_pipe(istore_mem_reg);
10447 %}
10449 // Store short reversed byte order
// Identical encoding to storeUS_reversed; signedness only matters on
// loads, so both short forms emit STHA.
10450 instruct storeS_reversed(indIndexMemory dst, iRegI src) %{
10451 match(Set dst (StoreC dst (ReverseBytesS src)));
10453 ins_cost(MEMORY_REF_COST);
10454 size(4);
10455 format %{ "STHA $src, $dst\t!asi=primary_little" %}
10457 ins_encode %{
10458 __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
10459 %}
10460 ins_pipe(istore_mem_reg);
10461 %}
10463 // ====================VECTOR INSTRUCTIONS=====================================
10465 // Load Aligned Packed values into a Double Register
10466 instruct loadV8(regD dst, memory mem) %{
10467 predicate(n->as_LoadVector()->memory_size() == 8);
10468 match(Set dst (LoadVector mem));
10469 ins_cost(MEMORY_REF_COST);
10470 size(4);
10471 format %{ "LDDF $mem,$dst\t! load vector (8 bytes)" %}
10472 ins_encode %{
10473 __ ldf(FloatRegisterImpl::D, $mem$$Address, as_DoubleFloatRegister($dst$$reg));
10474 %}
10475 ins_pipe(floadD_mem);
10476 %}
10478 // Store Vector in Double register to memory
// Store an 8-byte vector held in a double FP register to memory with a
// single STDF. Applies only to StoreVector nodes of exactly 8 bytes.
10479 instruct storeV8(memory mem, regD src) %{
10480 predicate(n->as_StoreVector()->memory_size() == 8);
10481 match(Set mem (StoreVector mem src));
10482 ins_cost(MEMORY_REF_COST);
10483 size(4); // one 4-byte SPARC instruction
10484 format %{ "STDF $src,$mem\t! store vector (8 bytes)" %}
10485 ins_encode %{
10486 __ stf(FloatRegisterImpl::D, as_DoubleFloatRegister($src$$reg), $mem$$Address);
10487 %}
10488 ins_pipe(fstoreD_mem_reg);
10489 %}
10491 // Store Zero into vector in memory
// Store an all-zero 8-byte vector (ReplicateB of the constant 0) with a
// single 64-bit STX of %g0 (hardwired zero) — no FP register needed.
10492 instruct storeV8B_zero(memory mem, immI0 zero) %{
10493 predicate(n->as_StoreVector()->memory_size() == 8);
10494 match(Set mem (StoreVector mem (ReplicateB zero)));
10495 ins_cost(MEMORY_REF_COST);
10496 size(4); // one 4-byte SPARC instruction
10497 format %{ "STX $zero,$mem\t! store zero vector (8 bytes)" %}
10498 ins_encode %{
10499 __ stx(G0, $mem$$Address);
10500 %}
10501 ins_pipe(fstoreD_mem_zero);
10502 %}
// Store an all-zero vector of 4 shorts (8 bytes total) with a single
// 64-bit STX of %g0.
10504 instruct storeV4S_zero(memory mem, immI0 zero) %{
10505 predicate(n->as_StoreVector()->memory_size() == 8);
10506 match(Set mem (StoreVector mem (ReplicateS zero)));
10507 ins_cost(MEMORY_REF_COST);
10508 size(4); // one 4-byte SPARC instruction
10509 format %{ "STX $zero,$mem\t! store zero vector (4 shorts)" %}
10510 ins_encode %{
10511 __ stx(G0, $mem$$Address);
10512 %}
10513 ins_pipe(fstoreD_mem_zero);
10514 %}
// Store an all-zero vector of 2 ints (8 bytes total) with a single
// 64-bit STX of %g0.
10516 instruct storeV2I_zero(memory mem, immI0 zero) %{
10517 predicate(n->as_StoreVector()->memory_size() == 8);
10518 match(Set mem (StoreVector mem (ReplicateI zero)));
10519 ins_cost(MEMORY_REF_COST);
10520 size(4); // one 4-byte SPARC instruction
10521 format %{ "STX $zero,$mem\t! store zero vector (2 ints)" %}
10522 ins_encode %{
10523 __ stx(G0, $mem$$Address);
10524 %}
10525 ins_pipe(fstoreD_mem_zero);
10526 %}
// Store an all-zero vector of 2 floats (8 bytes total) with a single
// 64-bit STX of %g0; +0.0f has an all-zero bit pattern, so an integer
// zero store is equivalent.
10528 instruct storeV2F_zero(memory mem, immF0 zero) %{
10529 predicate(n->as_StoreVector()->memory_size() == 8);
10530 match(Set mem (StoreVector mem (ReplicateF zero)));
10531 ins_cost(MEMORY_REF_COST);
10532 size(4); // one 4-byte SPARC instruction
10533 format %{ "STX $zero,$mem\t! store zero vector (2 floats)" %}
10534 ins_encode %{
10535 __ stx(G0, $mem$$Address);
10536 %}
10537 ins_pipe(fstoreD_mem_zero);
10538 %}
10540 // Replicate scalar to packed byte values into Double register
// Replicate the low byte of $src into all 8 byte lanes of a double FP
// register. The byte is positioned at the top with SLLX, then a
// shift/OR doubling tree (8, 16, 32) fills the remaining lanes in the
// integer register before MOVXTOD moves the 64-bit pattern to the FP
// side. MOVXTOD requires VIS3, hence the UseVIS >= 3 predicate.
10541 instruct Repl8B_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10542 predicate(n->as_Vector()->length() == 8 && UseVIS >= 3);
10543 match(Set dst (ReplicateB src));
10544 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); // tmp2 (%o7) is clobbered
10545 format %{ "SLLX $src,56,$tmp\n\t"
10546 "SRLX $tmp, 8,$tmp2\n\t"
10547 "OR $tmp,$tmp2,$tmp\n\t"
10548 "SRLX $tmp,16,$tmp2\n\t"
10549 "OR $tmp,$tmp2,$tmp\n\t"
10550 "SRLX $tmp,32,$tmp2\n\t"
10551 "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t"
10552 "MOVXTOD $tmp,$dst\t! MoveL2D" %}
10553 ins_encode %{
10554 Register Rsrc = $src$$Register;
10555 Register Rtmp = $tmp$$Register;
10556 Register Rtmp2 = $tmp2$$Register;
10557 __ sllx(Rsrc, 56, Rtmp); // byte in bits 63..56, lower bits cleared
10558 __ srlx(Rtmp, 8, Rtmp2);
10559 __ or3 (Rtmp, Rtmp2, Rtmp); // 2 copies
10560 __ srlx(Rtmp, 16, Rtmp2);
10561 __ or3 (Rtmp, Rtmp2, Rtmp); // 4 copies
10562 __ srlx(Rtmp, 32, Rtmp2);
10563 __ or3 (Rtmp, Rtmp2, Rtmp); // 8 copies
10564 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg));
10565 %}
10566 ins_pipe(ialu_reg);
10567 %}
10569 // Replicate scalar to packed byte values into Double stack
// Replicate the low byte of $src into all 8 byte lanes, producing the
// result in a double-sized stack slot. Same shift/OR doubling tree as
// Repl8B_reg, but without VIS3 there is no direct int-to-FP register
// move, so the 64-bit pattern is stored to the stack instead
// (UseVIS < 3 predicate).
10570 instruct Repl8B_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10571 predicate(n->as_Vector()->length() == 8 && UseVIS < 3);
10572 match(Set dst (ReplicateB src));
10573 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); // tmp2 (%o7) is clobbered
10574 format %{ "SLLX $src,56,$tmp\n\t"
10575 "SRLX $tmp, 8,$tmp2\n\t"
10576 "OR $tmp,$tmp2,$tmp\n\t"
10577 "SRLX $tmp,16,$tmp2\n\t"
10578 "OR $tmp,$tmp2,$tmp\n\t"
10579 "SRLX $tmp,32,$tmp2\n\t"
10580 "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t"
10581 "STX $tmp,$dst\t! regL to stkD" %}
10582 ins_encode %{
10583 Register Rsrc = $src$$Register;
10584 Register Rtmp = $tmp$$Register;
10585 Register Rtmp2 = $tmp2$$Register;
10586 __ sllx(Rsrc, 56, Rtmp); // byte in bits 63..56, lower bits cleared
10587 __ srlx(Rtmp, 8, Rtmp2);
10588 __ or3 (Rtmp, Rtmp2, Rtmp); // 2 copies
10589 __ srlx(Rtmp, 16, Rtmp2);
10590 __ or3 (Rtmp, Rtmp2, Rtmp); // 4 copies
10591 __ srlx(Rtmp, 32, Rtmp2);
10592 __ or3 (Rtmp, Rtmp2, Rtmp); // 8 copies
// Materialize the biased stack offset in a register; set() handles
// offsets that do not fit in a 13-bit immediate.
10593 __ set ($dst$$disp + STACK_BIAS, Rtmp2);
10594 __ stx (Rtmp, Rtmp2, $dst$$base$$Register);
10595 %}
10596 ins_pipe(ialu_reg);
10597 %}
10599 // Replicate scalar constant to packed byte values in Double register
// Replicate a compile-time byte constant into all 8 lanes by loading a
// pre-replicated 64-bit value from the constant table with LDDF.
// $tmp (%o7) is killed in case the constant-table offset does not fit
// in a 13-bit immediate and must be materialized in a register.
10600 instruct Repl8B_immI(regD dst, immI13 con, o7RegI tmp) %{
10601 predicate(n->as_Vector()->length() == 8);
10602 match(Set dst (ReplicateB con));
10603 effect(KILL tmp);
10604 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl8B($con)" %}
10605 ins_encode %{
10606 // XXX This is a quick fix for 6833573.
10607 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 8, 1)), $dst$$FloatRegister);
10608 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 8, 1)), $tmp$$Register);
10609 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
10610 %}
10611 ins_pipe(loadConFD);
10612 %}
10614 // Replicate scalar to packed char/short values into Double register
// Replicate the low 16 bits of $src into all 4 short lanes of a double
// FP register: SLLX positions the short at the top, a shift/OR doubling
// tree (16, 32) fills the rest, and MOVXTOD (VIS3) moves the pattern to
// the FP side.
10615 instruct Repl4S_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10616 predicate(n->as_Vector()->length() == 4 && UseVIS >= 3);
10617 match(Set dst (ReplicateS src));
10618 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); // tmp2 (%o7) is clobbered
10619 format %{ "SLLX $src,48,$tmp\n\t"
10620 "SRLX $tmp,16,$tmp2\n\t"
10621 "OR $tmp,$tmp2,$tmp\n\t"
10622 "SRLX $tmp,32,$tmp2\n\t"
10623 "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t"
10624 "MOVXTOD $tmp,$dst\t! MoveL2D" %}
10625 ins_encode %{
10626 Register Rsrc = $src$$Register;
10627 Register Rtmp = $tmp$$Register;
10628 Register Rtmp2 = $tmp2$$Register;
10629 __ sllx(Rsrc, 48, Rtmp); // short in bits 63..48, lower bits cleared
10630 __ srlx(Rtmp, 16, Rtmp2);
10631 __ or3 (Rtmp, Rtmp2, Rtmp); // 2 copies
10632 __ srlx(Rtmp, 32, Rtmp2);
10633 __ or3 (Rtmp, Rtmp2, Rtmp); // 4 copies
10634 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg));
10635 %}
10636 ins_pipe(ialu_reg);
10637 %}
10639 // Replicate scalar to packed char/short values into Double stack
// Replicate the low 16 bits of $src into all 4 short lanes, writing the
// 64-bit pattern to a double-sized stack slot. Used when VIS3's
// MOVXTOD is unavailable (UseVIS < 3).
10640 instruct Repl4S_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10641 predicate(n->as_Vector()->length() == 4 && UseVIS < 3);
10642 match(Set dst (ReplicateS src));
10643 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); // tmp2 (%o7) is clobbered
10644 format %{ "SLLX $src,48,$tmp\n\t"
10645 "SRLX $tmp,16,$tmp2\n\t"
10646 "OR $tmp,$tmp2,$tmp\n\t"
10647 "SRLX $tmp,32,$tmp2\n\t"
10648 "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t"
10649 "STX $tmp,$dst\t! regL to stkD" %}
10650 ins_encode %{
10651 Register Rsrc = $src$$Register;
10652 Register Rtmp = $tmp$$Register;
10653 Register Rtmp2 = $tmp2$$Register;
10654 __ sllx(Rsrc, 48, Rtmp); // short in bits 63..48, lower bits cleared
10655 __ srlx(Rtmp, 16, Rtmp2);
10656 __ or3 (Rtmp, Rtmp2, Rtmp); // 2 copies
10657 __ srlx(Rtmp, 32, Rtmp2);
10658 __ or3 (Rtmp, Rtmp2, Rtmp); // 4 copies
// Materialize the biased stack offset in a register; set() handles
// offsets that do not fit in a 13-bit immediate.
10659 __ set ($dst$$disp + STACK_BIAS, Rtmp2);
10660 __ stx (Rtmp, Rtmp2, $dst$$base$$Register);
10661 %}
10662 ins_pipe(ialu_reg);
10663 %}
10665 // Replicate scalar constant to packed char/short values in Double register
// Replicate a compile-time short constant into all 4 lanes by loading a
// pre-replicated 64-bit value from the constant table with LDDF.
// $tmp (%o7) is killed in case the constant-table offset does not fit
// in a 13-bit immediate and must be materialized in a register.
10666 instruct Repl4S_immI(regD dst, immI con, o7RegI tmp) %{
10667 predicate(n->as_Vector()->length() == 4);
10668 match(Set dst (ReplicateS con));
10669 effect(KILL tmp);
10670 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl4S($con)" %}
10671 ins_encode %{
10672 // XXX This is a quick fix for 6833573.
10673 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 4, 2)), $dst$$FloatRegister);
10674 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 4, 2)), $tmp$$Register);
10675 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
10676 %}
10677 ins_pipe(loadConFD);
10678 %}
10680 // Replicate scalar to packed int values into Double register
// Replicate the 32-bit $src into both int lanes of a double FP
// register: SLLX puts the value in the upper half, SRLX/OR duplicates
// it into the lower half, then MOVXTOD (VIS3) moves it to the FP side.
10681 instruct Repl2I_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10682 predicate(n->as_Vector()->length() == 2 && UseVIS >= 3);
10683 match(Set dst (ReplicateI src));
10684 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); // tmp2 (%o7) is clobbered
10685 format %{ "SLLX $src,32,$tmp\n\t"
10686 "SRLX $tmp,32,$tmp2\n\t"
10687 "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t"
10688 "MOVXTOD $tmp,$dst\t! MoveL2D" %}
10689 ins_encode %{
10690 Register Rsrc = $src$$Register;
10691 Register Rtmp = $tmp$$Register;
10692 Register Rtmp2 = $tmp2$$Register;
10693 __ sllx(Rsrc, 32, Rtmp); // int in bits 63..32, lower bits cleared
10694 __ srlx(Rtmp, 32, Rtmp2);
10695 __ or3 (Rtmp, Rtmp2, Rtmp); // 2 copies
10696 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg));
10697 %}
10698 ins_pipe(ialu_reg);
10699 %}
10701 // Replicate scalar to packed int values into Double stack
// Replicate the 32-bit $src into both int lanes, writing the 64-bit
// pattern to a double-sized stack slot. Used when VIS3's MOVXTOD is
// unavailable (UseVIS < 3).
10702 instruct Repl2I_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10703 predicate(n->as_Vector()->length() == 2 && UseVIS < 3);
10704 match(Set dst (ReplicateI src));
10705 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); // tmp2 (%o7) is clobbered
10706 format %{ "SLLX $src,32,$tmp\n\t"
10707 "SRLX $tmp,32,$tmp2\n\t"
10708 "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t"
10709 "STX $tmp,$dst\t! regL to stkD" %}
10710 ins_encode %{
10711 Register Rsrc = $src$$Register;
10712 Register Rtmp = $tmp$$Register;
10713 Register Rtmp2 = $tmp2$$Register;
10714 __ sllx(Rsrc, 32, Rtmp); // int in bits 63..32, lower bits cleared
10715 __ srlx(Rtmp, 32, Rtmp2);
10716 __ or3 (Rtmp, Rtmp2, Rtmp); // 2 copies
// Materialize the biased stack offset in a register; set() handles
// offsets that do not fit in a 13-bit immediate.
10717 __ set ($dst$$disp + STACK_BIAS, Rtmp2);
10718 __ stx (Rtmp, Rtmp2, $dst$$base$$Register);
10719 %}
10720 ins_pipe(ialu_reg);
10721 %}
10723 // Replicate scalar constant to packed int values in Double register
// Replicate a compile-time int constant into both lanes by loading a
// pre-replicated 64-bit value from the constant table with LDDF.
// $tmp (%o7) is killed in case the constant-table offset does not fit
// in a 13-bit immediate and must be materialized in a register.
10724 instruct Repl2I_immI(regD dst, immI con, o7RegI tmp) %{
10725 predicate(n->as_Vector()->length() == 2);
10726 match(Set dst (ReplicateI con));
10727 effect(KILL tmp);
10728 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2I($con)" %}
10729 ins_encode %{
10730 // XXX This is a quick fix for 6833573.
10731 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 2, 4)), $dst$$FloatRegister);
10732 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 2, 4)), $tmp$$Register);
10733 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
10734 %}
10735 ins_pipe(loadConFD);
10736 %}
10738 // Replicate scalar to packed float values into Double stack
// Replicate a single-precision float into a packed pair on the stack by
// storing $src twice with STF: once at the slot's base address and once
// at base+4 (two memory references, hence MEMORY_REF_COST*2). Uses the
// older opcode/ins_encode style rather than a %{ %} encoding block.
10739 instruct Repl2F_stk(stackSlotD dst, regF src) %{
10740 predicate(n->as_Vector()->length() == 2);
10741 match(Set dst (ReplicateF src));
10742 ins_cost(MEMORY_REF_COST*2);
10743 format %{ "STF $src,$dst.hi\t! packed2F\n\t"
10744 "STF $src,$dst.lo" %}
10745 opcode(Assembler::stf_op3);
10746 ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, src));
10747 ins_pipe(fstoreF_stk_reg);
10748 %}
10750 // Replicate scalar constant to packed float values in Double register
// Replicate a compile-time float constant into both lanes by loading a
// pre-replicated 64-bit value from the constant table with LDDF.
// $tmp (%o7) is killed in case the constant-table offset does not fit
// in a 13-bit immediate and must be materialized in a register.
10751 instruct Repl2F_immF(regD dst, immF con, o7RegI tmp) %{
10752 predicate(n->as_Vector()->length() == 2);
10753 match(Set dst (ReplicateF con));
10754 effect(KILL tmp);
10755 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2F($con)" %}
10756 ins_encode %{
10757 // XXX This is a quick fix for 6833573.
10758 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immF($con$$constant)), $dst$$FloatRegister);
10759 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immF($con$$constant)), $tmp$$Register);
10760 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
10761 %}
10762 ins_pipe(loadConFD);
10763 %}
10765 //----------PEEPHOLE RULES-----------------------------------------------------
10766 // These must follow all instruction definitions as they use the names
10767 // defined in the instructions definitions.
10768 //
10769 // peepmatch ( root_instr_name [preceding_instruction]* );
10770 //
10771 // peepconstraint %{
10772 // (instruction_number.operand_name relational_op instruction_number.operand_name
10773 // [, ...] );
10774 // // instruction numbers are zero-based using left to right order in peepmatch
10775 //
10776 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
10777 // // provide an instruction_number.operand_name for each operand that appears
10778 // // in the replacement instruction's match rule
10779 //
10780 // ---------VM FLAGS---------------------------------------------------------
10781 //
10782 // All peephole optimizations can be turned off using -XX:-OptoPeephole
10783 //
10784 // Each peephole rule is given an identifying number starting with zero and
10785 // increasing by one in the order seen by the parser. An individual peephole
10786 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
10787 // on the command-line.
10788 //
10789 // ---------CURRENT LIMITATIONS----------------------------------------------
10790 //
10791 // Only match adjacent instructions in same basic block
10792 // Only equality constraints
10793 // Only constraints between operands, not (0.dest_reg == EAX_enc)
10794 // Only one replacement instruction
10795 //
10796 // ---------EXAMPLE----------------------------------------------------------
10797 //
10798 // // pertinent parts of existing instructions in architecture description
10799 // instruct movI(eRegI dst, eRegI src) %{
10800 // match(Set dst (CopyI src));
10801 // %}
10802 //
10803 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
10804 // match(Set dst (AddI dst src));
10805 // effect(KILL cr);
10806 // %}
10807 //
10808 // // Change (inc mov) to lea
10809 // peephole %{
10810 // // increment preceded by register-register move
10811 // peepmatch ( incI_eReg movI );
10812 // // require that the destination register of the increment
10813 // // match the destination register of the move
10814 // peepconstraint ( 0.dst == 1.dst );
10815 // // construct a replacement instruction that sets
10816 // // the destination to ( move's source register + one )
10817 // peepreplace ( incI_eReg_immI1( 0.dst 1.src 0.src ) );
10818 // %}
10819 //
10821 // // Change load of spilled value to only a spill
10822 // instruct storeI(memory mem, eRegI src) %{
10823 // match(Set mem (StoreI mem src));
10824 // %}
10825 //
10826 // instruct loadI(eRegI dst, memory mem) %{
10827 // match(Set dst (LoadI mem));
10828 // %}
10829 //
10830 // peephole %{
10831 // peepmatch ( loadI storeI );
10832 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
10833 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
10834 // %}
10836 //----------SMARTSPILL RULES---------------------------------------------------
10837 // These must follow all instruction definitions as they use the names
10838 // defined in the instructions definitions.
10839 //
10840 // SPARC will probably not have any of these rules due to RISC instruction set.
10842 //----------PIPELINE-----------------------------------------------------------
10843 // Rules which define the behavior of the target architectures pipeline.