Fri, 08 May 2009 10:44:20 -0700
6788527: Server vm intermittently fails with assertion "live value must not be garbage" with fastdebug bits
Summary: Cache Jvmti and DTrace flags used by Compiler.
Reviewed-by: never
1 //
2 // Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 //
5 // This code is free software; you can redistribute it and/or modify it
6 // under the terms of the GNU General Public License version 2 only, as
7 // published by the Free Software Foundation.
8 //
9 // This code is distributed in the hope that it will be useful, but WITHOUT
10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 // version 2 for more details (a copy is included in the LICENSE file that
13 // accompanied this code).
14 //
15 // You should have received a copy of the GNU General Public License version
16 // 2 along with this work; if not, write to the Free Software Foundation,
17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 //
19 // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 // CA 95054 USA or visit www.sun.com if you need additional information or
21 // have any questions.
22 //
23 //
25 // AMD64 Architecture Description File
27 //----------REGISTER DEFINITION BLOCK------------------------------------------
28 // This information is used by the matcher and the register allocator to
29 // describe individual registers and classes of registers within the target
30 // architecture.
32 register %{
33 //----------Architecture Description Register Definitions----------------------
34 // General Registers
35 // "reg_def" name ( register save type, C convention save type,
36 // ideal register type, encoding );
37 // Register Save Types:
38 //
39 // NS = No-Save: The register allocator assumes that these registers
40 // can be used without saving upon entry to the method, &
41 // that they do not need to be saved at call sites.
42 //
43 // SOC = Save-On-Call: The register allocator assumes that these registers
44 // can be used without saving upon entry to the method,
45 // but that they must be saved at call sites.
46 //
47 // SOE = Save-On-Entry: The register allocator assumes that these registers
48 // must be saved before using them upon entry to the
49 // method, but they do not need to be saved at call
50 // sites.
51 //
52 // AS = Always-Save: The register allocator assumes that these registers
53 // must be saved before using them upon entry to the
54 // method, & that they must be saved at call sites.
55 //
56 // Ideal Register Type is used to determine how to save & restore a
57 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
58 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
59 //
60 // The encoding number is the actual bit-pattern placed into the opcodes.
62 // General Registers
63 // R8-R15 must be encoded with REX. (RSP, RBP, RSI, RDI need REX when
64 // used as byte registers)
66 // Previously set RBX, RSI, and RDI as save-on-entry for java code
67 // Turn off SOE in java-code due to frequent use of uncommon-traps.
68 // Now that allocator is better, turn on RSI and RDI as SOE registers.
70 reg_def RAX (SOC, SOC, Op_RegI, 0, rax->as_VMReg());
71 reg_def RAX_H(SOC, SOC, Op_RegI, 0, rax->as_VMReg()->next());
73 reg_def RCX (SOC, SOC, Op_RegI, 1, rcx->as_VMReg());
74 reg_def RCX_H(SOC, SOC, Op_RegI, 1, rcx->as_VMReg()->next());
76 reg_def RDX (SOC, SOC, Op_RegI, 2, rdx->as_VMReg());
77 reg_def RDX_H(SOC, SOC, Op_RegI, 2, rdx->as_VMReg()->next());
79 reg_def RBX (SOC, SOE, Op_RegI, 3, rbx->as_VMReg());
80 reg_def RBX_H(SOC, SOE, Op_RegI, 3, rbx->as_VMReg()->next());
82 reg_def RSP (NS, NS, Op_RegI, 4, rsp->as_VMReg());
83 reg_def RSP_H(NS, NS, Op_RegI, 4, rsp->as_VMReg()->next());
85 // now that adapter frames are gone RBP is always saved and restored by the prolog/epilog code
86 reg_def RBP (NS, SOE, Op_RegI, 5, rbp->as_VMReg());
87 reg_def RBP_H(NS, SOE, Op_RegI, 5, rbp->as_VMReg()->next());
89 #ifdef _WIN64
91 reg_def RSI (SOC, SOE, Op_RegI, 6, rsi->as_VMReg());
92 reg_def RSI_H(SOC, SOE, Op_RegI, 6, rsi->as_VMReg()->next());
94 reg_def RDI (SOC, SOE, Op_RegI, 7, rdi->as_VMReg());
95 reg_def RDI_H(SOC, SOE, Op_RegI, 7, rdi->as_VMReg()->next());
97 #else
99 reg_def RSI (SOC, SOC, Op_RegI, 6, rsi->as_VMReg());
100 reg_def RSI_H(SOC, SOC, Op_RegI, 6, rsi->as_VMReg()->next());
102 reg_def RDI (SOC, SOC, Op_RegI, 7, rdi->as_VMReg());
103 reg_def RDI_H(SOC, SOC, Op_RegI, 7, rdi->as_VMReg()->next());
105 #endif
107 reg_def R8 (SOC, SOC, Op_RegI, 8, r8->as_VMReg());
108 reg_def R8_H (SOC, SOC, Op_RegI, 8, r8->as_VMReg()->next());
110 reg_def R9 (SOC, SOC, Op_RegI, 9, r9->as_VMReg());
111 reg_def R9_H (SOC, SOC, Op_RegI, 9, r9->as_VMReg()->next());
113 reg_def R10 (SOC, SOC, Op_RegI, 10, r10->as_VMReg());
114 reg_def R10_H(SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
116 reg_def R11 (SOC, SOC, Op_RegI, 11, r11->as_VMReg());
117 reg_def R11_H(SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
119 reg_def R12 (SOC, SOE, Op_RegI, 12, r12->as_VMReg());
120 reg_def R12_H(SOC, SOE, Op_RegI, 12, r12->as_VMReg()->next());
122 reg_def R13 (SOC, SOE, Op_RegI, 13, r13->as_VMReg());
123 reg_def R13_H(SOC, SOE, Op_RegI, 13, r13->as_VMReg()->next());
125 reg_def R14 (SOC, SOE, Op_RegI, 14, r14->as_VMReg());
126 reg_def R14_H(SOC, SOE, Op_RegI, 14, r14->as_VMReg()->next());
128 reg_def R15 (SOC, SOE, Op_RegI, 15, r15->as_VMReg());
129 reg_def R15_H(SOC, SOE, Op_RegI, 15, r15->as_VMReg()->next());
132 // Floating Point Registers
134 // XMM registers. 128-bit registers or 4 words each, labeled (a)-d.
135 // Word a in each register holds a Float, words ab hold a Double. We
136 // currently do not use the SIMD capabilities, so registers cd are
137 // unused at the moment.
138 // XMM8-XMM15 must be encoded with REX.
139 // Linux ABI: No register preserved across function calls
140 // XMM0-XMM7 might hold parameters
141 // Windows ABI: XMM6-XMM15 preserved across function calls
142 // XMM0-XMM3 might hold parameters
144 reg_def XMM0 (SOC, SOC, Op_RegF, 0, xmm0->as_VMReg());
145 reg_def XMM0_H (SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next());
147 reg_def XMM1 (SOC, SOC, Op_RegF, 1, xmm1->as_VMReg());
148 reg_def XMM1_H (SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next());
150 reg_def XMM2 (SOC, SOC, Op_RegF, 2, xmm2->as_VMReg());
151 reg_def XMM2_H (SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next());
153 reg_def XMM3 (SOC, SOC, Op_RegF, 3, xmm3->as_VMReg());
154 reg_def XMM3_H (SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next());
156 reg_def XMM4 (SOC, SOC, Op_RegF, 4, xmm4->as_VMReg());
157 reg_def XMM4_H (SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next());
159 reg_def XMM5 (SOC, SOC, Op_RegF, 5, xmm5->as_VMReg());
160 reg_def XMM5_H (SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next());
162 #ifdef _WIN64
164 reg_def XMM6 (SOC, SOE, Op_RegF, 6, xmm6->as_VMReg());
165 reg_def XMM6_H (SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next());
167 reg_def XMM7 (SOC, SOE, Op_RegF, 7, xmm7->as_VMReg());
168 reg_def XMM7_H (SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next());
170 reg_def XMM8 (SOC, SOE, Op_RegF, 8, xmm8->as_VMReg());
171 reg_def XMM8_H (SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next());
173 reg_def XMM9 (SOC, SOE, Op_RegF, 9, xmm9->as_VMReg());
174 reg_def XMM9_H (SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next());
176 reg_def XMM10 (SOC, SOE, Op_RegF, 10, xmm10->as_VMReg());
177 reg_def XMM10_H(SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next());
179 reg_def XMM11 (SOC, SOE, Op_RegF, 11, xmm11->as_VMReg());
180 reg_def XMM11_H(SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next());
182 reg_def XMM12 (SOC, SOE, Op_RegF, 12, xmm12->as_VMReg());
183 reg_def XMM12_H(SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next());
185 reg_def XMM13 (SOC, SOE, Op_RegF, 13, xmm13->as_VMReg());
186 reg_def XMM13_H(SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next());
188 reg_def XMM14 (SOC, SOE, Op_RegF, 14, xmm14->as_VMReg());
189 reg_def XMM14_H(SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next());
191 reg_def XMM15 (SOC, SOE, Op_RegF, 15, xmm15->as_VMReg());
192 reg_def XMM15_H(SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next());
194 #else
196 reg_def XMM6 (SOC, SOC, Op_RegF, 6, xmm6->as_VMReg());
197 reg_def XMM6_H (SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next());
199 reg_def XMM7 (SOC, SOC, Op_RegF, 7, xmm7->as_VMReg());
200 reg_def XMM7_H (SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next());
202 reg_def XMM8 (SOC, SOC, Op_RegF, 8, xmm8->as_VMReg());
203 reg_def XMM8_H (SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next());
205 reg_def XMM9 (SOC, SOC, Op_RegF, 9, xmm9->as_VMReg());
206 reg_def XMM9_H (SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next());
208 reg_def XMM10 (SOC, SOC, Op_RegF, 10, xmm10->as_VMReg());
209 reg_def XMM10_H(SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next());
211 reg_def XMM11 (SOC, SOC, Op_RegF, 11, xmm11->as_VMReg());
212 reg_def XMM11_H(SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next());
214 reg_def XMM12 (SOC, SOC, Op_RegF, 12, xmm12->as_VMReg());
215 reg_def XMM12_H(SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next());
217 reg_def XMM13 (SOC, SOC, Op_RegF, 13, xmm13->as_VMReg());
218 reg_def XMM13_H(SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next());
220 reg_def XMM14 (SOC, SOC, Op_RegF, 14, xmm14->as_VMReg());
221 reg_def XMM14_H(SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next());
223 reg_def XMM15 (SOC, SOC, Op_RegF, 15, xmm15->as_VMReg());
224 reg_def XMM15_H(SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next());
226 #endif // _WIN64
228 reg_def RFLAGS(SOC, SOC, 0, 16, VMRegImpl::Bad());
230 // Specify priority of register selection within phases of register
231 // allocation. Highest priority is first. A useful heuristic is to
232 // give registers a low priority when they are required by machine
233 // instructions, like EAX and EDX on I486, and choose no-save registers
234 // before save-on-call, & save-on-call before save-on-entry. Registers
235 // which participate in fixed calling sequences should come last.
236 // Registers which are used as pairs must fall on an even boundary.
238 alloc_class chunk0(R10, R10_H,
239 R11, R11_H,
240 R8, R8_H,
241 R9, R9_H,
242 R12, R12_H,
243 RCX, RCX_H,
244 RBX, RBX_H,
245 RDI, RDI_H,
246 RDX, RDX_H,
247 RSI, RSI_H,
248 RAX, RAX_H,
249 RBP, RBP_H,
250 R13, R13_H,
251 R14, R14_H,
252 R15, R15_H,
253 RSP, RSP_H);
255 // XXX probably use 8-15 first on Linux
256 alloc_class chunk1(XMM0, XMM0_H,
257 XMM1, XMM1_H,
258 XMM2, XMM2_H,
259 XMM3, XMM3_H,
260 XMM4, XMM4_H,
261 XMM5, XMM5_H,
262 XMM6, XMM6_H,
263 XMM7, XMM7_H,
264 XMM8, XMM8_H,
265 XMM9, XMM9_H,
266 XMM10, XMM10_H,
267 XMM11, XMM11_H,
268 XMM12, XMM12_H,
269 XMM13, XMM13_H,
270 XMM14, XMM14_H,
271 XMM15, XMM15_H);
273 alloc_class chunk2(RFLAGS);
276 //----------Architecture Description Register Classes--------------------------
277 // Several register classes are automatically defined based upon information in
278 // this architecture description.
279 // 1) reg_class inline_cache_reg ( /* as def'd in frame section */ )
280 // 2) reg_class compiler_method_oop_reg ( /* as def'd in frame section */ )
281 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
282 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
283 //
285 // Class for all pointer registers (including RSP)
286 reg_class any_reg(RAX, RAX_H,
287 RDX, RDX_H,
288 RBP, RBP_H,
289 RDI, RDI_H,
290 RSI, RSI_H,
291 RCX, RCX_H,
292 RBX, RBX_H,
293 RSP, RSP_H,
294 R8, R8_H,
295 R9, R9_H,
296 R10, R10_H,
297 R11, R11_H,
298 R12, R12_H,
299 R13, R13_H,
300 R14, R14_H,
301 R15, R15_H);
303 // Class for all pointer registers except RSP
304 reg_class ptr_reg(RAX, RAX_H,
305 RDX, RDX_H,
306 RBP, RBP_H,
307 RDI, RDI_H,
308 RSI, RSI_H,
309 RCX, RCX_H,
310 RBX, RBX_H,
311 R8, R8_H,
312 R9, R9_H,
313 R10, R10_H,
314 R11, R11_H,
315 R13, R13_H,
316 R14, R14_H);
318 // Class for all pointer registers except RAX and RSP
319 reg_class ptr_no_rax_reg(RDX, RDX_H,
320 RBP, RBP_H,
321 RDI, RDI_H,
322 RSI, RSI_H,
323 RCX, RCX_H,
324 RBX, RBX_H,
325 R8, R8_H,
326 R9, R9_H,
327 R10, R10_H,
328 R11, R11_H,
329 R13, R13_H,
330 R14, R14_H);
332 reg_class ptr_no_rbp_reg(RDX, RDX_H,
333 RAX, RAX_H,
334 RDI, RDI_H,
335 RSI, RSI_H,
336 RCX, RCX_H,
337 RBX, RBX_H,
338 R8, R8_H,
339 R9, R9_H,
340 R10, R10_H,
341 R11, R11_H,
342 R13, R13_H,
343 R14, R14_H);
345 // Class for all pointer registers except RAX, RBX and RSP
346 reg_class ptr_no_rax_rbx_reg(RDX, RDX_H,
347 RBP, RBP_H,
348 RDI, RDI_H,
349 RSI, RSI_H,
350 RCX, RCX_H,
351 R8, R8_H,
352 R9, R9_H,
353 R10, R10_H,
354 R11, R11_H,
355 R13, R13_H,
356 R14, R14_H);
358 // Singleton class for RAX pointer register
359 reg_class ptr_rax_reg(RAX, RAX_H);
361 // Singleton class for RBX pointer register
362 reg_class ptr_rbx_reg(RBX, RBX_H);
364 // Singleton class for RSI pointer register
365 reg_class ptr_rsi_reg(RSI, RSI_H);
367 // Singleton class for RDI pointer register
368 reg_class ptr_rdi_reg(RDI, RDI_H);
370 // Singleton class for RBP pointer register
371 reg_class ptr_rbp_reg(RBP, RBP_H);
373 // Singleton class for stack pointer
374 reg_class ptr_rsp_reg(RSP, RSP_H);
376 // Singleton class for TLS pointer
377 reg_class ptr_r15_reg(R15, R15_H);
379 // Class for all long registers (except RSP)
380 reg_class long_reg(RAX, RAX_H,
381 RDX, RDX_H,
382 RBP, RBP_H,
383 RDI, RDI_H,
384 RSI, RSI_H,
385 RCX, RCX_H,
386 RBX, RBX_H,
387 R8, R8_H,
388 R9, R9_H,
389 R10, R10_H,
390 R11, R11_H,
391 R13, R13_H,
392 R14, R14_H);
394 // Class for all long registers except RAX, RDX (and RSP)
395 reg_class long_no_rax_rdx_reg(RBP, RBP_H,
396 RDI, RDI_H,
397 RSI, RSI_H,
398 RCX, RCX_H,
399 RBX, RBX_H,
400 R8, R8_H,
401 R9, R9_H,
402 R10, R10_H,
403 R11, R11_H,
404 R13, R13_H,
405 R14, R14_H);
407 // Class for all long registers except RCX (and RSP)
408 reg_class long_no_rcx_reg(RBP, RBP_H,
409 RDI, RDI_H,
410 RSI, RSI_H,
411 RAX, RAX_H,
412 RDX, RDX_H,
413 RBX, RBX_H,
414 R8, R8_H,
415 R9, R9_H,
416 R10, R10_H,
417 R11, R11_H,
418 R13, R13_H,
419 R14, R14_H);
421 // Class for all long registers except RAX (and RSP)
422 reg_class long_no_rax_reg(RBP, RBP_H,
423 RDX, RDX_H,
424 RDI, RDI_H,
425 RSI, RSI_H,
426 RCX, RCX_H,
427 RBX, RBX_H,
428 R8, R8_H,
429 R9, R9_H,
430 R10, R10_H,
431 R11, R11_H,
432 R13, R13_H,
433 R14, R14_H);
435 // Singleton class for RAX long register
436 reg_class long_rax_reg(RAX, RAX_H);
438 // Singleton class for RCX long register
439 reg_class long_rcx_reg(RCX, RCX_H);
441 // Singleton class for RDX long register
442 reg_class long_rdx_reg(RDX, RDX_H);
444 // Class for all int registers (except RSP)
445 reg_class int_reg(RAX,
446 RDX,
447 RBP,
448 RDI,
449 RSI,
450 RCX,
451 RBX,
452 R8,
453 R9,
454 R10,
455 R11,
456 R13,
457 R14);
459 // Class for all int registers except RCX (and RSP)
460 reg_class int_no_rcx_reg(RAX,
461 RDX,
462 RBP,
463 RDI,
464 RSI,
465 RBX,
466 R8,
467 R9,
468 R10,
469 R11,
470 R13,
471 R14);
473 // Class for all int registers except RAX, RDX (and RSP)
474 reg_class int_no_rax_rdx_reg(RBP,
475 RDI,
476 RSI,
477 RCX,
478 RBX,
479 R8,
480 R9,
481 R10,
482 R11,
483 R13,
484 R14);
486 // Singleton class for RAX int register
487 reg_class int_rax_reg(RAX);
489 // Singleton class for RBX int register
490 reg_class int_rbx_reg(RBX);
492 // Singleton class for RCX int register
493 reg_class int_rcx_reg(RCX);
495 // Singleton class for RDX int register
496 reg_class int_rdx_reg(RDX);
498 // Singleton class for RDI int register
499 reg_class int_rdi_reg(RDI);
501 // Singleton class for instruction pointer
502 // reg_class ip_reg(RIP);
504 // Singleton class for condition codes
505 reg_class int_flags(RFLAGS);
507 // Class for all float registers
508 reg_class float_reg(XMM0,
509 XMM1,
510 XMM2,
511 XMM3,
512 XMM4,
513 XMM5,
514 XMM6,
515 XMM7,
516 XMM8,
517 XMM9,
518 XMM10,
519 XMM11,
520 XMM12,
521 XMM13,
522 XMM14,
523 XMM15);
525 // Class for all double registers
526 reg_class double_reg(XMM0, XMM0_H,
527 XMM1, XMM1_H,
528 XMM2, XMM2_H,
529 XMM3, XMM3_H,
530 XMM4, XMM4_H,
531 XMM5, XMM5_H,
532 XMM6, XMM6_H,
533 XMM7, XMM7_H,
534 XMM8, XMM8_H,
535 XMM9, XMM9_H,
536 XMM10, XMM10_H,
537 XMM11, XMM11_H,
538 XMM12, XMM12_H,
539 XMM13, XMM13_H,
540 XMM14, XMM14_H,
541 XMM15, XMM15_H);
542 %}
545 //----------SOURCE BLOCK-------------------------------------------------------
546 // This is a block of C++ code which provides values, functions, and
547 // definitions necessary in the rest of the architecture description
548 source %{
549 #define RELOC_IMM64 Assembler::imm_operand
550 #define RELOC_DISP32 Assembler::disp32_operand
552 #define __ _masm.
554 // !!!!! Special hack to get all types of calls to specify the byte offset
555 // from the start of the call to the point where the return address
556 // will point.
// Byte offset from the first byte of the emitted call instruction to the
// return address the CPU pushes; used to locate oopmaps/debug info at calls.
557 int MachCallStaticJavaNode::ret_addr_offset()
558 {
559 return 5; // 5 bytes from start of call to where return address points
560 }
// Dynamic (inline-cache) calls are preceded by a 10-byte movq of the cached
// oop plus the 5-byte call, hence the larger offset.
562 int MachCallDynamicJavaNode::ret_addr_offset()
563 {
564 return 15; // 15 bytes from start of call to where return address points
565 }
567 // In os_cpu .ad file
568 // int MachCallRuntimeNode::ret_addr_offset()
570 // Indicate if the safepoint node needs the polling page as an input.
571 // Since amd64 does not have absolute addressing but RIP-relative
572 // addressing and the polling page is within 2G, it doesn't.
// Safepoint polls on amd64 use RIP-relative addressing to reach the polling
// page (within 2G), so the poll address never needs to be a node input.
573 bool SafePointNode::needs_polling_address_input()
574 {
575 return false;
576 }
578 //
579 // Compute padding required for nodes which need alignment
580 //
582 // The address of the call instruction needs to be 4-byte aligned to
583 // ensure that it does not span a cache line so that it can be patched.
// Returns the number of padding bytes needed so that the call's 4-byte
// displacement field (which follows the opcode byte) is 4-byte aligned and
// therefore patchable atomically without spanning a cache line.
584 int CallStaticJavaDirectNode::compute_padding(int current_offset) const
585 {
586 current_offset += 1; // skip call opcode byte
587 return round_to(current_offset, alignment_required()) - current_offset;
588 }
590 // The address of the call instruction needs to be 4-byte aligned to
591 // ensure that it does not span a cache line so that it can be patched.
// Same as above, but the dynamic call is preceded by a 10-byte movq
// (inline-cache oop load), so skip 10 + 1 = 11 bytes first.
592 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
593 {
594 current_offset += 11; // skip movq instruction + call opcode byte
595 return round_to(current_offset, alignment_required()) - current_offset;
596 }
598 #ifndef PRODUCT
// Debug-build-only textual form of the breakpoint node for -XX:+PrintAssembly
// style listings; the emitted instruction is the one-byte INT3.
599 void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const
600 {
601 st->print("INT3");
602 }
603 #endif
605 // EMIT_RM()
// Emit one ModRM (or SIB) byte: f1 = mod (2 bits), f2 = reg (3 bits),
// f3 = r/m (3 bits). Writes directly at the code buffer end pointer and
// bumps it by one byte.
606 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3)
607 {
608 unsigned char c = (unsigned char) ((f1 << 6) | (f2 << 3) | f3);
609 *(cbuf.code_end()) = c;
610 cbuf.set_code_end(cbuf.code_end() + 1);
611 }
613 // EMIT_CC()
// Emit an opcode byte built from a base opcode (f1) OR'd with a condition
// code (f2), e.g. for Jcc/SETcc encodings.
614 void emit_cc(CodeBuffer &cbuf, int f1, int f2)
615 {
616 unsigned char c = (unsigned char) (f1 | f2);
617 *(cbuf.code_end()) = c;
618 cbuf.set_code_end(cbuf.code_end() + 1);
619 }
621 // EMIT_OPCODE()
// Emit a single raw opcode byte.
622 void emit_opcode(CodeBuffer &cbuf, int code)
623 {
624 *(cbuf.code_end()) = (unsigned char) code;
625 cbuf.set_code_end(cbuf.code_end() + 1);
626 }
628 // EMIT_OPCODE() w/ relocation information
// Emit an opcode byte and record a relocation at inst_mark()+offset for it.
629 void emit_opcode(CodeBuffer &cbuf,
630                  int code, relocInfo::relocType reloc, int offset, int format)
631 {
632 cbuf.relocate(cbuf.inst_mark() + offset, reloc, format);
633 emit_opcode(cbuf, code);
634 }
636 // EMIT_D8()
// Emit an 8-bit immediate/displacement.
637 void emit_d8(CodeBuffer &cbuf, int d8)
638 {
639 *(cbuf.code_end()) = (unsigned char) d8;
640 cbuf.set_code_end(cbuf.code_end() + 1);
641 }
643 // EMIT_D16()
// Emit a 16-bit little-endian value (x86 allows the unaligned store).
644 void emit_d16(CodeBuffer &cbuf, int d16)
645 {
646 *((short *)(cbuf.code_end())) = d16;
647 cbuf.set_code_end(cbuf.code_end() + 2);
648 }
650 // EMIT_D32()
// Emit a 32-bit little-endian value.
651 void emit_d32(CodeBuffer &cbuf, int d32)
652 {
653 *((int *)(cbuf.code_end())) = d32;
654 cbuf.set_code_end(cbuf.code_end() + 4);
655 }
657 // EMIT_D64()
// Emit a 64-bit little-endian value.
658 void emit_d64(CodeBuffer &cbuf, int64_t d64)
659 {
660 *((int64_t*) (cbuf.code_end())) = d64;
661 cbuf.set_code_end(cbuf.code_end() + 8);
662 }
664 // emit 32 bit value and construct relocation entry from relocInfo::relocType
// Records the relocation against the current inst_mark() (the start of the
// instruction being emitted), then writes the 32-bit payload.
665 void emit_d32_reloc(CodeBuffer& cbuf,
666                     int d32,
667                     relocInfo::relocType reloc,
668                     int format)
669 {
670 assert(reloc != relocInfo::external_word_type, "use 2-arg emit_d32_reloc");
671 cbuf.relocate(cbuf.inst_mark(), reloc, format);
673 *((int*) (cbuf.code_end())) = d32;
674 cbuf.set_code_end(cbuf.code_end() + 4);
675 }
677 // emit 32 bit value and construct relocation entry from RelocationHolder
678 void emit_d32_reloc(CodeBuffer& cbuf,
679                     int d32,
680                     RelocationHolder const& rspec,
681                     int format)
682 {
683 #ifdef ASSERT
// Debug check: any oop embedded as a 32-bit immediate must be a real,
// permanent oop (non-perm oops could move under GC and break the code).
684   if (rspec.reloc()->type() == relocInfo::oop_type &&
685       d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
686     assert(oop((intptr_t)d32)->is_oop() && oop((intptr_t)d32)->is_perm(), "cannot embed non-perm oops in code");
687   }
688 #endif
689 cbuf.relocate(cbuf.inst_mark(), rspec, format);
691 *((int* )(cbuf.code_end())) = d32;
692 cbuf.set_code_end(cbuf.code_end() + 4);
693 }
// Convenience form for an external address: emits the RIP-relative disp32
// (target minus the address of the next instruction) with an
// external_word relocation.
695 void emit_d32_reloc(CodeBuffer& cbuf, address addr) {
696   address next_ip = cbuf.code_end() + 4;
697   emit_d32_reloc(cbuf, (int) (addr - next_ip),
698                  external_word_Relocation::spec(addr),
699                  RELOC_DISP32);
700 }
703 // emit 64 bit value and construct relocation entry from relocInfo::relocType
704 void emit_d64_reloc(CodeBuffer& cbuf,
705                     int64_t d64,
706                     relocInfo::relocType reloc,
707                     int format)
708 {
709 cbuf.relocate(cbuf.inst_mark(), reloc, format);
711 *((int64_t*) (cbuf.code_end())) = d64;
712 cbuf.set_code_end(cbuf.code_end() + 8);
713 }
715 // emit 64 bit value and construct relocation entry from RelocationHolder
716 void emit_d64_reloc(CodeBuffer& cbuf,
717                     int64_t d64,
718                     RelocationHolder const& rspec,
719                     int format)
720 {
721 #ifdef ASSERT
// Debug check mirroring the 32-bit variant: embedded oops must be
// permanent so they cannot move under GC.
722   if (rspec.reloc()->type() == relocInfo::oop_type &&
723       d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
724     assert(oop(d64)->is_oop() && oop(d64)->is_perm(),
725            "cannot embed non-perm oops in code");
726   }
727 #endif
728 cbuf.relocate(cbuf.inst_mark(), rspec, format);
730 *((int64_t*) (cbuf.code_end())) = d64;
731 cbuf.set_code_end(cbuf.code_end() + 8);
732 }
734 // Access stack slot for load or store
// Emits opcode + [RSP+disp] addressing: mod 0x1 with an 8-bit displacement
// when disp fits in a signed byte, otherwise mod 0x2 with a 32-bit
// displacement. RSP-based addressing always requires a SIB byte.
735 void store_to_stackslot(CodeBuffer &cbuf, int opcode, int rm_field, int disp)
736 {
737   emit_opcode(cbuf, opcode); // (e.g., FILD [RSP+src])
738   if (-0x80 <= disp && disp < 0x80) {
739     emit_rm(cbuf, 0x01, rm_field, RSP_enc); // R/M byte
740     emit_rm(cbuf, 0x00, RSP_enc, RSP_enc); // SIB byte
741     emit_d8(cbuf, disp); // Displacement // R/M byte
742   } else {
743     emit_rm(cbuf, 0x02, rm_field, RSP_enc); // R/M byte
744     emit_rm(cbuf, 0x00, RSP_enc, RSP_enc); // SIB byte
745     emit_d32(cbuf, disp); // Displacement // R/M byte
746   }
747 }
749 // rRegI ereg, memory mem) %{ // emit_reg_mem
// Emit the ModRM (+ optional SIB, + optional displacement) bytes for a
// reg, [base + index*scale + disp] operand. Only the low 3 bits of each
// register encoding are used here; any needed REX prefix must already have
// been emitted by the caller.
// NOTE(review): disp_is_oop is asserted false on entry, so every
// `if (disp_is_oop)` branch below is currently dead code — presumably kept
// for symmetry with the 32-bit port; confirm before relying on it.
750 void encode_RegMem(CodeBuffer &cbuf,
751                    int reg,
752                    int base, int index, int scale, int disp, bool disp_is_oop)
753 {
754   assert(!disp_is_oop, "cannot have disp");
755   int regenc = reg & 7;
756   int baseenc = base & 7;
757   int indexenc = index & 7;
759   // There is no index & no scale, use form without SIB byte
// index encoding 0x4 means "no index"; RSP/R12 as base always need a SIB.
760   if (index == 0x4 && scale == 0 && base != RSP_enc && base != R12_enc) {
761     // If no displacement, mode is 0x0; unless base is [RBP] or [R13]
// RBP/R13 with mod 0x0 would mean RIP-relative/abs32, so they must use a
// displacement form even when disp == 0.
762     if (disp == 0 && base != RBP_enc && base != R13_enc) {
763       emit_rm(cbuf, 0x0, regenc, baseenc); // *
764     } else if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
765       // If 8-bit displacement, mode 0x1
766       emit_rm(cbuf, 0x1, regenc, baseenc); // *
767       emit_d8(cbuf, disp);
768     } else {
769       // If 32-bit displacement
770       if (base == -1) { // Special flag for absolute address
771         emit_rm(cbuf, 0x0, regenc, 0x5); // *
772         if (disp_is_oop) {
773           emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
774         } else {
775           emit_d32(cbuf, disp);
776         }
777       } else {
778         // Normal base + offset
779         emit_rm(cbuf, 0x2, regenc, baseenc); // *
780         if (disp_is_oop) {
781           emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
782         } else {
783           emit_d32(cbuf, disp);
784         }
785       }
786     }
787   } else {
788     // Else, encode with the SIB byte
789     // If no displacement, mode is 0x0; unless base is [RBP] or [R13]
790     if (disp == 0 && base != RBP_enc && base != R13_enc) {
791       // If no displacement
792       emit_rm(cbuf, 0x0, regenc, 0x4); // *
793       emit_rm(cbuf, scale, indexenc, baseenc);
794     } else {
795       if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
796         // If 8-bit displacement, mode 0x1
797         emit_rm(cbuf, 0x1, regenc, 0x4); // *
798         emit_rm(cbuf, scale, indexenc, baseenc);
799         emit_d8(cbuf, disp);
800       } else {
801         // If 32-bit displacement
802         if (base == 0x04 ) {
803           emit_rm(cbuf, 0x2, regenc, 0x4);
804           emit_rm(cbuf, scale, indexenc, 0x04); // XXX is this valid???
805         } else {
806           emit_rm(cbuf, 0x2, regenc, 0x4);
807           emit_rm(cbuf, scale, indexenc, baseenc); // *
808         }
809         if (disp_is_oop) {
810           emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
811         } else {
812           emit_d32(cbuf, disp);
813         }
814       }
815     }
816   }
817 }
// Emit a 32-bit register-to-register move (movl dst, src / opcode 0x8B),
// choosing the REX prefix needed when either encoding is an extended
// register (R8-R15). A self-copy emits nothing.
819 void encode_copy(CodeBuffer &cbuf, int dstenc, int srcenc)
820 {
821   if (dstenc != srcenc) {
822     if (dstenc < 8) {
823       if (srcenc >= 8) {
824         emit_opcode(cbuf, Assembler::REX_B);
825         srcenc -= 8;
826       }
827     } else {
828       if (srcenc < 8) {
829         emit_opcode(cbuf, Assembler::REX_R);
830       } else {
831         emit_opcode(cbuf, Assembler::REX_RB);
832         srcenc -= 8;
833       }
834       dstenc -= 8;
835     }
837     emit_opcode(cbuf, 0x8B);
838     emit_rm(cbuf, 0x3, dstenc, srcenc);
839   }
840 }
// Emit an XMM-to-XMM copy via movdqa; a self-copy emits no bytes at all.
842 void encode_CopyXD( CodeBuffer &cbuf, int dst_encoding, int src_encoding ) {
843   if( dst_encoding == src_encoding ) {
844     // reg-reg copy, use an empty encoding
845   } else {
846     MacroAssembler _masm(&cbuf);
848     __ movdqa(as_XMMRegister(dst_encoding), as_XMMRegister(src_encoding));
849   }
850 }
853 //=============================================================================
854 #ifndef PRODUCT
// Debug-only pretty-printer for the method prolog. Must mirror the logic in
// MachPrologNode::emit below so listings match the emitted code.
855 void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const
856 {
857   Compile* C = ra_->C;
859   int framesize = C->frame_slots() << LogBytesPerInt;
860   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
861   // Remove wordSize for return adr already pushed
862   // and another for the RBP we are going to save
863   framesize -= 2*wordSize;
864   bool need_nop = true;
866   // Calls to C2R adapters often do not accept exceptional returns.
867   // We require that their callers must bang for them. But be
868   // careful, because some VM calls (such as call site linkage) can
869   // use several kilobytes of stack. But the stack safety zone should
870   // account for that. See bugs 4446381, 4468289, 4497237.
871   if (C->need_stack_bang(framesize)) {
872     st->print_cr("# stack bang"); st->print("\t");
873     need_nop = false;
874   }
875   st->print_cr("pushq rbp"); st->print("\t");
877   if (VerifyStackAtCalls) {
878     // Majik cookie to verify stack depth
879     st->print_cr("pushq 0xffffffffbadb100d"
880                  "\t# Majik cookie for stack depth check");
881     st->print("\t");
882     framesize -= wordSize; // Remove 2 for cookie
883     need_nop = false;
884   }
886   if (framesize) {
887     st->print("subq rsp, #%d\t# Create frame", framesize);
888     if (framesize < 0x80 && need_nop) {
889       st->print("\n\tnop\t# nop for patch_verified_entry");
890     }
891   }
892 }
893 #endif
// Emit the compiled-method prolog: optional stack-bang, pushq rbp, optional
// stack-depth cookie, and the frame-allocation subq. The first instruction
// must be >= 5 bytes so patch_verified_entry can overwrite it; a nop is
// appended after a short (3-byte) subq when nothing else guarantees that.
895 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
896 {
897   Compile* C = ra_->C;
899   // WARNING: Initial instruction MUST be 5 bytes or longer so that
900   // NativeJump::patch_verified_entry will be able to patch out the entry
901   // code safely. The fldcw is ok at 6 bytes, the push to verify stack
902   // depth is ok at 5 bytes, the frame allocation can be either 3 or
903   // 6 bytes. So if we don't do the fldcw or the push then we must
904   // use the 6 byte frame allocation even if we have no frame. :-(
905   // If method sets FPU control word do it now
907   int framesize = C->frame_slots() << LogBytesPerInt;
908   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
909   // Remove wordSize for return adr already pushed
910   // and another for the RBP we are going to save
911   framesize -= 2*wordSize;
912   bool need_nop = true;
914   // Calls to C2R adapters often do not accept exceptional returns.
915   // We require that their callers must bang for them. But be
916   // careful, because some VM calls (such as call site linkage) can
917   // use several kilobytes of stack. But the stack safety zone should
918   // account for that. See bugs 4446381, 4468289, 4497237.
919   if (C->need_stack_bang(framesize)) {
920     MacroAssembler masm(&cbuf);
921     masm.generate_stack_overflow_check(framesize);
922     need_nop = false;
923   }
925   // We always push rbp so that on return to interpreter rbp will be
926   // restored correctly and we can correct the stack.
927   emit_opcode(cbuf, 0x50 | RBP_enc);
929   if (VerifyStackAtCalls) {
930     // Majik cookie to verify stack depth
931     emit_opcode(cbuf, 0x68); // pushq (sign-extended) 0xbadb100d
932     emit_d32(cbuf, 0xbadb100d);
933     framesize -= wordSize; // Remove 2 for cookie
934     need_nop = false;
935   }
937   if (framesize) {
938     emit_opcode(cbuf, Assembler::REX_W);
939     if (framesize < 0x80) {
940       emit_opcode(cbuf, 0x83); // sub  SP,#framesize
941       emit_rm(cbuf, 0x3, 0x05, RSP_enc);
942       emit_d8(cbuf, framesize);
// Short subq is only 4 bytes with REX; pad with a nop so the entry spans
// the 5 bytes patch_verified_entry needs.
943       if (need_nop) {
944         emit_opcode(cbuf, 0x90); // nop
945       }
946     } else {
947       emit_opcode(cbuf, 0x81); // sub  SP,#framesize
948       emit_rm(cbuf, 0x3, 0x05, RSP_enc);
949       emit_d32(cbuf, framesize);
950     }
951   }
953   C->set_frame_complete(cbuf.code_end() - cbuf.code_begin());
955 #ifdef ASSERT
956   if (VerifyStackAtCalls) {
957     Label L;
958     MacroAssembler masm(&cbuf);
// Verify rsp is 16-byte aligned modulo the pushed return address.
959     masm.push(rax);
960     masm.mov(rax, rsp);
961     masm.andptr(rax, StackAlignmentInBytes-1);
962     masm.cmpptr(rax, StackAlignmentInBytes-wordSize);
963     masm.pop(rax);
964     masm.jcc(Assembler::equal, L);
965     masm.stop("Stack is not properly aligned!");
966     masm.bind(L);
967   }
968 #endif
969 }
// Prolog size depends on too many flags (stack bang, cookie, frame size),
// so let the generic machinery emit into a scratch buffer and measure.
971 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
972 {
973   return MachNode::size(ra_); // too many variables; just compute it
974                               // the hard way
975 }
// Upper bound on relocation entries this node can generate.
977 int MachPrologNode::reloc() const
978 {
979   return 0; // a large enough number
980 }
982 //=============================================================================
983 #ifndef PRODUCT
// Debug-only pretty-printer for the method epilog; mirrors
// MachEpilogNode::emit below (frame teardown, popq rbp, safepoint poll).
984 void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
985 {
986   Compile* C = ra_->C;
987   int framesize = C->frame_slots() << LogBytesPerInt;
988   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
989   // Remove word for return adr already pushed
990   // and RBP
991   framesize -= 2*wordSize;
993   if (framesize) {
994     st->print_cr("addq\trsp, %d\t# Destroy frame", framesize);
995     st->print("\t");
996   }
998   st->print_cr("popq\trbp");
999   if (do_polling() && C->is_method_compilation()) {
1000     st->print_cr("\ttestl\trax, [rip + #offset_to_poll_page]\t"
1001                  "# Safepoint: poll for GC");
1002     st->print("\t");
1003   }
1004 }
1005 #endif
// Emit the method epilog: addq rsp,#framesize to destroy the frame, popq
// rbp, and (for real method compilations) a RIP-relative testl against the
// polling page as the return safepoint poll.
1007 void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
1008 {
1009   Compile* C = ra_->C;
1010   int framesize = C->frame_slots() << LogBytesPerInt;
1011   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1012   // Remove word for return adr already pushed
1013   // and RBP
1014   framesize -= 2*wordSize;
1016   // Note that VerifyStackAtCalls' Majik cookie does not change the frame size popped here
1018   if (framesize) {
1019     emit_opcode(cbuf, Assembler::REX_W);
1020     if (framesize < 0x80) {
1021       emit_opcode(cbuf, 0x83); // addq rsp, #framesize
1022       emit_rm(cbuf, 0x3, 0x00, RSP_enc);
1023       emit_d8(cbuf, framesize);
1024     } else {
1025       emit_opcode(cbuf, 0x81); // addq rsp, #framesize
1026       emit_rm(cbuf, 0x3, 0x00, RSP_enc);
1027       emit_d32(cbuf, framesize);
1028     }
1029   }
1031   // popq rbp
1032   emit_opcode(cbuf, 0x58 | RBP_enc);
1034   if (do_polling() && C->is_method_compilation()) {
1035     // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes
1036     // XXX reg_mem doesn't support RIP-relative addressing yet
1037     cbuf.set_inst_mark();
1038     cbuf.relocate(cbuf.inst_mark(), relocInfo::poll_return_type, 0); // XXX
1039     emit_opcode(cbuf, 0x85); // testl
1040     emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5
1041     // cbuf.inst_mark() is beginning of instruction
1042     emit_d32_reloc(cbuf, os::get_polling_page());
1043     //                    relocInfo::poll_return_type,
1044   }
1045 }
1047 uint MachEpilogNode::size(PhaseRegAlloc* ra_) const
1048 {
1049 Compile* C = ra_->C;
1050 int framesize = C->frame_slots() << LogBytesPerInt;
1051 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1052 // Remove word for return adr already pushed
1053 // and RBP
1054 framesize -= 2*wordSize;
1056 uint size = 0;
1058 if (do_polling() && C->is_method_compilation()) {
1059 size += 6;
1060 }
1062 // count popq rbp
1063 size++;
1065 if (framesize) {
1066 if (framesize < 0x80) {
1067 size += 4;
1068 } else if (framesize) {
1069 size += 7;
1070 }
1071 }
1073 return size;
1074 }
1076 int MachEpilogNode::reloc() const
1077 {
1078 return 2; // a large enough number
1079 }
1081 const Pipeline* MachEpilogNode::pipeline() const
1082 {
1083 return MachNode::pipeline_class();
1084 }
1086 int MachEpilogNode::safepoint_offset() const
1087 {
1088 return 0;
1089 }
//=============================================================================

// Coarse register classes used by MachSpillCopyNode::implementation() to
// select a move strategy for each side of a spill copy.
enum RC {
  rc_bad,    // invalid / no register
  rc_int,    // general-purpose register
  rc_float,  // XMM register
  rc_stack   // stack slot
};
1100 static enum RC rc_class(OptoReg::Name reg)
1101 {
1102 if( !OptoReg::is_valid(reg) ) return rc_bad;
1104 if (OptoReg::is_stack(reg)) return rc_stack;
1106 VMReg r = OptoReg::as_VMReg(reg);
1108 if (r->is_Register()) return rc_int;
1110 assert(r->is_XMMRegister(), "must be");
1111 return rc_float;
1112 }
// Shared worker for MachSpillCopyNode::{format,emit,size}.  Dispatches on
// the register classes (stack, GPR, XMM) of the copy's source and
// destination, and within each pairing on 32- vs 64-bit width.  In every
// arm:
//   - if cbuf != NULL, the instruction bytes are emitted into it;
//   - else if !do_size (non-PRODUCT only), a listing is printed to st;
//   - the encoded length in bytes is returned in all cases, and that value
//     must match the emitted bytes exactly, since size() relies on it.
uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
                                       PhaseRegAlloc* ra_,
                                       bool do_size,
                                       outputStream* st) const
{

  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this);
  OptoReg::Name dst_first = ra_->get_reg_first(this);

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first),
         "must move at least 1 register" );

  if (src_first == dst_first && src_second == dst_second) {
    // Self copy, no move
    return 0;
  } else if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem
      assert(src_second != dst_first, "overlap");
      // An even first register followed by its pair register signals an
      // aligned 64-bit value; otherwise this is a 32-bit copy.
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          // pushq [rsp + src]; popq [rsp + dst] -- no scratch register used.
          emit_opcode(*cbuf, 0xFF);
          encode_RegMem(*cbuf, RSI_enc, RSP_enc, 0x4, 0, src_offset, false);

          emit_opcode(*cbuf, 0x8F);
          encode_RegMem(*cbuf, RAX_enc, RSP_enc, 0x4, 0, dst_offset, false);

#ifndef PRODUCT
        } else if (!do_size) {
          st->print("pushq [rsp + #%d]\t# 64-bit mem-mem spill\n\t"
                    "popq [rsp + #%d]",
                     src_offset,
                     dst_offset);
#endif
        }
        return
          3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) +
          3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 1 : 4));
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        // Save rax below rsp, bounce the word through rax, restore rax.
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          emit_opcode(*cbuf, Assembler::REX_W);
          emit_opcode(*cbuf, 0x89);
          emit_opcode(*cbuf, 0x44);
          emit_opcode(*cbuf, 0x24);
          emit_opcode(*cbuf, 0xF8);

          emit_opcode(*cbuf, 0x8B);
          encode_RegMem(*cbuf,
                        RAX_enc,
                        RSP_enc, 0x4, 0, src_offset,
                        false);

          emit_opcode(*cbuf, 0x89);
          encode_RegMem(*cbuf,
                        RAX_enc,
                        RSP_enc, 0x4, 0, dst_offset,
                        false);

          emit_opcode(*cbuf, Assembler::REX_W);
          emit_opcode(*cbuf, 0x8B);
          emit_opcode(*cbuf, 0x44);
          emit_opcode(*cbuf, 0x24);
          emit_opcode(*cbuf, 0xF8);

#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq [rsp - #8], rax\t# 32-bit mem-mem spill\n\t"
                    "movl rax, [rsp + #%d]\n\t"
                    "movl [rsp + #%d], rax\n\t"
                    "movq rax, [rsp - #8]",
                     src_offset,
                     dst_offset);
#endif
        }
        return
          5 + // movq
          3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) + // movl
          3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 1 : 4)) + // movl
          5; // movq
      }
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          // Encodings >= 8 (r8-r15) need the REX.R extension bit.
          if (Matcher::_regEncode[dst_first] < 8) {
            emit_opcode(*cbuf, Assembler::REX_W);
          } else {
            emit_opcode(*cbuf, Assembler::REX_WR);
          }
          emit_opcode(*cbuf, 0x8B);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq %s, [rsp + #%d]\t# spill",
                     Matcher::regName[dst_first],
                     offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + 4; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          if (Matcher::_regEncode[dst_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x8B);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movl %s, [rsp + #%d]\t# spill",
                     Matcher::regName[dst_first],
                     offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[dst_first] < 8)
           ? 3
           : 4); // REX
      }
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          // movsd (F2) clears the upper half; movlpd (66) preserves it.
          emit_opcode(*cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
          if (Matcher::_regEncode[dst_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("%s %s, [rsp + #%d]\t# spill",
                     UseXmmLoadAndClearUpper ? "movsd " : "movlpd",
                     Matcher::regName[dst_first],
                     offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[dst_first] < 8)
           ? 5
           : 6); // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          emit_opcode(*cbuf, 0xF3);
          if (Matcher::_regEncode[dst_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x10);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movss %s, [rsp + #%d]\t# spill",
                     Matcher::regName[dst_first],
                     offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[dst_first] < 8)
           ? 5
           : 6); // REX
      }
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          if (Matcher::_regEncode[src_first] < 8) {
            emit_opcode(*cbuf, Assembler::REX_W);
          } else {
            emit_opcode(*cbuf, Assembler::REX_WR);
          }
          emit_opcode(*cbuf, 0x89);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq [rsp + #%d], %s\t# spill",
                     offset,
                     Matcher::regName[src_first]);
#endif
        }
        return ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + 4; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          if (Matcher::_regEncode[src_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x89);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movl [rsp + #%d], %s\t# spill",
                     offset,
                     Matcher::regName[src_first]);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[src_first] < 8)
           ? 3
           : 4); // REX
      }
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          // REX.R extends the destination, REX.B the source encoding.
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_W);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WB);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_WR);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WRB);
            }
          }
          emit_opcode(*cbuf, 0x8B);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq %s, %s\t# spill",
                     Matcher::regName[dst_first],
                     Matcher::regName[src_first]);
#endif
        }
        return 3; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x8B);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movl %s, %s\t# spill",
                     Matcher::regName[dst_first],
                     Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 2
          : 3; // REX
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_W);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WB);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_WR);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WRB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x6E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdq %s, %s\t# spill",
                     Matcher::regName[dst_first],
                     Matcher::regName[src_first]);
#endif
        }
        return 5; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x6E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdl %s, %s\t# spill",
                     Matcher::regName[dst_first],
                     Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 4
          : 5; // REX
      }
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          emit_opcode(*cbuf, 0xF2);
          if (Matcher::_regEncode[src_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x11);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movsd [rsp + #%d], %s\t# spill",
                     offset,
                     Matcher::regName[src_first]);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[src_first] < 8)
           ? 5
           : 6); // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          emit_opcode(*cbuf, 0xF3);
          if (Matcher::_regEncode[src_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x11);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movss [rsp + #%d], %s\t# spill",
                     offset,
                     Matcher::regName[src_first]);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[src_first] < 8)
           ? 5
           : 6); // REX
      }
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          // For movd/movq (0F 7E) the xmm source sits in the ModRM reg
          // field, so the REX extension bits look swapped -- see the
          // "attention!" notes below.
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_W);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WR); // attention!
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_WB); // attention!
            } else {
              emit_opcode(*cbuf, Assembler::REX_WRB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x7E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdq %s, %s\t# spill",
                     Matcher::regName[dst_first],
                     Matcher::regName[src_first]);
#endif
        }
        return 5; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_R); // attention!
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_B); // attention!
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x7E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdl %s, %s\t# spill",
                     Matcher::regName[dst_first],
                     Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 4
          : 5; // REX
      }
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x66 : 0xF2);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("%s %s, %s\t# spill",
                     UseXmmRegToRegMoveAll ? "movapd" : "movsd ",
                     Matcher::regName[dst_first],
                     Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 4
          : 5; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          if (!UseXmmRegToRegMoveAll)
            emit_opcode(*cbuf, 0xF3);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("%s %s, %s\t# spill",
                     UseXmmRegToRegMoveAll ? "movaps" : "movss ",
                     Matcher::regName[dst_first],
                     Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? (UseXmmRegToRegMoveAll ? 3 : 4)
          : (UseXmmRegToRegMoveAll ? 4 : 5); // REX
      }
    }
  }

  // Unreachable: every (src, dst) register-class pairing is handled above.
  assert(0," foo ");
  Unimplemented();

  return 0;
}
#ifndef PRODUCT
// Debug listing: delegate to implementation() with no code buffer.
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const
{
  implementation(NULL, ra_, false, st);
}
#endif
// Code generation: delegate to implementation() with a real code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
{
  implementation(&cbuf, ra_, false, NULL);
}
// Size query: implementation() returns the byte length without emitting.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const
{
  return implementation(NULL, ra_, true, NULL);
}
//=============================================================================
#ifndef PRODUCT
// Debug listing for a padding node of _count nop bytes.
void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const
{
  st->print("nop \t# %d bytes pad for loops and calls", _count);
}
#endif
// Emit _count bytes of nop padding via the macro assembler.
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const
{
  MacroAssembler _masm(&cbuf);
  __ nop(_count);
}
// One byte per nop, so the node's size is exactly _count.
uint MachNopNode::size(PhaseRegAlloc*) const
{
  return _count;
}
//=============================================================================
#ifndef PRODUCT
// Debug listing: leaq of the stack-based lock slot, matching emit() below.
void BoxLockNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("leaq %s, [rsp + #%d]\t# box lock",
            Matcher::regName[reg], offset);
}
#endif
1766 void BoxLockNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
1767 {
1768 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1769 int reg = ra_->get_encode(this);
1770 if (offset >= 0x80) {
1771 emit_opcode(cbuf, reg < 8 ? Assembler::REX_W : Assembler::REX_WR);
1772 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1773 emit_rm(cbuf, 0x2, reg & 7, 0x04);
1774 emit_rm(cbuf, 0x0, 0x04, RSP_enc);
1775 emit_d32(cbuf, offset);
1776 } else {
1777 emit_opcode(cbuf, reg < 8 ? Assembler::REX_W : Assembler::REX_WR);
1778 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1779 emit_rm(cbuf, 0x1, reg & 7, 0x04);
1780 emit_rm(cbuf, 0x0, 0x04, RSP_enc);
1781 emit_d8(cbuf, offset);
1782 }
1783 }
1785 uint BoxLockNode::size(PhaseRegAlloc *ra_) const
1786 {
1787 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1788 return (offset < 0x80) ? 5 : 8; // REX
1789 }
//=============================================================================

// emit call stub, compiled java to interpreter
void emit_java_to_interp(CodeBuffer& cbuf)
{
  // Stub is fixed up when the corresponding call is converted from
  // calling compiled code to calling interpreted code.
  // movq rbx, 0
  // jmp -5 # to self

  address mark = cbuf.inst_mark(); // get mark within main instrs section

  // Note that the code buffer's inst_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL) return; // CodeBuffer::expand failed
  // static stub relocation stores the instruction address of the call
  __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM64);
  // static stub relocation also tags the methodOop in the code-stream.
  __ movoop(rbx, (jobject) NULL); // method is zapped till fixup time
  // This is recognized as unresolved by relocs/nativeinst/ic code
  __ jump(RuntimeAddress(__ pc()));

  // Update current stubs pointer and restore code_end.
  __ end_a_stub();
}
// size of call stub, compiled java to interpreter
uint size_java_to_interp()
{
  return 15; // movq (1+1+8); jmp (1+4)
}
// relocation entries for call stub, compiled java to interpreter
uint reloc_java_to_interp()
{
  return 4; // 3 in emit_java_to_interp + 1 in Java_Static_Call
}
//=============================================================================
#ifndef PRODUCT
// Debug listing of the unverified entry point (inline cache check),
// matching the instruction sequence of MachUEPNode::emit() below.
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  if (UseCompressedOops) {
    // Compressed klass must be loaded/decoded before comparing against rax.
    st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t", oopDesc::klass_offset_in_bytes());
    if (Universe::narrow_oop_shift() != 0) {
      st->print_cr("leaq rscratch1, [r12_heapbase, r, Address::times_8, 0]");
    }
    st->print_cr("cmpq rax, rscratch1\t # Inline cache check");
  } else {
    st->print_cr("cmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t"
                 "# Inline cache check", oopDesc::klass_offset_in_bytes());
  }
  st->print_cr("\tjne SharedRuntime::_ic_miss_stub");
  st->print_cr("\tnop");
  if (!OptoBreakpoint) {
    st->print_cr("\tnop");
  }
}
#endif
// Emit the unverified entry point: compare the receiver's klass (in
// j_rarg0) against the expected klass in rax and jump to the inline-cache
// miss stub on mismatch, then pad with nops so the verified entry point
// that follows can be safely patched.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  MacroAssembler masm(&cbuf);
#ifdef ASSERT
  uint code_size = cbuf.code_size();
#endif
  if (UseCompressedOops) {
    masm.load_klass(rscratch1, j_rarg0);
    masm.cmpptr(rax, rscratch1);
  } else {
    masm.cmpptr(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
  }

  masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  /* WARNING these NOPs are critical so that verified entry point is properly
     aligned for patching by NativeJump::patch_verified_entry() */
  int nops_cnt = 1;
  if (!OptoBreakpoint) {
    // Leave space for int3
    nops_cnt += 1;
  }
  if (UseCompressedOops) {
    // ??? divisible by 4 is aligned?
    nops_cnt += 1;
  }
  masm.nop(nops_cnt);

  // Sanity: MachUEPNode::size() must predict these bytes exactly.
  assert(cbuf.code_size() - code_size == size(ra_),
         "checking code size of inline cache node");
}
1887 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
1888 {
1889 if (UseCompressedOops) {
1890 if (Universe::narrow_oop_shift() == 0) {
1891 return OptoBreakpoint ? 15 : 16;
1892 } else {
1893 return OptoBreakpoint ? 19 : 20;
1894 }
1895 } else {
1896 return OptoBreakpoint ? 11 : 12;
1897 }
1898 }
//=============================================================================
// Byte size reserved for the exception handler stub (a single jump).
uint size_exception_handler()
{
  // NativeCall instruction size is the same as NativeJump.
  // Note that this value is also credited (in output.cpp) to
  // the size of the code section.
  return NativeJump::instruction_size;
}
// Emit exception handler code: a stub that jumps to the shared exception
// blob.  Returns the handler's offset within the code buffer (0 on failure).
int emit_exception_handler(CodeBuffer& cbuf)
{

  // Note that the code buffer's inst_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL) return 0; // CodeBuffer::expand failed
  int offset = __ offset();
  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->instructions_begin()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
// Byte size reserved for the deopt handler stub emitted below.
uint size_deopt_handler()
{
  // three 5 byte instructions
  return 15;
}
// Emit deopt handler code: push the current pc (without clobbering any
// register) and jump to the shared deopt blob.  Returns the handler's
// offset within the code buffer (0 on failure).
int emit_deopt_handler(CodeBuffer& cbuf)
{

  // Note that the code buffer's inst_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());
  if (base == NULL) return 0; // CodeBuffer::expand failed
  int offset = __ offset();
  address the_pc = (address) __ pc();
  Label next;
  // push a "the_pc" on the stack without destroying any registers
  // as they all may be live.

  // push address of "next"
  __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
  __ bind(next);
  // adjust it so it matches "the_pc"
  __ subptr(Address(rsp, 0), __ offset() - offset);
  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
// Place a double constant in the constant area and emit a RIP-relative
// disp32 reference to it, preserving the instruction mark across the
// MacroAssembler's use of the buffer.
static void emit_double_constant(CodeBuffer& cbuf, double x) {
  int mark = cbuf.insts()->mark_off();
  MacroAssembler _masm(&cbuf);
  address double_address = __ double_constant(x);
  cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift
  emit_d32_reloc(cbuf,
                 (int) (double_address - cbuf.code_end() - 4),
                 internal_word_Relocation::spec(double_address),
                 RELOC_DISP32);
}
// Float analogue of emit_double_constant() above.
static void emit_float_constant(CodeBuffer& cbuf, float x) {
  int mark = cbuf.insts()->mark_off();
  MacroAssembler _masm(&cbuf);
  address float_address = __ float_constant(x);
  cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift
  emit_d32_reloc(cbuf,
                 (int) (float_address - cbuf.code_end() - 4),
                 internal_word_Relocation::spec(float_address),
                 RELOC_DISP32);
}
1983 const bool Matcher::match_rule_supported(int opcode) {
1984 if (!has_match_rule(opcode))
1985 return false;
1987 return true; // Per default match rules are supported.
1988 }
// Convert an allocator register number to an FPU stack offset.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  return regnum - 32; // The FP registers are in the second chunk
}
// Originally an UltraSparc-motivated query; returning true just means
// this platform has a fast long-to-float conversion.
const bool Matcher::convL2FSupported(void) {
  return true;
}
// Vector width in bytes
const uint Matcher::vector_width_in_bytes(void) {
  return 8;
}
// Vector ideal reg
const uint Matcher::vector_ideal_reg(void) {
  return Op_RegD;
}
2010 // Is this branch offset short enough that a short branch can be used?
2011 //
2012 // NOTE: If the platform does not provide any short branch variants, then
2013 // this method should return false for offset 0.
2014 bool Matcher::is_short_branch_offset(int rule, int offset) {
2015 // the short version of jmpConUCF2 contains multiple branches,
2016 // making the reach slightly less
2017 if (rule == jmpConUCF2_rule)
2018 return (-126 <= offset && offset <= 125);
2019 return (-128 <= offset && offset <= 127);
2020 }
// Can a 64-bit constant be materialized cheaply enough to treat as simple?
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  //return value == (int) value;  // Cf. storeImmL and immL32.

  // Probably always true, even if a temp register is required.
  return true;
}
// Platform tuning knobs consulted by the platform-independent matcher.

// The ecx parameter to rep stosq for the ClearArray node is in words.
const bool Matcher::init_array_count_is_in_bytes = false;

// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = true;

// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = true; // XXX

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// No-op on amd64
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {}

// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = true;

// Do floats take an entire double register or just half?
const bool Matcher::float_in_double = true;
// Do ints take an entire long register or just half?
const bool Matcher::int_in_long = true;
2066 // Return whether or not this register is ever used as an argument.
2067 // This function is used on startup to build the trampoline stubs in
2068 // generateOptoStub. Registers not mentioned will be killed by the VM
2069 // call in the trampoline, and arguments in those registers not be
2070 // available to the callee.
2071 bool Matcher::can_be_java_arg(int reg)
2072 {
2073 return
2074 reg == RDI_num || reg == RDI_H_num ||
2075 reg == RSI_num || reg == RSI_H_num ||
2076 reg == RDX_num || reg == RDX_H_num ||
2077 reg == RCX_num || reg == RCX_H_num ||
2078 reg == R8_num || reg == R8_H_num ||
2079 reg == R9_num || reg == R9_H_num ||
2080 reg == R12_num || reg == R12_H_num ||
2081 reg == XMM0_num || reg == XMM0_H_num ||
2082 reg == XMM1_num || reg == XMM1_H_num ||
2083 reg == XMM2_num || reg == XMM2_H_num ||
2084 reg == XMM3_num || reg == XMM3_H_num ||
2085 reg == XMM4_num || reg == XMM4_H_num ||
2086 reg == XMM5_num || reg == XMM5_H_num ||
2087 reg == XMM6_num || reg == XMM6_H_num ||
2088 reg == XMM7_num || reg == XMM7_H_num;
2089 }
// A register is a spillable argument exactly when it can carry a Java
// argument (see can_be_java_arg).
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
// x86 idiv/irem place the quotient in rax and the remainder in rdx,
// so the DivMod projections are pinned to those registers.

// Register for DIVI projection of divmodI
RegMask Matcher::divI_proj_mask() {
  return INT_RAX_REG_mask;
}

// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
  return INT_RDX_REG_mask;
}

// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
  return LONG_RAX_REG_mask;
}

// Register for MODL projection of divmodL
RegMask Matcher::modL_proj_mask() {
  return LONG_RDX_REG_mask;
}
2116 static Address build_address(int b, int i, int s, int d) {
2117 Register index = as_Register(i);
2118 Address::ScaleFactor scale = (Address::ScaleFactor)s;
2119 if (index == rsp) {
2120 index = noreg;
2121 scale = Address::no_scale;
2122 }
2123 Address addr(as_Register(b), index, scale, d);
2124 return addr;
2125 }
2127 %}
2129 //----------ENCODING BLOCK-----------------------------------------------------
2130 // This block specifies the encoding classes used by the compiler to
2131 // output byte streams. Encoding classes are parameterized macros
2132 // used by Machine Instruction Nodes in order to generate the bit
2133 // encoding of the instruction. Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, &
2136 // COND_INTER. REG_INTER causes an operand to generate a function
2137 // which returns its register number when queried. CONST_INTER causes
2138 // an operand to generate a function which returns the value of the
2139 // constant when queried. MEMORY_INTER causes an operand to generate
2140 // four functions which return the Base Register, the Index Register,
2141 // the Scale Value, and the Offset Value of the operand when queried.
2142 // COND_INTER causes an operand to generate six functions which return
2143 // the encoding code (ie - encoding bits for the instruction)
2144 // associated with each basic boolean condition for a conditional
2145 // instruction.
2146 //
2147 // Instructions specify two basic values for encoding. Again, a
2148 // function is available to check if the constant displacement is an
2149 // oop. They use the ins_encode keyword to specify their encoding
2150 // classes (which must be a sequence of enc_class names, and their
2151 // parameters, specified in the encoding block), and they use the
2152 // opcode keyword to specify, in order, their primary, secondary, and
2153 // tertiary opcode. Only the opcode sections which a particular
2154 // instruction needs for encoding need to be specified.
2155 encode %{
2156 // Build emit functions for each basic byte or larger field in the
2157 // intel encoding scheme (opcode, rm, sib, immediate), and call them
2158 // from C++ code in the enc_class source block. Emit functions will
2159 // live in the main source block for now. In future, we can
2160 // generalize this by adding a syntax that specifies the sizes of
2161 // fields in an order, so that the adlc can build the emit functions
2162 // automagically
  // Emit primary opcode
  enc_class OpcP
  %{
    emit_opcode(cbuf, $primary);
  %}

  // Emit secondary opcode
  enc_class OpcS
  %{
    emit_opcode(cbuf, $secondary);
  %}

  // Emit tertiary opcode
  enc_class OpcT
  %{
    emit_opcode(cbuf, $tertiary);
  %}

  // Emit opcode directly
  enc_class Opcode(immI d8)
  %{
    emit_opcode(cbuf, $d8$$constant);
  %}

  // Emit size prefix
  enc_class SizePrefix
  %{
    emit_opcode(cbuf, 0x66);
  %}

  // ModRM byte: register mode (0b11), reg field 0, low 3 bits of $reg.
  // Any required REX prefix must be emitted by a separate enc_class.
  enc_class reg(rRegI reg)
  %{
    emit_rm(cbuf, 0x3, 0, $reg$$reg & 7);
  %}

  // ModRM byte for a register-to-register operation.
  enc_class reg_reg(rRegI dst, rRegI src)
  %{
    emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
  %}

  // Immediate opcode byte followed by a register-register ModRM byte.
  enc_class opc_reg_reg(immI opcode, rRegI dst, rRegI src)
  %{
    emit_opcode(cbuf, $opcode$$constant);
    emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
  %}
  // After a float compare: if PF is set (unordered operands), rewrite
  // the saved flags on the stack so the result reads as "less than".
  enc_class cmpfp_fixup()
  %{
    // jnp,s exit
    emit_opcode(cbuf, 0x7B);
    emit_d8(cbuf, 0x0A);

    // pushfq
    emit_opcode(cbuf, 0x9C);

    // andq $0xffffff2b, (%rsp)
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0x81);
    emit_opcode(cbuf, 0x24);
    emit_opcode(cbuf, 0x24);
    emit_d32(cbuf, 0xffffff2b);

    // popfq
    emit_opcode(cbuf, 0x9D);

    // nop (target for branch to avoid branch to branch)
    emit_opcode(cbuf, 0x90);
  %}

  // Materialize a three-way float-compare result in $dst:
  // -1 if unordered or below, 0 if equal, 1 otherwise.
  // Branch displacements vary because encodings for dstenc >= 4
  // need an extra REX prefix byte.
  enc_class cmpfp3(rRegI dst)
  %{
    int dstenc = $dst$$reg;

    // movl $dst, -1
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
    }
    emit_opcode(cbuf, 0xB8 | (dstenc & 7));
    emit_d32(cbuf, -1);

    // jp,s done
    emit_opcode(cbuf, 0x7A);
    emit_d8(cbuf, dstenc < 4 ? 0x08 : 0x0A);

    // jb,s done
    emit_opcode(cbuf, 0x72);
    emit_d8(cbuf, dstenc < 4 ? 0x06 : 0x08);

    // setne $dst
    if (dstenc >= 4) {
      emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_B);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0x95);
    emit_opcode(cbuf, 0xC0 | (dstenc & 7));

    // movzbl $dst, $dst
    if (dstenc >= 4) {
      emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_RB);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0xB6);
    emit_rm(cbuf, 0x3, dstenc & 7, dstenc & 7);
  %}
  enc_class cdql_enc(no_rax_rdx_RegI div)
  %{
    // Full implementation of Java idiv and irem; checks for
    // special case as described in JVM spec., p.243 & p.271.
    //
    //         normal case                           special case
    //
    // input : rax: dividend                         min_int
    //         reg: divisor                          -1
    //
    // output: rax: quotient  (= rax idiv reg)       min_int
    //         rdx: remainder (= rax irem reg)       0
    //
    // Code sequence:
    //
    // 0:   3d 00 00 00 80          cmp    $0x80000000,%eax
    // 5:   75 07/08                jne    e <normal>
    // 7:   33 d2                   xor    %edx,%edx
    // [div >= 8 -> offset + 1]
    // [REX_B]
    // 9:   83 f9 ff                cmp    $0xffffffffffffffff,$div
    // c:   74 03/04                je     11 <done>
    // 000000000000000e <normal>:
    // e:   99                      cltd
    // [div >= 8 -> offset + 1]
    // [REX_B]
    // f:   f7 f9                   idiv   $div
    // 0000000000000011 <done>:

    // cmp $0x80000000,%eax
    emit_opcode(cbuf, 0x3d);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x80);

    // jne e <normal>
    emit_opcode(cbuf, 0x75);
    emit_d8(cbuf, $div$$reg < 8 ? 0x07 : 0x08);

    // xor %edx,%edx
    emit_opcode(cbuf, 0x33);
    emit_d8(cbuf, 0xD2);

    // cmp $0xffffffffffffffff,$div
    if ($div$$reg >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
    }
    emit_opcode(cbuf, 0x83);
    emit_rm(cbuf, 0x3, 0x7, $div$$reg & 7);
    emit_d8(cbuf, 0xFF);

    // je 11 <done>
    emit_opcode(cbuf, 0x74);
    emit_d8(cbuf, $div$$reg < 8 ? 0x03 : 0x04);

    // <normal>
    // cltd
    emit_opcode(cbuf, 0x99);

    // idivl (note: must be emitted by the user of this rule)
    // <done>
  %}

  enc_class cdqq_enc(no_rax_rdx_RegL div)
  %{
    // Full implementation of Java ldiv and lrem; checks for
    // special case as described in JVM spec., p.243 & p.271.
    //
    //         normal case                           special case
    //
    // input : rax: dividend                         min_long
    //         reg: divisor                          -1
    //
    // output: rax: quotient  (= rax idiv reg)       min_long
    //         rdx: remainder (= rax irem reg)       0
    //
    // Code sequence:
    //
    // 0:   48 ba 00 00 00 00 00    mov    $0x8000000000000000,%rdx
    // 7:   00 00 80
    // a:   48 39 d0                cmp    %rdx,%rax
    // d:   75 08                   jne    17 <normal>
    // f:   33 d2                   xor    %edx,%edx
    // 11:  48 83 f9 ff             cmp    $0xffffffffffffffff,$div
    // 15:  74 05                   je     1c <done>
    // 0000000000000017 <normal>:
    // 17:  48 99                   cqto
    // 19:  48 f7 f9                idiv   $div
    // 000000000000001c <done>:

    // mov $0x8000000000000000,%rdx
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0xBA);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x80);

    // cmp %rdx,%rax
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0x39);
    emit_d8(cbuf, 0xD0);

    // jne 17 <normal>
    emit_opcode(cbuf, 0x75);
    emit_d8(cbuf, 0x08);

    // xor %edx,%edx
    emit_opcode(cbuf, 0x33);
    emit_d8(cbuf, 0xD2);

    // cmp $0xffffffffffffffff,$div
    emit_opcode(cbuf, $div$$reg < 8 ? Assembler::REX_W : Assembler::REX_WB);
    emit_opcode(cbuf, 0x83);
    emit_rm(cbuf, 0x3, 0x7, $div$$reg & 7);
    emit_d8(cbuf, 0xFF);

    // je 1c <done>
    emit_opcode(cbuf, 0x74);
    emit_d8(cbuf, 0x05);

    // <normal>
    // cqto
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0x99);

    // idivq (note: must be emitted by the user of this rule)
    // <done>
  %}
  // Opcode enc_class for 8/32 bit immediate instructions with sign-extension
  enc_class OpcSE(immI imm)
  %{
    // Emit primary opcode and set sign-extend bit
    // Check for 8-bit immediate, and set sign extend bit in opcode
    if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
      emit_opcode(cbuf, $primary | 0x02);
    } else {
      // 32-bit immediate
      emit_opcode(cbuf, $primary);
    }
  %}

  // As OpcSE, but also emits REX.B (if needed) and the ModRM byte with
  // $secondary as the opcode-extension field.
  enc_class OpcSErm(rRegI dst, immI imm)
  %{
    // OpcSEr/m
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    // Emit primary opcode and set sign-extend bit
    // Check for 8-bit immediate, and set sign extend bit in opcode
    if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
      emit_opcode(cbuf, $primary | 0x02);
    } else {
      // 32-bit immediate
      emit_opcode(cbuf, $primary);
    }
    // Emit r/m byte with secondary opcode, after primary opcode.
    emit_rm(cbuf, 0x3, $secondary, dstenc);
  %}

  // 64-bit variant of OpcSErm: always emits REX.W (REX.WB for r8-r15).
  enc_class OpcSErm_wide(rRegL dst, immI imm)
  %{
    // OpcSEr/m
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    // Emit primary opcode and set sign-extend bit
    // Check for 8-bit immediate, and set sign extend bit in opcode
    if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
      emit_opcode(cbuf, $primary | 0x02);
    } else {
      // 32-bit immediate
      emit_opcode(cbuf, $primary);
    }
    // Emit r/m byte with secondary opcode, after primary opcode.
    emit_rm(cbuf, 0x3, $secondary, dstenc);
  %}

  // Emit the immediate itself as 8 or 32 bits, matching OpcSE's choice.
  enc_class Con8or32(immI imm)
  %{
    // Check for 8-bit immediate, and set sign extend bit in opcode
    if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
      $$$emit8$imm$$constant;
    } else {
      // 32-bit immediate
      $$$emit32$imm$$constant;
    }
  %}
  // 32-bit pc-relative displacement to a label (0 if not yet bound).
  enc_class Lbl(label labl)
  %{
    // JMP, CALL
    Label* l = $labl$$label;
    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0);
  %}

  // 8-bit pc-relative displacement to a label; asserts it fits.
  enc_class LblShort(label labl)
  %{
    // JMP, CALL
    Label* l = $labl$$label;
    int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
    emit_d8(cbuf, disp);
  %}

  // Opcode byte formed from $secondary plus the register encoding.
  enc_class opc2_reg(rRegI dst)
  %{
    // BSWAP
    emit_cc(cbuf, $secondary, $dst$$reg);
  %}

  // Opcode byte formed from $tertiary plus the register encoding.
  enc_class opc3_reg(rRegI dst)
  %{
    // BSWAP
    emit_cc(cbuf, $tertiary, $dst$$reg);
  %}

  // ModRM byte with $secondary as the opcode extension.
  enc_class reg_opc(rRegI div)
  %{
    // INC, DEC, IDIV, IMOD, JMP indirect, ...
    emit_rm(cbuf, 0x3, $secondary, $div$$reg & 7);
  %}

  // Conditional jump with 32-bit displacement (0F 8x cc rel32).
  enc_class Jcc(cmpOp cop, label labl)
  %{
    // JCC
    Label* l = $labl$$label;
    $$$emit8$primary;
    emit_cc(cbuf, $secondary, $cop$$cmpcode);
    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0);
  %}

  // Conditional jump with 8-bit displacement; asserts it fits.
  enc_class JccShort (cmpOp cop, label labl)
  %{
    // JCC
    Label *l = $labl$$label;
    emit_cc(cbuf, $primary, $cop$$cmpcode);
    int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
    emit_d8(cbuf, disp);
  %}

  // CMOVcc opcode bytes; the ModRM byte is emitted elsewhere.
  enc_class enc_cmov(cmpOp cop)
  %{
    // CMOV
    $$$emit8$primary;
    emit_cc(cbuf, $secondary, $cop$$cmpcode);
  %}
  // There is no CMOV for XMM registers, so emulate it: branch over a
  // register-register float move with the inverted condition.  The
  // branch displacement is the move's length, which depends on the
  // REX prefix and on UseXmmRegToRegMoveAll (movaps vs movss).
  enc_class enc_cmovf_branch(cmpOp cop, regF dst, regF src)
  %{
    // Invert sense of branch from sense of cmov
    emit_cc(cbuf, 0x70, $cop$$cmpcode ^ 1);
    emit_d8(cbuf, ($dst$$reg < 8 && $src$$reg < 8)
                  ? (UseXmmRegToRegMoveAll ? 3 : 4)
                  : (UseXmmRegToRegMoveAll ? 4 : 5) ); // REX
    // UseXmmRegToRegMoveAll ? movaps(dst, src) : movss(dst, src)
    if (!UseXmmRegToRegMoveAll) emit_opcode(cbuf, 0xF3);
    if ($dst$$reg < 8) {
      if ($src$$reg >= 8) {
        emit_opcode(cbuf, Assembler::REX_B);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
      }
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
    emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
  %}

  // Double-precision variant of enc_cmovf_branch; both movapd and
  // movsd carry a mandatory prefix, so the skip is 4 or 5 bytes.
  enc_class enc_cmovd_branch(cmpOp cop, regD dst, regD src)
  %{
    // Invert sense of branch from sense of cmov
    emit_cc(cbuf, 0x70, $cop$$cmpcode ^ 1);
    emit_d8(cbuf, $dst$$reg < 8 && $src$$reg < 8 ? 4 : 5); // REX

    // UseXmmRegToRegMoveAll ? movapd(dst, src) : movsd(dst, src)
    emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x66 : 0xF2);
    if ($dst$$reg < 8) {
      if ($src$$reg >= 8) {
        emit_opcode(cbuf, Assembler::REX_B);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
      }
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
    emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
  %}
2579 enc_class enc_PartialSubtypeCheck()
2580 %{
2581 Register Rrdi = as_Register(RDI_enc); // result register
2582 Register Rrax = as_Register(RAX_enc); // super class
2583 Register Rrcx = as_Register(RCX_enc); // killed
2584 Register Rrsi = as_Register(RSI_enc); // sub class
2585 Label miss;
2586 const bool set_cond_codes = true;
2588 MacroAssembler _masm(&cbuf);
2589 __ check_klass_subtype_slow_path(Rrsi, Rrax, Rrcx, Rrdi,
2590 NULL, &miss,
2591 /*set_cond_codes:*/ true);
2592 if ($primary) {
2593 __ xorptr(Rrdi, Rrdi);
2594 }
2595 __ bind(miss);
2596 %}
  // Direct call into the interpreter entry point.
  enc_class Java_To_Interpreter(method meth)
  %{
    // CALL Java_To_Interpreter
    // This is the instruction starting address for relocation info.
    cbuf.set_inst_mark();
    $$$emit8$primary;
    // CALL directly to the runtime
    emit_d32_reloc(cbuf,
                   (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
                   runtime_call_Relocation::spec(),
                   RELOC_DISP32);
  %}

  enc_class Java_Static_Call(method meth)
  %{
    // JAVA STATIC CALL
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to
    // determine who we intended to call.
    cbuf.set_inst_mark();
    $$$emit8$primary;

    if (!_method) {
      // Runtime (non-Java) target: plain runtime-call relocation.
      emit_d32_reloc(cbuf,
                     (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
                     runtime_call_Relocation::spec(),
                     RELOC_DISP32);
    } else if (_optimized_virtual) {
      // Statically-bound virtual call.
      emit_d32_reloc(cbuf,
                     (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
                     opt_virtual_call_Relocation::spec(),
                     RELOC_DISP32);
    } else {
      emit_d32_reloc(cbuf,
                     (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
                     static_call_Relocation::spec(),
                     RELOC_DISP32);
    }
    if (_method) {
      // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}

  enc_class Java_Dynamic_Call(method meth)
  %{
    // JAVA DYNAMIC CALL
    // !!!!!
    // Generate "movq rax, -1", placeholder instruction to load oop-info
    // emit_call_dynamic_prologue( cbuf );
    cbuf.set_inst_mark();

    // movq rax, -1
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0xB8 | RAX_enc);
    emit_d64_reloc(cbuf,
                   (int64_t) Universe::non_oop_word(),
                   oop_Relocation::spec_for_immediate(), RELOC_IMM64);
    address virtual_call_oop_addr = cbuf.inst_mark();
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    cbuf.set_inst_mark();
    $$$emit8$primary;
    emit_d32_reloc(cbuf,
                   (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
                   virtual_call_Relocation::spec(virtual_call_oop_addr),
                   RELOC_DISP32);
  %}
  // Indirect call through the method's from_compiled entry in rax.
  enc_class Java_Compiled_Call(method meth)
  %{
    // JAVA COMPILED CALL
    int disp = in_bytes(methodOopDesc:: from_compiled_offset());

    // XXX XXX offset is 128 is 1.5 NON-PRODUCT !!!
    // assert(-0x80 <= disp && disp < 0x80, "compiled_code_offset isn't small");

    // callq *disp(%rax)
    cbuf.set_inst_mark();
    $$$emit8$primary;
    if (disp < 0x80) {
      emit_rm(cbuf, 0x01, $secondary, RAX_enc); // R/M byte
      emit_d8(cbuf, disp); // Displacement
    } else {
      emit_rm(cbuf, 0x02, $secondary, RAX_enc); // R/M byte
      emit_d32(cbuf, disp); // Displacement
    }
  %}

  // Shift by 8-bit immediate: optional REX.B, opcode, ModRM with
  // $secondary as opcode extension, then the shift count.
  enc_class reg_opc_imm(rRegI dst, immI8 shift)
  %{
    // SAL, SAR, SHR
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    $$$emit8$primary;
    emit_rm(cbuf, 0x3, $secondary, dstenc);
    $$$emit8$shift$$constant;
  %}

  // 64-bit variant: always emits REX.W (REX.WB for r8-r15).
  enc_class reg_opc_imm_wide(rRegL dst, immI8 shift)
  %{
    // SAL, SAR, SHR
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    $$$emit8$primary;
    emit_rm(cbuf, 0x3, $secondary, dstenc);
    $$$emit8$shift$$constant;
  %}
  // movl $dst, imm32 (B8+r id)
  enc_class load_immI(rRegI dst, immI src)
  %{
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    $$$emit32$src$$constant;
  %}

  // movq $dst, imm64 (REX.W B8+r io)
  enc_class load_immL(rRegL dst, immL src)
  %{
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    emit_d64(cbuf, $src$$constant);
  %}

  enc_class load_immUL32(rRegL dst, immUL32 src)
  %{
    // same as load_immI, but this time we care about zeroes in the high word
    // (movl implicitly zero-extends into the upper 32 bits)
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    $$$emit32$src$$constant;
  %}

  // movq $dst, imm32 sign-extended (REX.W C7 /0 id)
  enc_class load_immL32(rRegL dst, immL32 src)
  %{
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xC7);
    emit_rm(cbuf, 0x03, 0x00, dstenc);
    $$$emit32$src$$constant;
  %}

  enc_class load_immP31(rRegP dst, immP32 src)
  %{
    // same as load_immI, but this time we care about zeroes in the high word
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    $$$emit32$src$$constant;
  %}

  // movq $dst, imm64 pointer; oop constants get an IMM64 relocation.
  enc_class load_immP(rRegP dst, immP src)
  %{
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    // This next line should be generated from ADLC
    if ($src->constant_is_oop()) {
      emit_d64_reloc(cbuf, $src$$constant, relocInfo::oop_type, RELOC_IMM64);
    } else {
      emit_d64(cbuf, $src$$constant);
    }
  %}

  // RIP-relative ModRM byte plus the float constant in the constant area.
  enc_class load_immF(regF dst, immF con)
  %{
    // XXX reg_mem doesn't support RIP-relative addressing yet
    emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
    emit_float_constant(cbuf, $con$$constant);
  %}

  // RIP-relative ModRM byte plus the double constant in the constant area.
  enc_class load_immD(regD dst, immD con)
  %{
    // XXX reg_mem doesn't support RIP-relative addressing yet
    emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
    emit_double_constant(cbuf, $con$$constant);
  %}

  enc_class load_conF (regF dst, immF con) %{ // Load float constant
    // movss $dst, [rip+disp32] (F3 0F 10 /r)
    emit_opcode(cbuf, 0xF3);
    if ($dst$$reg >= 8) {
      emit_opcode(cbuf, Assembler::REX_R);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0x10);
    emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
    emit_float_constant(cbuf, $con$$constant);
  %}

  enc_class load_conD (regD dst, immD con) %{ // Load double constant
    // UseXmmLoadAndClearUpper ? movsd(dst, con) : movlpd(dst, con)
    emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
    if ($dst$$reg >= 8) {
      emit_opcode(cbuf, Assembler::REX_R);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12);
    emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
    emit_double_constant(cbuf, $con$$constant);
  %}
  // Encode a reg-reg copy.  If it is useless, then empty encoding.
  enc_class enc_copy(rRegI dst, rRegI src)
  %{
    encode_copy(cbuf, $dst$$reg, $src$$reg);
  %}

  // Encode xmm reg-reg copy.  If it is useless, then empty encoding.
  enc_class enc_CopyXD( RegD dst, RegD src ) %{
    encode_CopyXD( cbuf, $dst$$reg, $src$$reg );
  %}

  // 32-bit reg-reg move (8B /r) emitted even when dst == src.
  enc_class enc_copy_always(rRegI dst, rRegI src)
  %{
    int srcenc = $src$$reg;
    int dstenc = $dst$$reg;

    // Emit whichever REX prefix the operand pair requires.
    if (dstenc < 8) {
      if (srcenc >= 8) {
        emit_opcode(cbuf, Assembler::REX_B);
        srcenc -= 8;
      }
    } else {
      if (srcenc < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
        srcenc -= 8;
      }
      dstenc -= 8;
    }

    emit_opcode(cbuf, 0x8B);
    emit_rm(cbuf, 0x3, dstenc, srcenc);
  %}

  // 64-bit reg-reg move (REX.W 8B /r); elided when dst == src.
  enc_class enc_copy_wide(rRegL dst, rRegL src)
  %{
    int srcenc = $src$$reg;
    int dstenc = $dst$$reg;

    if (dstenc != srcenc) {
      if (dstenc < 8) {
        if (srcenc < 8) {
          emit_opcode(cbuf, Assembler::REX_W);
        } else {
          emit_opcode(cbuf, Assembler::REX_WB);
          srcenc -= 8;
        }
      } else {
        if (srcenc < 8) {
          emit_opcode(cbuf, Assembler::REX_WR);
        } else {
          emit_opcode(cbuf, Assembler::REX_WRB);
          srcenc -= 8;
        }
        dstenc -= 8;
      }
      emit_opcode(cbuf, 0x8B);
      emit_rm(cbuf, 0x3, dstenc, srcenc);
    }
  %}
  // Emit a 32-bit immediate operand.
  enc_class Con32(immI src)
  %{
    // Output immediate
    $$$emit32$src$$constant;
  %}
2899 enc_class Con64(immL src)
2900 %{
2901 // Output immediate
2902 emit_d64($src$$constant);
2903 %}
  enc_class Con32F_as_bits(immF src)
  %{
    // Output Float immediate bits
    jfloat jf = $src$$constant;
    jint jf_as_bits = jint_cast(jf);
    emit_d32(cbuf, jf_as_bits);
  %}

  // Emit a 16-bit immediate operand.
  enc_class Con16(immI src)
  %{
    // Output immediate
    $$$emit16$src$$constant;
  %}

  // How is this different from Con32???  XXX
  enc_class Con_d32(immI src)
  %{
    emit_d32(cbuf,$src$$constant);
  %}

  enc_class conmemref (rRegP t1) %{    // Con32(storeImmI)
    // Output immediate memory reference: [t1 + disp32], disp = 0
    emit_rm(cbuf, 0x00, $t1$$reg, 0x05 );
    emit_d32(cbuf, 0x00);
  %}
  // Jump through a jump table: dest <- table base, then jmp [dest + switch].
  enc_class jump_enc(rRegL switch_val, rRegI dest) %{
    MacroAssembler masm(&cbuf);

    Register switch_reg = as_Register($switch_val$$reg);
    Register dest_reg   = as_Register($dest$$reg);
    address table_base  = masm.address_table_constant(_index2label);

    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    // to do that and the compiler is using that register as one it can allocate.
    // So we build it all by hand.
    // Address index(noreg, switch_reg, Address::times_1);
    // ArrayAddress dispatch(table, index);

    Address dispatch(dest_reg, switch_reg, Address::times_1);

    masm.lea(dest_reg, InternalAddress(table_base));
    masm.jmp(dispatch);
  %}

  // As jump_enc, with an explicit scale and constant offset on the index.
  enc_class jump_enc_addr(rRegL switch_val, immI2 shift, immL32 offset, rRegI dest) %{
    MacroAssembler masm(&cbuf);

    Register switch_reg = as_Register($switch_val$$reg);
    Register dest_reg   = as_Register($dest$$reg);
    address table_base  = masm.address_table_constant(_index2label);

    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    // to do that and the compiler is using that register as one it can allocate.
    // So we build it all by hand.
    // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant);
    // ArrayAddress dispatch(table, index);

    Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant);

    masm.lea(dest_reg, InternalAddress(table_base));
    masm.jmp(dispatch);
  %}

  // As jump_enc, with an explicit scale on the index (no offset).
  enc_class jump_enc_offset(rRegL switch_val, immI2 shift, rRegI dest) %{
    MacroAssembler masm(&cbuf);

    Register switch_reg = as_Register($switch_val$$reg);
    Register dest_reg   = as_Register($dest$$reg);
    address table_base  = masm.address_table_constant(_index2label);

    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    // to do that and the compiler is using that register as one it can allocate.
    // So we build it all by hand.
    // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant);
    // ArrayAddress dispatch(table, index);

    Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant);
    masm.lea(dest_reg, InternalAddress(table_base));
    masm.jmp(dispatch);

  %}

  // Emit the LOCK prefix only on multiprocessor systems.
  enc_class lock_prefix()
  %{
    if (os::is_MP()) {
      emit_opcode(cbuf, 0xF0); // lock
    }
  %}
  // REX prefix for a memory operand: B covers an extended base (r8-r15),
  // X an extended index.  No prefix if neither is extended.
  enc_class REX_mem(memory mem)
  %{
    if ($mem$$base >= 8) {
      if ($mem$$index < 8) {
        emit_opcode(cbuf, Assembler::REX_B);
      } else {
        emit_opcode(cbuf, Assembler::REX_XB);
      }
    } else {
      if ($mem$$index >= 8) {
        emit_opcode(cbuf, Assembler::REX_X);
      }
    }
  %}

  // 64-bit variant of REX_mem: always emits a prefix carrying REX.W.
  enc_class REX_mem_wide(memory mem)
  %{
    if ($mem$$base >= 8) {
      if ($mem$$index < 8) {
        emit_opcode(cbuf, Assembler::REX_WB);
      } else {
        emit_opcode(cbuf, Assembler::REX_WXB);
      }
    } else {
      if ($mem$$index < 8) {
        emit_opcode(cbuf, Assembler::REX_W);
      } else {
        emit_opcode(cbuf, Assembler::REX_WX);
      }
    }
  %}

  // for byte regs
  // Registers 4-7 (spl/bpl/sil/dil) need a bare REX to be addressed as
  // byte registers (otherwise they would encode ah/ch/dh/bh).
  enc_class REX_breg(rRegI reg)
  %{
    if ($reg$$reg >= 4) {
      emit_opcode(cbuf, $reg$$reg < 8 ? Assembler::REX : Assembler::REX_B);
    }
  %}

  // for byte regs
  enc_class REX_reg_breg(rRegI dst, rRegI src)
  %{
    if ($dst$$reg < 8) {
      if ($src$$reg >= 4) {
        emit_opcode(cbuf, $src$$reg < 8 ? Assembler::REX : Assembler::REX_B);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
      }
    }
  %}

  // for byte regs
  enc_class REX_breg_mem(rRegI reg, memory mem)
  %{
    if ($reg$$reg < 8) {
      if ($mem$$base < 8) {
        if ($mem$$index >= 8) {
          emit_opcode(cbuf, Assembler::REX_X);
        } else if ($reg$$reg >= 4) {
          // Bare REX so reg 4-7 addresses a byte register.
          emit_opcode(cbuf, Assembler::REX);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_B);
        } else {
          emit_opcode(cbuf, Assembler::REX_XB);
        }
      }
    } else {
      if ($mem$$base < 8) {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_R);
        } else {
          emit_opcode(cbuf, Assembler::REX_RX);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_RB);
        } else {
          emit_opcode(cbuf, Assembler::REX_RXB);
        }
      }
    }
  %}
  // REX.B for an extended (r8-r15) register operand; nothing otherwise.
  enc_class REX_reg(rRegI reg)
  %{
    if ($reg$$reg >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
    }
  %}

  // 64-bit operand: always a prefix carrying REX.W.
  enc_class REX_reg_wide(rRegI reg)
  %{
    if ($reg$$reg < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
    }
  %}

  // REX prefix for a reg-reg pair: R covers an extended dst (ModRM.reg),
  // B an extended src (ModRM.rm).
  enc_class REX_reg_reg(rRegI dst, rRegI src)
  %{
    if ($dst$$reg < 8) {
      if ($src$$reg >= 8) {
        emit_opcode(cbuf, Assembler::REX_B);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
      }
    }
  %}

  // 64-bit variant of REX_reg_reg: always carries REX.W.
  enc_class REX_reg_reg_wide(rRegI dst, rRegI src)
  %{
    if ($dst$$reg < 8) {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_W);
      } else {
        emit_opcode(cbuf, Assembler::REX_WB);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_WR);
      } else {
        emit_opcode(cbuf, Assembler::REX_WRB);
      }
    }
  %}

  // REX prefix for reg + memory operand: R for extended reg, B for
  // extended base, X for extended index; emitted only when needed.
  enc_class REX_reg_mem(rRegI reg, memory mem)
  %{
    if ($reg$$reg < 8) {
      if ($mem$$base < 8) {
        if ($mem$$index >= 8) {
          emit_opcode(cbuf, Assembler::REX_X);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_B);
        } else {
          emit_opcode(cbuf, Assembler::REX_XB);
        }
      }
    } else {
      if ($mem$$base < 8) {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_R);
        } else {
          emit_opcode(cbuf, Assembler::REX_RX);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_RB);
        } else {
          emit_opcode(cbuf, Assembler::REX_RXB);
        }
      }
    }
  %}

  // 64-bit variant of REX_reg_mem: always carries REX.W.
  enc_class REX_reg_mem_wide(rRegL reg, memory mem)
  %{
    if ($reg$$reg < 8) {
      if ($mem$$base < 8) {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_W);
        } else {
          emit_opcode(cbuf, Assembler::REX_WX);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_WB);
        } else {
          emit_opcode(cbuf, Assembler::REX_WXB);
        }
      }
    } else {
      if ($mem$$base < 8) {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_WR);
        } else {
          emit_opcode(cbuf, Assembler::REX_WRX);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_WRB);
        } else {
          emit_opcode(cbuf, Assembler::REX_WRXB);
        }
      }
    }
  %}
  // ModRM/SIB/displacement for a register + memory operand.
  enc_class reg_mem(rRegI ereg, memory mem)
  %{
    // High registers handled in encode_RegMem
    int reg = $ereg$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    bool disp_is_oop = $mem->disp_is_oop();

    encode_RegMem(cbuf, reg, base, index, scale, disp, disp_is_oop);
  %}

  // ModRM/SIB/displacement where the reg field is an opcode extension.
  enc_class RM_opc_mem(immI rm_opcode, memory mem)
  %{
    int rm_byte_opcode = $rm_opcode$$constant;

    // High registers handled in encode_RegMem
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int displace = $mem$$disp;

    bool disp_is_oop = $mem->disp_is_oop(); // disp-as-oop when
                                            // working with static
                                            // globals
    encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace,
                  disp_is_oop);
  %}

  // LEA-style addressing: base register plus constant displacement.
  enc_class reg_lea(rRegI dst, rRegI src0, immI src1)
  %{
    int reg_encoding = $dst$$reg;
    int base = $src0$$reg;      // 0xFFFFFFFF indicates no base
    int index = 0x04;           // 0x04 indicates no index
    int scale = 0x00;           // 0x00 indicates no scale
    int displace = $src1$$constant; // 0x00 indicates no displacement
    bool disp_is_oop = false;
    encode_RegMem(cbuf, reg_encoding, base, index, scale, displace,
                  disp_is_oop);
  %}

  // negl $dst (F7 /3)
  enc_class neg_reg(rRegI dst)
  %{
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    // NEG $dst
    emit_opcode(cbuf, 0xF7);
    emit_rm(cbuf, 0x3, 0x03, dstenc);
  %}

  // negq $dst (REX.W F7 /3)
  enc_class neg_reg_wide(rRegI dst)
  %{
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    // NEG $dst
    emit_opcode(cbuf, 0xF7);
    emit_rm(cbuf, 0x3, 0x03, dstenc);
  %}
3265 enc_class setLT_reg(rRegI dst)
3266 %{
3267 int dstenc = $dst$$reg;
3268 if (dstenc >= 8) {
3269 emit_opcode(cbuf, Assembler::REX_B);
3270 dstenc -= 8;
3271 } else if (dstenc >= 4) {
3272 emit_opcode(cbuf, Assembler::REX);
3273 }
3274 // SETLT $dst
3275 emit_opcode(cbuf, 0x0F);
3276 emit_opcode(cbuf, 0x9C);
3277 emit_rm(cbuf, 0x3, 0x0, dstenc);
3278 %}
3280 enc_class setNZ_reg(rRegI dst)
3281 %{
3282 int dstenc = $dst$$reg;
3283 if (dstenc >= 8) {
3284 emit_opcode(cbuf, Assembler::REX_B);
3285 dstenc -= 8;
3286 } else if (dstenc >= 4) {
3287 emit_opcode(cbuf, Assembler::REX);
3288 }
3289 // SETNZ $dst
3290 emit_opcode(cbuf, 0x0F);
3291 emit_opcode(cbuf, 0x95);
3292 emit_rm(cbuf, 0x3, 0x0, dstenc);
3293 %}
  // Conditional-add on less-than (cadd_cmpLT): computes
  //   $p -= $q; $tmp = ($p < $q before sub) ? -1 : 0; $p += $tmp & $y
  // using SUB/SBB/AND/ADD.  $tmp is constrained to rcx (rcx_RegI), so
  // its encoding is < 8 and it never needs a REX bit or masking below.
  enc_class enc_cmpLTP(no_rcx_RegI p, no_rcx_RegI q, no_rcx_RegI y,
                       rcx_RegI tmp)
  %{
    // cadd_cmpLT
    int tmpReg = $tmp$$reg;

    int penc = $p$$reg;
    int qenc = $q$$reg;
    int yenc = $y$$reg;

    // subl $p,$q
    if (penc < 8) {
      if (qenc >= 8) {
        emit_opcode(cbuf, Assembler::REX_B);
      }
    } else {
      if (qenc < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
      }
    }
    emit_opcode(cbuf, 0x2B);
    emit_rm(cbuf, 0x3, penc & 7, qenc & 7);

    // sbbl $tmp, $tmp
    // Materializes 0 or -1 from the borrow of the subtract above.
    emit_opcode(cbuf, 0x1B);
    emit_rm(cbuf, 0x3, tmpReg, tmpReg);

    // andl $tmp, $y
    if (yenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
    }
    emit_opcode(cbuf, 0x23);
    emit_rm(cbuf, 0x3, tmpReg, yenc & 7);

    // addl $p,$tmp
    if (penc >= 8) {
      emit_opcode(cbuf, Assembler::REX_R);
    }
    emit_opcode(cbuf, 0x03);
    emit_rm(cbuf, 0x3, penc & 7, tmpReg);
  %}
  // Compare the longs and set -1, 0, or 1 into dst
  enc_class cmpl3_flag(rRegL src1, rRegL src2, rRegI dst)
  %{
    int src1enc = $src1$$reg;
    int src2enc = $src2$$reg;
    int dstenc = $dst$$reg;

    // cmpq $src1, $src2
    if (src1enc < 8) {
      if (src2enc < 8) {
        emit_opcode(cbuf, Assembler::REX_W);
      } else {
        emit_opcode(cbuf, Assembler::REX_WB);
      }
    } else {
      if (src2enc < 8) {
        emit_opcode(cbuf, Assembler::REX_WR);
      } else {
        emit_opcode(cbuf, Assembler::REX_WRB);
      }
    }
    emit_opcode(cbuf, 0x3B);
    emit_rm(cbuf, 0x3, src1enc & 7, src2enc & 7);

    // movl $dst, -1
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
    }
    emit_opcode(cbuf, 0xB8 | (dstenc & 7));
    emit_d32(cbuf, -1);

    // jl,s done
    // The short-jump distance skips the setne and movzbl below:
    // 3+3 bytes when $dst needs no REX prefixes (dstenc < 4),
    // 4+4 bytes when each instruction carries a one-byte REX prefix.
    emit_opcode(cbuf, 0x7C);
    emit_d8(cbuf, dstenc < 4 ? 0x06 : 0x08);

    // setne $dst
    if (dstenc >= 4) {
      emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_B);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0x95);
    emit_opcode(cbuf, 0xC0 | (dstenc & 7));

    // movzbl $dst, $dst
    if (dstenc >= 4) {
      emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_RB);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0xB6);
    emit_rm(cbuf, 0x3, dstenc & 7, dstenc & 7);
  %}
  // Move an x87 FP-stack result into XMM register $dst via a scratch
  // stack slot: FSTP [rsp]; movsd/movlpd $dst, [rsp]; add rsp, 8.
  // The caller is expected to have reserved the 8-byte slot (released
  // by the trailing add) -- see Push_SrcXD for the matching reserve.
  enc_class Push_ResultXD(regD dst) %{
    int dstenc = $dst$$reg;

    store_to_stackslot( cbuf, 0xDD, 0x03, 0 ); //FSTP [RSP]

    // UseXmmLoadAndClearUpper ? movsd dst,[rsp] : movlpd dst,[rsp]
    emit_opcode  (cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_R);
    }
    emit_opcode(cbuf, 0x0F );
    emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12 );
    encode_RegMem(cbuf, dstenc, RSP_enc, 0x4, 0, 0, false);

    // add rsp,8
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf,0x83);
    emit_rm(cbuf,0x3, 0x0, RSP_enc);
    emit_d8(cbuf,0x08);
  %}
  // Push XMM register $src onto the x87 FP stack via a scratch stack
  // slot: sub rsp, 8; movsd [rsp], $src; fld qword [rsp].  Note that
  // the 8-byte slot is left allocated -- it is reclaimed later (e.g. by
  // Push_ResultXD's trailing add rsp, 8).
  enc_class Push_SrcXD(regD src) %{
    int srcenc = $src$$reg;

    // subq rsp,#8
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0x83);
    emit_rm(cbuf, 0x3, 0x5, RSP_enc);
    emit_d8(cbuf, 0x8);

    // movsd [rsp],src
    emit_opcode(cbuf, 0xF2);
    if (srcenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_R);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0x11);
    encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false);

    // fldd [rsp]
    emit_opcode(cbuf, 0x66);
    emit_opcode(cbuf, 0xDD);
    encode_RegMem(cbuf, 0x0, RSP_enc, 0x4, 0, 0, false);
  %}
  // Load a 64-bit quantity from memory into XMM register $dst (MOVQ).
  enc_class movq_ld(regD dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ movq($dst$$XMMRegister, $mem$$Address);
  %}
  // Store a 64-bit quantity from XMM register $src to memory (MOVQ).
  enc_class movq_st(memory mem, regD src) %{
    MacroAssembler _masm(&cbuf);
    __ movq($mem$$Address, $src$$XMMRegister);
  %}
  // Replicate the low byte of $src across the low 8 bytes of $dst:
  // copy src to dst, punpcklbw doubles each of the low 8 bytes, then
  // pshuflw 0x00 broadcasts the lowest word into all four low words.
  enc_class pshufd_8x8(regF dst, regF src) %{
    MacroAssembler _masm(&cbuf);

    encode_CopyXD(cbuf, $dst$$reg, $src$$reg);
    __ punpcklbw(as_XMMRegister($dst$$reg), as_XMMRegister($dst$$reg));
    __ pshuflw(as_XMMRegister($dst$$reg), as_XMMRegister($dst$$reg), 0x00);
  %}
  // Broadcast the low 16-bit word of $src into the four low words of
  // $dst (pshuflw with shuffle-control 0x00).
  enc_class pshufd_4x16(regF dst, regF src) %{
    MacroAssembler _masm(&cbuf);

    __ pshuflw(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg), 0x00);
  %}
  // Shuffle the four dwords of $src into $dst per the immediate $mode.
  enc_class pshufd(regD dst, regD src, int mode) %{
    MacroAssembler _masm(&cbuf);

    __ pshufd(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg), $mode);
  %}
  // Bitwise XOR of XMM registers: $dst ^= $src (PXOR).
  enc_class pxor(regD dst, regD src) %{
    MacroAssembler _masm(&cbuf);

    __ pxor(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg));
  %}
  // Move a 32-bit GPR $src into XMM register $dst (MOVD).
  enc_class mov_i2x(regD dst, rRegI src) %{
    MacroAssembler _masm(&cbuf);

    __ movdl(as_XMMRegister($dst$$reg), as_Register($src$$reg));
  %}
3480 // obj: object to lock
3481 // box: box address (header location) -- killed
3482 // tmp: rax -- killed
3483 // scr: rbx -- killed
3484 //
3485 // What follows is a direct transliteration of fast_lock() and fast_unlock()
3486 // from i486.ad. See that file for comments.
3487 // TODO: where possible switch from movq (r, 0) to movl(r,0) and
3488 // use the shorter encoding. (Movl clears the high-order 32-bits).
  // Fast path for monitorenter.  On exit the condition codes tell the
  // caller whether the lock was acquired: ZF set on success, clear when
  // the slow path must be taken.  The EmitSync flag bits select
  // diagnostic/alternate code shapes (bit 0: always-slow; bit 1: simple
  // stack-lock CAS only; default: full stack-lock + inflated-monitor
  // fast path).
  enc_class Fast_Lock(rRegP obj, rRegP box, rax_RegI tmp, rRegP scr)
  %{
    Register objReg = as_Register((int)$obj$$reg);
    Register boxReg = as_Register((int)$box$$reg);
    Register tmpReg = as_Register($tmp$$reg);
    Register scrReg = as_Register($scr$$reg);
    MacroAssembler masm(&cbuf);

    // Verify uniqueness of register assignments -- necessary but not sufficient
    assert (objReg != boxReg && objReg != tmpReg &&
            objReg != scrReg && tmpReg != scrReg, "invariant") ;

    if (_counters != NULL) {
      masm.atomic_incl(ExternalAddress((address) _counters->total_entry_count_addr()));
    }
    if (EmitSync & 1) {
      // Set ZF=0 unconditionally so every entry takes the slow path.
      // Without cast to int32_t a movptr will destroy r10 which is typically obj
      masm.movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
      masm.cmpptr(rsp, (int32_t)NULL_WORD) ;
    } else
    if (EmitSync & 2) {
      Label DONE_LABEL;
      if (UseBiasedLocking) {
        // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
        masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
      }
      // QQQ was movl...
      masm.movptr(tmpReg, 0x1);
      masm.orptr(tmpReg, Address(objReg, 0));
      masm.movptr(Address(boxReg, 0), tmpReg);
      if (os::is_MP()) {
        masm.lock();
      }
      masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
      masm.jcc(Assembler::equal, DONE_LABEL);

      // Recursive locking
      masm.subptr(tmpReg, rsp);
      masm.andptr(tmpReg, 7 - os::vm_page_size());
      masm.movptr(Address(boxReg, 0), tmpReg);

      masm.bind(DONE_LABEL);
      masm.nop(); // avoid branch to branch
    } else {
      Label DONE_LABEL, IsInflated, Egress;

      masm.movptr(tmpReg, Address(objReg, 0)) ;
      masm.testl (tmpReg, 0x02) ; // inflated vs stack-locked|neutral|biased
      masm.jcc   (Assembler::notZero, IsInflated) ;

      // it's stack-locked, biased or neutral
      // TODO: optimize markword triage order to reduce the number of
      // conditional branches in the most common cases.
      // Beware -- there's a subtle invariant that fetch of the markword
      // at [FETCH], below, will never observe a biased encoding (*101b).
      // If this invariant is not held we'll suffer exclusion (safety) failure.

      if (UseBiasedLocking && !UseOptoBiasInlining) {
        masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, _counters);
        masm.movptr(tmpReg, Address(objReg, 0)) ;        // [FETCH]
      }

      // was q will it destroy high?
      masm.orl (tmpReg, 1) ;
      masm.movptr(Address(boxReg, 0), tmpReg) ;
      if (os::is_MP()) { masm.lock(); }
      masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
      if (_counters != NULL) {
        masm.cond_inc32(Assembler::equal,
                        ExternalAddress((address) _counters->fast_path_entry_count_addr()));
      }
      masm.jcc (Assembler::equal, DONE_LABEL);

      // Recursive locking
      masm.subptr(tmpReg, rsp);
      masm.andptr(tmpReg, 7 - os::vm_page_size());
      masm.movptr(Address(boxReg, 0), tmpReg);
      if (_counters != NULL) {
        masm.cond_inc32(Assembler::equal,
                        ExternalAddress((address) _counters->fast_path_entry_count_addr()));
      }
      masm.jmp  (DONE_LABEL) ;

      masm.bind (IsInflated) ;
      // It's inflated

      // TODO: someday avoid the ST-before-CAS penalty by
      // relocating (deferring) the following ST.
      // We should also think about trying a CAS without having
      // fetched _owner.  If the CAS is successful we may
      // avoid an RTO->RTS upgrade on the $line.
      // Without cast to int32_t a movptr will destroy r10 which is typically obj
      masm.movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;

      masm.mov    (boxReg, tmpReg) ;
      masm.movptr (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
      masm.testptr(tmpReg, tmpReg) ;
      masm.jcc    (Assembler::notZero, DONE_LABEL) ;

      // It's inflated and appears unlocked
      if (os::is_MP()) { masm.lock(); }
      masm.cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
      // Intentional fall-through into DONE_LABEL ...

      masm.bind  (DONE_LABEL) ;
      masm.nop   () ;                 // avoid jmp to jmp
    }
  %}
3600 // obj: object to unlock
3601 // box: box address (displaced header location), killed
3602 // RBX: killed tmp; cannot be obj nor box
  // Fast path for monitorexit.  On exit, ZF set means the unlock
  // succeeded on the fast path; ZF clear means the caller must take the
  // slow path.  box is constrained to rax (used as the cmpxchg
  // comparand); tmp is killed.  EmitSync bits select diagnostic/
  // alternate code shapes.
  enc_class Fast_Unlock(rRegP obj, rax_RegP box, rRegP tmp)
  %{
    Register objReg = as_Register($obj$$reg);
    Register boxReg = as_Register($box$$reg);
    Register tmpReg = as_Register($tmp$$reg);
    MacroAssembler masm(&cbuf);

    if (EmitSync & 4) {
      // Set ZF so every exit takes the slow path.
      masm.cmpptr(rsp, 0) ;
    } else
    if (EmitSync & 8) {
      Label DONE_LABEL;
      if (UseBiasedLocking) {
        masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
      }

      // Check whether the displaced header is 0
      //(=> recursive unlock)
      masm.movptr(tmpReg, Address(boxReg, 0));
      masm.testptr(tmpReg, tmpReg);
      masm.jcc(Assembler::zero, DONE_LABEL);

      // If not recursive lock, reset the header to displaced header
      if (os::is_MP()) {
        masm.lock();
      }
      masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
      masm.bind(DONE_LABEL);
      masm.nop(); // avoid branch to branch
    } else {
      Label DONE_LABEL, Stacked, CheckSucc ;

      if (UseBiasedLocking && !UseOptoBiasInlining) {
        masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
      }

      masm.movptr(tmpReg, Address(objReg, 0)) ;
      masm.cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD) ;
      masm.jcc   (Assembler::zero, DONE_LABEL) ;           // recursive: nothing to undo
      masm.testl (tmpReg, 0x02) ;                          // inflated monitor?
      masm.jcc   (Assembler::zero, Stacked) ;

      // It's inflated
      masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
      masm.xorptr(boxReg, r15_thread) ;
      masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
      masm.jcc   (Assembler::notZero, DONE_LABEL) ;
      masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
      masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
      masm.jcc   (Assembler::notZero, CheckSucc) ;
      masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
      masm.jmp   (DONE_LABEL) ;

      if ((EmitSync & 65536) == 0) {
        Label LSuccess, LGoSlowPath ;
        masm.bind  (CheckSucc) ;
        masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
        masm.jcc   (Assembler::zero, LGoSlowPath) ;

        // I'd much rather use lock:andl m->_owner, 0 as it's faster than the
        // the explicit ST;MEMBAR combination, but masm doesn't currently support
        // "ANDQ M,IMM".  Don't use MFENCE here.  lock:add to TOS, xchg, etc
        // are all faster when the write buffer is populated.
        masm.movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
        if (os::is_MP()) {
           masm.lock () ; masm.addl (Address(rsp, 0), 0) ;
        }
        masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
        masm.jcc   (Assembler::notZero, LSuccess) ;

        masm.movptr (boxReg, (int32_t)NULL_WORD) ;                   // box is really EAX
        if (os::is_MP()) { masm.lock(); }
        masm.cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
        masm.jcc  (Assembler::notEqual, LSuccess) ;
        // Intentional fall-through into slow-path

        masm.bind  (LGoSlowPath) ;
        masm.orl   (boxReg, 1) ;                      // set ICC.ZF=0 to indicate failure
        masm.jmp   (DONE_LABEL) ;

        masm.bind  (LSuccess) ;
        masm.testl (boxReg, 0) ;                      // set ICC.ZF=1 to indicate success
        masm.jmp   (DONE_LABEL) ;
      }

      masm.bind (Stacked) ;
      masm.movptr(tmpReg, Address (boxReg, 0)) ;      // re-fetch
      if (os::is_MP()) { masm.lock(); }
      masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box

      if (EmitSync & 65536) {
         masm.bind (CheckSucc) ;
      }
      masm.bind(DONE_LABEL);
      if (EmitSync & 32768) {
         masm.nop();                      // avoid branch to branch
      }
    }
  %}
  // Lexicographic comparison of two Strings (str1 in rdi, str2 in rsi);
  // the signed comparison result is left in rcx.  Compares up to the
  // shorter length, returning the first character difference, else the
  // length difference.  Uses an SSE4.2 16-byte vector loop when
  // UseSSE42Intrinsics is set, falling back to a scalar char loop.
  enc_class enc_String_Compare(rdi_RegP str1, rsi_RegP str2, regD tmp1, regD tmp2,
                               rax_RegI tmp3, rbx_RegI tmp4, rcx_RegI result) %{
    Label RCX_GOOD_LABEL, LENGTH_DIFF_LABEL,
          POP_LABEL, DONE_LABEL, CONT_LABEL,
          WHILE_HEAD_LABEL;
    MacroAssembler masm(&cbuf);

    XMMRegister tmp1Reg = as_XMMRegister($tmp1$$reg);
    XMMRegister tmp2Reg = as_XMMRegister($tmp2$$reg);

    // Get the first character position in both strings
    //         [8] char array, [12] offset, [16] count
    int value_offset  = java_lang_String::value_offset_in_bytes();
    int offset_offset = java_lang_String::offset_offset_in_bytes();
    int count_offset  = java_lang_String::count_offset_in_bytes();
    int base_offset   = arrayOopDesc::base_offset_in_bytes(T_CHAR);

    // rax = &str2[offset], rbx = &str1[offset]
    masm.load_heap_oop(rax, Address(rsi, value_offset));
    masm.movl(rcx, Address(rsi, offset_offset));
    masm.lea(rax, Address(rax, rcx, Address::times_2, base_offset));
    masm.load_heap_oop(rbx, Address(rdi, value_offset));
    masm.movl(rcx, Address(rdi, offset_offset));
    masm.lea(rbx, Address(rbx, rcx, Address::times_2, base_offset));

    // Compute the minimum of the string lengths(rsi) and the
    // difference of the string lengths (stack)

    // do the conditional move stuff
    masm.movl(rdi, Address(rdi, count_offset));
    masm.movl(rsi, Address(rsi, count_offset));
    masm.movl(rcx, rdi);
    masm.subl(rdi, rsi);
    masm.push(rdi);
    masm.cmov(Assembler::lessEqual, rsi, rcx);

    // Is the minimum length zero?
    masm.bind(RCX_GOOD_LABEL);
    masm.testl(rsi, rsi);
    masm.jcc(Assembler::zero, LENGTH_DIFF_LABEL);

    // Load first characters
    masm.load_unsigned_short(rcx, Address(rbx, 0));
    masm.load_unsigned_short(rdi, Address(rax, 0));

    // Compare first characters
    masm.subl(rcx, rdi);
    masm.jcc(Assembler::notZero, POP_LABEL);
    masm.decrementl(rsi);
    masm.jcc(Assembler::zero, LENGTH_DIFF_LABEL);

    {
      // Check after comparing first character to see if strings are equivalent
      Label LSkip2;
      // Check if the strings start at same location
      masm.cmpptr(rbx, rax);
      masm.jccb(Assembler::notEqual, LSkip2);

      // Check if the length difference is zero (from stack)
      masm.cmpl(Address(rsp, 0), 0x0);
      masm.jcc(Assembler::equal, LENGTH_DIFF_LABEL);

      // Strings might not be equivalent
      masm.bind(LSkip2);
    }

    // Advance to next character
    masm.addptr(rax, 2);
    masm.addptr(rbx, 2);

    if (UseSSE42Intrinsics) {
      // With SSE4.2, use double quad vector compare
      Label COMPARE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;
      // Setup to compare 16-byte vectors
      masm.movl(rdi, rsi);
      masm.andl(rsi, 0xfffffff8); // rsi holds the vector count
      masm.andl(rdi, 0x00000007); // rdi holds the tail count
      masm.testl(rsi, rsi);
      masm.jccb(Assembler::zero, COMPARE_TAIL);

      masm.lea(rax, Address(rax, rsi, Address::times_2));
      masm.lea(rbx, Address(rbx, rsi, Address::times_2));
      masm.negptr(rsi);

      masm.bind(COMPARE_VECTORS);
      masm.movdqu(tmp1Reg, Address(rax, rsi, Address::times_2));
      masm.movdqu(tmp2Reg, Address(rbx, rsi, Address::times_2));
      masm.pxor(tmp1Reg, tmp2Reg);
      masm.ptest(tmp1Reg, tmp1Reg);
      masm.jccb(Assembler::notZero, VECTOR_NOT_EQUAL);
      masm.addptr(rsi, 8);
      masm.jcc(Assembler::notZero, COMPARE_VECTORS);
      masm.jmpb(COMPARE_TAIL);

      // Mismatched characters in the vectors
      masm.bind(VECTOR_NOT_EQUAL);
      masm.lea(rax, Address(rax, rsi, Address::times_2));
      masm.lea(rbx, Address(rbx, rsi, Address::times_2));
      masm.movl(rdi, 8);

      // Compare tail (< 8 chars), or rescan last vectors to
      // find 1st mismatched characters
      masm.bind(COMPARE_TAIL);
      masm.testl(rdi, rdi);
      masm.jccb(Assembler::zero, LENGTH_DIFF_LABEL);
      masm.movl(rsi, rdi);
      // Fallthru to tail compare
    }

    // Shift RAX and RBX to the end of the arrays, negate min
    masm.lea(rax, Address(rax, rsi, Address::times_2, 0));
    masm.lea(rbx, Address(rbx, rsi, Address::times_2, 0));
    masm.negptr(rsi);

    // Compare the rest of the characters
    masm.bind(WHILE_HEAD_LABEL);
    masm.load_unsigned_short(rcx, Address(rbx, rsi, Address::times_2, 0));
    masm.load_unsigned_short(rdi, Address(rax, rsi, Address::times_2, 0));
    masm.subl(rcx, rdi);
    masm.jccb(Assembler::notZero, POP_LABEL);
    masm.increment(rsi);
    masm.jcc(Assembler::notZero, WHILE_HEAD_LABEL);

    // Strings are equal up to min length.  Return the length difference.
    masm.bind(LENGTH_DIFF_LABEL);
    masm.pop(rcx);
    masm.jmpb(DONE_LABEL);

    // Discard the stored length difference
    masm.bind(POP_LABEL);
    masm.addptr(rsp, 8);

    // That's it
    masm.bind(DONE_LABEL);
  %}
  // String.indexOf intrinsic: find substring (str2, rdi) within string
  // (str1, rsi) using SSE4.2 PCMPESTRI; the result index (or -1) is
  // left in rbx.  Three words are pushed during the scan (string start,
  // substr start, substr count) and popped in CLEANUP.
  enc_class enc_String_IndexOf(rsi_RegP str1, rdi_RegP str2, regD tmp1, rax_RegI tmp2,
                               rcx_RegI tmp3, rdx_RegI tmp4, rbx_RegI result) %{
    // SSE4.2 version
    Label LOAD_SUBSTR, PREP_FOR_SCAN, SCAN_TO_SUBSTR,
          SCAN_SUBSTR, RET_NEG_ONE, RET_NOT_FOUND, CLEANUP, DONE;
    MacroAssembler masm(&cbuf);

    XMMRegister tmp1Reg = as_XMMRegister($tmp1$$reg);

    // Get the first character position in both strings
    //         [8] char array, [12] offset, [16] count
    int value_offset  = java_lang_String::value_offset_in_bytes();
    int offset_offset = java_lang_String::offset_offset_in_bytes();
    int count_offset  = java_lang_String::count_offset_in_bytes();
    int base_offset   = arrayOopDesc::base_offset_in_bytes(T_CHAR);

    // Get counts for string and substr
    masm.movl(rdx, Address(rsi, count_offset));
    masm.movl(rax, Address(rdi, count_offset));
    // Check for substr count > string count
    masm.cmpl(rax, rdx);
    masm.jcc(Assembler::greater, RET_NEG_ONE);

    // Start the indexOf operation
    // Get start addr of string
    masm.load_heap_oop(rbx, Address(rsi, value_offset));
    masm.movl(rcx, Address(rsi, offset_offset));
    masm.lea(rsi, Address(rbx, rcx, Address::times_2, base_offset));
    masm.push(rsi);

    // Get start addr of substr
    masm.load_heap_oop(rbx, Address(rdi, value_offset));
    masm.movl(rcx, Address(rdi, offset_offset));
    masm.lea(rdi, Address(rbx, rcx, Address::times_2, base_offset));
    masm.push(rdi);
    masm.push(rax);
    masm.jmpb(PREP_FOR_SCAN);

    // Substr count saved at sp
    // Substr saved at sp+8
    // String saved at sp+16

    // Prep to load substr for scan
    masm.bind(LOAD_SUBSTR);
    masm.movptr(rdi, Address(rsp, 8));
    masm.movl(rax, Address(rsp, 0));

    // Load substr
    masm.bind(PREP_FOR_SCAN);
    masm.movdqu(tmp1Reg, Address(rdi, 0));
    masm.addq(rdx, 8); // prime the loop
    masm.subptr(rsi, 16);

    // Scan string for substr in 16-byte vectors
    masm.bind(SCAN_TO_SUBSTR);
    masm.subq(rdx, 8);
    masm.addptr(rsi, 16);
    masm.pcmpestri(tmp1Reg, Address(rsi, 0), 0x0d);
    masm.jcc(Assembler::above, SCAN_TO_SUBSTR);     // CF == 0 && ZF == 0
    masm.jccb(Assembler::aboveEqual, RET_NOT_FOUND); // CF == 0

    // Fallthru: found a potential substr

    //Make sure string is still long enough
    masm.subl(rdx, rcx);
    masm.cmpl(rdx, rax);
    masm.jccb(Assembler::negative, RET_NOT_FOUND);
    // Compute start addr of substr
    masm.lea(rsi, Address(rsi, rcx, Address::times_2));
    masm.movptr(rbx, rsi);

    // Compare potential substr
    masm.addq(rdx, 8); // prime the loop
    masm.addq(rax, 8);
    masm.subptr(rsi, 16);
    masm.subptr(rdi, 16);

    // Scan 16-byte vectors of string and substr
    masm.bind(SCAN_SUBSTR);
    masm.subq(rax, 8);
    masm.subq(rdx, 8);
    masm.addptr(rsi, 16);
    masm.addptr(rdi, 16);
    masm.movdqu(tmp1Reg, Address(rdi, 0));
    masm.pcmpestri(tmp1Reg, Address(rsi, 0), 0x0d);
    masm.jcc(Assembler::noOverflow, LOAD_SUBSTR); // OF == 0
    masm.jcc(Assembler::positive, SCAN_SUBSTR); // SF == 0

    // Compute substr offset
    masm.movptr(rsi, Address(rsp, 16));
    masm.subptr(rbx, rsi);
    masm.shrl(rbx, 1); // byte offset -> char index
    masm.jmpb(CLEANUP);

    masm.bind(RET_NEG_ONE);
    masm.movl(rbx, -1);
    masm.jmpb(DONE);

    masm.bind(RET_NOT_FOUND);
    masm.movl(rbx, -1);

    masm.bind(CLEANUP);
    masm.addptr(rsp, 24); // pop the three saved words

    masm.bind(DONE);
  %}
3946 enc_class enc_String_Equals(rdi_RegP str1, rsi_RegP str2, regD tmp1, regD tmp2,
3947 rbx_RegI tmp3, rcx_RegI tmp2, rax_RegI result) %{
3948 Label RET_TRUE, RET_FALSE, DONE, COMPARE_VECTORS, COMPARE_CHAR;
3949 MacroAssembler masm(&cbuf);
3951 XMMRegister tmp1Reg = as_XMMRegister($tmp1$$reg);
3952 XMMRegister tmp2Reg = as_XMMRegister($tmp2$$reg);
3954 int value_offset = java_lang_String::value_offset_in_bytes();
3955 int offset_offset = java_lang_String::offset_offset_in_bytes();
3956 int count_offset = java_lang_String::count_offset_in_bytes();
3957 int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR);
3959 // does source == target string?
3960 masm.cmpptr(rdi, rsi);
3961 masm.jcc(Assembler::equal, RET_TRUE);
3963 // get and compare counts
3964 masm.movl(rcx, Address(rdi, count_offset));
3965 masm.movl(rax, Address(rsi, count_offset));
3966 masm.cmpl(rcx, rax);
3967 masm.jcc(Assembler::notEqual, RET_FALSE);
3968 masm.testl(rax, rax);
3969 masm.jcc(Assembler::zero, RET_TRUE);
3971 // get source string offset and value
3972 masm.load_heap_oop(rbx, Address(rsi, value_offset));
3973 masm.movl(rax, Address(rsi, offset_offset));
3974 masm.lea(rsi, Address(rbx, rax, Address::times_2, base_offset));
3976 // get compare string offset and value
3977 masm.load_heap_oop(rbx, Address(rdi, value_offset));
3978 masm.movl(rax, Address(rdi, offset_offset));
3979 masm.lea(rdi, Address(rbx, rax, Address::times_2, base_offset));
3981 // Set byte count
3982 masm.shll(rcx, 1);
3983 masm.movl(rax, rcx);
3985 if (UseSSE42Intrinsics) {
3986 // With SSE4.2, use double quad vector compare
3987 Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
3988 // Compare 16-byte vectors
3989 masm.andl(rcx, 0xfffffff0); // vector count (in bytes)
3990 masm.andl(rax, 0x0000000e); // tail count (in bytes)
3991 masm.testl(rcx, rcx);
3992 masm.jccb(Assembler::zero, COMPARE_TAIL);
3993 masm.lea(rdi, Address(rdi, rcx, Address::times_1));
3994 masm.lea(rsi, Address(rsi, rcx, Address::times_1));
3995 masm.negptr(rcx);
3997 masm.bind(COMPARE_WIDE_VECTORS);
3998 masm.movdqu(tmp1Reg, Address(rdi, rcx, Address::times_1));
3999 masm.movdqu(tmp2Reg, Address(rsi, rcx, Address::times_1));
4000 masm.pxor(tmp1Reg, tmp2Reg);
4001 masm.ptest(tmp1Reg, tmp1Reg);
4002 masm.jccb(Assembler::notZero, RET_FALSE);
4003 masm.addptr(rcx, 16);
4004 masm.jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
4005 masm.bind(COMPARE_TAIL);
4006 masm.movl(rcx, rax);
4007 // Fallthru to tail compare
4008 }
4010 // Compare 4-byte vectors
4011 masm.andl(rcx, 0xfffffffc); // vector count (in bytes)
4012 masm.andl(rax, 0x00000002); // tail char (in bytes)
4013 masm.testl(rcx, rcx);
4014 masm.jccb(Assembler::zero, COMPARE_CHAR);
4015 masm.lea(rdi, Address(rdi, rcx, Address::times_1));
4016 masm.lea(rsi, Address(rsi, rcx, Address::times_1));
4017 masm.negptr(rcx);
4019 masm.bind(COMPARE_VECTORS);
4020 masm.movl(rbx, Address(rdi, rcx, Address::times_1));
4021 masm.cmpl(rbx, Address(rsi, rcx, Address::times_1));
4022 masm.jccb(Assembler::notEqual, RET_FALSE);
4023 masm.addptr(rcx, 4);
4024 masm.jcc(Assembler::notZero, COMPARE_VECTORS);
4026 // Compare trailing char (final 2 bytes), if any
4027 masm.bind(COMPARE_CHAR);
4028 masm.testl(rax, rax);
4029 masm.jccb(Assembler::zero, RET_TRUE);
4030 masm.load_unsigned_short(rbx, Address(rdi, 0));
4031 masm.load_unsigned_short(rcx, Address(rsi, 0));
4032 masm.cmpl(rbx, rcx);
4033 masm.jccb(Assembler::notEqual, RET_FALSE);
4035 masm.bind(RET_TRUE);
4036 masm.movl(rax, 1); // return true
4037 masm.jmpb(DONE);
4039 masm.bind(RET_FALSE);
4040 masm.xorl(rax, rax); // return false
4042 masm.bind(DONE);
4043 %}
  // char[] equality intrinsic: compare ary1 (rdi) and ary2 (rsi);
  // result register is set to 1 (equal) or 0 (not equal).  Fast-paths
  // identity, null arguments, and length mismatch, then compares the
  // array bodies in 16-byte vectors (SSE4.2), 4-byte words, and a
  // trailing char -- same shape as enc_String_Equals.
  enc_class enc_Array_Equals(rdi_RegP ary1, rsi_RegP ary2, regD tmp1, regD tmp2,
                             rax_RegI tmp3, rbx_RegI tmp4, rcx_RegI result) %{
    Label TRUE_LABEL, FALSE_LABEL, DONE, COMPARE_VECTORS, COMPARE_CHAR;
    MacroAssembler masm(&cbuf);

    XMMRegister tmp1Reg   = as_XMMRegister($tmp1$$reg);
    XMMRegister tmp2Reg   = as_XMMRegister($tmp2$$reg);
    Register ary1Reg      = as_Register($ary1$$reg);
    Register ary2Reg      = as_Register($ary2$$reg);
    Register tmp3Reg      = as_Register($tmp3$$reg);
    Register tmp4Reg      = as_Register($tmp4$$reg);
    Register resultReg    = as_Register($result$$reg);

    int length_offset  = arrayOopDesc::length_offset_in_bytes();
    int base_offset    = arrayOopDesc::base_offset_in_bytes(T_CHAR);

    // Check the input args
    masm.cmpq(ary1Reg, ary2Reg);
    masm.jcc(Assembler::equal, TRUE_LABEL);
    masm.testq(ary1Reg, ary1Reg);
    masm.jcc(Assembler::zero, FALSE_LABEL);
    masm.testq(ary2Reg, ary2Reg);
    masm.jcc(Assembler::zero, FALSE_LABEL);

    // Check the lengths
    masm.movl(tmp4Reg, Address(ary1Reg, length_offset));
    masm.movl(resultReg, Address(ary2Reg, length_offset));
    masm.cmpl(tmp4Reg, resultReg);
    masm.jcc(Assembler::notEqual, FALSE_LABEL);
    masm.testl(resultReg, resultReg);
    masm.jcc(Assembler::zero, TRUE_LABEL);

    //load array address
    masm.lea(ary1Reg, Address(ary1Reg, base_offset));
    masm.lea(ary2Reg, Address(ary2Reg, base_offset));

    //set byte count
    masm.shll(tmp4Reg, 1);
    masm.movl(resultReg,tmp4Reg);

    if (UseSSE42Intrinsics){
      // With SSE4.2, use double quad vector compare
      Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
      // Compare 16-byte vectors
      masm.andl(tmp4Reg, 0xfffffff0);  // vector count (in bytes)
      masm.andl(resultReg, 0x0000000e);  // tail count (in bytes)
      masm.testl(tmp4Reg, tmp4Reg);
      masm.jccb(Assembler::zero, COMPARE_TAIL);
      masm.lea(ary1Reg, Address(ary1Reg, tmp4Reg, Address::times_1));
      masm.lea(ary2Reg, Address(ary2Reg, tmp4Reg, Address::times_1));
      masm.negptr(tmp4Reg);

      masm.bind(COMPARE_WIDE_VECTORS);
      masm.movdqu(tmp1Reg, Address(ary1Reg, tmp4Reg, Address::times_1));
      masm.movdqu(tmp2Reg, Address(ary2Reg, tmp4Reg, Address::times_1));
      masm.pxor(tmp1Reg, tmp2Reg);
      masm.ptest(tmp1Reg, tmp1Reg);

      masm.jccb(Assembler::notZero, FALSE_LABEL);
      masm.addptr(tmp4Reg, 16);
      masm.jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
      masm.bind(COMPARE_TAIL);
      masm.movl(tmp4Reg, resultReg);
      // Fallthru to tail compare
    }

    // Compare 4-byte vectors
    masm.andl(tmp4Reg, 0xfffffffc);  // vector count (in bytes)
    masm.andl(resultReg, 0x00000002);  // tail char (in bytes)
    masm.testl(tmp4Reg, tmp4Reg); //if tmp2 == 0, only compare char
    masm.jccb(Assembler::zero, COMPARE_CHAR);
    masm.lea(ary1Reg, Address(ary1Reg, tmp4Reg, Address::times_1));
    masm.lea(ary2Reg, Address(ary2Reg, tmp4Reg, Address::times_1));
    masm.negptr(tmp4Reg);

    masm.bind(COMPARE_VECTORS);
    masm.movl(tmp3Reg, Address(ary1Reg, tmp4Reg, Address::times_1));
    masm.cmpl(tmp3Reg, Address(ary2Reg, tmp4Reg, Address::times_1));
    masm.jccb(Assembler::notEqual, FALSE_LABEL);
    masm.addptr(tmp4Reg, 4);
    masm.jcc(Assembler::notZero, COMPARE_VECTORS);

    // Compare trailing char (final 2 bytes), if any
    masm.bind(COMPARE_CHAR);
    masm.testl(resultReg, resultReg);
    masm.jccb(Assembler::zero, TRUE_LABEL);
    masm.load_unsigned_short(tmp3Reg, Address(ary1Reg, 0));
    masm.load_unsigned_short(tmp4Reg, Address(ary2Reg, 0));
    masm.cmpl(tmp3Reg, tmp4Reg);
    masm.jccb(Assembler::notEqual, FALSE_LABEL);

    masm.bind(TRUE_LABEL);
    masm.movl(resultReg, 1);   // return true
    masm.jmpb(DONE);

    masm.bind(FALSE_LABEL);
    masm.xorl(resultReg, resultReg); // return false

    // That's it
    masm.bind(DONE);
  %}
  // Rethrow an exception: emit a direct jump (E9 rel32) to the shared
  // rethrow stub, with a runtime-call relocation on the displacement.
  enc_class enc_rethrow()
  %{
    cbuf.set_inst_mark();
    emit_opcode(cbuf, 0xE9); // jmp entry
    emit_d32_reloc(cbuf,
                   (int) (OptoRuntime::rethrow_stub() - cbuf.code_end() - 4),
                   runtime_call_Relocation::spec(),
                   RELOC_DISP32);
  %}
// Absolute value of a float: andps $dst, [float_sign_mask]
// clears the sign bit using a RIP-relative mask constant.
enc_class absF_encoding(regF dst)
%{
  int dstenc = $dst$$reg;
  address signmask_address = (address) StubRoutines::x86::float_sign_mask();

  cbuf.set_inst_mark();
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_R); // dst is XMM8-15
    dstenc -= 8;
  }
  // XXX reg_mem doesn't support RIP-relative addressing yet
  emit_opcode(cbuf, 0x0F); // 0F 54 == andps
  emit_opcode(cbuf, 0x54);
  emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101 == RIP-relative disp32
  emit_d32_reloc(cbuf, signmask_address);
%}
// Absolute value of a double: andpd $dst, [double_sign_mask]
// (0x66 prefix turns andps into andpd) clears the sign bit.
enc_class absD_encoding(regD dst)
%{
  int dstenc = $dst$$reg;
  address signmask_address = (address) StubRoutines::x86::double_sign_mask();

  cbuf.set_inst_mark();
  emit_opcode(cbuf, 0x66); // operand-size prefix: packed-double form
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_R); // dst is XMM8-15
    dstenc -= 8;
  }
  // XXX reg_mem doesn't support RIP-relative addressing yet
  emit_opcode(cbuf, 0x0F); // 66 0F 54 == andpd
  emit_opcode(cbuf, 0x54);
  emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101 == RIP-relative disp32
  emit_d32_reloc(cbuf, signmask_address);
%}
// Negate a float: xorps $dst, [float_sign_flip]
// flips the sign bit using a RIP-relative constant.
enc_class negF_encoding(regF dst)
%{
  int dstenc = $dst$$reg;
  address signflip_address = (address) StubRoutines::x86::float_sign_flip();

  cbuf.set_inst_mark();
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_R); // dst is XMM8-15
    dstenc -= 8;
  }
  // XXX reg_mem doesn't support RIP-relative addressing yet
  emit_opcode(cbuf, 0x0F); // 0F 57 == xorps
  emit_opcode(cbuf, 0x57);
  emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101 == RIP-relative disp32
  emit_d32_reloc(cbuf, signflip_address);
%}
// Negate a double: xorpd $dst, [double_sign_flip]
// (0x66 prefix turns xorps into xorpd) flips the sign bit.
enc_class negD_encoding(regD dst)
%{
  int dstenc = $dst$$reg;
  address signflip_address = (address) StubRoutines::x86::double_sign_flip();

  cbuf.set_inst_mark();
  emit_opcode(cbuf, 0x66); // operand-size prefix: packed-double form
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_R); // dst is XMM8-15
    dstenc -= 8;
  }
  // XXX reg_mem doesn't support RIP-relative addressing yet
  emit_opcode(cbuf, 0x0F); // 66 0F 57 == xorpd
  emit_opcode(cbuf, 0x57);
  emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101 == RIP-relative disp32
  emit_d32_reloc(cbuf, signflip_address);
%}
// Fix up a float->int conversion.  If $dst holds 0x80000000 (the x86
// "integer indefinite" result, produced for NaN and out-of-range
// inputs -- presumably by a preceding cvttss2si; confirm at the
// instruct that uses this), spill $src and call the f2i_fixup stub
// to compute the JLS-correct answer; otherwise skip over the slow path.
enc_class f2i_fixup(rRegI dst, regF src)
%{
  int dstenc = $dst$$reg;
  int srcenc = $src$$reg;

  // cmpl $dst, #0x80000000
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_B);
  }
  emit_opcode(cbuf, 0x81);
  emit_rm(cbuf, 0x3, 0x7, dstenc & 7); // /7 == cmp
  emit_d32(cbuf, 0x80000000);

  // jne,s done
  // The short-branch distance is hand-computed: it varies with how many
  // REX prefixes the slow path below needs (src/dst in the high 8 regs).
  emit_opcode(cbuf, 0x75);
  if (srcenc < 8 && dstenc < 8) {
    emit_d8(cbuf, 0xF);
  } else if (srcenc >= 8 && dstenc >= 8) {
    emit_d8(cbuf, 0x11);
  } else {
    emit_d8(cbuf, 0x10);
  }

  // subq rsp, #8 -- make a slot to pass the operand to the stub
  emit_opcode(cbuf, Assembler::REX_W);
  emit_opcode(cbuf, 0x83);
  emit_rm(cbuf, 0x3, 0x5, RSP_enc);
  emit_d8(cbuf, 8);

  // movss [rsp], $src
  emit_opcode(cbuf, 0xF3);
  if (srcenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_R);
  }
  emit_opcode(cbuf, 0x0F);
  emit_opcode(cbuf, 0x11);
  encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes

  // call f2i_fixup
  cbuf.set_inst_mark();
  emit_opcode(cbuf, 0xE8);
  emit_d32_reloc(cbuf,
                 (int)
                 (StubRoutines::x86::f2i_fixup() - cbuf.code_end() - 4),
                 runtime_call_Relocation::spec(),
                 RELOC_DISP32);

  // popq $dst -- the corrected result comes back on the stack
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_B);
  }
  emit_opcode(cbuf, 0x58 | (dstenc & 7));

  // done:
%}
// Fix up a float->long conversion.  If $dst holds 0x8000000000000000
// (the 64-bit "integer indefinite" result for NaN / out-of-range
// inputs), spill $src and call the f2l_fixup stub for the
// JLS-correct answer; otherwise skip the slow path.
enc_class f2l_fixup(rRegL dst, regF src)
%{
  int dstenc = $dst$$reg;
  int srcenc = $src$$reg;
  // double_sign_flip holds the bit pattern 0x8000000000000000,
  // reused here purely as a 64-bit compare constant.
  address const_address = (address) StubRoutines::x86::double_sign_flip();

  // cmpq $dst, [0x8000000000000000]
  cbuf.set_inst_mark();
  // REX.W for the 64-bit compare; REX.WR when dst is in the high 8 regs
  // (dst sits in the ModRM reg field of opcode 0x39).
  emit_opcode(cbuf, dstenc < 8 ? Assembler::REX_W : Assembler::REX_WR);
  emit_opcode(cbuf, 0x39);
  // XXX reg_mem doesn't support RIP-relative addressing yet
  emit_rm(cbuf, 0x0, dstenc & 7, 0x5); // 00 reg 101
  emit_d32_reloc(cbuf, const_address);

  // jne,s done
  // Hand-computed distance; varies with the REX prefixes needed below.
  emit_opcode(cbuf, 0x75);
  if (srcenc < 8 && dstenc < 8) {
    emit_d8(cbuf, 0xF);
  } else if (srcenc >= 8 && dstenc >= 8) {
    emit_d8(cbuf, 0x11);
  } else {
    emit_d8(cbuf, 0x10);
  }

  // subq rsp, #8 -- slot to pass the operand to the stub
  emit_opcode(cbuf, Assembler::REX_W);
  emit_opcode(cbuf, 0x83);
  emit_rm(cbuf, 0x3, 0x5, RSP_enc);
  emit_d8(cbuf, 8);

  // movss [rsp], $src
  emit_opcode(cbuf, 0xF3);
  if (srcenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_R);
  }
  emit_opcode(cbuf, 0x0F);
  emit_opcode(cbuf, 0x11);
  encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes

  // call f2l_fixup
  cbuf.set_inst_mark();
  emit_opcode(cbuf, 0xE8);
  emit_d32_reloc(cbuf,
                 (int)
                 (StubRoutines::x86::f2l_fixup() - cbuf.code_end() - 4),
                 runtime_call_Relocation::spec(),
                 RELOC_DISP32);

  // popq $dst -- the corrected result comes back on the stack
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_B);
  }
  emit_opcode(cbuf, 0x58 | (dstenc & 7));

  // done:
%}
// Fix up a double->int conversion.  Same structure as f2i_fixup, but
// spills the operand with movsd (0xF2 prefix) and calls the d2i stub.
enc_class d2i_fixup(rRegI dst, regD src)
%{
  int dstenc = $dst$$reg;
  int srcenc = $src$$reg;

  // cmpl $dst, #0x80000000
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_B);
  }
  emit_opcode(cbuf, 0x81);
  emit_rm(cbuf, 0x3, 0x7, dstenc & 7); // /7 == cmp
  emit_d32(cbuf, 0x80000000);

  // jne,s done
  // Hand-computed distance; varies with the REX prefixes needed below.
  emit_opcode(cbuf, 0x75);
  if (srcenc < 8 && dstenc < 8) {
    emit_d8(cbuf, 0xF);
  } else if (srcenc >= 8 && dstenc >= 8) {
    emit_d8(cbuf, 0x11);
  } else {
    emit_d8(cbuf, 0x10);
  }

  // subq rsp, #8 -- slot to pass the operand to the stub
  emit_opcode(cbuf, Assembler::REX_W);
  emit_opcode(cbuf, 0x83);
  emit_rm(cbuf, 0x3, 0x5, RSP_enc);
  emit_d8(cbuf, 8);

  // movsd [rsp], $src
  emit_opcode(cbuf, 0xF2);
  if (srcenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_R);
  }
  emit_opcode(cbuf, 0x0F);
  emit_opcode(cbuf, 0x11);
  encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes

  // call d2i_fixup
  cbuf.set_inst_mark();
  emit_opcode(cbuf, 0xE8);
  emit_d32_reloc(cbuf,
                 (int)
                 (StubRoutines::x86::d2i_fixup() - cbuf.code_end() - 4),
                 runtime_call_Relocation::spec(),
                 RELOC_DISP32);

  // popq $dst -- the corrected result comes back on the stack
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_B);
  }
  emit_opcode(cbuf, 0x58 | (dstenc & 7));

  // done:
%}
// Fix up a double->long conversion.  Same structure as f2l_fixup, but
// spills the operand with movsd (0xF2 prefix) and calls the d2l stub.
enc_class d2l_fixup(rRegL dst, regD src)
%{
  int dstenc = $dst$$reg;
  int srcenc = $src$$reg;
  // double_sign_flip holds the bit pattern 0x8000000000000000,
  // reused here purely as a 64-bit compare constant.
  address const_address = (address) StubRoutines::x86::double_sign_flip();

  // cmpq $dst, [0x8000000000000000]
  cbuf.set_inst_mark();
  // REX.W for the 64-bit compare; REX.WR when dst is in the high 8 regs.
  emit_opcode(cbuf, dstenc < 8 ? Assembler::REX_W : Assembler::REX_WR);
  emit_opcode(cbuf, 0x39);
  // XXX reg_mem doesn't support RIP-relative addressing yet
  emit_rm(cbuf, 0x0, dstenc & 7, 0x5); // 00 reg 101
  emit_d32_reloc(cbuf, const_address);

  // jne,s done
  // Hand-computed distance; varies with the REX prefixes needed below.
  emit_opcode(cbuf, 0x75);
  if (srcenc < 8 && dstenc < 8) {
    emit_d8(cbuf, 0xF);
  } else if (srcenc >= 8 && dstenc >= 8) {
    emit_d8(cbuf, 0x11);
  } else {
    emit_d8(cbuf, 0x10);
  }

  // subq rsp, #8 -- slot to pass the operand to the stub
  emit_opcode(cbuf, Assembler::REX_W);
  emit_opcode(cbuf, 0x83);
  emit_rm(cbuf, 0x3, 0x5, RSP_enc);
  emit_d8(cbuf, 8);

  // movsd [rsp], $src
  emit_opcode(cbuf, 0xF2);
  if (srcenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_R);
  }
  emit_opcode(cbuf, 0x0F);
  emit_opcode(cbuf, 0x11);
  encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes

  // call d2l_fixup
  cbuf.set_inst_mark();
  emit_opcode(cbuf, 0xE8);
  emit_d32_reloc(cbuf,
                 (int)
                 (StubRoutines::x86::d2l_fixup() - cbuf.code_end() - 4),
                 runtime_call_Relocation::spec(),
                 RELOC_DISP32);

  // popq $dst -- the corrected result comes back on the stack
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_B);
  }
  emit_opcode(cbuf, 0x58 | (dstenc & 7));

  // done:
%}
// Safepoint Poll. This polls the safepoint page, and causes an
// exception if it is not readable. Unfortunately, it kills
// RFLAGS in the process.
enc_class enc_safepoint_poll
%{
  // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes
  // XXX reg_mem doesn't support RIP-relative addressing yet
  cbuf.set_inst_mark();
  // Register the poll relocation before emitting the bytes so the
  // relocation points at the start of the instruction.
  cbuf.relocate(cbuf.inst_mark(), relocInfo::poll_type, 0); // XXX
  emit_opcode(cbuf, 0x85); // testl
  emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5 (RIP-relative)
  // cbuf.inst_mark() is beginning of instruction
  emit_d32_reloc(cbuf, os::get_polling_page());
  // relocInfo::poll_type,
%}
4470 %}
4474 //----------FRAME--------------------------------------------------------------
4475 // Definition of frame structure and management information.
4476 //
4477 // S T A C K L A Y O U T Allocators stack-slot number
4478 // | (to get allocators register number
4479 // G Owned by | | v add OptoReg::stack0())
4480 // r CALLER | |
4481 // o | +--------+ pad to even-align allocators stack-slot
4482 // w V | pad0 | numbers; owned by CALLER
4483 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
4484 // h ^ | in | 5
4485 // | | args | 4 Holes in incoming args owned by SELF
4486 // | | | | 3
4487 // | | +--------+
4488 // V | | old out| Empty on Intel, window on Sparc
4489 // | old |preserve| Must be even aligned.
4490 // | SP-+--------+----> Matcher::_old_SP, even aligned
4491 // | | in | 3 area for Intel ret address
4492 // Owned by |preserve| Empty on Sparc.
4493 // SELF +--------+
4494 // | | pad2 | 2 pad to align old SP
4495 // | +--------+ 1
4496 // | | locks | 0
4497 // | +--------+----> OptoReg::stack0(), even aligned
4498 // | | pad1 | 11 pad to align new SP
4499 // | +--------+
4500 // | | | 10
4501 // | | spills | 9 spills
4502 // V | | 8 (pad0 slot for callee)
4503 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
4504 // ^ | out | 7
4505 // | | args | 6 Holes in outgoing args owned by CALLEE
4506 // Owned by +--------+
4507 // CALLEE | new out| 6 Empty on Intel, window on Sparc
4508 // | new |preserve| Must be even-aligned.
4509 // | SP-+--------+----> Matcher::_new_SP, even aligned
4510 // | | |
4511 //
4512 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
4513 // known from SELF's arguments and the Java calling convention.
4514 // Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF. Holes in the outgoing area
//         are owned by the CALLEE. Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file. Doubles can be sorted and packed to
//         avoid holes. Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
4522 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
4523 // even aligned with pad0 as needed.
4524 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
4525 // region 6-11 is even aligned; it may be padded out more so that
4526 // the region from SP to FP meets the minimum stack alignment.
4527 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
4528 // alignment. Region 11, pad1, may be dynamically extended so that
4529 // SP meets the minimum alignment.
// Frame definition: tells the matcher and allocator how compiled
// AMD64 frames are laid out (see the stack-layout diagram above).
frame
%{
  // What direction does stack grow in (assumed to be same for C & Java)
  stack_direction(TOWARDS_LOW);

  // These three registers define part of the calling convention
  // between compiled code and the interpreter.
  inline_cache_reg(RAX);           // Inline Cache Register
  interpreter_method_oop_reg(RBX); // Method Oop Register when
                                   // calling interpreter

  // Optional: name the operand used by cisc-spilling to access
  // [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(RSP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(RBP);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame. The PROLOG must add this many slots to the stack. The
  // EPILOG must remove this many slots. amd64 needs two slots for
  // return address.
  in_preserve_stack_slots(4 + 2 * VerifyStackAtCalls);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C. Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address. Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  return_addr(STACK - 2 +
              round_to(2 + 2 * VerifyStackAtCalls +
                       Compile::current()->fixed_slots(),
                       WordsPerLong * 2));

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  calling_convention
  %{
    // No difference between incoming/outgoing, just pass false
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  c_calling_convention
  %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, length);
  %}

  // Location of compiled Java return values. Same as C for now.
  return_value
  %{
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low-half return register for each ideal register type,
    // indexed by the ideal register opcode.
    static const int lo[Op_RegL + 1] = {
      0,
      0,
      RAX_num,  // Op_RegN
      RAX_num,  // Op_RegI
      RAX_num,  // Op_RegP
      XMM0_num, // Op_RegF
      XMM0_num, // Op_RegD
      RAX_num   // Op_RegL
    };
    // High-half return register; OptoReg::Bad for 32-bit-wide values.
    static const int hi[Op_RegL + 1] = {
      0,
      0,
      OptoReg::Bad, // Op_RegN
      OptoReg::Bad, // Op_RegI
      RAX_H_num,    // Op_RegP
      OptoReg::Bad, // Op_RegF
      XMM0_H_num,   // Op_RegD
      RAX_H_num     // Op_RegL
    };
    assert(ARRAY_SIZE(hi) == _last_machine_leaf - 1, "missing type");
    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
4631 //----------ATTRIBUTES---------------------------------------------------------
4632 //----------Operand Attributes-------------------------------------------------
4633 op_attrib op_cost(0); // Required cost attribute
4635 //----------Instruction Attributes---------------------------------------------
4636 ins_attrib ins_cost(100); // Required cost attribute
4637 ins_attrib ins_size(8); // Required size attribute (in bits)
4638 ins_attrib ins_pc_relative(0); // Required PC Relative flag
4639 ins_attrib ins_short_branch(0); // Required flag: is this instruction
4640 // a non-matching short branch variant
4641 // of some long branch?
4642 ins_attrib ins_alignment(1); // Required alignment attribute (must
4643 // be a power of 2) specifies the
4644 // alignment that some part of the
4645 // instruction (not necessarily the
4646 // start) requires. If > 1, a
4647 // compute_padding() function must be
4648 // provided for the instruction
4650 //----------OPERANDS-----------------------------------------------------------
4651 // Operand definitions must precede instruction definitions for correct parsing
4652 // in the ADLC because operands constitute user defined types which are used in
4653 // instruction definitions.
//----------Simple Operands----------------------------------------------------
// Immediate Operands

// Integer Immediate: any 32-bit int constant
operand immI()
%{
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for test vs zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for increment
operand immI1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid scale values for addressing modes (shift counts 0..3,
// i.e. scale factors 1/2/4/8)
operand immI2()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// 8-bit signed immediate
operand immI8()
%{
  predicate((-0x80 <= n->get_int()) && (n->get_int() < 0x80));
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed immediate
operand immI16()
%{
  predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for long shifts
operand immI_32()
%{
  predicate( n->get_int() == 32 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for long shifts
operand immI_64()
%{
  predicate( n->get_int() == 64 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) Pointer Immediate
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Narrow Pointer Immediate
operand immN0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-oop pointer constant whose value fits in the low 31 bits
// (so zero- and sign-extension to 64 bits agree)
operand immP31()
%{
  predicate(!n->as_Type()->type()->isa_oopptr()
            && (n->get_ptr() >> 31) == 0);
  match(ConP);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Long Immediate: any 64-bit long constant
operand immL()
%{
  match(ConL);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 8-bit
operand immL8()
%{
  predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
  match(ConL);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 32-bit unsigned (fits in a zero-extended 32-bit field)
operand immUL32()
%{
  predicate(n->get_long() == (unsigned int) (n->get_long()));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 32-bit signed (fits in a sign-extended 32-bit field)
operand immL32()
%{
  predicate(n->get_long() == (int) (n->get_long()));
  match(ConL);

  op_cost(15);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate zero
operand immL0()
%{
  predicate(n->get_long() == 0L);
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for increment
operand immL1()
%{
  predicate(n->get_long() == 1);
  match(ConL);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: the value 10
operand immL10()
%{
  predicate(n->get_long() == 10);
  match(ConL);

  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate from 0 to 127.
// Used for a shorter form of long mul by 10.
operand immL_127()
%{
  predicate(0 <= n->get_long() && n->get_long() < 0x80);
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(20);

  format %{ %}
  interface(CONST_INTER);
%}
// Float Immediate zero (bit-pattern compare: matches +0.0f only, not -0.0f)
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);

  op_cost(15);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate zero (bit-pattern compare: matches +0.0 only, not -0.0)
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate
operand immD()
%{
  match(ConD);

  op_cost(15);
  format %{ %}
  interface(CONST_INTER);
%}
// Immediates for special shifts (sign extend)

// Constants for increment
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for short-wide masking
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immL_255()
%{
  predicate(n->get_long() == 255);
  match(ConL);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for short-wide masking
operand immL_65535()
%{
  predicate(n->get_long() == 65535);
  match(ConL);

  format %{ %}
  interface(CONST_INTER);
%}
// Register Operands
// Integer Register
operand rRegI()
%{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegI);

  // Accept the register-specific variants as instruction inputs.
  match(rax_RegI);
  match(rbx_RegI);
  match(rcx_RegI);
  match(rdx_RegI);
  match(rdi_RegI);

  format %{ %}
  interface(REG_INTER);
%}

// Special Registers
operand rax_RegI()
%{
  constraint(ALLOC_IN_RC(int_rax_reg));
  match(RegI);
  match(rRegI);

  format %{ "RAX" %}
  interface(REG_INTER);
%}

// Special Registers
operand rbx_RegI()
%{
  constraint(ALLOC_IN_RC(int_rbx_reg));
  match(RegI);
  match(rRegI);

  format %{ "RBX" %}
  interface(REG_INTER);
%}

operand rcx_RegI()
%{
  constraint(ALLOC_IN_RC(int_rcx_reg));
  match(RegI);
  match(rRegI);

  format %{ "RCX" %}
  interface(REG_INTER);
%}

operand rdx_RegI()
%{
  constraint(ALLOC_IN_RC(int_rdx_reg));
  match(RegI);
  match(rRegI);

  format %{ "RDX" %}
  interface(REG_INTER);
%}

operand rdi_RegI()
%{
  constraint(ALLOC_IN_RC(int_rdi_reg));
  match(RegI);
  match(rRegI);

  format %{ "RDI" %}
  interface(REG_INTER);
%}

// Integer register excluding RCX
// (presumably because RCX is x86's implicit shift-count register --
// confirm at the instructs that use this operand)
operand no_rcx_RegI()
%{
  constraint(ALLOC_IN_RC(int_no_rcx_reg));
  match(RegI);
  match(rax_RegI);
  match(rbx_RegI);
  match(rdx_RegI);
  match(rdi_RegI);

  format %{ %}
  interface(REG_INTER);
%}

// Integer register excluding RAX and RDX
// (presumably for div/mul instructions that clobber them -- confirm)
operand no_rax_rdx_RegI()
%{
  constraint(ALLOC_IN_RC(int_no_rax_rdx_reg));
  match(RegI);
  match(rbx_RegI);
  match(rcx_RegI);
  match(rdi_RegI);

  format %{ %}
  interface(REG_INTER);
%}
// Pointer Register
operand any_RegP()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegP);
  match(rax_RegP);
  match(rbx_RegP);
  match(rdi_RegP);
  match(rsi_RegP);
  match(rbp_RegP);
  match(r15_RegP);
  match(rRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand rRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(rax_RegP);
  match(rbx_RegP);
  match(rdi_RegP);
  match(rsi_RegP);
  match(rbp_RegP);
  match(r15_RegP);  // See Q&A below about r15_RegP.

  format %{ %}
  interface(REG_INTER);
%}

// Narrow (compressed) pointer register
operand rRegN() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}

// Question: Why is r15_RegP (the read-only TLS register) a match for rRegP?
// Answer: Operand match rules govern the DFA as it processes instruction inputs.
//         It's fine for an instruction input which expects rRegP to match a r15_RegP.
//         The output of an instruction is controlled by the allocator, which respects
//         register class masks, not match rules.  Unless an instruction mentions
//         r15_RegP or any_RegP explicitly as its output, r15 will not be considered
//         by the allocator as an input.

operand no_rax_RegP()
%{
  constraint(ALLOC_IN_RC(ptr_no_rax_reg));
  match(RegP);
  match(rbx_RegP);
  match(rsi_RegP);
  match(rdi_RegP);

  format %{ %}
  interface(REG_INTER);
%}

operand no_rbp_RegP()
%{
  constraint(ALLOC_IN_RC(ptr_no_rbp_reg));
  match(RegP);
  match(rbx_RegP);
  match(rsi_RegP);
  match(rdi_RegP);

  format %{ %}
  interface(REG_INTER);
%}

operand no_rax_rbx_RegP()
%{
  constraint(ALLOC_IN_RC(ptr_no_rax_rbx_reg));
  match(RegP);
  match(rsi_RegP);
  match(rdi_RegP);

  format %{ %}
  interface(REG_INTER);
%}
// Special Registers
// Return a pointer value
operand rax_RegP()
%{
  constraint(ALLOC_IN_RC(ptr_rax_reg));
  match(RegP);
  match(rRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Special Registers
// Return a compressed pointer value
operand rax_RegN()
%{
  constraint(ALLOC_IN_RC(int_rax_reg));
  match(RegN);
  match(rRegN);

  format %{ %}
  interface(REG_INTER);
%}

// Used in AtomicAdd
operand rbx_RegP()
%{
  constraint(ALLOC_IN_RC(ptr_rbx_reg));
  match(RegP);
  match(rRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand rsi_RegP()
%{
  constraint(ALLOC_IN_RC(ptr_rsi_reg));
  match(RegP);
  match(rRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Used in rep stosq
operand rdi_RegP()
%{
  constraint(ALLOC_IN_RC(ptr_rdi_reg));
  match(RegP);
  match(rRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand rbp_RegP()
%{
  constraint(ALLOC_IN_RC(ptr_rbp_reg));
  match(RegP);
  match(rRegP);

  format %{ %}
  interface(REG_INTER);
%}

// R15: thread-local storage pointer (see the Q&A above about why it
// may appear as an rRegP input but is never allocated as an output)
operand r15_RegP()
%{
  constraint(ALLOC_IN_RC(ptr_r15_reg));
  match(RegP);
  match(rRegP);

  format %{ %}
  interface(REG_INTER);
%}
// Long (64-bit) register
operand rRegL()
%{
  constraint(ALLOC_IN_RC(long_reg));
  match(RegL);
  match(rax_RegL);
  match(rdx_RegL);

  format %{ %}
  interface(REG_INTER);
%}

// Special Registers
operand no_rax_rdx_RegL()
%{
  constraint(ALLOC_IN_RC(long_no_rax_rdx_reg));
  match(RegL);
  match(rRegL);

  format %{ %}
  interface(REG_INTER);
%}

// NOTE(review): shares the no-rax-rdx allocation class but additionally
// matches rdx_RegL inputs, so only RAX appears fully excluded as an
// allocated output -- confirm against the instructs that use it.
operand no_rax_RegL()
%{
  constraint(ALLOC_IN_RC(long_no_rax_rdx_reg));
  match(RegL);
  match(rRegL);
  match(rdx_RegL);

  format %{ %}
  interface(REG_INTER);
%}

operand no_rcx_RegL()
%{
  constraint(ALLOC_IN_RC(long_no_rcx_reg));
  match(RegL);
  match(rRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand rax_RegL()
%{
  constraint(ALLOC_IN_RC(long_rax_reg));
  match(RegL);
  match(rRegL);

  format %{ "RAX" %}
  interface(REG_INTER);
%}

operand rcx_RegL()
%{
  constraint(ALLOC_IN_RC(long_rcx_reg));
  match(RegL);
  match(rRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand rdx_RegL()
%{
  constraint(ALLOC_IN_RC(long_rdx_reg));
  match(RegL);
  match(rRegL);

  format %{ %}
  interface(REG_INTER);
%}
// Flags register, used as output of compare instructions
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of FLOATING POINT compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "RFLAGS_U" %}
  interface(REG_INTER);
%}

// NOTE(review): predicate(false) keeps the matcher from ever selecting
// this operand from the ideal graph; presumably it is only used where
// instructions name it explicitly -- confirm at its use sites.
operand rFlagsRegUCF() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);
  predicate(false);

  format %{ "RFLAGS_U_CF" %}
  interface(REG_INTER);
%}

// Float register operands
operand regF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  format %{ %}
  interface(REG_INTER);
%}

// Double register operands
operand regD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  format %{ %}
  interface(REG_INTER);
%}
5390 //----------Memory Operands----------------------------------------------------
5391 // Direct Memory Operand
5392 // operand direct(immP addr)
5393 // %{
5394 // match(addr);
5396 // format %{ "[$addr]" %}
5397 // interface(MEMORY_INTER) %{
5398 // base(0xFFFFFFFF);
5399 // index(0x4);
5400 // scale(0x0);
5401 // disp($addr);
5402 // %}
5403 // %}
5405 // Indirect Memory Operand
5406 operand indirect(any_RegP reg)
5407 %{
5408 constraint(ALLOC_IN_RC(ptr_reg));
5409 match(reg);
5411 format %{ "[$reg]" %}
5412 interface(MEMORY_INTER) %{
5413 base($reg);
5414 index(0x4);
5415 scale(0x0);
5416 disp(0x0);
5417 %}
5418 %}
5420 // Indirect Memory Plus Short Offset Operand
5421 operand indOffset8(any_RegP reg, immL8 off)
5422 %{
5423 constraint(ALLOC_IN_RC(ptr_reg));
5424 match(AddP reg off);
5426 format %{ "[$reg + $off (8-bit)]" %}
5427 interface(MEMORY_INTER) %{
5428 base($reg);
5429 index(0x4);
5430 scale(0x0);
5431 disp($off);
5432 %}
5433 %}
5435 // Indirect Memory Plus Long Offset Operand
5436 operand indOffset32(any_RegP reg, immL32 off)
5437 %{
5438 constraint(ALLOC_IN_RC(ptr_reg));
5439 match(AddP reg off);
5441 format %{ "[$reg + $off (32-bit)]" %}
5442 interface(MEMORY_INTER) %{
5443 base($reg);
5444 index(0x4);
5445 scale(0x0);
5446 disp($off);
5447 %}
5448 %}
5450 // Indirect Memory Plus Index Register Plus Offset Operand
5451 operand indIndexOffset(any_RegP reg, rRegL lreg, immL32 off)
5452 %{
5453 constraint(ALLOC_IN_RC(ptr_reg));
5454 match(AddP (AddP reg lreg) off);
5456 op_cost(10);
5457 format %{"[$reg + $off + $lreg]" %}
5458 interface(MEMORY_INTER) %{
5459 base($reg);
5460 index($lreg);
5461 scale(0x0);
5462 disp($off);
5463 %}
5464 %}
5466 // Indirect Memory Plus Index Register Plus Offset Operand
5467 operand indIndex(any_RegP reg, rRegL lreg)
5468 %{
5469 constraint(ALLOC_IN_RC(ptr_reg));
5470 match(AddP reg lreg);
5472 op_cost(10);
5473 format %{"[$reg + $lreg]" %}
5474 interface(MEMORY_INTER) %{
5475 base($reg);
5476 index($lreg);
5477 scale(0x0);
5478 disp(0x0);
5479 %}
5480 %}
5482 // Indirect Memory Times Scale Plus Index Register
5483 operand indIndexScale(any_RegP reg, rRegL lreg, immI2 scale)
5484 %{
5485 constraint(ALLOC_IN_RC(ptr_reg));
5486 match(AddP reg (LShiftL lreg scale));
5488 op_cost(10);
5489 format %{"[$reg + $lreg << $scale]" %}
5490 interface(MEMORY_INTER) %{
5491 base($reg);
5492 index($lreg);
5493 scale($scale);
5494 disp(0x0);
5495 %}
5496 %}
5498 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5499 operand indIndexScaleOffset(any_RegP reg, immL32 off, rRegL lreg, immI2 scale)
5500 %{
5501 constraint(ALLOC_IN_RC(ptr_reg));
5502 match(AddP (AddP reg (LShiftL lreg scale)) off);
5504 op_cost(10);
5505 format %{"[$reg + $off + $lreg << $scale]" %}
5506 interface(MEMORY_INTER) %{
5507 base($reg);
5508 index($lreg);
5509 scale($scale);
5510 disp($off);
5511 %}
5512 %}
5514 // Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand
operand indPosIndexScaleOffset(any_RegP reg, immL32 off, rRegI idx, immI2 scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  // Only match when the int index is known to be non-negative:
  // n->in(2)->in(3)->in(1) walks outer AddP -> inner AddP's LShiftL -> ConvI2L
  // per the match rule below, and checks the low bound of the ConvI2L's long
  // type, so the sign extension can be folded into the addressing mode safely.
  predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
  match(AddP (AddP reg (LShiftL (ConvI2L idx) scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $idx << $scale]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($idx);
    scale($scale);
    disp($off);
  %}
%}
// Indirect Narrow Oop Plus Offset Operand
// Note: the x86 architecture doesn't support "scale * index + offset" without
// a base, so we can't free r12 even when Universe::narrow_oop_base() == NULL.
operand indCompressedOopOffset(rRegN reg, immL32 off) %{
  predicate(UseCompressedOops && (Universe::narrow_oop_shift() != 0));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);

  op_cost(10);
  format %{"[R12 + $reg << 3 + $off] (compressed oop addressing)" %}
  interface(MEMORY_INTER) %{
    base(0xc); // R12 -- holds the heap base used to decode narrow oops
    index($reg);
    scale(0x3); // NOTE(review): hard-codes a shift of 3, but the predicate only
                // requires narrow_oop_shift() != 0 -- assumes the shift is
                // always 3 when non-zero; confirm against Universe.
    disp($off);
  %}
%}
5549 // Indirect Memory Operand
5550 operand indirectNarrow(rRegN reg)
5551 %{
5552 predicate(Universe::narrow_oop_shift() == 0);
5553 constraint(ALLOC_IN_RC(ptr_reg));
5554 match(DecodeN reg);
5556 format %{ "[$reg]" %}
5557 interface(MEMORY_INTER) %{
5558 base($reg);
5559 index(0x4);
5560 scale(0x0);
5561 disp(0x0);
5562 %}
5563 %}
5565 // Indirect Memory Plus Short Offset Operand
5566 operand indOffset8Narrow(rRegN reg, immL8 off)
5567 %{
5568 predicate(Universe::narrow_oop_shift() == 0);
5569 constraint(ALLOC_IN_RC(ptr_reg));
5570 match(AddP (DecodeN reg) off);
5572 format %{ "[$reg + $off (8-bit)]" %}
5573 interface(MEMORY_INTER) %{
5574 base($reg);
5575 index(0x4);
5576 scale(0x0);
5577 disp($off);
5578 %}
5579 %}
5581 // Indirect Memory Plus Long Offset Operand
5582 operand indOffset32Narrow(rRegN reg, immL32 off)
5583 %{
5584 predicate(Universe::narrow_oop_shift() == 0);
5585 constraint(ALLOC_IN_RC(ptr_reg));
5586 match(AddP (DecodeN reg) off);
5588 format %{ "[$reg + $off (32-bit)]" %}
5589 interface(MEMORY_INTER) %{
5590 base($reg);
5591 index(0x4);
5592 scale(0x0);
5593 disp($off);
5594 %}
5595 %}
5597 // Indirect Memory Plus Index Register Plus Offset Operand
5598 operand indIndexOffsetNarrow(rRegN reg, rRegL lreg, immL32 off)
5599 %{
5600 predicate(Universe::narrow_oop_shift() == 0);
5601 constraint(ALLOC_IN_RC(ptr_reg));
5602 match(AddP (AddP (DecodeN reg) lreg) off);
5604 op_cost(10);
5605 format %{"[$reg + $off + $lreg]" %}
5606 interface(MEMORY_INTER) %{
5607 base($reg);
5608 index($lreg);
5609 scale(0x0);
5610 disp($off);
5611 %}
5612 %}
5614 // Indirect Memory Plus Index Register Plus Offset Operand
5615 operand indIndexNarrow(rRegN reg, rRegL lreg)
5616 %{
5617 predicate(Universe::narrow_oop_shift() == 0);
5618 constraint(ALLOC_IN_RC(ptr_reg));
5619 match(AddP (DecodeN reg) lreg);
5621 op_cost(10);
5622 format %{"[$reg + $lreg]" %}
5623 interface(MEMORY_INTER) %{
5624 base($reg);
5625 index($lreg);
5626 scale(0x0);
5627 disp(0x0);
5628 %}
5629 %}
5631 // Indirect Memory Times Scale Plus Index Register
5632 operand indIndexScaleNarrow(rRegN reg, rRegL lreg, immI2 scale)
5633 %{
5634 predicate(Universe::narrow_oop_shift() == 0);
5635 constraint(ALLOC_IN_RC(ptr_reg));
5636 match(AddP (DecodeN reg) (LShiftL lreg scale));
5638 op_cost(10);
5639 format %{"[$reg + $lreg << $scale]" %}
5640 interface(MEMORY_INTER) %{
5641 base($reg);
5642 index($lreg);
5643 scale($scale);
5644 disp(0x0);
5645 %}
5646 %}
5648 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5649 operand indIndexScaleOffsetNarrow(rRegN reg, immL32 off, rRegL lreg, immI2 scale)
5650 %{
5651 predicate(Universe::narrow_oop_shift() == 0);
5652 constraint(ALLOC_IN_RC(ptr_reg));
5653 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5655 op_cost(10);
5656 format %{"[$reg + $off + $lreg << $scale]" %}
5657 interface(MEMORY_INTER) %{
5658 base($reg);
5659 index($lreg);
5660 scale($scale);
5661 disp($off);
5662 %}
5663 %}
5665 // Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand
5666 operand indPosIndexScaleOffsetNarrow(rRegN reg, immL32 off, rRegI idx, immI2 scale)
5667 %{
5668 constraint(ALLOC_IN_RC(ptr_reg));
5669 predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5670 match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L idx) scale)) off);
5672 op_cost(10);
5673 format %{"[$reg + $off + $idx << $scale]" %}
5674 interface(MEMORY_INTER) %{
5675 base($reg);
5676 index($idx);
5677 scale($scale);
5678 disp($off);
5679 %}
5680 %}
5683 //----------Special Memory Operands--------------------------------------------
5684 // Stack Slot Operand - This operand is used for loading and storing temporary
5685 // values on the stack where a match requires a value to
5686 // flow through memory.
5687 operand stackSlotP(sRegP reg)
5688 %{
5689 constraint(ALLOC_IN_RC(stack_slots));
5690 // No match rule because this operand is only generated in matching
5692 format %{ "[$reg]" %}
5693 interface(MEMORY_INTER) %{
5694 base(0x4); // RSP
5695 index(0x4); // No Index
5696 scale(0x0); // No Scale
5697 disp($reg); // Stack Offset
5698 %}
5699 %}
5701 operand stackSlotI(sRegI reg)
5702 %{
5703 constraint(ALLOC_IN_RC(stack_slots));
5704 // No match rule because this operand is only generated in matching
5706 format %{ "[$reg]" %}
5707 interface(MEMORY_INTER) %{
5708 base(0x4); // RSP
5709 index(0x4); // No Index
5710 scale(0x0); // No Scale
5711 disp($reg); // Stack Offset
5712 %}
5713 %}
5715 operand stackSlotF(sRegF reg)
5716 %{
5717 constraint(ALLOC_IN_RC(stack_slots));
5718 // No match rule because this operand is only generated in matching
5720 format %{ "[$reg]" %}
5721 interface(MEMORY_INTER) %{
5722 base(0x4); // RSP
5723 index(0x4); // No Index
5724 scale(0x0); // No Scale
5725 disp($reg); // Stack Offset
5726 %}
5727 %}
5729 operand stackSlotD(sRegD reg)
5730 %{
5731 constraint(ALLOC_IN_RC(stack_slots));
5732 // No match rule because this operand is only generated in matching
5734 format %{ "[$reg]" %}
5735 interface(MEMORY_INTER) %{
5736 base(0x4); // RSP
5737 index(0x4); // No Index
5738 scale(0x0); // No Scale
5739 disp($reg); // Stack Offset
5740 %}
5741 %}
5742 operand stackSlotL(sRegL reg)
5743 %{
5744 constraint(ALLOC_IN_RC(stack_slots));
5745 // No match rule because this operand is only generated in matching
5747 format %{ "[$reg]" %}
5748 interface(MEMORY_INTER) %{
5749 base(0x4); // RSP
5750 index(0x4); // No Index
5751 scale(0x0); // No Scale
5752 disp($reg); // Stack Offset
5753 %}
5754 %}
5756 //----------Conditional Branch Operands----------------------------------------
5757 // Comparison Op - This is the operation of the comparison, and is limited to
5758 // the following set of codes:
5759 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5760 //
5761 // Other attributes of the comparison, such as unsignedness, are specified
5762 // by the comparison instruction that sets a condition code flags register.
5763 // That result is represented by a flags operand whose subtype is appropriate
5764 // to the unsignedness (etc.) of the comparison.
5765 //
5766 // Later, the instruction which matches both the Comparison Op (a Bool) and
5767 // the flags (produced by the Cmp) specifies the coding of the comparison op
5768 // by matching a specific subtype of Bool operand below, such as cmpOpU.
// Comparison Code
5771 operand cmpOp()
5772 %{
5773 match(Bool);
5775 format %{ "" %}
5776 interface(COND_INTER) %{
5777 equal(0x4, "e");
5778 not_equal(0x5, "ne");
5779 less(0xC, "l");
5780 greater_equal(0xD, "ge");
5781 less_equal(0xE, "le");
5782 greater(0xF, "g");
5783 %}
5784 %}
5786 // Comparison Code, unsigned compare. Used by FP also, with
5787 // C2 (unordered) turned into GT or LT already. The other bits
5788 // C0 and C3 are turned into Carry & Zero flags.
5789 operand cmpOpU()
5790 %{
5791 match(Bool);
5793 format %{ "" %}
5794 interface(COND_INTER) %{
5795 equal(0x4, "e");
5796 not_equal(0x5, "ne");
5797 less(0x2, "b");
5798 greater_equal(0x3, "nb");
5799 less_equal(0x6, "be");
5800 greater(0x7, "nbe");
5801 %}
5802 %}
5805 // Floating comparisons that don't require any fixup for the unordered case
5806 operand cmpOpUCF() %{
5807 match(Bool);
5808 predicate(n->as_Bool()->_test._test == BoolTest::lt ||
5809 n->as_Bool()->_test._test == BoolTest::ge ||
5810 n->as_Bool()->_test._test == BoolTest::le ||
5811 n->as_Bool()->_test._test == BoolTest::gt);
5812 format %{ "" %}
5813 interface(COND_INTER) %{
5814 equal(0x4, "e");
5815 not_equal(0x5, "ne");
5816 less(0x2, "b");
5817 greater_equal(0x3, "nb");
5818 less_equal(0x6, "be");
5819 greater(0x7, "nbe");
5820 %}
5821 %}
5824 // Floating comparisons that can be fixed up with extra conditional jumps
5825 operand cmpOpUCF2() %{
5826 match(Bool);
5827 predicate(n->as_Bool()->_test._test == BoolTest::ne ||
5828 n->as_Bool()->_test._test == BoolTest::eq);
5829 format %{ "" %}
5830 interface(COND_INTER) %{
5831 equal(0x4, "e");
5832 not_equal(0x5, "ne");
5833 less(0x2, "b");
5834 greater_equal(0x3, "nb");
5835 less_equal(0x6, "be");
5836 greater(0x7, "nbe");
5837 %}
5838 %}
5841 //----------OPERAND CLASSES----------------------------------------------------
5842 // Operand Classes are groups of operands that are used as to simplify
5843 // instruction definitions by not requiring the AD writer to specify separate
5844 // instructions for every form of operand when the instruction accepts
5845 // multiple operand types with the same basic encoding and format. The classic
5846 // case of this is memory operands.
5848 opclass memory(indirect, indOffset8, indOffset32, indIndexOffset, indIndex,
5849 indIndexScale, indIndexScaleOffset, indPosIndexScaleOffset,
5850 indCompressedOopOffset,
5851 indirectNarrow, indOffset8Narrow, indOffset32Narrow,
5852 indIndexOffsetNarrow, indIndexNarrow, indIndexScaleNarrow,
5853 indIndexScaleOffsetNarrow, indPosIndexScaleOffsetNarrow);
5855 //----------PIPELINE-----------------------------------------------------------
5856 // Rules which define the behavior of the target architectures pipeline.
5857 pipeline %{
5859 //----------ATTRIBUTES---------------------------------------------------------
attributes %{
  variable_size_instructions;        // Instructions are variable-sized (x86)
  max_instructions_per_bundle = 3;   // Up to 3 instructions per bundle
  instruction_unit_size = 1;         // An instruction is 1 bytes long
  instruction_fetch_unit_size = 16;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 16 bytes

  // List of nop instructions
  nops( MachNop );
%}
5871 //----------RESOURCES----------------------------------------------------------
5872 // Resources are the functional units available to the machine
5874 // Generic P2/P3 pipeline
5875 // 3 decoders, only D0 handles big operands; a "bundle" is the limit of
5876 // 3 instructions decoded per cycle.
5877 // 2 load/store ops per cycle, 1 branch, 1 FPU,
5878 // 3 ALU op, only ALU0 handles mul instructions.
5879 resources( D0, D1, D2, DECODE = D0 | D1 | D2,
5880 MS0, MS1, MS2, MEM = MS0 | MS1 | MS2,
5881 BR, FPU,
5882 ALU0, ALU1, ALU2, ALU = ALU0 | ALU1 | ALU2);
5884 //----------PIPELINE DESCRIPTION-----------------------------------------------
5885 // Pipeline Description specifies the stages in the machine's pipeline
5887 // Generic P2/P3 pipeline
5888 pipe_desc(S0, S1, S2, S3, S4, S5);
5890 //----------PIPELINE CLASSES---------------------------------------------------
5891 // Pipeline Classes describe the stages in which input and output are
5892 // referenced by the hardware pipeline.
5894 // Naming convention: ialu or fpu
5895 // Then: _reg
5896 // Then: _reg if there is a 2nd register
5897 // Then: _long if it's a pair of instructions implementing a long
5898 // Then: _fat if it requires the big decoder
5899 // Or: _mem if it requires the big decoder and a memory unit.
5901 // Integer ALU reg operation
5902 pipe_class ialu_reg(rRegI dst)
5903 %{
5904 single_instruction;
5905 dst : S4(write);
5906 dst : S3(read);
5907 DECODE : S0; // any decoder
5908 ALU : S3; // any alu
5909 %}
5911 // Long ALU reg operation
5912 pipe_class ialu_reg_long(rRegL dst)
5913 %{
5914 instruction_count(2);
5915 dst : S4(write);
5916 dst : S3(read);
5917 DECODE : S0(2); // any 2 decoders
5918 ALU : S3(2); // both alus
5919 %}
5921 // Integer ALU reg operation using big decoder
5922 pipe_class ialu_reg_fat(rRegI dst)
5923 %{
5924 single_instruction;
5925 dst : S4(write);
5926 dst : S3(read);
5927 D0 : S0; // big decoder only
5928 ALU : S3; // any alu
5929 %}
5931 // Long ALU reg operation using big decoder
5932 pipe_class ialu_reg_long_fat(rRegL dst)
5933 %{
5934 instruction_count(2);
5935 dst : S4(write);
5936 dst : S3(read);
5937 D0 : S0(2); // big decoder only; twice
5938 ALU : S3(2); // any 2 alus
5939 %}
5941 // Integer ALU reg-reg operation
5942 pipe_class ialu_reg_reg(rRegI dst, rRegI src)
5943 %{
5944 single_instruction;
5945 dst : S4(write);
5946 src : S3(read);
5947 DECODE : S0; // any decoder
5948 ALU : S3; // any alu
5949 %}
5951 // Long ALU reg-reg operation
5952 pipe_class ialu_reg_reg_long(rRegL dst, rRegL src)
5953 %{
5954 instruction_count(2);
5955 dst : S4(write);
5956 src : S3(read);
5957 DECODE : S0(2); // any 2 decoders
5958 ALU : S3(2); // both alus
5959 %}
5961 // Integer ALU reg-reg operation
5962 pipe_class ialu_reg_reg_fat(rRegI dst, memory src)
5963 %{
5964 single_instruction;
5965 dst : S4(write);
5966 src : S3(read);
5967 D0 : S0; // big decoder only
5968 ALU : S3; // any alu
5969 %}
5971 // Long ALU reg-reg operation
5972 pipe_class ialu_reg_reg_long_fat(rRegL dst, rRegL src)
5973 %{
5974 instruction_count(2);
5975 dst : S4(write);
5976 src : S3(read);
5977 D0 : S0(2); // big decoder only; twice
5978 ALU : S3(2); // both alus
5979 %}
5981 // Integer ALU reg-mem operation
5982 pipe_class ialu_reg_mem(rRegI dst, memory mem)
5983 %{
5984 single_instruction;
5985 dst : S5(write);
5986 mem : S3(read);
5987 D0 : S0; // big decoder only
5988 ALU : S4; // any alu
5989 MEM : S3; // any mem
5990 %}
5992 // Integer mem operation (prefetch)
5993 pipe_class ialu_mem(memory mem)
5994 %{
5995 single_instruction;
5996 mem : S3(read);
5997 D0 : S0; // big decoder only
5998 MEM : S3; // any mem
5999 %}
6001 // Integer Store to Memory
6002 pipe_class ialu_mem_reg(memory mem, rRegI src)
6003 %{
6004 single_instruction;
6005 mem : S3(read);
6006 src : S5(read);
6007 D0 : S0; // big decoder only
6008 ALU : S4; // any alu
6009 MEM : S3;
6010 %}
6012 // // Long Store to Memory
6013 // pipe_class ialu_mem_long_reg(memory mem, rRegL src)
6014 // %{
6015 // instruction_count(2);
6016 // mem : S3(read);
6017 // src : S5(read);
6018 // D0 : S0(2); // big decoder only; twice
6019 // ALU : S4(2); // any 2 alus
6020 // MEM : S3(2); // Both mems
6021 // %}
6023 // Integer Store to Memory
6024 pipe_class ialu_mem_imm(memory mem)
6025 %{
6026 single_instruction;
6027 mem : S3(read);
6028 D0 : S0; // big decoder only
6029 ALU : S4; // any alu
6030 MEM : S3;
6031 %}
6033 // Integer ALU0 reg-reg operation
6034 pipe_class ialu_reg_reg_alu0(rRegI dst, rRegI src)
6035 %{
6036 single_instruction;
6037 dst : S4(write);
6038 src : S3(read);
6039 D0 : S0; // Big decoder only
6040 ALU0 : S3; // only alu0
6041 %}
6043 // Integer ALU0 reg-mem operation
6044 pipe_class ialu_reg_mem_alu0(rRegI dst, memory mem)
6045 %{
6046 single_instruction;
6047 dst : S5(write);
6048 mem : S3(read);
6049 D0 : S0; // big decoder only
6050 ALU0 : S4; // ALU0 only
6051 MEM : S3; // any mem
6052 %}
6054 // Integer ALU reg-reg operation
6055 pipe_class ialu_cr_reg_reg(rFlagsReg cr, rRegI src1, rRegI src2)
6056 %{
6057 single_instruction;
6058 cr : S4(write);
6059 src1 : S3(read);
6060 src2 : S3(read);
6061 DECODE : S0; // any decoder
6062 ALU : S3; // any alu
6063 %}
6065 // Integer ALU reg-imm operation
6066 pipe_class ialu_cr_reg_imm(rFlagsReg cr, rRegI src1)
6067 %{
6068 single_instruction;
6069 cr : S4(write);
6070 src1 : S3(read);
6071 DECODE : S0; // any decoder
6072 ALU : S3; // any alu
6073 %}
6075 // Integer ALU reg-mem operation
6076 pipe_class ialu_cr_reg_mem(rFlagsReg cr, rRegI src1, memory src2)
6077 %{
6078 single_instruction;
6079 cr : S4(write);
6080 src1 : S3(read);
6081 src2 : S3(read);
6082 D0 : S0; // big decoder only
6083 ALU : S4; // any alu
6084 MEM : S3;
6085 %}
6087 // Conditional move reg-reg
6088 pipe_class pipe_cmplt( rRegI p, rRegI q, rRegI y)
6089 %{
6090 instruction_count(4);
6091 y : S4(read);
6092 q : S3(read);
6093 p : S3(read);
6094 DECODE : S0(4); // any decoder
6095 %}
6097 // Conditional move reg-reg
6098 pipe_class pipe_cmov_reg( rRegI dst, rRegI src, rFlagsReg cr)
6099 %{
6100 single_instruction;
6101 dst : S4(write);
6102 src : S3(read);
6103 cr : S3(read);
6104 DECODE : S0; // any decoder
6105 %}
6107 // Conditional move reg-mem
6108 pipe_class pipe_cmov_mem( rFlagsReg cr, rRegI dst, memory src)
6109 %{
6110 single_instruction;
6111 dst : S4(write);
6112 src : S3(read);
6113 cr : S3(read);
6114 DECODE : S0; // any decoder
6115 MEM : S3;
6116 %}
6118 // Conditional move reg-reg long
6119 pipe_class pipe_cmov_reg_long( rFlagsReg cr, rRegL dst, rRegL src)
6120 %{
6121 single_instruction;
6122 dst : S4(write);
6123 src : S3(read);
6124 cr : S3(read);
6125 DECODE : S0(2); // any 2 decoders
6126 %}
6128 // XXX
6129 // // Conditional move double reg-reg
6130 // pipe_class pipe_cmovD_reg( rFlagsReg cr, regDPR1 dst, regD src)
6131 // %{
6132 // single_instruction;
6133 // dst : S4(write);
6134 // src : S3(read);
6135 // cr : S3(read);
6136 // DECODE : S0; // any decoder
6137 // %}
6139 // Float reg-reg operation
6140 pipe_class fpu_reg(regD dst)
6141 %{
6142 instruction_count(2);
6143 dst : S3(read);
6144 DECODE : S0(2); // any 2 decoders
6145 FPU : S3;
6146 %}
6148 // Float reg-reg operation
6149 pipe_class fpu_reg_reg(regD dst, regD src)
6150 %{
6151 instruction_count(2);
6152 dst : S4(write);
6153 src : S3(read);
6154 DECODE : S0(2); // any 2 decoders
6155 FPU : S3;
6156 %}
6158 // Float reg-reg operation
6159 pipe_class fpu_reg_reg_reg(regD dst, regD src1, regD src2)
6160 %{
6161 instruction_count(3);
6162 dst : S4(write);
6163 src1 : S3(read);
6164 src2 : S3(read);
6165 DECODE : S0(3); // any 3 decoders
6166 FPU : S3(2);
6167 %}
6169 // Float reg-reg operation
pipe_class fpu_reg_reg_reg_reg(regD dst, regD src1, regD src2, regD src3)
%{
  instruction_count(4);
  dst    : S4(write);
  src1   : S3(read);
  src2   : S3(read);
  src3   : S3(read);
  DECODE : S0(4); // any 4 decoders
  FPU    : S3(2);
%}
6181 // Float reg-reg operation
6182 pipe_class fpu_reg_mem_reg_reg(regD dst, memory src1, regD src2, regD src3)
6183 %{
6184 instruction_count(4);
6185 dst : S4(write);
6186 src1 : S3(read);
6187 src2 : S3(read);
6188 src3 : S3(read);
6189 DECODE : S1(3); // any 3 decoders
6190 D0 : S0; // Big decoder only
6191 FPU : S3(2);
6192 MEM : S3;
6193 %}
6195 // Float reg-mem operation
6196 pipe_class fpu_reg_mem(regD dst, memory mem)
6197 %{
6198 instruction_count(2);
6199 dst : S5(write);
6200 mem : S3(read);
6201 D0 : S0; // big decoder only
6202 DECODE : S1; // any decoder for FPU POP
6203 FPU : S4;
6204 MEM : S3; // any mem
6205 %}
6207 // Float reg-mem operation
6208 pipe_class fpu_reg_reg_mem(regD dst, regD src1, memory mem)
6209 %{
6210 instruction_count(3);
6211 dst : S5(write);
6212 src1 : S3(read);
6213 mem : S3(read);
6214 D0 : S0; // big decoder only
6215 DECODE : S1(2); // any decoder for FPU POP
6216 FPU : S4;
6217 MEM : S3; // any mem
6218 %}
6220 // Float mem-reg operation
6221 pipe_class fpu_mem_reg(memory mem, regD src)
6222 %{
6223 instruction_count(2);
6224 src : S5(read);
6225 mem : S3(read);
6226 DECODE : S0; // any decoder for FPU PUSH
6227 D0 : S1; // big decoder only
6228 FPU : S4;
6229 MEM : S3; // any mem
6230 %}
6232 pipe_class fpu_mem_reg_reg(memory mem, regD src1, regD src2)
6233 %{
6234 instruction_count(3);
6235 src1 : S3(read);
6236 src2 : S3(read);
6237 mem : S3(read);
6238 DECODE : S0(2); // any decoder for FPU PUSH
6239 D0 : S1; // big decoder only
6240 FPU : S4;
6241 MEM : S3; // any mem
6242 %}
6244 pipe_class fpu_mem_reg_mem(memory mem, regD src1, memory src2)
6245 %{
6246 instruction_count(3);
6247 src1 : S3(read);
6248 src2 : S3(read);
6249 mem : S4(read);
6250 DECODE : S0; // any decoder for FPU PUSH
6251 D0 : S0(2); // big decoder only
6252 FPU : S4;
6253 MEM : S3(2); // any mem
6254 %}
6256 pipe_class fpu_mem_mem(memory dst, memory src1)
6257 %{
6258 instruction_count(2);
6259 src1 : S3(read);
6260 dst : S4(read);
6261 D0 : S0(2); // big decoder only
6262 MEM : S3(2); // any mem
6263 %}
6265 pipe_class fpu_mem_mem_mem(memory dst, memory src1, memory src2)
6266 %{
6267 instruction_count(3);
6268 src1 : S3(read);
6269 src2 : S3(read);
6270 dst : S4(read);
6271 D0 : S0(3); // big decoder only
6272 FPU : S4;
6273 MEM : S3(3); // any mem
6274 %}
6276 pipe_class fpu_mem_reg_con(memory mem, regD src1)
6277 %{
6278 instruction_count(3);
6279 src1 : S4(read);
6280 mem : S4(read);
6281 DECODE : S0; // any decoder for FPU PUSH
6282 D0 : S0(2); // big decoder only
6283 FPU : S4;
6284 MEM : S3(2); // any mem
6285 %}
6287 // Float load constant
6288 pipe_class fpu_reg_con(regD dst)
6289 %{
6290 instruction_count(2);
6291 dst : S5(write);
6292 D0 : S0; // big decoder only for the load
6293 DECODE : S1; // any decoder for FPU POP
6294 FPU : S4;
6295 MEM : S3; // any mem
6296 %}
6298 // Float load constant
6299 pipe_class fpu_reg_reg_con(regD dst, regD src)
6300 %{
6301 instruction_count(3);
6302 dst : S5(write);
6303 src : S3(read);
6304 D0 : S0; // big decoder only for the load
6305 DECODE : S1(2); // any decoder for FPU POP
6306 FPU : S4;
6307 MEM : S3; // any mem
6308 %}
6310 // UnConditional branch
6311 pipe_class pipe_jmp(label labl)
6312 %{
6313 single_instruction;
6314 BR : S3;
6315 %}
6317 // Conditional branch
6318 pipe_class pipe_jcc(cmpOp cmp, rFlagsReg cr, label labl)
6319 %{
6320 single_instruction;
6321 cr : S1(read);
6322 BR : S3;
6323 %}
6325 // Allocation idiom
6326 pipe_class pipe_cmpxchg(rRegP dst, rRegP heap_ptr)
6327 %{
6328 instruction_count(1); force_serialization;
6329 fixed_latency(6);
6330 heap_ptr : S3(read);
6331 DECODE : S0(3);
6332 D0 : S2;
6333 MEM : S3;
6334 ALU : S3(2);
6335 dst : S5(write);
6336 BR : S5;
6337 %}
6339 // Generic big/slow expanded idiom
6340 pipe_class pipe_slow()
6341 %{
6342 instruction_count(10); multiple_bundles; force_serialization;
6343 fixed_latency(100);
6344 D0 : S0(2);
6345 MEM : S3(2);
6346 %}
6348 // The real do-nothing guy
6349 pipe_class empty()
6350 %{
6351 instruction_count(0);
6352 %}
6354 // Define the class for the Nop node
6355 define
6356 %{
6357 MachNop = empty;
6358 %}
6360 %}
6362 //----------INSTRUCTIONS-------------------------------------------------------
6363 //
6364 // match -- States which machine-independent subtree may be replaced
6365 // by this instruction.
6366 // ins_cost -- The estimated cost of this instruction is used by instruction
6367 // selection to identify a minimum cost tree of machine
6368 // instructions that matches a tree of machine-independent
6369 // instructions.
6370 // format -- A string providing the disassembly for this instruction.
6371 // The value of an instruction's operand may be inserted
6372 // by referring to it with a '$' prefix.
6373 // opcode -- Three instruction opcodes may be provided. These are referred
6374 // to within an encode class as $primary, $secondary, and $tertiary
//              respectively. The primary opcode is commonly used to
6376 // indicate the type of machine instruction, while secondary
6377 // and tertiary are often used for prefix options or addressing
6378 // modes.
6379 // ins_encode -- A list of encode classes with parameters. The encode class
6380 // name must have been defined in an 'enc_class' specification
6381 // in the encode section of the architecture description.
6384 //----------Load/Store/Move Instructions---------------------------------------
6385 //----------Load Instructions--------------------------------------------------
6387 // Load Byte (8 bit signed)
6388 instruct loadB(rRegI dst, memory mem)
6389 %{
6390 match(Set dst (LoadB mem));
6392 ins_cost(125);
6393 format %{ "movsbl $dst, $mem\t# byte" %}
6395 ins_encode %{
6396 __ movsbl($dst$$Register, $mem$$Address);
6397 %}
6399 ins_pipe(ialu_reg_mem);
6400 %}
6402 // Load Byte (8 bit signed) into Long Register
6403 instruct loadB2L(rRegL dst, memory mem)
6404 %{
6405 match(Set dst (ConvI2L (LoadB mem)));
6407 ins_cost(125);
6408 format %{ "movsbq $dst, $mem\t# byte -> long" %}
6410 ins_encode %{
6411 __ movsbq($dst$$Register, $mem$$Address);
6412 %}
6414 ins_pipe(ialu_reg_mem);
6415 %}
6417 // Load Unsigned Byte (8 bit UNsigned)
6418 instruct loadUB(rRegI dst, memory mem)
6419 %{
6420 match(Set dst (LoadUB mem));
6422 ins_cost(125);
6423 format %{ "movzbl $dst, $mem\t# ubyte" %}
6425 ins_encode %{
6426 __ movzbl($dst$$Register, $mem$$Address);
6427 %}
6429 ins_pipe(ialu_reg_mem);
6430 %}
6432 // Load Unsigned Byte (8 bit UNsigned) into Long Register
6433 instruct loadUB2L(rRegL dst, memory mem)
6434 %{
6435 match(Set dst (ConvI2L (LoadUB mem)));
6437 ins_cost(125);
6438 format %{ "movzbq $dst, $mem\t# ubyte -> long" %}
6440 ins_encode %{
6441 __ movzbq($dst$$Register, $mem$$Address);
6442 %}
6444 ins_pipe(ialu_reg_mem);
6445 %}
6447 // Load Short (16 bit signed)
6448 instruct loadS(rRegI dst, memory mem)
6449 %{
6450 match(Set dst (LoadS mem));
6452 ins_cost(125);
6453 format %{ "movswl $dst, $mem\t# short" %}
6455 ins_encode %{
6456 __ movswl($dst$$Register, $mem$$Address);
6457 %}
6459 ins_pipe(ialu_reg_mem);
6460 %}
6462 // Load Short (16 bit signed) into Long Register
6463 instruct loadS2L(rRegL dst, memory mem)
6464 %{
6465 match(Set dst (ConvI2L (LoadS mem)));
6467 ins_cost(125);
6468 format %{ "movswq $dst, $mem\t# short -> long" %}
6470 ins_encode %{
6471 __ movswq($dst$$Register, $mem$$Address);
6472 %}
6474 ins_pipe(ialu_reg_mem);
6475 %}
6477 // Load Unsigned Short/Char (16 bit UNsigned)
6478 instruct loadUS(rRegI dst, memory mem)
6479 %{
6480 match(Set dst (LoadUS mem));
6482 ins_cost(125);
6483 format %{ "movzwl $dst, $mem\t# ushort/char" %}
6485 ins_encode %{
6486 __ movzwl($dst$$Register, $mem$$Address);
6487 %}
6489 ins_pipe(ialu_reg_mem);
6490 %}
6492 // Load Unsigned Short/Char (16 bit UNsigned) into Long Register
6493 instruct loadUS2L(rRegL dst, memory mem)
6494 %{
6495 match(Set dst (ConvI2L (LoadUS mem)));
6497 ins_cost(125);
6498 format %{ "movzwq $dst, $mem\t# ushort/char -> long" %}
6500 ins_encode %{
6501 __ movzwq($dst$$Register, $mem$$Address);
6502 %}
6504 ins_pipe(ialu_reg_mem);
6505 %}
6507 // Load Integer
6508 instruct loadI(rRegI dst, memory mem)
6509 %{
6510 match(Set dst (LoadI mem));
6512 ins_cost(125);
6513 format %{ "movl $dst, $mem\t# int" %}
6515 ins_encode %{
6516 __ movl($dst$$Register, $mem$$Address);
6517 %}
6519 ins_pipe(ialu_reg_mem);
6520 %}
6522 // Load Integer into Long Register
6523 instruct loadI2L(rRegL dst, memory mem)
6524 %{
6525 match(Set dst (ConvI2L (LoadI mem)));
6527 ins_cost(125);
6528 format %{ "movslq $dst, $mem\t# int -> long" %}
6530 ins_encode %{
6531 __ movslq($dst$$Register, $mem$$Address);
6532 %}
6534 ins_pipe(ialu_reg_mem);
6535 %}
6537 // Load Unsigned Integer into Long Register
6538 instruct loadUI2L(rRegL dst, memory mem)
6539 %{
6540 match(Set dst (LoadUI2L mem));
6542 ins_cost(125);
6543 format %{ "movl $dst, $mem\t# uint -> long" %}
6545 ins_encode %{
6546 __ movl($dst$$Register, $mem$$Address);
6547 %}
6549 ins_pipe(ialu_reg_mem);
6550 %}
6552 // Load Long
6553 instruct loadL(rRegL dst, memory mem)
6554 %{
6555 match(Set dst (LoadL mem));
6557 ins_cost(125);
6558 format %{ "movq $dst, $mem\t# long" %}
6560 ins_encode %{
6561 __ movq($dst$$Register, $mem$$Address);
6562 %}
6564 ins_pipe(ialu_reg_mem); // XXX
6565 %}
6567 // Load Range
6568 instruct loadRange(rRegI dst, memory mem)
6569 %{
6570 match(Set dst (LoadRange mem));
6572 ins_cost(125); // XXX
6573 format %{ "movl $dst, $mem\t# range" %}
6574 opcode(0x8B);
6575 ins_encode(REX_reg_mem(dst, mem), OpcP, reg_mem(dst, mem));
6576 ins_pipe(ialu_reg_mem);
6577 %}
6579 // Load Pointer
6580 instruct loadP(rRegP dst, memory mem)
6581 %{
6582 match(Set dst (LoadP mem));
6584 ins_cost(125); // XXX
6585 format %{ "movq $dst, $mem\t# ptr" %}
6586 opcode(0x8B);
6587 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6588 ins_pipe(ialu_reg_mem); // XXX
6589 %}
6591 // Load Compressed Pointer
6592 instruct loadN(rRegN dst, memory mem)
6593 %{
6594 match(Set dst (LoadN mem));
6596 ins_cost(125); // XXX
6597 format %{ "movl $dst, $mem\t# compressed ptr" %}
6598 ins_encode %{
6599 __ movl($dst$$Register, $mem$$Address);
6600 %}
6601 ins_pipe(ialu_reg_mem); // XXX
6602 %}
6605 // Load Klass Pointer
6606 instruct loadKlass(rRegP dst, memory mem)
6607 %{
6608 match(Set dst (LoadKlass mem));
6610 ins_cost(125); // XXX
6611 format %{ "movq $dst, $mem\t# class" %}
6612 opcode(0x8B);
6613 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6614 ins_pipe(ialu_reg_mem); // XXX
6615 %}
6617 // Load narrow Klass Pointer
6618 instruct loadNKlass(rRegN dst, memory mem)
6619 %{
6620 match(Set dst (LoadNKlass mem));
6622 ins_cost(125); // XXX
6623 format %{ "movl $dst, $mem\t# compressed klass ptr" %}
6624 ins_encode %{
6625 __ movl($dst$$Register, $mem$$Address);
6626 %}
6627 ins_pipe(ialu_reg_mem); // XXX
6628 %}
6630 // Load Float
6631 instruct loadF(regF dst, memory mem)
6632 %{
6633 match(Set dst (LoadF mem));
6635 ins_cost(145); // XXX
6636 format %{ "movss $dst, $mem\t# float" %}
6637 opcode(0xF3, 0x0F, 0x10);
6638 ins_encode(OpcP, REX_reg_mem(dst, mem), OpcS, OpcT, reg_mem(dst, mem));
6639 ins_pipe(pipe_slow); // XXX
6640 %}
6642 // Load Double
6643 instruct loadD_partial(regD dst, memory mem)
6644 %{
6645 predicate(!UseXmmLoadAndClearUpper);
6646 match(Set dst (LoadD mem));
6648 ins_cost(145); // XXX
6649 format %{ "movlpd $dst, $mem\t# double" %}
6650 opcode(0x66, 0x0F, 0x12);
6651 ins_encode(OpcP, REX_reg_mem(dst, mem), OpcS, OpcT, reg_mem(dst, mem));
6652 ins_pipe(pipe_slow); // XXX
6653 %}
6655 instruct loadD(regD dst, memory mem)
6656 %{
6657 predicate(UseXmmLoadAndClearUpper);
6658 match(Set dst (LoadD mem));
6660 ins_cost(145); // XXX
6661 format %{ "movsd $dst, $mem\t# double" %}
6662 opcode(0xF2, 0x0F, 0x10);
6663 ins_encode(OpcP, REX_reg_mem(dst, mem), OpcS, OpcT, reg_mem(dst, mem));
6664 ins_pipe(pipe_slow); // XXX
6665 %}
// Aligned packed (SIMD) loads: all variants below move one 64-bit lane
// into an XMM register via the shared movq_ld encoding; they differ only
// in the ideal Load* node they match (element type/count).
6667 // Load Aligned Packed Byte to XMM register
6668 instruct loadA8B(regD dst, memory mem) %{
6669 match(Set dst (Load8B mem));
6670 ins_cost(125);
6671 format %{ "MOVQ $dst,$mem\t! packed8B" %}
6672 ins_encode( movq_ld(dst, mem));
6673 ins_pipe( pipe_slow );
6674 %}
6676 // Load Aligned Packed Short to XMM register
6677 instruct loadA4S(regD dst, memory mem) %{
6678 match(Set dst (Load4S mem));
6679 ins_cost(125);
6680 format %{ "MOVQ $dst,$mem\t! packed4S" %}
6681 ins_encode( movq_ld(dst, mem));
6682 ins_pipe( pipe_slow );
6683 %}
6685 // Load Aligned Packed Char to XMM register
6686 instruct loadA4C(regD dst, memory mem) %{
6687 match(Set dst (Load4C mem));
6688 ins_cost(125);
6689 format %{ "MOVQ $dst,$mem\t! packed4C" %}
6690 ins_encode( movq_ld(dst, mem));
6691 ins_pipe( pipe_slow );
6692 %}
6694 // Load Aligned Packed Integer to XMM register
6695 instruct load2IU(regD dst, memory mem) %{
6696 match(Set dst (Load2I mem));
6697 ins_cost(125);
6698 format %{ "MOVQ $dst,$mem\t! packed2I" %}
6699 ins_encode( movq_ld(dst, mem));
6700 ins_pipe( pipe_slow );
6701 %}
6703 // Load Aligned Packed Single to XMM
6704 instruct loadA2F(regD dst, memory mem) %{
6705 match(Set dst (Load2F mem));
6706 ins_cost(145);
6707 format %{ "MOVQ $dst,$mem\t! packed2F" %}
6708 ins_encode( movq_ld(dst, mem));
6709 ins_pipe( pipe_slow );
6710 %}
6712 // Load Effective Address
// All LEA rules share opcode 0x8D with a REX.W prefix (64-bit result);
// each variant exists only so a different addressing-mode operand
// (offset size, index, scale) can be matched and costed separately.
6713 instruct leaP8(rRegP dst, indOffset8 mem)
6714 %{
6715 match(Set dst mem);
6717 ins_cost(110); // XXX
6718 format %{ "leaq $dst, $mem\t# ptr 8" %}
6719 opcode(0x8D);
6720 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6721 ins_pipe(ialu_reg_reg_fat);
6722 %}
6724 instruct leaP32(rRegP dst, indOffset32 mem)
6725 %{
6726 match(Set dst mem);
6728 ins_cost(110);
6729 format %{ "leaq $dst, $mem\t# ptr 32" %}
6730 opcode(0x8D);
6731 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6732 ins_pipe(ialu_reg_reg_fat);
6733 %}
// NOTE(review): leaPIdx is intentionally left commented out in the
// original file; kept as-is.
6735 // instruct leaPIdx(rRegP dst, indIndex mem)
6736 // %{
6737 // match(Set dst mem);
6739 // ins_cost(110);
6740 // format %{ "leaq $dst, $mem\t# ptr idx" %}
6741 // opcode(0x8D);
6742 // ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6743 // ins_pipe(ialu_reg_reg_fat);
6744 // %}
6746 instruct leaPIdxOff(rRegP dst, indIndexOffset mem)
6747 %{
6748 match(Set dst mem);
6750 ins_cost(110);
6751 format %{ "leaq $dst, $mem\t# ptr idxoff" %}
6752 opcode(0x8D);
6753 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6754 ins_pipe(ialu_reg_reg_fat);
6755 %}
6757 instruct leaPIdxScale(rRegP dst, indIndexScale mem)
6758 %{
6759 match(Set dst mem);
6761 ins_cost(110);
6762 format %{ "leaq $dst, $mem\t# ptr idxscale" %}
6763 opcode(0x8D);
6764 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6765 ins_pipe(ialu_reg_reg_fat);
6766 %}
6768 instruct leaPIdxScaleOff(rRegP dst, indIndexScaleOffset mem)
6769 %{
6770 match(Set dst mem);
6772 ins_cost(110);
6773 format %{ "leaq $dst, $mem\t# ptr idxscaleoff" %}
6774 opcode(0x8D);
6775 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6776 ins_pipe(ialu_reg_reg_fat);
6777 %}
6779 instruct leaPPosIdxScaleOff(rRegP dst, indPosIndexScaleOffset mem)
6780 %{
6781 match(Set dst mem);
6783 ins_cost(110);
6784 format %{ "leaq $dst, $mem\t# ptr posidxscaleoff" %}
6785 opcode(0x8D);
6786 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6787 ins_pipe(ialu_reg_reg_fat);
6788 %}
6790 // Load Effective Address which uses Narrow (32-bits) oop
// The narrow variants below fold the DecodeN into the address arithmetic.
// All but the first require narrow_oop_shift() == 0, i.e. the compressed
// value is the pointer itself and can be used directly as a base.
6791 instruct leaPCompressedOopOffset(rRegP dst, indCompressedOopOffset mem)
6792 %{
6793 predicate(UseCompressedOops && (Universe::narrow_oop_shift() != 0));
6794 match(Set dst mem);
6796 ins_cost(110);
6797 format %{ "leaq $dst, $mem\t# ptr compressedoopoff32" %}
6798 opcode(0x8D);
6799 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6800 ins_pipe(ialu_reg_reg_fat);
6801 %}
6803 instruct leaP8Narrow(rRegP dst, indOffset8Narrow mem)
6804 %{
6805 predicate(Universe::narrow_oop_shift() == 0);
6806 match(Set dst mem);
6808 ins_cost(110); // XXX
6809 format %{ "leaq $dst, $mem\t# ptr off8narrow" %}
6810 opcode(0x8D);
6811 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6812 ins_pipe(ialu_reg_reg_fat);
6813 %}
6815 instruct leaP32Narrow(rRegP dst, indOffset32Narrow mem)
6816 %{
6817 predicate(Universe::narrow_oop_shift() == 0);
6818 match(Set dst mem);
6820 ins_cost(110);
6821 format %{ "leaq $dst, $mem\t# ptr off32narrow" %}
6822 opcode(0x8D);
6823 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6824 ins_pipe(ialu_reg_reg_fat);
6825 %}
6827 instruct leaPIdxOffNarrow(rRegP dst, indIndexOffsetNarrow mem)
6828 %{
6829 predicate(Universe::narrow_oop_shift() == 0);
6830 match(Set dst mem);
6832 ins_cost(110);
6833 format %{ "leaq $dst, $mem\t# ptr idxoffnarrow" %}
6834 opcode(0x8D);
6835 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6836 ins_pipe(ialu_reg_reg_fat);
6837 %}
6839 instruct leaPIdxScaleNarrow(rRegP dst, indIndexScaleNarrow mem)
6840 %{
6841 predicate(Universe::narrow_oop_shift() == 0);
6842 match(Set dst mem);
6844 ins_cost(110);
6845 format %{ "leaq $dst, $mem\t# ptr idxscalenarrow" %}
6846 opcode(0x8D);
6847 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6848 ins_pipe(ialu_reg_reg_fat);
6849 %}
6851 instruct leaPIdxScaleOffNarrow(rRegP dst, indIndexScaleOffsetNarrow mem)
6852 %{
6853 predicate(Universe::narrow_oop_shift() == 0);
6854 match(Set dst mem);
6856 ins_cost(110);
6857 format %{ "leaq $dst, $mem\t# ptr idxscaleoffnarrow" %}
6858 opcode(0x8D);
6859 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6860 ins_pipe(ialu_reg_reg_fat);
6861 %}
6863 instruct leaPPosIdxScaleOffNarrow(rRegP dst, indPosIndexScaleOffsetNarrow mem)
6864 %{
6865 predicate(Universe::narrow_oop_shift() == 0);
6866 match(Set dst mem);
6868 ins_cost(110);
6869 format %{ "leaq $dst, $mem\t# ptr posidxscaleoffnarrow" %}
6870 opcode(0x8D);
6871 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6872 ins_pipe(ialu_reg_reg_fat);
6873 %}
// Constant-to-register loads. Zero constants use xor reg,reg (shorter
// and recognized as a dependency-breaking idiom), which is why those
// rules KILL the flags register and carry a lower cost.
6875 instruct loadConI(rRegI dst, immI src)
6876 %{
6877 match(Set dst src);
6879 format %{ "movl $dst, $src\t# int" %}
6880 ins_encode(load_immI(dst, src));
6881 ins_pipe(ialu_reg_fat); // XXX
6882 %}
6884 instruct loadConI0(rRegI dst, immI0 src, rFlagsReg cr)
6885 %{
6886 match(Set dst src);
6887 effect(KILL cr); // xor sets the arithmetic flags
6889 ins_cost(50);
6890 format %{ "xorl $dst, $dst\t# int" %}
6891 opcode(0x33); /* + rd */
6892 ins_encode(REX_reg_reg(dst, dst), OpcP, reg_reg(dst, dst));
6893 ins_pipe(ialu_reg);
6894 %}
6896 instruct loadConL(rRegL dst, immL src)
6897 %{
6898 match(Set dst src);
6900 ins_cost(150);
6901 format %{ "movq $dst, $src\t# long" %}
6902 ins_encode(load_immL(dst, src));
6903 ins_pipe(ialu_reg);
6904 %}
6906 instruct loadConL0(rRegL dst, immL0 src, rFlagsReg cr)
6907 %{
6908 match(Set dst src);
6909 effect(KILL cr);
6911 ins_cost(50);
// 32-bit xor implicitly zeroes the upper 32 bits of the 64-bit register.
6912 format %{ "xorl $dst, $dst\t# long" %}
6913 opcode(0x33); /* + rd */
6914 ins_encode(REX_reg_reg(dst, dst), OpcP, reg_reg(dst, dst));
6915 ins_pipe(ialu_reg); // XXX
6916 %}
// Long constants that fit in 32 bits get cheaper encodings: movl for
// unsigned (zero-extends) and sign-extended movq for signed.
6918 instruct loadConUL32(rRegL dst, immUL32 src)
6919 %{
6920 match(Set dst src);
6922 ins_cost(60);
6923 format %{ "movl $dst, $src\t# long (unsigned 32-bit)" %}
6924 ins_encode(load_immUL32(dst, src));
6925 ins_pipe(ialu_reg);
6926 %}
6928 instruct loadConL32(rRegL dst, immL32 src)
6929 %{
6930 match(Set dst src);
6932 ins_cost(70);
6933 format %{ "movq $dst, $src\t# long (32-bit)" %}
6934 ins_encode(load_immL32(dst, src));
6935 ins_pipe(ialu_reg);
6936 %}
6938 instruct loadConP(rRegP dst, immP src)
6939 %{
6940 match(Set dst src);
6942 format %{ "movq $dst, $src\t# ptr" %}
6943 ins_encode(load_immP(dst, src));
6944 ins_pipe(ialu_reg_fat); // XXX
6945 %}
6947 instruct loadConP0(rRegP dst, immP0 src, rFlagsReg cr)
6948 %{
6949 match(Set dst src);
6950 effect(KILL cr);
6952 ins_cost(50);
6953 format %{ "xorl $dst, $dst\t# ptr" %}
6954 opcode(0x33); /* + rd */
6955 ins_encode(REX_reg_reg(dst, dst), OpcP, reg_reg(dst, dst));
6956 ins_pipe(ialu_reg);
6957 %}
6959 instruct loadConP31(rRegP dst, immP31 src, rFlagsReg cr)
6960 %{
6961 match(Set dst src);
6962 effect(KILL cr);
6964 ins_cost(60);
6965 format %{ "movl $dst, $src\t# ptr (positive 32-bit)" %}
6966 ins_encode(load_immP31(dst, src));
6967 ins_pipe(ialu_reg);
6968 %}
// Non-zero float constants are materialized by loading from a constant
// table entry, hence the memory-operand style format "[$src]".
6970 instruct loadConF(regF dst, immF src)
6971 %{
6972 match(Set dst src);
6973 ins_cost(125);
6975 format %{ "movss $dst, [$src]" %}
6976 ins_encode(load_conF(dst, src));
6977 ins_pipe(pipe_slow);
6978 %}
// Materialize the compressed-oop NULL (all-zero narrow pointer) with a
// dependency-breaking xor; KILLs flags because xor sets them.
// Fix: the format string previously read "xorq $dst, $src", which did not
// match the emitted instruction (xorq dst, dst) in PrintOptoAssembly output.
6980 instruct loadConN0(rRegN dst, immN0 src, rFlagsReg cr) %{
6981 match(Set dst src);
6982 effect(KILL cr);
6983 format %{ "xorq $dst, $dst\t# compressed NULL ptr" %}
6984 ins_encode %{
6985 __ xorq($dst$$Register, $dst$$Register);
6986 %}
6987 ins_pipe(ialu_reg);
6988 %}
// Materialize a non-NULL compressed oop constant. set_narrow_oop emits
// a relocatable 32-bit immediate so the GC can update it; a NULL here is
// impossible because loadConN0 matches that case first.
6990 instruct loadConN(rRegN dst, immN src) %{
6991 match(Set dst src);
6993 ins_cost(125);
6994 format %{ "movl $dst, $src\t# compressed ptr" %}
6995 ins_encode %{
6996 address con = (address)$src$$constant;
6997 if (con == NULL) {
6998 ShouldNotReachHere();
6999 } else {
7000 __ set_narrow_oop($dst$$Register, (jobject)$src$$constant);
7001 }
7002 %}
7003 ins_pipe(ialu_reg_fat); // XXX
7004 %}
// Float/double 0.0 are produced by xorps/xorpd on the XMM register
// itself -- no memory access and no flags side effect.
7006 instruct loadConF0(regF dst, immF0 src)
7007 %{
7008 match(Set dst src);
7009 ins_cost(100);
7011 format %{ "xorps $dst, $dst\t# float 0.0" %}
7012 opcode(0x0F, 0x57);
7013 ins_encode(REX_reg_reg(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
7014 ins_pipe(pipe_slow);
7015 %}
7017 // Use the same format since predicate() can not be used here.
7018 instruct loadConD(regD dst, immD src)
7019 %{
7020 match(Set dst src);
7021 ins_cost(125);
7023 format %{ "movsd $dst, [$src]" %}
7024 ins_encode(load_conD(dst, src));
7025 ins_pipe(pipe_slow);
7026 %}
7028 instruct loadConD0(regD dst, immD0 src)
7029 %{
7030 match(Set dst src);
7031 ins_cost(100);
7033 format %{ "xorpd $dst, $dst\t# double 0.0" %}
7034 opcode(0x66, 0x0F, 0x57);
7035 ins_encode(OpcP, REX_reg_reg(dst, dst), OpcS, OpcT, reg_reg(dst, dst));
7036 ins_pipe(pipe_slow);
7037 %}
// Spill reloads: copy a stack slot (RSP-relative) back into a register.
7039 instruct loadSSI(rRegI dst, stackSlotI src)
7040 %{
7041 match(Set dst src);
7043 ins_cost(125);
7044 format %{ "movl $dst, $src\t# int stk" %}
7045 opcode(0x8B);
7046 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
7047 ins_pipe(ialu_reg_mem);
7048 %}
7050 instruct loadSSL(rRegL dst, stackSlotL src)
7051 %{
7052 match(Set dst src);
7054 ins_cost(125);
7055 format %{ "movq $dst, $src\t# long stk" %}
7056 opcode(0x8B);
7057 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
7058 ins_pipe(ialu_reg_mem);
7059 %}
7061 instruct loadSSP(rRegP dst, stackSlotP src)
7062 %{
7063 match(Set dst src);
7065 ins_cost(125);
7066 format %{ "movq $dst, $src\t# ptr stk" %}
7067 opcode(0x8B);
7068 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
7069 ins_pipe(ialu_reg_mem);
7070 %}
7072 instruct loadSSF(regF dst, stackSlotF src)
7073 %{
7074 match(Set dst src);
7076 ins_cost(125);
7077 format %{ "movss $dst, $src\t# float stk" %}
7078 opcode(0xF3, 0x0F, 0x10);
7079 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
7080 ins_pipe(pipe_slow); // XXX
7081 %}
7083 // Use the same format since predicate() can not be used here.
7084 instruct loadSSD(regD dst, stackSlotD src)
7085 %{
7086 match(Set dst src);
7088 ins_cost(125);
7089 format %{ "movsd $dst, $src\t# double stk" %}
7090 ins_encode %{
7091 __ movdbl($dst$$XMMRegister, Address(rsp, $src$$disp));
7092 %}
7093 ins_pipe(pipe_slow); // XXX
7094 %}
7096 // Prefetch instructions.
7097 // Must be safe to execute with invalid address (cannot fault).
// The ReadPrefetchInstr / AllocatePrefetchInstr flags select exactly one
// rule per match: 0 = PREFETCHNTA, 1 = PREFETCHT0, 2 = PREFETCHT2,
// 3 = PREFETCHR/PREFETCHW (opcode 0F 0D). The /digit in RM_opc_mem picks
// the hint variant of opcode 0F 18.
7099 instruct prefetchr( memory mem ) %{
7100 predicate(ReadPrefetchInstr==3);
7101 match(PrefetchRead mem);
7102 ins_cost(125);
7104 format %{ "PREFETCHR $mem\t# Prefetch into level 1 cache" %}
7105 opcode(0x0F, 0x0D); /* Opcode 0F 0D /0 */
7106 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x00, mem));
7107 ins_pipe(ialu_mem);
7108 %}
7110 instruct prefetchrNTA( memory mem ) %{
7111 predicate(ReadPrefetchInstr==0);
7112 match(PrefetchRead mem);
7113 ins_cost(125);
7115 format %{ "PREFETCHNTA $mem\t# Prefetch into non-temporal cache for read" %}
7116 opcode(0x0F, 0x18); /* Opcode 0F 18 /0 */
7117 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x00, mem));
7118 ins_pipe(ialu_mem);
7119 %}
7121 instruct prefetchrT0( memory mem ) %{
7122 predicate(ReadPrefetchInstr==1);
7123 match(PrefetchRead mem);
7124 ins_cost(125);
7126 format %{ "PREFETCHT0 $mem\t# prefetch into L1 and L2 caches for read" %}
7127 opcode(0x0F, 0x18); /* Opcode 0F 18 /1 */
7128 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x01, mem));
7129 ins_pipe(ialu_mem);
7130 %}
7132 instruct prefetchrT2( memory mem ) %{
7133 predicate(ReadPrefetchInstr==2);
7134 match(PrefetchRead mem);
7135 ins_cost(125);
7137 format %{ "PREFETCHT2 $mem\t# prefetch into L2 caches for read" %}
7138 opcode(0x0F, 0x18); /* Opcode 0F 18 /3 */
7139 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x03, mem));
7140 ins_pipe(ialu_mem);
7141 %}
7143 instruct prefetchw( memory mem ) %{
7144 predicate(AllocatePrefetchInstr==3);
7145 match(PrefetchWrite mem);
7146 ins_cost(125);
7148 format %{ "PREFETCHW $mem\t# Prefetch into level 1 cache and mark modified" %}
7149 opcode(0x0F, 0x0D); /* Opcode 0F 0D /1 */
7150 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x01, mem));
7151 ins_pipe(ialu_mem);
7152 %}
7154 instruct prefetchwNTA( memory mem ) %{
7155 predicate(AllocatePrefetchInstr==0);
7156 match(PrefetchWrite mem);
7157 ins_cost(125);
7159 format %{ "PREFETCHNTA $mem\t# Prefetch to non-temporal cache for write" %}
7160 opcode(0x0F, 0x18); /* Opcode 0F 18 /0 */
7161 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x00, mem));
7162 ins_pipe(ialu_mem);
7163 %}
7165 instruct prefetchwT0( memory mem ) %{
7166 predicate(AllocatePrefetchInstr==1);
7167 match(PrefetchWrite mem);
7168 ins_cost(125);
7170 format %{ "PREFETCHT0 $mem\t# Prefetch to level 1 and 2 caches for write" %}
7171 opcode(0x0F, 0x18); /* Opcode 0F 18 /1 */
7172 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x01, mem));
7173 ins_pipe(ialu_mem);
7174 %}
7176 instruct prefetchwT2( memory mem ) %{
7177 predicate(AllocatePrefetchInstr==2);
7178 match(PrefetchWrite mem);
7179 ins_cost(125);
7181 format %{ "PREFETCHT2 $mem\t# Prefetch to level 2 cache for write" %}
7182 opcode(0x0F, 0x18); /* Opcode 0F 18 /3 */
7183 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x03, mem));
7184 ins_pipe(ialu_mem);
7185 %}
7187 //----------Store Instructions-------------------------------------------------
// Register stores use mov opcodes 0x88/0x89; immediate stores use
// C6/C7 /0. The *0 rules exploit the invariant that when compressed oops
// run with a NULL heap base, R12 permanently holds zero, so storing R12
// is a shorter encoding than storing an immediate 0.
7189 // Store Byte
7190 instruct storeB(memory mem, rRegI src)
7191 %{
7192 match(Set mem (StoreB mem src));
7194 ins_cost(125); // XXX
7195 format %{ "movb $mem, $src\t# byte" %}
7196 opcode(0x88);
7197 ins_encode(REX_breg_mem(src, mem), OpcP, reg_mem(src, mem));
7198 ins_pipe(ialu_mem_reg);
7199 %}
7201 // Store Char/Short
7202 instruct storeC(memory mem, rRegI src)
7203 %{
7204 match(Set mem (StoreC mem src));
7206 ins_cost(125); // XXX
7207 format %{ "movw $mem, $src\t# char/short" %}
7208 opcode(0x89);
7209 ins_encode(SizePrefix, REX_reg_mem(src, mem), OpcP, reg_mem(src, mem));
7210 ins_pipe(ialu_mem_reg);
7211 %}
7213 // Store Integer
7214 instruct storeI(memory mem, rRegI src)
7215 %{
7216 match(Set mem (StoreI mem src));
7218 ins_cost(125); // XXX
7219 format %{ "movl $mem, $src\t# int" %}
7220 opcode(0x89);
7221 ins_encode(REX_reg_mem(src, mem), OpcP, reg_mem(src, mem));
7222 ins_pipe(ialu_mem_reg);
7223 %}
7225 // Store Long
7226 instruct storeL(memory mem, rRegL src)
7227 %{
7228 match(Set mem (StoreL mem src));
7230 ins_cost(125); // XXX
7231 format %{ "movq $mem, $src\t# long" %}
7232 opcode(0x89);
7233 ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
7234 ins_pipe(ialu_mem_reg); // XXX
7235 %}
7237 // Store Pointer
7238 instruct storeP(memory mem, any_RegP src)
7239 %{
7240 match(Set mem (StoreP mem src));
7242 ins_cost(125); // XXX
7243 format %{ "movq $mem, $src\t# ptr" %}
7244 opcode(0x89);
7245 ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
7246 ins_pipe(ialu_mem_reg);
7247 %}
7249 instruct storeImmP0(memory mem, immP0 zero)
7250 %{
7251 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
7252 match(Set mem (StoreP mem zero));
7254 ins_cost(125); // XXX
7255 format %{ "movq $mem, R12\t# ptr (R12_heapbase==0)" %}
7256 ins_encode %{
7257 __ movq($mem$$Address, r12);
7258 %}
7259 ins_pipe(ialu_mem_reg);
7260 %}
7262 // Store NULL Pointer, mark word, or other simple pointer constant.
7263 instruct storeImmP(memory mem, immP31 src)
7264 %{
7265 match(Set mem (StoreP mem src));
7267 ins_cost(150); // XXX
7268 format %{ "movq $mem, $src\t# ptr" %}
7269 opcode(0xC7); /* C7 /0 */
7270 ins_encode(REX_mem_wide(mem), OpcP, RM_opc_mem(0x00, mem), Con32(src));
7271 ins_pipe(ialu_mem_imm);
7272 %}
7274 // Store Compressed Pointer
7275 instruct storeN(memory mem, rRegN src)
7276 %{
7277 match(Set mem (StoreN mem src));
7279 ins_cost(125); // XXX
7280 format %{ "movl $mem, $src\t# compressed ptr" %}
7281 ins_encode %{
7282 __ movl($mem$$Address, $src$$Register);
7283 %}
7284 ins_pipe(ialu_mem_reg);
7285 %}
7287 instruct storeImmN0(memory mem, immN0 zero)
7288 %{
7289 predicate(Universe::narrow_oop_base() == NULL);
7290 match(Set mem (StoreN mem zero));
7292 ins_cost(125); // XXX
7293 format %{ "movl $mem, R12\t# compressed ptr (R12_heapbase==0)" %}
7294 ins_encode %{
7295 __ movl($mem$$Address, r12);
7296 %}
7297 ins_pipe(ialu_mem_reg);
7298 %}
7300 instruct storeImmN(memory mem, immN src)
7301 %{
7302 match(Set mem (StoreN mem src));
7304 ins_cost(150); // XXX
7305 format %{ "movl $mem, $src\t# compressed ptr" %}
7306 ins_encode %{
7307 address con = (address)$src$$constant;
7308 if (con == NULL) {
7309 __ movl($mem$$Address, (int32_t)0);
7310 } else {
// Relocatable narrow-oop immediate so GC can patch the stored constant.
7311 __ set_narrow_oop($mem$$Address, (jobject)$src$$constant);
7312 }
7313 %}
7314 ins_pipe(ialu_mem_imm);
7315 %}
7317 // Store Integer Immediate
7318 instruct storeImmI0(memory mem, immI0 zero)
7319 %{
7320 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
7321 match(Set mem (StoreI mem zero));
7323 ins_cost(125); // XXX
7324 format %{ "movl $mem, R12\t# int (R12_heapbase==0)" %}
7325 ins_encode %{
7326 __ movl($mem$$Address, r12);
7327 %}
7328 ins_pipe(ialu_mem_reg);
7329 %}
7331 instruct storeImmI(memory mem, immI src)
7332 %{
7333 match(Set mem (StoreI mem src));
7335 ins_cost(150);
7336 format %{ "movl $mem, $src\t# int" %}
7337 opcode(0xC7); /* C7 /0 */
7338 ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con32(src));
7339 ins_pipe(ialu_mem_imm);
7340 %}
7342 // Store Long Immediate
7343 instruct storeImmL0(memory mem, immL0 zero)
7344 %{
7345 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
7346 match(Set mem (StoreL mem zero));
7348 ins_cost(125); // XXX
7349 format %{ "movq $mem, R12\t# long (R12_heapbase==0)" %}
7350 ins_encode %{
7351 __ movq($mem$$Address, r12);
7352 %}
7353 ins_pipe(ialu_mem_reg);
7354 %}
7356 instruct storeImmL(memory mem, immL32 src)
7357 %{
7358 match(Set mem (StoreL mem src));
7360 ins_cost(150);
7361 format %{ "movq $mem, $src\t# long" %}
7362 opcode(0xC7); /* C7 /0 */
7363 ins_encode(REX_mem_wide(mem), OpcP, RM_opc_mem(0x00, mem), Con32(src));
7364 ins_pipe(ialu_mem_imm);
7365 %}
7367 // Store Short/Char Immediate
7368 instruct storeImmC0(memory mem, immI0 zero)
7369 %{
7370 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
7371 match(Set mem (StoreC mem zero));
7373 ins_cost(125); // XXX
7374 format %{ "movw $mem, R12\t# short/char (R12_heapbase==0)" %}
7375 ins_encode %{
7376 __ movw($mem$$Address, r12);
7377 %}
7378 ins_pipe(ialu_mem_reg);
7379 %}
7381 instruct storeImmI16(memory mem, immI16 src)
7382 %{
7383 predicate(UseStoreImmI16);
7384 match(Set mem (StoreC mem src));
7386 ins_cost(150);
7387 format %{ "movw $mem, $src\t# short/char" %}
7388 opcode(0xC7); /* C7 /0 Same as 32 store immediate with prefix */
7389 ins_encode(SizePrefix, REX_mem(mem), OpcP, RM_opc_mem(0x00, mem),Con16(src));
7390 ins_pipe(ialu_mem_imm);
7391 %}
7393 // Store Byte Immediate
// Store constant zero byte via R12, which holds 0 when compressed oops
// run with a NULL heap base (shorter encoding than an immediate store).
// Fix: format comment previously said "# short/char" -- a copy-paste from
// storeImmC0; this rule stores a byte (StoreB / movb).
7394 instruct storeImmB0(memory mem, immI0 zero)
7395 %{
7396 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
7397 match(Set mem (StoreB mem zero));
7399 ins_cost(125); // XXX
7400 format %{ "movb $mem, R12\t# byte (R12_heapbase==0)" %}
7401 ins_encode %{
7402 __ movb($mem$$Address, r12);
7403 %}
7404 ins_pipe(ialu_mem_reg);
7405 %}
// Store an 8-bit immediate byte (C6 /0).
7407 instruct storeImmB(memory mem, immI8 src)
7408 %{
7409 match(Set mem (StoreB mem src));
7411 ins_cost(150); // XXX
7412 format %{ "movb $mem, $src\t# byte" %}
7413 opcode(0xC6); /* C6 /0 */
7414 ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con8or32(src));
7415 ins_pipe(ialu_mem_imm);
7416 %}
7418 // Store Aligned Packed Byte XMM register to memory
7419 instruct storeA8B(memory mem, regD src) %{
7420 match(Set mem (Store8B mem src));
7421 ins_cost(145);
7422 format %{ "MOVQ $mem,$src\t! packed8B" %}
7423 ins_encode( movq_st(mem, src));
7424 ins_pipe( pipe_slow );
7425 %}
7427 // Store Aligned Packed Char/Short XMM register to memory
7428 instruct storeA4C(memory mem, regD src) %{
7429 match(Set mem (Store4C mem src));
7430 ins_cost(145);
7431 format %{ "MOVQ $mem,$src\t! packed4C" %}
7432 ins_encode( movq_st(mem, src));
7433 ins_pipe( pipe_slow );
7434 %}
7436 // Store Aligned Packed Integer XMM register to memory
7437 instruct storeA2I(memory mem, regD src) %{
7438 match(Set mem (Store2I mem src));
7439 ins_cost(145);
7440 format %{ "MOVQ $mem,$src\t! packed2I" %}
7441 ins_encode( movq_st(mem, src));
7442 ins_pipe( pipe_slow );
7443 %}
7445 // Store CMS card-mark Immediate
// Card-table dirtying: writes a zero byte into the card table entry.
// Same R12-as-zero trick as the other *0 store rules when available.
7446 instruct storeImmCM0_reg(memory mem, immI0 zero)
7447 %{
7448 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
7449 match(Set mem (StoreCM mem zero));
7451 ins_cost(125); // XXX
7452 format %{ "movb $mem, R12\t# CMS card-mark byte 0 (R12_heapbase==0)" %}
7453 ins_encode %{
7454 __ movb($mem$$Address, r12);
7455 %}
7456 ins_pipe(ialu_mem_reg);
7457 %}
7459 instruct storeImmCM0(memory mem, immI0 src)
7460 %{
7461 match(Set mem (StoreCM mem src));
7463 ins_cost(150); // XXX
7464 format %{ "movb $mem, $src\t# CMS card-mark byte 0" %}
7465 opcode(0xC6); /* C6 /0 */
7466 ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con8or32(src));
7467 ins_pipe(ialu_mem_imm);
7468 %}
7470 // Store Aligned Packed Single Float XMM register to memory
7471 instruct storeA2F(memory mem, regD src) %{
7472 match(Set mem (Store2F mem src));
7473 ins_cost(145);
7474 format %{ "MOVQ $mem,$src\t! packed2F" %}
7475 ins_encode( movq_st(mem, src));
7476 ins_pipe( pipe_slow );
7477 %}
7479 // Store Float
7480 instruct storeF(memory mem, regF src)
7481 %{
7482 match(Set mem (StoreF mem src));
7484 ins_cost(95); // XXX
7485 format %{ "movss $mem, $src\t# float" %}
7486 opcode(0xF3, 0x0F, 0x11);
7487 ins_encode(OpcP, REX_reg_mem(src, mem), OpcS, OpcT, reg_mem(src, mem));
7488 ins_pipe(pipe_slow); // XXX
7489 %}
7491 // Store immediate Float value (it is faster than store from XMM register)
// Float 0.0 via the zero held in R12 (compressed oops, NULL heap base).
7492 instruct storeF0(memory mem, immF0 zero)
7493 %{
7494 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
7495 match(Set mem (StoreF mem zero));
7497 ins_cost(25); // XXX
7498 format %{ "movl $mem, R12\t# float 0. (R12_heapbase==0)" %}
7499 ins_encode %{
7500 __ movl($mem$$Address, r12);
7501 %}
7502 ins_pipe(ialu_mem_reg);
7503 %}
// Store an arbitrary float constant as its raw 32-bit pattern (C7 /0).
7505 instruct storeF_imm(memory mem, immF src)
7506 %{
7507 match(Set mem (StoreF mem src));
7509 ins_cost(50);
7510 format %{ "movl $mem, $src\t# float" %}
7511 opcode(0xC7); /* C7 /0 */
7512 ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con32F_as_bits(src));
7513 ins_pipe(ialu_mem_imm);
7514 %}
7516 // Store Double
7517 instruct storeD(memory mem, regD src)
7518 %{
7519 match(Set mem (StoreD mem src));
7521 ins_cost(95); // XXX
7522 format %{ "movsd $mem, $src\t# double" %}
7523 opcode(0xF2, 0x0F, 0x11);
7524 ins_encode(OpcP, REX_reg_mem(src, mem), OpcS, OpcT, reg_mem(src, mem));
7525 ins_pipe(pipe_slow); // XXX
7526 %}
7528 // Store immediate double 0.0 (it is faster than store from XMM register)
// Predicates on storeD0_imm/storeD0 are complements, so exactly one
// of the two zero-store rules is usable in any given VM configuration.
7529 instruct storeD0_imm(memory mem, immD0 src)
7530 %{
7531 predicate(!UseCompressedOops || (Universe::narrow_oop_base() != NULL));
7532 match(Set mem (StoreD mem src));
7534 ins_cost(50);
7535 format %{ "movq $mem, $src\t# double 0." %}
7536 opcode(0xC7); /* C7 /0 */
7537 ins_encode(REX_mem_wide(mem), OpcP, RM_opc_mem(0x00, mem), Con32F_as_bits(src));
7538 ins_pipe(ialu_mem_imm);
7539 %}
7541 instruct storeD0(memory mem, immD0 zero)
7542 %{
7543 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
7544 match(Set mem (StoreD mem zero));
7546 ins_cost(25); // XXX
7547 format %{ "movq $mem, R12\t# double 0. (R12_heapbase==0)" %}
7548 ins_encode %{
7549 __ movq($mem$$Address, r12);
7550 %}
7551 ins_pipe(ialu_mem_reg);
7552 %}
// Spill stores: copy a register into its RSP-relative stack slot.
// Mirror images of the loadSS* rules earlier in the file.
7554 instruct storeSSI(stackSlotI dst, rRegI src)
7555 %{
7556 match(Set dst src);
7558 ins_cost(100);
7559 format %{ "movl $dst, $src\t# int stk" %}
7560 opcode(0x89);
7561 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
7562 ins_pipe( ialu_mem_reg );
7563 %}
7565 instruct storeSSL(stackSlotL dst, rRegL src)
7566 %{
7567 match(Set dst src);
7569 ins_cost(100);
7570 format %{ "movq $dst, $src\t# long stk" %}
7571 opcode(0x89);
7572 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
7573 ins_pipe(ialu_mem_reg);
7574 %}
7576 instruct storeSSP(stackSlotP dst, rRegP src)
7577 %{
7578 match(Set dst src);
7580 ins_cost(100);
7581 format %{ "movq $dst, $src\t# ptr stk" %}
7582 opcode(0x89);
7583 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
7584 ins_pipe(ialu_mem_reg);
7585 %}
7587 instruct storeSSF(stackSlotF dst, regF src)
7588 %{
7589 match(Set dst src);
7591 ins_cost(95); // XXX
7592 format %{ "movss $dst, $src\t# float stk" %}
7593 opcode(0xF3, 0x0F, 0x11);
7594 ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
7595 ins_pipe(pipe_slow); // XXX
7596 %}
7598 instruct storeSSD(stackSlotD dst, regD src)
7599 %{
7600 match(Set dst src);
7602 ins_cost(95); // XXX
7603 format %{ "movsd $dst, $src\t# double stk" %}
7604 opcode(0xF2, 0x0F, 0x11);
7605 ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
7606 ins_pipe(pipe_slow); // XXX
7607 %}
7609 //----------BSWAP Instructions-------------------------------------------------
// BSWAP (0F C8+rd) reverses byte order in place; the load/store forms
// fuse the byte reversal with the adjacent memory access.
7610 instruct bytes_reverse_int(rRegI dst) %{
7611 match(Set dst (ReverseBytesI dst));
7613 format %{ "bswapl $dst" %}
7614 opcode(0x0F, 0xC8); /*Opcode 0F /C8 */
7615 ins_encode( REX_reg(dst), OpcP, opc2_reg(dst) );
7616 ins_pipe( ialu_reg );
7617 %}
7619 instruct bytes_reverse_long(rRegL dst) %{
7620 match(Set dst (ReverseBytesL dst));
7622 format %{ "bswapq $dst" %}
7624 opcode(0x0F, 0xC8); /* Opcode 0F /C8 */
7625 ins_encode( REX_reg_wide(dst), OpcP, opc2_reg(dst) );
7626 ins_pipe( ialu_reg);
7627 %}
// Load + reverse: mov from memory, then bswap the destination register.
7629 instruct loadI_reversed(rRegI dst, memory src) %{
7630 match(Set dst (ReverseBytesI (LoadI src)));
7632 format %{ "bswap_movl $dst, $src" %}
7633 opcode(0x8B, 0x0F, 0xC8); /* Opcode 8B 0F C8 */
7634 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src), REX_reg(dst), OpcS, opc3_reg(dst));
7635 ins_pipe( ialu_reg_mem );
7636 %}
7638 instruct loadL_reversed(rRegL dst, memory src) %{
7639 match(Set dst (ReverseBytesL (LoadL src)));
7641 format %{ "bswap_movq $dst, $src" %}
7642 opcode(0x8B, 0x0F, 0xC8); /* Opcode 8B 0F C8 */
7643 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src), REX_reg_wide(dst), OpcS, opc3_reg(dst));
7644 ins_pipe( ialu_reg_mem );
7645 %}
// Reverse + store: bswap the source register, then mov to memory.
// Note this clobbers the byte order in src's register (src is reversed
// in place before the store).
7647 instruct storeI_reversed(memory dst, rRegI src) %{
7648 match(Set dst (StoreI dst (ReverseBytesI src)));
7650 format %{ "movl_bswap $dst, $src" %}
7651 opcode(0x0F, 0xC8, 0x89); /* Opcode 0F C8 89 */
7652 ins_encode( REX_reg(src), OpcP, opc2_reg(src), REX_reg_mem(src, dst), OpcT, reg_mem(src, dst) );
7653 ins_pipe( ialu_mem_reg );
7654 %}
7656 instruct storeL_reversed(memory dst, rRegL src) %{
7657 match(Set dst (StoreL dst (ReverseBytesL src)));
7659 format %{ "movq_bswap $dst, $src" %}
7660 opcode(0x0F, 0xC8, 0x89); /* Opcode 0F C8 89 */
7661 ins_encode( REX_reg_wide(src), OpcP, opc2_reg(src), REX_reg_mem_wide(src, dst), OpcT, reg_mem(src, dst) );
7662 ins_pipe( ialu_mem_reg );
7663 %}
7666 //---------- Zeros Count Instructions ------------------------------------------
// Leading-zero count: one rule pair per width. When LZCNT is available
// it is used directly; otherwise BSR is used with fix-up code, since BSR
// leaves the destination undefined for zero input and returns the bit
// index (not the zero count) otherwise: count = 31/63 - index.
7668 instruct countLeadingZerosI(rRegI dst, rRegI src, rFlagsReg cr) %{
7669 predicate(UseCountLeadingZerosInstruction);
7670 match(Set dst (CountLeadingZerosI src));
7671 effect(KILL cr);
7673 format %{ "lzcntl $dst, $src\t# count leading zeros (int)" %}
7674 ins_encode %{
7675 __ lzcntl($dst$$Register, $src$$Register);
7676 %}
7677 ins_pipe(ialu_reg);
7678 %}
7680 instruct countLeadingZerosI_bsr(rRegI dst, rRegI src, rFlagsReg cr) %{
7681 predicate(!UseCountLeadingZerosInstruction);
7682 match(Set dst (CountLeadingZerosI src));
7683 effect(KILL cr);
7685 format %{ "bsrl $dst, $src\t# count leading zeros (int)\n\t"
7686 "jnz skip\n\t"
7687 "movl $dst, -1\n"
7688 "skip:\n\t"
7689 "negl $dst\n\t"
7690 "addl $dst, 31" %}
7691 ins_encode %{
7692 Register Rdst = $dst$$Register;
7693 Register Rsrc = $src$$Register;
7694 Label skip;
7695 __ bsrl(Rdst, Rsrc);
// ZF set means src was zero: force index to -1 so the result becomes 32.
7696 __ jccb(Assembler::notZero, skip);
7697 __ movl(Rdst, -1);
7698 __ bind(skip);
7699 __ negl(Rdst);
7700 __ addl(Rdst, BitsPerInt - 1);
7701 %}
7702 ins_pipe(ialu_reg);
7703 %}
7705 instruct countLeadingZerosL(rRegI dst, rRegL src, rFlagsReg cr) %{
7706 predicate(UseCountLeadingZerosInstruction);
7707 match(Set dst (CountLeadingZerosL src));
7708 effect(KILL cr);
7710 format %{ "lzcntq $dst, $src\t# count leading zeros (long)" %}
7711 ins_encode %{
7712 __ lzcntq($dst$$Register, $src$$Register);
7713 %}
7714 ins_pipe(ialu_reg);
7715 %}
7717 instruct countLeadingZerosL_bsr(rRegI dst, rRegL src, rFlagsReg cr) %{
7718 predicate(!UseCountLeadingZerosInstruction);
7719 match(Set dst (CountLeadingZerosL src));
7720 effect(KILL cr);
7722 format %{ "bsrq $dst, $src\t# count leading zeros (long)\n\t"
7723 "jnz skip\n\t"
7724 "movl $dst, -1\n"
7725 "skip:\n\t"
7726 "negl $dst\n\t"
7727 "addl $dst, 63" %}
7728 ins_encode %{
7729 Register Rdst = $dst$$Register;
7730 Register Rsrc = $src$$Register;
7731 Label skip;
7732 __ bsrq(Rdst, Rsrc);
7733 __ jccb(Assembler::notZero, skip);
7734 __ movl(Rdst, -1);
7735 __ bind(skip);
7736 __ negl(Rdst);
7737 __ addl(Rdst, BitsPerLong - 1);
7738 %}
7739 ins_pipe(ialu_reg);
7740 %}
// Trailing-zero count via BSF; for zero input BSF leaves dst undefined,
// so the fall-through stores the width (32/64) explicitly.
7742 instruct countTrailingZerosI(rRegI dst, rRegI src, rFlagsReg cr) %{
7743 match(Set dst (CountTrailingZerosI src));
7744 effect(KILL cr);
7746 format %{ "bsfl $dst, $src\t# count trailing zeros (int)\n\t"
7747 "jnz done\n\t"
7748 "movl $dst, 32\n"
7749 "done:" %}
7750 ins_encode %{
7751 Register Rdst = $dst$$Register;
7752 Label done;
7753 __ bsfl(Rdst, $src$$Register);
7754 __ jccb(Assembler::notZero, done);
7755 __ movl(Rdst, BitsPerInt);
7756 __ bind(done);
7757 %}
7758 ins_pipe(ialu_reg);
7759 %}
7761 instruct countTrailingZerosL(rRegI dst, rRegL src, rFlagsReg cr) %{
7762 match(Set dst (CountTrailingZerosL src));
7763 effect(KILL cr);
7765 format %{ "bsfq $dst, $src\t# count trailing zeros (long)\n\t"
7766 "jnz done\n\t"
7767 "movl $dst, 64\n"
7768 "done:" %}
7769 ins_encode %{
7770 Register Rdst = $dst$$Register;
7771 Label done;
7772 __ bsfq(Rdst, $src$$Register);
7773 __ jccb(Assembler::notZero, done);
7774 __ movl(Rdst, BitsPerLong);
7775 __ bind(done);
7776 %}
7777 ins_pipe(ialu_reg);
7778 %}
7781 //---------- Population Count Instructions -------------------------------------
// POPCNT rules, guarded by UsePopCountInstruction; the _mem variants
// fold the load into the popcnt. Result is always an int register,
// even for the long forms (Long.bitCount returns int).
7783 instruct popCountI(rRegI dst, rRegI src) %{
7784 predicate(UsePopCountInstruction);
7785 match(Set dst (PopCountI src));
7787 format %{ "popcnt $dst, $src" %}
7788 ins_encode %{
7789 __ popcntl($dst$$Register, $src$$Register);
7790 %}
7791 ins_pipe(ialu_reg);
7792 %}
7794 instruct popCountI_mem(rRegI dst, memory mem) %{
7795 predicate(UsePopCountInstruction);
7796 match(Set dst (PopCountI (LoadI mem)));
7798 format %{ "popcnt $dst, $mem" %}
7799 ins_encode %{
7800 __ popcntl($dst$$Register, $mem$$Address);
7801 %}
7802 ins_pipe(ialu_reg);
7803 %}
7805 // Note: Long.bitCount(long) returns an int.
7806 instruct popCountL(rRegI dst, rRegL src) %{
7807 predicate(UsePopCountInstruction);
7808 match(Set dst (PopCountL src));
7810 format %{ "popcnt $dst, $src" %}
7811 ins_encode %{
7812 __ popcntq($dst$$Register, $src$$Register);
7813 %}
7814 ins_pipe(ialu_reg);
7815 %}
7817 // Note: Long.bitCount(long) returns an int.
7818 instruct popCountL_mem(rRegI dst, memory mem) %{
7819 predicate(UsePopCountInstruction);
7820 match(Set dst (PopCountL (LoadL mem)));
7822 format %{ "popcnt $dst, $mem" %}
7823 ins_encode %{
7824 __ popcntq($dst$$Register, $mem$$Address);
7825 %}
7826 ins_pipe(ialu_reg);
7827 %}
7830 //----------MemBar Instructions-----------------------------------------------
7831 // Memory barrier flavors
// On x86-64 only StoreLoad ordering needs actual code; acquire/release
// barriers are compile-time-only (size(0), empty encoding) and exist so
// the compiler does not reorder memory operations across them.
7833 instruct membar_acquire()
7834 %{
7835 match(MemBarAcquire);
7836 ins_cost(0);
7838 size(0);
7839 format %{ "MEMBAR-acquire ! (empty encoding)" %}
7840 ins_encode();
7841 ins_pipe(empty);
7842 %}
// Acquire following a FastLock: the CMPXCHG in the lock already provides
// the required ordering, so this matches first via its predicate.
7844 instruct membar_acquire_lock()
7845 %{
7846 match(MemBarAcquire);
7847 predicate(Matcher::prior_fast_lock(n));
7848 ins_cost(0);
7850 size(0);
7851 format %{ "MEMBAR-acquire (prior CMPXCHG in FastLock so empty encoding)" %}
7852 ins_encode();
7853 ins_pipe(empty);
7854 %}
7856 instruct membar_release()
7857 %{
7858 match(MemBarRelease);
7859 ins_cost(0);
7861 size(0);
7862 format %{ "MEMBAR-release ! (empty encoding)" %}
7863 ins_encode();
7864 ins_pipe(empty);
7865 %}
// Release preceding a FastUnlock: ordering supplied by the unlock itself.
7867 instruct membar_release_lock()
7868 %{
7869 match(MemBarRelease);
7870 predicate(Matcher::post_fast_unlock(n));
7871 ins_cost(0);
7873 size(0);
7874 format %{ "MEMBAR-release (a FastUnlock follows so empty encoding)" %}
7875 ins_encode();
7876 ins_pipe(empty);
7877 %}
// The one barrier that emits code: a StoreLoad fence. The format shows the
// MP encoding (locked add to the top of stack); the macro assembler picks
// the actual sequence. KILLs flags because of the LOCK ADD.
7879 instruct membar_volatile(rFlagsReg cr) %{
7880 match(MemBarVolatile);
7881 effect(KILL cr);
7882 ins_cost(400);
7884 format %{
7885 $$template
7886 if (os::is_MP()) {
7887 $$emit$$"lock addl [rsp + #0], 0\t! membar_volatile"
7888 } else {
7889 $$emit$$"MEMBAR-volatile ! (empty encoding)"
7890 }
7891 %}
7892 ins_encode %{
7893 __ membar(Assembler::StoreLoad);
7894 %}
7895 ins_pipe(pipe_slow);
7896 %}
// Elide the volatile barrier when a following instruction already acts as
// a StoreLoad barrier (decided by Matcher::post_store_load_barrier).
7898 instruct unnecessary_membar_volatile()
7899 %{
7900 match(MemBarVolatile);
7901 predicate(Matcher::post_store_load_barrier(n));
7902 ins_cost(0);
7904 size(0);
7905 format %{ "MEMBAR-volatile (unnecessary so empty encoding)" %}
7906 ins_encode();
7907 ins_pipe(empty);
7908 %}
7910 //----------Move Instructions--------------------------------------------------
// Reinterpret between long and pointer register classes; both are a plain
// 64-bit register copy (enc_copy_wide elides the move when dst == src).
7912 instruct castX2P(rRegP dst, rRegL src)
7913 %{
7914 match(Set dst (CastX2P src));
7916 format %{ "movq $dst, $src\t# long->ptr" %}
7917 ins_encode(enc_copy_wide(dst, src));
7918 ins_pipe(ialu_reg_reg); // XXX
7919 %}
7921 instruct castP2X(rRegL dst, rRegP src)
7922 %{
7923 match(Set dst (CastP2X src));
7925 format %{ "movq $dst, $src\t# ptr -> long" %}
7926 ins_encode(enc_copy_wide(dst, src));
7927 ins_pipe(ialu_reg_reg); // XXX
7928 %}
7931 // Convert oop pointer into compressed form
// Compressed-oop conversions. The not_null variants are selected by the
// node's pointer type and skip the null check inside the macro-assembler
// helpers; the general variants must preserve null.
7932 instruct encodeHeapOop(rRegN dst, rRegP src, rFlagsReg cr) %{
7933 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
7934 match(Set dst (EncodeP src));
7935 effect(KILL cr);
7936 format %{ "encode_heap_oop $dst,$src" %}
7937 ins_encode %{
7938 Register s = $src$$Register;
7939 Register d = $dst$$Register;
// encode_heap_oop works in place, so copy src into dst first if needed.
7940 if (s != d) {
7941 __ movq(d, s);
7942 }
7943 __ encode_heap_oop(d);
7944 %}
7945 ins_pipe(ialu_reg_long);
7946 %}
7948 instruct encodeHeapOop_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{
7949 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
7950 match(Set dst (EncodeP src));
7951 effect(KILL cr);
7952 format %{ "encode_heap_oop_not_null $dst,$src" %}
7953 ins_encode %{
7954 __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
7955 %}
7956 ins_pipe(ialu_reg_long);
7957 %}
// General decode: excluded for NotNull and Constant pointers, which have
// cheaper dedicated forms.
7959 instruct decodeHeapOop(rRegP dst, rRegN src, rFlagsReg cr) %{
7960 predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
7961 n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant);
7962 match(Set dst (DecodeN src));
7963 effect(KILL cr);
7964 format %{ "decode_heap_oop $dst,$src" %}
7965 ins_encode %{
7966 Register s = $src$$Register;
7967 Register d = $dst$$Register;
7968 if (s != d) {
7969 __ movq(d, s);
7970 }
7971 __ decode_heap_oop(d);
7972 %}
7973 ins_pipe(ialu_reg_long);
7974 %}
// Not-null decode needs no flags (no effect(KILL cr)); the two-register
// helper overload handles src != dst without an explicit copy.
7976 instruct decodeHeapOop_not_null(rRegP dst, rRegN src) %{
7977 predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
7978 n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
7979 match(Set dst (DecodeN src));
7980 format %{ "decode_heap_oop_not_null $dst,$src" %}
7981 ins_encode %{
7982 Register s = $src$$Register;
7983 Register d = $dst$$Register;
7984 if (s != d) {
7985 __ decode_heap_oop_not_null(d, s);
7986 } else {
7987 __ decode_heap_oop_not_null(d);
7988 }
7989 %}
7990 ins_pipe(ialu_reg_long);
7991 %}
7994 //----------Conditional Move---------------------------------------------------
7995 // Jump
7996 // dummy instruction for generating temp registers
// Jump-table dispatch (tableswitch). Each form loads the table base into a
// TEMP register and does an indirect jump indexed by the switch value;
// ins_pc_relative(1) marks the embedded table-base address for relocation.
7997 instruct jumpXtnd_offset(rRegL switch_val, immI2 shift, rRegI dest) %{
7998 match(Jump (LShiftL switch_val shift));
7999 ins_cost(350);
// predicate(false): never matched directly; used only via expansion.
8000 predicate(false);
8001 effect(TEMP dest);
8003 format %{ "leaq $dest, table_base\n\t"
8004 "jmp [$dest + $switch_val << $shift]\n\t" %}
8005 ins_encode(jump_enc_offset(switch_val, shift, dest));
8006 ins_pipe(pipe_jmp);
8007 ins_pc_relative(1);
8008 %}
8010 instruct jumpXtnd_addr(rRegL switch_val, immI2 shift, immL32 offset, rRegI dest) %{
8011 match(Jump (AddL (LShiftL switch_val shift) offset));
8012 ins_cost(350);
8013 effect(TEMP dest);
8015 format %{ "leaq $dest, table_base\n\t"
8016 "jmp [$dest + $switch_val << $shift + $offset]\n\t" %}
8017 ins_encode(jump_enc_addr(switch_val, shift, offset, dest));
8018 ins_pipe(pipe_jmp);
8019 ins_pc_relative(1);
8020 %}
8022 instruct jumpXtnd(rRegL switch_val, rRegI dest) %{
8023 match(Jump switch_val);
8024 ins_cost(350);
8025 effect(TEMP dest);
8027 format %{ "leaq $dest, table_base\n\t"
8028 "jmp [$dest + $switch_val]\n\t" %}
8029 ins_encode(jump_enc(switch_val, dest));
8030 ins_pipe(pipe_jmp);
8031 ins_pc_relative(1);
8032 %}
8034 // Conditional move
// Conditional moves (CMOVcc, opcode 0x0F 0x4x; enc_cmov folds the condition
// into the second opcode byte). Three flag flavors per type: signed
// (rFlagsReg), unsigned (rFlagsRegU), and UCF — unordered-compare flags —
// which expand to the unsigned form.
8035 instruct cmovI_reg(rRegI dst, rRegI src, rFlagsReg cr, cmpOp cop)
8036 %{
8037 match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
8039 ins_cost(200); // XXX
8040 format %{ "cmovl$cop $dst, $src\t# signed, int" %}
8041 opcode(0x0F, 0x40);
8042 ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
8043 ins_pipe(pipe_cmov_reg);
8044 %}
8046 instruct cmovI_regU(cmpOpU cop, rFlagsRegU cr, rRegI dst, rRegI src) %{
8047 match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
8049 ins_cost(200); // XXX
8050 format %{ "cmovl$cop $dst, $src\t# unsigned, int" %}
8051 opcode(0x0F, 0x40);
8052 ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
8053 ins_pipe(pipe_cmov_reg);
8054 %}
8056 instruct cmovI_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegI dst, rRegI src) %{
8057 match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
8058 ins_cost(200);
8059 expand %{
8060 cmovI_regU(cop, cr, dst, src);
8061 %}
8062 %}
8064 // Conditional move
8065 instruct cmovI_mem(cmpOp cop, rFlagsReg cr, rRegI dst, memory src) %{
8066 match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
8068 ins_cost(250); // XXX
8069 format %{ "cmovl$cop $dst, $src\t# signed, int" %}
8070 opcode(0x0F, 0x40);
8071 ins_encode(REX_reg_mem(dst, src), enc_cmov(cop), reg_mem(dst, src));
8072 ins_pipe(pipe_cmov_mem);
8073 %}
8075 // Conditional move
8076 instruct cmovI_memU(cmpOpU cop, rFlagsRegU cr, rRegI dst, memory src)
8077 %{
8078 match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
8080 ins_cost(250); // XXX
8081 format %{ "cmovl$cop $dst, $src\t# unsigned, int" %}
8082 opcode(0x0F, 0x40);
8083 ins_encode(REX_reg_mem(dst, src), enc_cmov(cop), reg_mem(dst, src));
8084 ins_pipe(pipe_cmov_mem);
8085 %}
8087 instruct cmovI_memUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegI dst, memory src) %{
8088 match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
8089 ins_cost(250);
8090 expand %{
8091 cmovI_memU(cop, cr, dst, src);
8092 %}
8093 %}
// Compressed-pointer (narrow oop) conditional moves use the 32-bit CMOVL.
8095 // Conditional move
8096 instruct cmovN_reg(rRegN dst, rRegN src, rFlagsReg cr, cmpOp cop)
8097 %{
8098 match(Set dst (CMoveN (Binary cop cr) (Binary dst src)));
8100 ins_cost(200); // XXX
8101 format %{ "cmovl$cop $dst, $src\t# signed, compressed ptr" %}
8102 opcode(0x0F, 0x40);
8103 ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
8104 ins_pipe(pipe_cmov_reg);
8105 %}
8107 // Conditional move
8108 instruct cmovN_regU(cmpOpU cop, rFlagsRegU cr, rRegN dst, rRegN src)
8109 %{
8110 match(Set dst (CMoveN (Binary cop cr) (Binary dst src)));
8112 ins_cost(200); // XXX
8113 format %{ "cmovl$cop $dst, $src\t# unsigned, compressed ptr" %}
8114 opcode(0x0F, 0x40);
8115 ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
8116 ins_pipe(pipe_cmov_reg);
8117 %}
8119 instruct cmovN_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegN dst, rRegN src) %{
8120 match(Set dst (CMoveN (Binary cop cr) (Binary dst src)));
8121 ins_cost(200);
8122 expand %{
8123 cmovN_regU(cop, cr, dst, src);
8124 %}
8125 %}
// Full-width pointer conditional moves need the REX.W (wide) encoding.
8127 // Conditional move
8128 instruct cmovP_reg(rRegP dst, rRegP src, rFlagsReg cr, cmpOp cop)
8129 %{
8130 match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
8132 ins_cost(200); // XXX
8133 format %{ "cmovq$cop $dst, $src\t# signed, ptr" %}
8134 opcode(0x0F, 0x40);
8135 ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
8136 ins_pipe(pipe_cmov_reg); // XXX
8137 %}
8139 // Conditional move
8140 instruct cmovP_regU(cmpOpU cop, rFlagsRegU cr, rRegP dst, rRegP src)
8141 %{
8142 match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
8144 ins_cost(200); // XXX
8145 format %{ "cmovq$cop $dst, $src\t# unsigned, ptr" %}
8146 opcode(0x0F, 0x40);
8147 ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
8148 ins_pipe(pipe_cmov_reg); // XXX
8149 %}
8151 instruct cmovP_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegP dst, rRegP src) %{
8152 match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
8153 ins_cost(200);
8154 expand %{
8155 cmovP_regU(cop, cr, dst, src);
8156 %}
8157 %}
8159 // DISABLED: Requires the ADLC to emit a bottom_type call that
8160 // correctly meets the two pointer arguments; one is an incoming
8161 // register but the other is a memory operand. ALSO appears to
8162 // be buggy with implicit null checks.
8163 //
8164 //// Conditional move
8165 //instruct cmovP_mem(cmpOp cop, rFlagsReg cr, rRegP dst, memory src)
8166 //%{
8167 // match(Set dst (CMoveP (Binary cop cr) (Binary dst (LoadP src))));
8168 // ins_cost(250);
8169 // format %{ "CMOV$cop $dst,$src\t# ptr" %}
8170 // opcode(0x0F,0x40);
8171 // ins_encode( enc_cmov(cop), reg_mem( dst, src ) );
8172 // ins_pipe( pipe_cmov_mem );
8173 //%}
8174 //
8175 //// Conditional move
8176 //instruct cmovP_memU(cmpOpU cop, rFlagsRegU cr, rRegP dst, memory src)
8177 //%{
8178 // match(Set dst (CMoveP (Binary cop cr) (Binary dst (LoadP src))));
8179 // ins_cost(250);
8180 // format %{ "CMOV$cop $dst,$src\t# ptr" %}
8181 // opcode(0x0F,0x40);
8182 // ins_encode( enc_cmov(cop), reg_mem( dst, src ) );
8183 // ins_pipe( pipe_cmov_mem );
8184 //%}
// 64-bit conditional moves (CMOVQ, REX.W-wide encodings), register and
// load-folded forms; UCF flavors expand to the unsigned variants.
8186 instruct cmovL_reg(cmpOp cop, rFlagsReg cr, rRegL dst, rRegL src)
8187 %{
8188 match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
8190 ins_cost(200); // XXX
8191 format %{ "cmovq$cop $dst, $src\t# signed, long" %}
8192 opcode(0x0F, 0x40);
8193 ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
8194 ins_pipe(pipe_cmov_reg); // XXX
8195 %}
8197 instruct cmovL_mem(cmpOp cop, rFlagsReg cr, rRegL dst, memory src)
8198 %{
8199 match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src))));
8201 ins_cost(200); // XXX
8202 format %{ "cmovq$cop $dst, $src\t# signed, long" %}
8203 opcode(0x0F, 0x40);
8204 ins_encode(REX_reg_mem_wide(dst, src), enc_cmov(cop), reg_mem(dst, src));
8205 ins_pipe(pipe_cmov_mem); // XXX
8206 %}
8208 instruct cmovL_regU(cmpOpU cop, rFlagsRegU cr, rRegL dst, rRegL src)
8209 %{
8210 match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
8212 ins_cost(200); // XXX
8213 format %{ "cmovq$cop $dst, $src\t# unsigned, long" %}
8214 opcode(0x0F, 0x40);
8215 ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
8216 ins_pipe(pipe_cmov_reg); // XXX
8217 %}
8219 instruct cmovL_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegL dst, rRegL src) %{
8220 match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
8221 ins_cost(200);
8222 expand %{
8223 cmovL_regU(cop, cr, dst, src);
8224 %}
8225 %}
8227 instruct cmovL_memU(cmpOpU cop, rFlagsRegU cr, rRegL dst, memory src)
8228 %{
8229 match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src))));
8231 ins_cost(200); // XXX
8232 format %{ "cmovq$cop $dst, $src\t# unsigned, long" %}
8233 opcode(0x0F, 0x40);
8234 ins_encode(REX_reg_mem_wide(dst, src), enc_cmov(cop), reg_mem(dst, src));
8235 ins_pipe(pipe_cmov_mem); // XXX
8236 %}
8238 instruct cmovL_memUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegL dst, memory src) %{
8239 match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src))));
8240 ins_cost(200);
8241 expand %{
8242 cmovL_memU(cop, cr, dst, src);
8243 %}
8244 %}
// There is no CMOV for XMM registers, so float cmove is a branch around
// a MOVSS (enc_cmovf_branch emits the jcc + move sequence).
8246 instruct cmovF_reg(cmpOp cop, rFlagsReg cr, regF dst, regF src)
8247 %{
8248 match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
8250 ins_cost(200); // XXX
8251 format %{ "jn$cop skip\t# signed cmove float\n\t"
8252 "movss $dst, $src\n"
8253 "skip:" %}
8254 ins_encode(enc_cmovf_branch(cop, dst, src));
8255 ins_pipe(pipe_slow);
8256 %}
8258 // instruct cmovF_mem(cmpOp cop, rFlagsReg cr, regF dst, memory src)
8259 // %{
8260 // match(Set dst (CMoveF (Binary cop cr) (Binary dst (LoadL src))));
8262 // ins_cost(200); // XXX
8263 // format %{ "jn$cop skip\t# signed cmove float\n\t"
8264 // "movss $dst, $src\n"
8265 // "skip:" %}
8266 // ins_encode(enc_cmovf_mem_branch(cop, dst, src));
8267 // ins_pipe(pipe_slow);
8268 // %}
// Remaining XMM conditional moves: branch-around MOVSS/MOVSD sequences,
// since CMOVcc does not operate on XMM registers. UCF forms expand to
// the unsigned variants.
8270 instruct cmovF_regU(cmpOpU cop, rFlagsRegU cr, regF dst, regF src)
8271 %{
8272 match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
8274 ins_cost(200); // XXX
8275 format %{ "jn$cop skip\t# unsigned cmove float\n\t"
8276 "movss $dst, $src\n"
8277 "skip:" %}
8278 ins_encode(enc_cmovf_branch(cop, dst, src));
8279 ins_pipe(pipe_slow);
8280 %}
8282 instruct cmovF_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, regF dst, regF src) %{
8283 match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
8284 ins_cost(200);
8285 expand %{
8286 cmovF_regU(cop, cr, dst, src);
8287 %}
8288 %}
8290 instruct cmovD_reg(cmpOp cop, rFlagsReg cr, regD dst, regD src)
8291 %{
8292 match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
8294 ins_cost(200); // XXX
8295 format %{ "jn$cop skip\t# signed cmove double\n\t"
8296 "movsd $dst, $src\n"
8297 "skip:" %}
8298 ins_encode(enc_cmovd_branch(cop, dst, src));
8299 ins_pipe(pipe_slow);
8300 %}
8302 instruct cmovD_regU(cmpOpU cop, rFlagsRegU cr, regD dst, regD src)
8303 %{
8304 match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
8306 ins_cost(200); // XXX
8307 format %{ "jn$cop skip\t# unsigned cmove double\n\t"
8308 "movsd $dst, $src\n"
8309 "skip:" %}
8310 ins_encode(enc_cmovd_branch(cop, dst, src));
8311 ins_pipe(pipe_slow);
8312 %}
8314 instruct cmovD_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, regD dst, regD src) %{
8315 match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
8316 ins_cost(200);
8317 expand %{
8318 cmovD_regU(cop, cr, dst, src);
8319 %}
8320 %}
8322 //----------Arithmetic Instructions--------------------------------------------
8323 //----------Addition Instructions----------------------------------------------
// Integer addition: register/immediate/memory operand forms, read-modify-
// write memory forms, INC/DEC strength reductions (gated on UseIncDec),
// and a flags-free LEA form for non-destructive three-operand adds.
8325 instruct addI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
8326 %{
8327 match(Set dst (AddI dst src));
8328 effect(KILL cr);
8330 format %{ "addl $dst, $src\t# int" %}
8331 opcode(0x03);
8332 ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
8333 ins_pipe(ialu_reg_reg);
8334 %}
8336 instruct addI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
8337 %{
8338 match(Set dst (AddI dst src));
8339 effect(KILL cr);
8341 format %{ "addl $dst, $src\t# int" %}
8342 opcode(0x81, 0x00); /* /0 id */
8343 ins_encode(OpcSErm(dst, src), Con8or32(src));
8344 ins_pipe( ialu_reg );
8345 %}
8347 instruct addI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
8348 %{
8349 match(Set dst (AddI dst (LoadI src)));
8350 effect(KILL cr);
8352 ins_cost(125); // XXX
8353 format %{ "addl $dst, $src\t# int" %}
8354 opcode(0x03);
8355 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
8356 ins_pipe(ialu_reg_mem);
8357 %}
8359 instruct addI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
8360 %{
8361 match(Set dst (StoreI dst (AddI (LoadI dst) src)));
8362 effect(KILL cr);
8364 ins_cost(150); // XXX
8365 format %{ "addl $dst, $src\t# int" %}
8366 opcode(0x01); /* Opcode 01 /r */
8367 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
8368 ins_pipe(ialu_mem_reg);
8369 %}
8371 instruct addI_mem_imm(memory dst, immI src, rFlagsReg cr)
8372 %{
8373 match(Set dst (StoreI dst (AddI (LoadI dst) src)));
8374 effect(KILL cr);
8376 ins_cost(125); // XXX
8377 format %{ "addl $dst, $src\t# int" %}
8378 opcode(0x81); /* Opcode 81 /0 id */
8379 ins_encode(REX_mem(dst), OpcSE(src), RM_opc_mem(0x00, dst), Con8or32(src));
8380 ins_pipe(ialu_mem_imm);
8381 %}
// AddI with immediate 1 becomes INCL when UseIncDec allows it.
8383 instruct incI_rReg(rRegI dst, immI1 src, rFlagsReg cr)
8384 %{
8385 predicate(UseIncDec);
8386 match(Set dst (AddI dst src));
8387 effect(KILL cr);
8389 format %{ "incl $dst\t# int" %}
8390 opcode(0xFF, 0x00); // FF /0
8391 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8392 ins_pipe(ialu_reg);
8393 %}
8395 instruct incI_mem(memory dst, immI1 src, rFlagsReg cr)
8396 %{
8397 predicate(UseIncDec);
8398 match(Set dst (StoreI dst (AddI (LoadI dst) src)));
8399 effect(KILL cr);
8401 ins_cost(125); // XXX
8402 format %{ "incl $dst\t# int" %}
8403 opcode(0xFF); /* Opcode FF /0 */
8404 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(0x00, dst));
8405 ins_pipe(ialu_mem_imm);
8406 %}
// AddI with immediate -1 becomes DECL (matcher canonicalizes SubI 1 to
// AddI -1, hence the AddI match).
8408 // XXX why does that use AddI
8409 instruct decI_rReg(rRegI dst, immI_M1 src, rFlagsReg cr)
8410 %{
8411 predicate(UseIncDec);
8412 match(Set dst (AddI dst src));
8413 effect(KILL cr);
8415 format %{ "decl $dst\t# int" %}
8416 opcode(0xFF, 0x01); // FF /1
8417 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8418 ins_pipe(ialu_reg);
8419 %}
8421 // XXX why does that use AddI
8422 instruct decI_mem(memory dst, immI_M1 src, rFlagsReg cr)
8423 %{
8424 predicate(UseIncDec);
8425 match(Set dst (StoreI dst (AddI (LoadI dst) src)));
8426 effect(KILL cr);
8428 ins_cost(125); // XXX
8429 format %{ "decl $dst\t# int" %}
8430 opcode(0xFF); /* Opcode FF /1 */
8431 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(0x01, dst));
8432 ins_pipe(ialu_mem_imm);
8433 %}
// Three-operand add via LEA: no flags clobber, so no rFlagsReg effect.
// addr32 (0x67) prefix forces the 32-bit address-size computation.
8435 instruct leaI_rReg_immI(rRegI dst, rRegI src0, immI src1)
8436 %{
8437 match(Set dst (AddI src0 src1));
8439 ins_cost(110);
8440 format %{ "addr32 leal $dst, [$src0 + $src1]\t# int" %}
8441 opcode(0x8D); /* 0x8D /r */
8442 ins_encode(Opcode(0x67), REX_reg_reg(dst, src0), OpcP, reg_lea(dst, src0, src1)); // XXX
8443 ins_pipe(ialu_reg_reg);
8444 %}
// Long addition: same operand-form matrix as the int versions, using the
// REX.W (wide) encodings.
8446 instruct addL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
8447 %{
8448 match(Set dst (AddL dst src));
8449 effect(KILL cr);
8451 format %{ "addq $dst, $src\t# long" %}
8452 opcode(0x03);
8453 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
8454 ins_pipe(ialu_reg_reg);
8455 %}
8457 instruct addL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
8458 %{
8459 match(Set dst (AddL dst src));
8460 effect(KILL cr);
8462 format %{ "addq $dst, $src\t# long" %}
8463 opcode(0x81, 0x00); /* /0 id */
8464 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
8465 ins_pipe( ialu_reg );
8466 %}
8468 instruct addL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
8469 %{
8470 match(Set dst (AddL dst (LoadL src)));
8471 effect(KILL cr);
8473 ins_cost(125); // XXX
8474 format %{ "addq $dst, $src\t# long" %}
8475 opcode(0x03);
8476 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
8477 ins_pipe(ialu_reg_mem);
8478 %}
8480 instruct addL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
8481 %{
8482 match(Set dst (StoreL dst (AddL (LoadL dst) src)));
8483 effect(KILL cr);
8485 ins_cost(150); // XXX
8486 format %{ "addq $dst, $src\t# long" %}
8487 opcode(0x01); /* Opcode 01 /r */
8488 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
8489 ins_pipe(ialu_mem_reg);
8490 %}
8492 instruct addL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
8493 %{
8494 match(Set dst (StoreL dst (AddL (LoadL dst) src)));
8495 effect(KILL cr);
8497 ins_cost(125); // XXX
8498 format %{ "addq $dst, $src\t# long" %}
8499 opcode(0x81); /* Opcode 81 /0 id */
8500 ins_encode(REX_mem_wide(dst),
8501 OpcSE(src), RM_opc_mem(0x00, dst), Con8or32(src));
8502 ins_pipe(ialu_mem_imm);
8503 %}
// Increment a long register by one: AddL with immL1 strength-reduced to
// INCQ (FF /0, REX.W via REX_reg_wide) when UseIncDec is enabled.
// FIX: dst was declared rRegI, but the matched ideal node is AddL; the
// operand must come from the long register class so the register mask and
// ideal type agree (cf. sibling decL_rReg and addL_rReg_imm, which use
// rRegL). The emitted encoding is unchanged.
8505 instruct incL_rReg(rRegL dst, immL1 src, rFlagsReg cr)
8506 %{
8507 predicate(UseIncDec);
8508 match(Set dst (AddL dst src));
8509 effect(KILL cr);
8511 format %{ "incq $dst\t# long" %}
8512 opcode(0xFF, 0x00); // FF /0
8513 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
8514 ins_pipe(ialu_reg);
8515 %}
// INC/DEC strength reductions for long add of +1/-1 (memory forms and
// DEC register form), plus the flags-free LEA form for three-operand
// long addition.
8517 instruct incL_mem(memory dst, immL1 src, rFlagsReg cr)
8518 %{
8519 predicate(UseIncDec);
8520 match(Set dst (StoreL dst (AddL (LoadL dst) src)));
8521 effect(KILL cr);
8523 ins_cost(125); // XXX
8524 format %{ "incq $dst\t# long" %}
8525 opcode(0xFF); /* Opcode FF /0 */
8526 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(0x00, dst));
8527 ins_pipe(ialu_mem_imm);
8528 %}
// AddL with -1 (matcher canonicalizes SubL 1 to AddL -1), hence AddL.
8530 // XXX why does that use AddL
8531 instruct decL_rReg(rRegL dst, immL_M1 src, rFlagsReg cr)
8532 %{
8533 predicate(UseIncDec);
8534 match(Set dst (AddL dst src));
8535 effect(KILL cr);
8537 format %{ "decq $dst\t# long" %}
8538 opcode(0xFF, 0x01); // FF /1
8539 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
8540 ins_pipe(ialu_reg);
8541 %}
8543 // XXX why does that use AddL
8544 instruct decL_mem(memory dst, immL_M1 src, rFlagsReg cr)
8545 %{
8546 predicate(UseIncDec);
8547 match(Set dst (StoreL dst (AddL (LoadL dst) src)));
8548 effect(KILL cr);
8550 ins_cost(125); // XXX
8551 format %{ "decq $dst\t# long" %}
8552 opcode(0xFF); /* Opcode FF /1 */
8553 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(0x01, dst));
8554 ins_pipe(ialu_mem_imm);
8555 %}
// Three-operand long add via LEAQ: does not clobber flags.
8557 instruct leaL_rReg_immL(rRegL dst, rRegL src0, immL32 src1)
8558 %{
8559 match(Set dst (AddL src0 src1));
8561 ins_cost(110);
8562 format %{ "leaq $dst, [$src0 + $src1]\t# long" %}
8563 opcode(0x8D); /* 0x8D /r */
8564 ins_encode(REX_reg_reg_wide(dst, src0), OpcP, reg_lea(dst, src0, src1)); // XXX
8565 ins_pipe(ialu_reg_reg);
8566 %}
// Pointer addition (AddP) — full 64-bit ADDQ/LEAQ — and the zero-size
// cast pseudo-instructions, which exist only for the type system and
// emit no code.
8568 instruct addP_rReg(rRegP dst, rRegL src, rFlagsReg cr)
8569 %{
8570 match(Set dst (AddP dst src));
8571 effect(KILL cr);
8573 format %{ "addq $dst, $src\t# ptr" %}
8574 opcode(0x03);
8575 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
8576 ins_pipe(ialu_reg_reg);
8577 %}
8579 instruct addP_rReg_imm(rRegP dst, immL32 src, rFlagsReg cr)
8580 %{
8581 match(Set dst (AddP dst src));
8582 effect(KILL cr);
8584 format %{ "addq $dst, $src\t# ptr" %}
8585 opcode(0x81, 0x00); /* /0 id */
8586 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
8587 ins_pipe( ialu_reg );
8588 %}
8590 // XXX addP mem ops ????
// Flags-free pointer add: LEAQ, used when the flags register is live.
8592 instruct leaP_rReg_imm(rRegP dst, rRegP src0, immL32 src1)
8593 %{
8594 match(Set dst (AddP src0 src1));
8596 ins_cost(110);
8597 format %{ "leaq $dst, [$src0 + $src1]\t# ptr" %}
8598 opcode(0x8D); /* 0x8D /r */
8599 ins_encode(REX_reg_reg_wide(dst, src0), OpcP, reg_lea(dst, src0, src1));// XXX
8600 ins_pipe(ialu_reg_reg);
8601 %}
8603 instruct checkCastPP(rRegP dst)
8604 %{
8605 match(Set dst (CheckCastPP dst));
8607 size(0);
8608 format %{ "# checkcastPP of $dst" %}
8609 ins_encode(/* empty encoding */);
8610 ins_pipe(empty);
8611 %}
8613 instruct castPP(rRegP dst)
8614 %{
8615 match(Set dst (CastPP dst));
8617 size(0);
8618 format %{ "# castPP of $dst" %}
8619 ins_encode(/* empty encoding */);
8620 ins_pipe(empty);
8621 %}
8623 instruct castII(rRegI dst)
8624 %{
8625 match(Set dst (CastII dst));
8627 size(0);
8628 format %{ "# castII of $dst" %}
8629 ins_encode(/* empty encoding */);
8630 ins_cost(0);
8631 ins_pipe(empty);
8632 %}
8634 // LoadP-locked same as a regular LoadP when used with compare-swap
// Load-locked forms: on x86 these are ordinary MOVQ loads — the pairing
// with a subsequent conditional store (CMPXCHG) provides the atomicity.
8635 instruct loadPLocked(rRegP dst, memory mem)
8636 %{
8637 match(Set dst (LoadPLocked mem));
8639 ins_cost(125); // XXX
8640 format %{ "movq $dst, $mem\t# ptr locked" %}
8641 opcode(0x8B);
8642 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
8643 ins_pipe(ialu_reg_mem); // XXX
8644 %}
8646 // LoadL-locked - same as a regular LoadL when used with compare-swap
8647 instruct loadLLocked(rRegL dst, memory mem)
8648 %{
8649 match(Set dst (LoadLLocked mem));
8651 ins_cost(125); // XXX
8652 format %{ "movq $dst, $mem\t# long locked" %}
8653 opcode(0x8B);
8654 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
8655 ins_pipe(ialu_reg_mem); // XXX
8656 %}
8658 // Conditional-store of the updated heap-top.
8659 // Used during allocation of the shared heap.
8660 // Sets flags (EQ) on success. Implemented with a CMPXCHG on Intel.
// Conditional stores implemented as LOCK CMPXCHG: oldval is pinned to RAX
// (the implicit CMPXCHG comparand) and clobbered; success is reported via
// ZF in the flags result.
8662 instruct storePConditional(memory heap_top_ptr,
8663 rax_RegP oldval, rRegP newval,
8664 rFlagsReg cr)
8665 %{
8666 match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));
8668 format %{ "cmpxchgq $heap_top_ptr, $newval\t# (ptr) "
8669 "If rax == $heap_top_ptr then store $newval into $heap_top_ptr" %}
8670 opcode(0x0F, 0xB1);
8671 ins_encode(lock_prefix,
8672 REX_reg_mem_wide(newval, heap_top_ptr),
8673 OpcP, OpcS,
8674 reg_mem(newval, heap_top_ptr));
8675 ins_pipe(pipe_cmpxchg);
8676 %}
8678 // Conditional-store of an int value.
8679 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
8680 instruct storeIConditional(memory mem, rax_RegI oldval, rRegI newval, rFlagsReg cr)
8681 %{
8682 match(Set cr (StoreIConditional mem (Binary oldval newval)));
// CMPXCHG writes the current memory value back into RAX on failure.
8683 effect(KILL oldval);
8685 format %{ "cmpxchgl $mem, $newval\t# If rax == $mem then store $newval into $mem" %}
8686 opcode(0x0F, 0xB1);
8687 ins_encode(lock_prefix,
8688 REX_reg_mem(newval, mem),
8689 OpcP, OpcS,
8690 reg_mem(newval, mem));
8691 ins_pipe(pipe_cmpxchg);
8692 %}
8694 // Conditional-store of a long value.
8695 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
8696 instruct storeLConditional(memory mem, rax_RegL oldval, rRegL newval, rFlagsReg cr)
8697 %{
8698 match(Set cr (StoreLConditional mem (Binary oldval newval)));
8699 effect(KILL oldval);
8701 format %{ "cmpxchgq $mem, $newval\t# If rax == $mem then store $newval into $mem" %}
8702 opcode(0x0F, 0xB1);
8703 ins_encode(lock_prefix,
8704 REX_reg_mem_wide(newval, mem),
8705 OpcP, OpcS,
8706 reg_mem(newval, mem));
8707 ins_pipe(pipe_cmpxchg);
8708 %}
8711 // XXX No flag versions for CompareAndSwap{P,I,L} because matcher can't match them
// CompareAndSwap for pointer/long/int/narrow-oop. Pattern: LOCK CMPXCHG,
// then SETE to materialize the ZF success bit and MOVZBL to widen it into
// the int result register. oldval lives in RAX and is clobbered.
8712 instruct compareAndSwapP(rRegI res,
8713 memory mem_ptr,
8714 rax_RegP oldval, rRegP newval,
8715 rFlagsReg cr)
8716 %{
8717 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
8718 effect(KILL cr, KILL oldval);
8720 format %{ "cmpxchgq $mem_ptr,$newval\t# "
8721 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
8722 "sete $res\n\t"
8723 "movzbl $res, $res" %}
8724 opcode(0x0F, 0xB1);
8725 ins_encode(lock_prefix,
8726 REX_reg_mem_wide(newval, mem_ptr),
8727 OpcP, OpcS,
8728 reg_mem(newval, mem_ptr),
8729 REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
8730 REX_reg_breg(res, res), // movzbl
8731 Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
8732 ins_pipe( pipe_cmpxchg );
8733 %}
8735 instruct compareAndSwapL(rRegI res,
8736 memory mem_ptr,
8737 rax_RegL oldval, rRegL newval,
8738 rFlagsReg cr)
8739 %{
8740 match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
8741 effect(KILL cr, KILL oldval);
8743 format %{ "cmpxchgq $mem_ptr,$newval\t# "
8744 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
8745 "sete $res\n\t"
8746 "movzbl $res, $res" %}
8747 opcode(0x0F, 0xB1);
8748 ins_encode(lock_prefix,
8749 REX_reg_mem_wide(newval, mem_ptr),
8750 OpcP, OpcS,
8751 reg_mem(newval, mem_ptr),
8752 REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
8753 REX_reg_breg(res, res), // movzbl
8754 Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
8755 ins_pipe( pipe_cmpxchg );
8756 %}
8758 instruct compareAndSwapI(rRegI res,
8759 memory mem_ptr,
8760 rax_RegI oldval, rRegI newval,
8761 rFlagsReg cr)
8762 %{
8763 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
8764 effect(KILL cr, KILL oldval);
8766 format %{ "cmpxchgl $mem_ptr,$newval\t# "
8767 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
8768 "sete $res\n\t"
8769 "movzbl $res, $res" %}
8770 opcode(0x0F, 0xB1);
8771 ins_encode(lock_prefix,
8772 REX_reg_mem(newval, mem_ptr),
8773 OpcP, OpcS,
8774 reg_mem(newval, mem_ptr),
8775 REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
8776 REX_reg_breg(res, res), // movzbl
8777 Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
8778 ins_pipe( pipe_cmpxchg );
8779 %}
// Narrow (compressed) oop CAS uses the 32-bit CMPXCHGL encoding.
8782 instruct compareAndSwapN(rRegI res,
8783 memory mem_ptr,
8784 rax_RegN oldval, rRegN newval,
8785 rFlagsReg cr) %{
8786 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
8787 effect(KILL cr, KILL oldval);
8789 format %{ "cmpxchgl $mem_ptr,$newval\t# "
8790 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
8791 "sete $res\n\t"
8792 "movzbl $res, $res" %}
8793 opcode(0x0F, 0xB1);
8794 ins_encode(lock_prefix,
8795 REX_reg_mem(newval, mem_ptr),
8796 OpcP, OpcS,
8797 reg_mem(newval, mem_ptr),
8798 REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
8799 REX_reg_breg(res, res), // movzbl
8800 Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
8801 ins_pipe( pipe_cmpxchg );
8802 %}
8804 //----------Subtraction Instructions-------------------------------------------
8806 // Integer Subtraction Instructions
// Integer subtraction: register/immediate/memory operand forms and
// read-modify-write memory forms, mirroring the addition instructs.
8807 instruct subI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
8808 %{
8809 match(Set dst (SubI dst src));
8810 effect(KILL cr);
8812 format %{ "subl $dst, $src\t# int" %}
8813 opcode(0x2B);
8814 ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
8815 ins_pipe(ialu_reg_reg);
8816 %}
8818 instruct subI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
8819 %{
8820 match(Set dst (SubI dst src));
8821 effect(KILL cr);
8823 format %{ "subl $dst, $src\t# int" %}
8824 opcode(0x81, 0x05); /* Opcode 81 /5 */
8825 ins_encode(OpcSErm(dst, src), Con8or32(src));
8826 ins_pipe(ialu_reg);
8827 %}
8829 instruct subI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
8830 %{
8831 match(Set dst (SubI dst (LoadI src)));
8832 effect(KILL cr);
8834 ins_cost(125);
8835 format %{ "subl $dst, $src\t# int" %}
8836 opcode(0x2B);
8837 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
8838 ins_pipe(ialu_reg_mem);
8839 %}
8841 instruct subI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
8842 %{
8843 match(Set dst (StoreI dst (SubI (LoadI dst) src)));
8844 effect(KILL cr);
8846 ins_cost(150);
8847 format %{ "subl $dst, $src\t# int" %}
8848 opcode(0x29); /* Opcode 29 /r */
8849 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
8850 ins_pipe(ialu_mem_reg);
8851 %}
8853 instruct subI_mem_imm(memory dst, immI src, rFlagsReg cr)
8854 %{
8855 match(Set dst (StoreI dst (SubI (LoadI dst) src)));
8856 effect(KILL cr);
8858 ins_cost(125); // XXX
8859 format %{ "subl $dst, $src\t# int" %}
8860 opcode(0x81); /* Opcode 81 /5 id */
8861 ins_encode(REX_mem(dst), OpcSE(src), RM_opc_mem(0x05, dst), Con8or32(src));
8862 ins_pipe(ialu_mem_imm);
8863 %}
// Long subtraction, register form (REX.W wide encoding).
8865 instruct subL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
8866 %{
8867 match(Set dst (SubL dst src));
8868 effect(KILL cr);
8870 format %{ "subq $dst, $src\t# long" %}
8871 opcode(0x2B);
8872 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
8873 ins_pipe(ialu_reg_reg);
8874 %}
// Subtract a 32-bit immediate from a long register (SUBQ, 81 /5,
// sign-extended 8- or 32-bit immediate via Con8or32).
// FIX: dst was declared rRegI, but the matched ideal node is SubL; use
// rRegL so the register class matches the long ideal type (cf. sibling
// subL_rReg and addL_rReg_imm). The emitted encoding is unchanged.
8876 instruct subL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
8877 %{
8878 match(Set dst (SubL dst src));
8879 effect(KILL cr);
8881 format %{ "subq $dst, $src\t# long" %}
8882 opcode(0x81, 0x05); /* Opcode 81 /5 */
8883 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
8884 ins_pipe(ialu_reg);
8885 %}
// Long subtraction memory forms, and pointer-minus-int (matched as
// AddP of a negated int, hence the SubI zero src subtree).
8887 instruct subL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
8888 %{
8889 match(Set dst (SubL dst (LoadL src)));
8890 effect(KILL cr);
8892 ins_cost(125);
8893 format %{ "subq $dst, $src\t# long" %}
8894 opcode(0x2B);
8895 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
8896 ins_pipe(ialu_reg_mem);
8897 %}
8899 instruct subL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
8900 %{
8901 match(Set dst (StoreL dst (SubL (LoadL dst) src)));
8902 effect(KILL cr);
8904 ins_cost(150);
8905 format %{ "subq $dst, $src\t# long" %}
8906 opcode(0x29); /* Opcode 29 /r */
8907 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
8908 ins_pipe(ialu_mem_reg);
8909 %}
8911 instruct subL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
8912 %{
8913 match(Set dst (StoreL dst (SubL (LoadL dst) src)));
8914 effect(KILL cr);
8916 ins_cost(125); // XXX
8917 format %{ "subq $dst, $src\t# long" %}
8918 opcode(0x81); /* Opcode 81 /5 id */
8919 ins_encode(REX_mem_wide(dst),
8920 OpcSE(src), RM_opc_mem(0x05, dst), Con8or32(src));
8921 ins_pipe(ialu_mem_imm);
8922 %}
8924 // Subtract from a pointer
8925 // XXX hmpf???
8926 instruct subP_rReg(rRegP dst, rRegI src, immI0 zero, rFlagsReg cr)
8927 %{
8928 match(Set dst (AddP dst (SubI zero src)));
8929 effect(KILL cr);
8931 format %{ "subq $dst, $src\t# ptr - int" %}
8932 opcode(0x2B);
8933 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
8934 ins_pipe(ialu_reg_reg);
8935 %}
8937 instruct negI_rReg(rRegI dst, immI0 zero, rFlagsReg cr)
8938 %{
8939 match(Set dst (SubI zero dst));
8940 effect(KILL cr);
8942 format %{ "negl $dst\t# int" %}
8943 opcode(0xF7, 0x03); // Opcode F7 /3
8944 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8945 ins_pipe(ialu_reg);
8946 %}
8948 instruct negI_mem(memory dst, immI0 zero, rFlagsReg cr)
8949 %{
8950 match(Set dst (StoreI dst (SubI zero (LoadI dst))));
8951 effect(KILL cr);
8953 format %{ "negl $dst\t# int" %}
8954 opcode(0xF7, 0x03); // Opcode F7 /3
8955 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
8956 ins_pipe(ialu_reg);
8957 %}
8959 instruct negL_rReg(rRegL dst, immL0 zero, rFlagsReg cr)
8960 %{
8961 match(Set dst (SubL zero dst));
8962 effect(KILL cr);
8964 format %{ "negq $dst\t# long" %}
8965 opcode(0xF7, 0x03); // Opcode F7 /3
8966 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
8967 ins_pipe(ialu_reg);
8968 %}
8970 instruct negL_mem(memory dst, immL0 zero, rFlagsReg cr)
8971 %{
8972 match(Set dst (StoreL dst (SubL zero (LoadL dst))));
8973 effect(KILL cr);
8975 format %{ "negq $dst\t# long" %}
8976 opcode(0xF7, 0x03); // Opcode F7 /3
8977 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
8978 ins_pipe(ialu_reg);
8979 %}
8982 //----------Multiplication/Division Instructions-------------------------------
8983 // Integer Multiplication Instructions
8984 // Multiply Register
// All multiply rules use the two/three-operand IMUL forms (0F AF /r and 69 /r id)
// and KILL the flags register; they are steered to the ALU0 multiply pipe.
8986 instruct mulI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
8987 %{
8988 match(Set dst (MulI dst src));
8989 effect(KILL cr);
8991 ins_cost(300);
8992 format %{ "imull $dst, $src\t# int" %}
8993 opcode(0x0F, 0xAF);
8994 ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
8995 ins_pipe(ialu_reg_reg_alu0);
8996 %}
// Three-operand form: dst = src * imm  (IMUL r32, r/m32, imm).
8998 instruct mulI_rReg_imm(rRegI dst, rRegI src, immI imm, rFlagsReg cr)
8999 %{
9000 match(Set dst (MulI src imm));
9001 effect(KILL cr);
9003 ins_cost(300);
9004 format %{ "imull $dst, $src, $imm\t# int" %}
9005 opcode(0x69); /* 69 /r id */
9006 ins_encode(REX_reg_reg(dst, src),
9007 OpcSE(imm), reg_reg(dst, src), Con8or32(imm));
9008 ins_pipe(ialu_reg_reg_alu0);
9009 %}
// dst *= [src] with the load folded into the IMUL.
9011 instruct mulI_mem(rRegI dst, memory src, rFlagsReg cr)
9012 %{
9013 match(Set dst (MulI dst (LoadI src)));
9014 effect(KILL cr);
9016 ins_cost(350);
9017 format %{ "imull $dst, $src\t# int" %}
9018 opcode(0x0F, 0xAF);
9019 ins_encode(REX_reg_mem(dst, src), OpcP, OpcS, reg_mem(dst, src));
9020 ins_pipe(ialu_reg_mem_alu0);
9021 %}
// dst = [src] * imm, load folded into the three-operand IMUL.
9023 instruct mulI_mem_imm(rRegI dst, memory src, immI imm, rFlagsReg cr)
9024 %{
9025 match(Set dst (MulI (LoadI src) imm));
9026 effect(KILL cr);
9028 ins_cost(300);
9029 format %{ "imull $dst, $src, $imm\t# int" %}
9030 opcode(0x69); /* 69 /r id */
9031 ins_encode(REX_reg_mem(dst, src),
9032 OpcSE(imm), reg_mem(dst, src), Con8or32(imm));
9033 ins_pipe(ialu_reg_mem_alu0);
9034 %}
// Long multiply variants: same encodings with the REX.W prefix.
9036 instruct mulL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
9037 %{
9038 match(Set dst (MulL dst src));
9039 effect(KILL cr);
9041 ins_cost(300);
9042 format %{ "imulq $dst, $src\t# long" %}
9043 opcode(0x0F, 0xAF);
9044 ins_encode(REX_reg_reg_wide(dst, src), OpcP, OpcS, reg_reg(dst, src));
9045 ins_pipe(ialu_reg_reg_alu0);
9046 %}
9048 instruct mulL_rReg_imm(rRegL dst, rRegL src, immL32 imm, rFlagsReg cr)
9049 %{
9050 match(Set dst (MulL src imm));
9051 effect(KILL cr);
9053 ins_cost(300);
9054 format %{ "imulq $dst, $src, $imm\t# long" %}
9055 opcode(0x69); /* 69 /r id */
9056 ins_encode(REX_reg_reg_wide(dst, src),
9057 OpcSE(imm), reg_reg(dst, src), Con8or32(imm));
9058 ins_pipe(ialu_reg_reg_alu0);
9059 %}
9061 instruct mulL_mem(rRegL dst, memory src, rFlagsReg cr)
9062 %{
9063 match(Set dst (MulL dst (LoadL src)));
9064 effect(KILL cr);
9066 ins_cost(350);
9067 format %{ "imulq $dst, $src\t# long" %}
9068 opcode(0x0F, 0xAF);
9069 ins_encode(REX_reg_mem_wide(dst, src), OpcP, OpcS, reg_mem(dst, src));
9070 ins_pipe(ialu_reg_mem_alu0);
9071 %}
9073 instruct mulL_mem_imm(rRegL dst, memory src, immL32 imm, rFlagsReg cr)
9074 %{
9075 match(Set dst (MulL (LoadL src) imm));
9076 effect(KILL cr);
9078 ins_cost(300);
9079 format %{ "imulq $dst, $src, $imm\t# long" %}
9080 opcode(0x69); /* 69 /r id */
9081 ins_encode(REX_reg_mem_wide(dst, src),
9082 OpcSE(imm), reg_mem(dst, src), Con8or32(imm));
9083 ins_pipe(ialu_reg_mem_alu0);
9084 %}
// High 64 bits of a 128-bit product: one-operand IMUL (F7 /5) writes RDX:RAX;
// the result is taken from RDX, RAX is consumed and clobbered (USE_KILL).
9086 instruct mulHiL_rReg(rdx_RegL dst, no_rax_RegL src, rax_RegL rax, rFlagsReg cr)
9087 %{
9088 match(Set dst (MulHiL src rax));
9089 effect(USE_KILL rax, KILL cr);
9091 ins_cost(300);
9092 format %{ "imulq RDX:RAX, RAX, $src\t# mulhi" %}
9093 opcode(0xF7, 0x5); /* Opcode F7 /5 */
9094 ins_encode(REX_reg_wide(src), OpcP, reg_opc(src));
9095 ins_pipe(ialu_reg_reg_alu0);
9096 %}
// Integer divide. The cdql_enc helper emits the guard that special-cases
// min_int / -1 (which would raise #DE under IDIV) before CDQ + IDIV.
9098 instruct divI_rReg(rax_RegI rax, rdx_RegI rdx, no_rax_rdx_RegI div,
9099 rFlagsReg cr)
9100 %{
9101 match(Set rax (DivI rax div));
9102 effect(KILL rdx, KILL cr);
9104 ins_cost(30*100+10*100); // XXX
9105 format %{ "cmpl rax, 0x80000000\t# idiv\n\t"
9106 "jne,s normal\n\t"
9107 "xorl rdx, rdx\n\t"
9108 "cmpl $div, -1\n\t"
9109 "je,s done\n"
9110 "normal: cdql\n\t"
9111 "idivl $div\n"
9112 "done:" %}
9113 opcode(0xF7, 0x7); /* Opcode F7 /7 */
9114 ins_encode(cdql_enc(div), REX_reg(div), OpcP, reg_opc(div));
9115 ins_pipe(ialu_reg_reg_alu0);
9116 %}
// Long divide with the analogous min_long / -1 guard (cdqq_enc).
9118 instruct divL_rReg(rax_RegL rax, rdx_RegL rdx, no_rax_rdx_RegL div,
9119 rFlagsReg cr)
9120 %{
9121 match(Set rax (DivL rax div));
9122 effect(KILL rdx, KILL cr);
9124 ins_cost(30*100+10*100); // XXX
9125 format %{ "movq rdx, 0x8000000000000000\t# ldiv\n\t"
9126 "cmpq rax, rdx\n\t"
9127 "jne,s normal\n\t"
9128 "xorl rdx, rdx\n\t"
9129 "cmpq $div, -1\n\t"
9130 "je,s done\n"
9131 "normal: cdqq\n\t"
9132 "idivq $div\n"
9133 "done:" %}
9134 opcode(0xF7, 0x7); /* Opcode F7 /7 */
9135 ins_encode(cdqq_enc(div), REX_reg_wide(div), OpcP, reg_opc(div));
9136 ins_pipe(ialu_reg_reg_alu0);
9137 %}
9139 // Integer DIVMOD with Register, both quotient and mod results
// Same IDIV sequence as divI_rReg, but the match produces both RAX (quotient)
// and RDX (remainder) as results instead of killing RDX.
9140 instruct divModI_rReg_divmod(rax_RegI rax, rdx_RegI rdx, no_rax_rdx_RegI div,
9141 rFlagsReg cr)
9142 %{
9143 match(DivModI rax div);
9144 effect(KILL cr);
9146 ins_cost(30*100+10*100); // XXX
9147 format %{ "cmpl rax, 0x80000000\t# idiv\n\t"
9148 "jne,s normal\n\t"
9149 "xorl rdx, rdx\n\t"
9150 "cmpl $div, -1\n\t"
9151 "je,s done\n"
9152 "normal: cdql\n\t"
9153 "idivl $div\n"
9154 "done:" %}
9155 opcode(0xF7, 0x7); /* Opcode F7 /7 */
9156 ins_encode(cdql_enc(div), REX_reg(div), OpcP, reg_opc(div));
9157 ins_pipe(pipe_slow);
9158 %}
9160 // Long DIVMOD with Register, both quotient and mod results
9161 instruct divModL_rReg_divmod(rax_RegL rax, rdx_RegL rdx, no_rax_rdx_RegL div,
9162 rFlagsReg cr)
9163 %{
9164 match(DivModL rax div);
9165 effect(KILL cr);
9167 ins_cost(30*100+10*100); // XXX
9168 format %{ "movq rdx, 0x8000000000000000\t# ldiv\n\t"
9169 "cmpq rax, rdx\n\t"
9170 "jne,s normal\n\t"
9171 "xorl rdx, rdx\n\t"
9172 "cmpq $div, -1\n\t"
9173 "je,s done\n"
9174 "normal: cdqq\n\t"
9175 "idivq $div\n"
9176 "done:" %}
9177 opcode(0xF7, 0x7); /* Opcode F7 /7 */
9178 ins_encode(cdqq_enc(div), REX_reg_wide(div), OpcP, reg_opc(div));
9179 ins_pipe(pipe_slow);
9180 %}
9182 //----------- DivL-By-Constant-Expansions--------------------------------------
9183 // DivI cases are handled by the compiler
9185 // Magic constant, reciprocal of 10
// Materialize the magic reciprocal-of-10 constant 0x6666666666666667
// (see Hacker's Delight, magic-number division) used by the divL_10 expansion.
// Fix: the format string previously displayed 0x666666666666667 — one nibble
// short of the constant actually encoded by load_immL below; the debug
// disassembly now shows the real value.
9186 instruct loadConL_0x6666666666666667(rRegL dst)
9187 %{
9188 effect(DEF dst);
9190 format %{ "movq $dst, #0x6666666666666667\t# Used in div-by-10" %}
9191 ins_encode(load_immL(dst, 0x6666666666666667));
9192 ins_pipe(ialu_reg);
9193 %}
// Helper for divL_10: high half of the 128-bit product via one-operand IMUL;
// result in RDX, RAX consumed and clobbered.
9195 instruct mul_hi(rdx_RegL dst, no_rax_RegL src, rax_RegL rax, rFlagsReg cr)
9196 %{
9197 effect(DEF dst, USE src, USE_KILL rax, KILL cr);
9199 format %{ "imulq rdx:rax, rax, $src\t# Used in div-by-10" %}
9200 opcode(0xF7, 0x5); /* Opcode F7 /5 */
9201 ins_encode(REX_reg_wide(src), OpcP, reg_opc(src));
9202 ins_pipe(ialu_reg_reg_alu0);
9203 %}
// Helper: arithmetic shift right by 63 (0x3F) — extracts the sign bit mask.
9205 instruct sarL_rReg_63(rRegL dst, rFlagsReg cr)
9206 %{
9207 effect(USE_DEF dst, KILL cr);
9209 format %{ "sarq $dst, #63\t# Used in div-by-10" %}
9210 opcode(0xC1, 0x7); /* C1 /7 ib */
9211 ins_encode(reg_opc_imm_wide(dst, 0x3F));
9212 ins_pipe(ialu_reg);
9213 %}
// Helper: arithmetic shift right by 2, the final scaling step of div-by-10.
9215 instruct sarL_rReg_2(rRegL dst, rFlagsReg cr)
9216 %{
9217 effect(USE_DEF dst, KILL cr);
9219 format %{ "sarq $dst, #2\t# Used in div-by-10" %}
9220 opcode(0xC1, 0x7); /* C1 /7 ib */
9221 ins_encode(reg_opc_imm_wide(dst, 0x2));
9222 ins_pipe(ialu_reg);
9223 %}
// Long division by the constant 10, expanded into the multiply-by-reciprocal
// sequence: dst = hi64(src * 0x6666666666666667) >> 2, corrected by the sign
// of src (sarq src,63 / subq) so the result rounds toward zero.
9225 instruct divL_10(rdx_RegL dst, no_rax_RegL src, immL10 div)
9226 %{
9227 match(Set dst (DivL src div));
9229 ins_cost((5+8)*100);
9230 expand %{
9231 rax_RegL rax; // Killed temp
9232 rFlagsReg cr; // Killed
9233 loadConL_0x6666666666666667(rax); // movq rax, 0x6666666666666667
9234 mul_hi(dst, src, rax, cr); // mulq rdx:rax <= rax * $src
9235 sarL_rReg_63(src, cr); // sarq src, 63
9236 sarL_rReg_2(dst, cr); // sarq rdx, 2
9237 subL_rReg(dst, src, cr); // subl rdx, src
9238 %}
9239 %}
9241 //-----------------------------------------------------------------------------
// Integer remainder: same guarded IDIV sequence as divI_rReg, but the result
// is the remainder in RDX; RAX (the dividend) is clobbered.
9243 instruct modI_rReg(rdx_RegI rdx, rax_RegI rax, no_rax_rdx_RegI div,
9244 rFlagsReg cr)
9245 %{
9246 match(Set rdx (ModI rax div));
9247 effect(KILL rax, KILL cr);
9249 ins_cost(300); // XXX
9250 format %{ "cmpl rax, 0x80000000\t# irem\n\t"
9251 "jne,s normal\n\t"
9252 "xorl rdx, rdx\n\t"
9253 "cmpl $div, -1\n\t"
9254 "je,s done\n"
9255 "normal: cdql\n\t"
9256 "idivl $div\n"
9257 "done:" %}
9258 opcode(0xF7, 0x7); /* Opcode F7 /7 */
9259 ins_encode(cdql_enc(div), REX_reg(div), OpcP, reg_opc(div));
9260 ins_pipe(ialu_reg_reg_alu0);
9261 %}
// Long remainder: guarded IDIVQ, remainder in RDX, RAX clobbered.
9263 instruct modL_rReg(rdx_RegL rdx, rax_RegL rax, no_rax_rdx_RegL div,
9264 rFlagsReg cr)
9265 %{
9266 match(Set rdx (ModL rax div));
9267 effect(KILL rax, KILL cr);
9269 ins_cost(300); // XXX
9270 format %{ "movq rdx, 0x8000000000000000\t# lrem\n\t"
9271 "cmpq rax, rdx\n\t"
9272 "jne,s normal\n\t"
9273 "xorl rdx, rdx\n\t"
9274 "cmpq $div, -1\n\t"
9275 "je,s done\n"
9276 "normal: cdqq\n\t"
9277 "idivq $div\n"
9278 "done:" %}
9279 opcode(0xF7, 0x7); /* Opcode F7 /7 */
9280 ins_encode(cdqq_enc(div), REX_reg_wide(div), OpcP, reg_opc(div));
9281 ins_pipe(ialu_reg_reg_alu0);
9282 %}
9284 // Integer Shift Instructions
// Each shift comes in reg/mem x {by-one (D1), 8-bit immediate (C1 ib),
// variable-in-CL (D3)} forms; the /digit in 'secondary' selects SAL(4),
// SHR(5) or SAR(7). All shifts KILL the flags register.
9285 // Shift Left by one
9286 instruct salI_rReg_1(rRegI dst, immI1 shift, rFlagsReg cr)
9287 %{
9288 match(Set dst (LShiftI dst shift));
9289 effect(KILL cr);
9291 format %{ "sall $dst, $shift" %}
9292 opcode(0xD1, 0x4); /* D1 /4 */
9293 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
9294 ins_pipe(ialu_reg);
9295 %}
9297 // Shift Left by one
9298 instruct salI_mem_1(memory dst, immI1 shift, rFlagsReg cr)
9299 %{
9300 match(Set dst (StoreI dst (LShiftI (LoadI dst) shift)));
9301 effect(KILL cr);
9303 format %{ "sall $dst, $shift\t" %}
9304 opcode(0xD1, 0x4); /* D1 /4 */
9305 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
9306 ins_pipe(ialu_mem_imm);
9307 %}
9309 // Shift Left by 8-bit immediate
9310 instruct salI_rReg_imm(rRegI dst, immI8 shift, rFlagsReg cr)
9311 %{
9312 match(Set dst (LShiftI dst shift));
9313 effect(KILL cr);
9315 format %{ "sall $dst, $shift" %}
9316 opcode(0xC1, 0x4); /* C1 /4 ib */
9317 ins_encode(reg_opc_imm(dst, shift));
9318 ins_pipe(ialu_reg);
9319 %}
9321 // Shift Left by 8-bit immediate
9322 instruct salI_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
9323 %{
9324 match(Set dst (StoreI dst (LShiftI (LoadI dst) shift)));
9325 effect(KILL cr);
9327 format %{ "sall $dst, $shift" %}
9328 opcode(0xC1, 0x4); /* C1 /4 ib */
9329 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst), Con8or32(shift));
9330 ins_pipe(ialu_mem_imm);
9331 %}
9333 // Shift Left by variable
// Shift count is pinned to CL by the rcx_RegI operand class, as the D3 form requires.
9334 instruct salI_rReg_CL(rRegI dst, rcx_RegI shift, rFlagsReg cr)
9335 %{
9336 match(Set dst (LShiftI dst shift));
9337 effect(KILL cr);
9339 format %{ "sall $dst, $shift" %}
9340 opcode(0xD3, 0x4); /* D3 /4 */
9341 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
9342 ins_pipe(ialu_reg_reg);
9343 %}
9345 // Shift Left by variable
9346 instruct salI_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
9347 %{
9348 match(Set dst (StoreI dst (LShiftI (LoadI dst) shift)));
9349 effect(KILL cr);
9351 format %{ "sall $dst, $shift" %}
9352 opcode(0xD3, 0x4); /* D3 /4 */
9353 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
9354 ins_pipe(ialu_mem_reg);
9355 %}
9357 // Arithmetic shift right by one
9358 instruct sarI_rReg_1(rRegI dst, immI1 shift, rFlagsReg cr)
9359 %{
9360 match(Set dst (RShiftI dst shift));
9361 effect(KILL cr);
9363 format %{ "sarl $dst, $shift" %}
9364 opcode(0xD1, 0x7); /* D1 /7 */
9365 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
9366 ins_pipe(ialu_reg);
9367 %}
9369 // Arithmetic shift right by one
9370 instruct sarI_mem_1(memory dst, immI1 shift, rFlagsReg cr)
9371 %{
9372 match(Set dst (StoreI dst (RShiftI (LoadI dst) shift)));
9373 effect(KILL cr);
9375 format %{ "sarl $dst, $shift" %}
9376 opcode(0xD1, 0x7); /* D1 /7 */
9377 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
9378 ins_pipe(ialu_mem_imm);
9379 %}
9381 // Arithmetic Shift Right by 8-bit immediate
9382 instruct sarI_rReg_imm(rRegI dst, immI8 shift, rFlagsReg cr)
9383 %{
9384 match(Set dst (RShiftI dst shift));
9385 effect(KILL cr);
9387 format %{ "sarl $dst, $shift" %}
9388 opcode(0xC1, 0x7); /* C1 /7 ib */
9389 ins_encode(reg_opc_imm(dst, shift));
9390 ins_pipe(ialu_mem_imm);
9391 %}
9393 // Arithmetic Shift Right by 8-bit immediate
9394 instruct sarI_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
9395 %{
9396 match(Set dst (StoreI dst (RShiftI (LoadI dst) shift)));
9397 effect(KILL cr);
9399 format %{ "sarl $dst, $shift" %}
9400 opcode(0xC1, 0x7); /* C1 /7 ib */
9401 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst), Con8or32(shift));
9402 ins_pipe(ialu_mem_imm);
9403 %}
9405 // Arithmetic Shift Right by variable
9406 instruct sarI_rReg_CL(rRegI dst, rcx_RegI shift, rFlagsReg cr)
9407 %{
9408 match(Set dst (RShiftI dst shift));
9409 effect(KILL cr);
9411 format %{ "sarl $dst, $shift" %}
9412 opcode(0xD3, 0x7); /* D3 /7 */
9413 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
9414 ins_pipe(ialu_reg_reg);
9415 %}
9417 // Arithmetic Shift Right by variable
9418 instruct sarI_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
9419 %{
9420 match(Set dst (StoreI dst (RShiftI (LoadI dst) shift)));
9421 effect(KILL cr);
9423 format %{ "sarl $dst, $shift" %}
9424 opcode(0xD3, 0x7); /* D3 /7 */
9425 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
9426 ins_pipe(ialu_mem_reg);
9427 %}
9429 // Logical shift right by one
9430 instruct shrI_rReg_1(rRegI dst, immI1 shift, rFlagsReg cr)
9431 %{
9432 match(Set dst (URShiftI dst shift));
9433 effect(KILL cr);
9435 format %{ "shrl $dst, $shift" %}
9436 opcode(0xD1, 0x5); /* D1 /5 */
9437 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
9438 ins_pipe(ialu_reg);
9439 %}
9441 // Logical shift right by one
9442 instruct shrI_mem_1(memory dst, immI1 shift, rFlagsReg cr)
9443 %{
9444 match(Set dst (StoreI dst (URShiftI (LoadI dst) shift)));
9445 effect(KILL cr);
9447 format %{ "shrl $dst, $shift" %}
9448 opcode(0xD1, 0x5); /* D1 /5 */
9449 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
9450 ins_pipe(ialu_mem_imm);
9451 %}
9453 // Logical Shift Right by 8-bit immediate
9454 instruct shrI_rReg_imm(rRegI dst, immI8 shift, rFlagsReg cr)
9455 %{
9456 match(Set dst (URShiftI dst shift));
9457 effect(KILL cr);
9459 format %{ "shrl $dst, $shift" %}
9460 opcode(0xC1, 0x5); /* C1 /5 ib */
9461 ins_encode(reg_opc_imm(dst, shift));
9462 ins_pipe(ialu_reg);
9463 %}
9465 // Logical Shift Right by 8-bit immediate
9466 instruct shrI_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
9467 %{
9468 match(Set dst (StoreI dst (URShiftI (LoadI dst) shift)));
9469 effect(KILL cr);
9471 format %{ "shrl $dst, $shift" %}
9472 opcode(0xC1, 0x5); /* C1 /5 ib */
9473 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst), Con8or32(shift));
9474 ins_pipe(ialu_mem_imm);
9475 %}
9477 // Logical Shift Right by variable
9478 instruct shrI_rReg_CL(rRegI dst, rcx_RegI shift, rFlagsReg cr)
9479 %{
9480 match(Set dst (URShiftI dst shift));
9481 effect(KILL cr);
9483 format %{ "shrl $dst, $shift" %}
9484 opcode(0xD3, 0x5); /* D3 /5 */
9485 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
9486 ins_pipe(ialu_reg_reg);
9487 %}
9489 // Logical Shift Right by variable
9490 instruct shrI_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
9491 %{
9492 match(Set dst (StoreI dst (URShiftI (LoadI dst) shift)));
9493 effect(KILL cr);
9495 format %{ "shrl $dst, $shift" %}
9496 opcode(0xD3, 0x5); /* D3 /5 */
9497 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
9498 ins_pipe(ialu_mem_reg);
9499 %}
9501 // Long Shift Instructions
// Same matrix as the integer shifts above, with REX.W encodings; the shift
// count operand is still a 32-bit int (immI1/immI8/rcx_RegI) per the ideal IR.
9502 // Shift Left by one
9503 instruct salL_rReg_1(rRegL dst, immI1 shift, rFlagsReg cr)
9504 %{
9505 match(Set dst (LShiftL dst shift));
9506 effect(KILL cr);
9508 format %{ "salq $dst, $shift" %}
9509 opcode(0xD1, 0x4); /* D1 /4 */
9510 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
9511 ins_pipe(ialu_reg);
9512 %}
9514 // Shift Left by one
9515 instruct salL_mem_1(memory dst, immI1 shift, rFlagsReg cr)
9516 %{
9517 match(Set dst (StoreL dst (LShiftL (LoadL dst) shift)));
9518 effect(KILL cr);
9520 format %{ "salq $dst, $shift" %}
9521 opcode(0xD1, 0x4); /* D1 /4 */
9522 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
9523 ins_pipe(ialu_mem_imm);
9524 %}
9526 // Shift Left by 8-bit immediate
9527 instruct salL_rReg_imm(rRegL dst, immI8 shift, rFlagsReg cr)
9528 %{
9529 match(Set dst (LShiftL dst shift));
9530 effect(KILL cr);
9532 format %{ "salq $dst, $shift" %}
9533 opcode(0xC1, 0x4); /* C1 /4 ib */
9534 ins_encode(reg_opc_imm_wide(dst, shift));
9535 ins_pipe(ialu_reg);
9536 %}
9538 // Shift Left by 8-bit immediate
9539 instruct salL_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
9540 %{
9541 match(Set dst (StoreL dst (LShiftL (LoadL dst) shift)));
9542 effect(KILL cr);
9544 format %{ "salq $dst, $shift" %}
9545 opcode(0xC1, 0x4); /* C1 /4 ib */
9546 ins_encode(REX_mem_wide(dst), OpcP,
9547 RM_opc_mem(secondary, dst), Con8or32(shift));
9548 ins_pipe(ialu_mem_imm);
9549 %}
9551 // Shift Left by variable
9552 instruct salL_rReg_CL(rRegL dst, rcx_RegI shift, rFlagsReg cr)
9553 %{
9554 match(Set dst (LShiftL dst shift));
9555 effect(KILL cr);
9557 format %{ "salq $dst, $shift" %}
9558 opcode(0xD3, 0x4); /* D3 /4 */
9559 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
9560 ins_pipe(ialu_reg_reg);
9561 %}
9563 // Shift Left by variable
9564 instruct salL_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
9565 %{
9566 match(Set dst (StoreL dst (LShiftL (LoadL dst) shift)));
9567 effect(KILL cr);
9569 format %{ "salq $dst, $shift" %}
9570 opcode(0xD3, 0x4); /* D3 /4 */
9571 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
9572 ins_pipe(ialu_mem_reg);
9573 %}
9575 // Arithmetic shift right by one
9576 instruct sarL_rReg_1(rRegL dst, immI1 shift, rFlagsReg cr)
9577 %{
9578 match(Set dst (RShiftL dst shift));
9579 effect(KILL cr);
9581 format %{ "sarq $dst, $shift" %}
9582 opcode(0xD1, 0x7); /* D1 /7 */
9583 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
9584 ins_pipe(ialu_reg);
9585 %}
9587 // Arithmetic shift right by one
9588 instruct sarL_mem_1(memory dst, immI1 shift, rFlagsReg cr)
9589 %{
9590 match(Set dst (StoreL dst (RShiftL (LoadL dst) shift)));
9591 effect(KILL cr);
9593 format %{ "sarq $dst, $shift" %}
9594 opcode(0xD1, 0x7); /* D1 /7 */
9595 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
9596 ins_pipe(ialu_mem_imm);
9597 %}
9599 // Arithmetic Shift Right by 8-bit immediate
9600 instruct sarL_rReg_imm(rRegL dst, immI8 shift, rFlagsReg cr)
9601 %{
9602 match(Set dst (RShiftL dst shift));
9603 effect(KILL cr);
9605 format %{ "sarq $dst, $shift" %}
9606 opcode(0xC1, 0x7); /* C1 /7 ib */
9607 ins_encode(reg_opc_imm_wide(dst, shift));
9608 ins_pipe(ialu_mem_imm);
9609 %}
9611 // Arithmetic Shift Right by 8-bit immediate
9612 instruct sarL_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
9613 %{
9614 match(Set dst (StoreL dst (RShiftL (LoadL dst) shift)));
9615 effect(KILL cr);
9617 format %{ "sarq $dst, $shift" %}
9618 opcode(0xC1, 0x7); /* C1 /7 ib */
9619 ins_encode(REX_mem_wide(dst), OpcP,
9620 RM_opc_mem(secondary, dst), Con8or32(shift));
9621 ins_pipe(ialu_mem_imm);
9622 %}
9624 // Arithmetic Shift Right by variable
9625 instruct sarL_rReg_CL(rRegL dst, rcx_RegI shift, rFlagsReg cr)
9626 %{
9627 match(Set dst (RShiftL dst shift));
9628 effect(KILL cr);
9630 format %{ "sarq $dst, $shift" %}
9631 opcode(0xD3, 0x7); /* D3 /7 */
9632 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
9633 ins_pipe(ialu_reg_reg);
9634 %}
9636 // Arithmetic Shift Right by variable
9637 instruct sarL_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
9638 %{
9639 match(Set dst (StoreL dst (RShiftL (LoadL dst) shift)));
9640 effect(KILL cr);
9642 format %{ "sarq $dst, $shift" %}
9643 opcode(0xD3, 0x7); /* D3 /7 */
9644 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
9645 ins_pipe(ialu_mem_reg);
9646 %}
9648 // Logical shift right by one
9649 instruct shrL_rReg_1(rRegL dst, immI1 shift, rFlagsReg cr)
9650 %{
9651 match(Set dst (URShiftL dst shift));
9652 effect(KILL cr);
9654 format %{ "shrq $dst, $shift" %}
9655 opcode(0xD1, 0x5); /* D1 /5 */
9656 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst ));
9657 ins_pipe(ialu_reg);
9658 %}
9660 // Logical shift right by one
9661 instruct shrL_mem_1(memory dst, immI1 shift, rFlagsReg cr)
9662 %{
9663 match(Set dst (StoreL dst (URShiftL (LoadL dst) shift)));
9664 effect(KILL cr);
9666 format %{ "shrq $dst, $shift" %}
9667 opcode(0xD1, 0x5); /* D1 /5 */
9668 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
9669 ins_pipe(ialu_mem_imm);
9670 %}
9672 // Logical Shift Right by 8-bit immediate
9673 instruct shrL_rReg_imm(rRegL dst, immI8 shift, rFlagsReg cr)
9674 %{
9675 match(Set dst (URShiftL dst shift));
9676 effect(KILL cr);
9678 format %{ "shrq $dst, $shift" %}
9679 opcode(0xC1, 0x5); /* C1 /5 ib */
9680 ins_encode(reg_opc_imm_wide(dst, shift));
9681 ins_pipe(ialu_reg);
9682 %}
9685 // Logical Shift Right by 8-bit immediate
9686 instruct shrL_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
9687 %{
9688 match(Set dst (StoreL dst (URShiftL (LoadL dst) shift)));
9689 effect(KILL cr);
9691 format %{ "shrq $dst, $shift" %}
9692 opcode(0xC1, 0x5); /* C1 /5 ib */
9693 ins_encode(REX_mem_wide(dst), OpcP,
9694 RM_opc_mem(secondary, dst), Con8or32(shift));
9695 ins_pipe(ialu_mem_imm);
9696 %}
9698 // Logical Shift Right by variable
9699 instruct shrL_rReg_CL(rRegL dst, rcx_RegI shift, rFlagsReg cr)
9700 %{
9701 match(Set dst (URShiftL dst shift));
9702 effect(KILL cr);
9704 format %{ "shrq $dst, $shift" %}
9705 opcode(0xD3, 0x5); /* D3 /5 */
9706 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
9707 ins_pipe(ialu_reg_reg);
9708 %}
9710 // Logical Shift Right by variable
9711 instruct shrL_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
9712 %{
9713 match(Set dst (StoreL dst (URShiftL (LoadL dst) shift)));
9714 effect(KILL cr);
9716 format %{ "shrq $dst, $shift" %}
9717 opcode(0xD3, 0x5); /* D3 /5 */
9718 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
9719 ins_pipe(ialu_mem_reg);
9720 %}
9722 // Shift Left by 24, followed by Arithmetic Shift Right by 24.
9723 // This idiom is used by the compiler for the i2b bytecode; it reduces to MOVSBL.
9724 instruct i2b(rRegI dst, rRegI src, immI_24 twentyfour)
9725 %{
9726 match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));
9728 format %{ "movsbl $dst, $src\t# i2b" %}
9729 opcode(0x0F, 0xBE);
9730 ins_encode(REX_reg_breg(dst, src), OpcP, OpcS, reg_reg(dst, src));
9731 ins_pipe(ialu_reg_reg);
9732 %}
9734 // Shift Left by 16, followed by Arithmetic Shift Right by 16.
9735 // This idiom is used by the compiler for the i2s bytecode; it reduces to MOVSWL.
9736 instruct i2s(rRegI dst, rRegI src, immI_16 sixteen)
9737 %{
9738 match(Set dst (RShiftI (LShiftI src sixteen) sixteen));
9740 format %{ "movswl $dst, $src\t# i2s" %}
9741 opcode(0x0F, 0xBF);
9742 ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
9743 ins_pipe(ialu_reg_reg);
9744 %}
9746 // ROL/ROR instructions
9748 // ROL expand
// The *_imm1/_imm8/_CL rules below are pure expansion helpers (effect-only,
// no match rule); the match rules that recognize the OR-of-shifts rotate
// idiom expand into them.
9749 instruct rolI_rReg_imm1(rRegI dst, rFlagsReg cr) %{
9750 effect(KILL cr, USE_DEF dst);
9752 format %{ "roll $dst" %}
9753 opcode(0xD1, 0x0); /* Opcode D1 /0 */
9754 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
9755 ins_pipe(ialu_reg);
9756 %}
9758 instruct rolI_rReg_imm8(rRegI dst, immI8 shift, rFlagsReg cr) %{
9759 effect(USE_DEF dst, USE shift, KILL cr);
9761 format %{ "roll $dst, $shift" %}
9762 opcode(0xC1, 0x0); /* Opcode C1 /0 ib */
9763 ins_encode( reg_opc_imm(dst, shift) );
9764 ins_pipe(ialu_reg);
9765 %}
9767 instruct rolI_rReg_CL(no_rcx_RegI dst, rcx_RegI shift, rFlagsReg cr)
9768 %{
9769 effect(USE_DEF dst, USE shift, KILL cr);
9771 format %{ "roll $dst, $shift" %}
9772 opcode(0xD3, 0x0); /* Opcode D3 /0 */
9773 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
9774 ins_pipe(ialu_reg_reg);
9775 %}
9776 // end of ROL expand
9778 // Rotate Left by one
// Recognizes (x << 1) | (x >>> -1) and emits ROL dst, 1.
9779 instruct rolI_rReg_i1(rRegI dst, immI1 lshift, immI_M1 rshift, rFlagsReg cr)
9780 %{
9781 match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
9783 expand %{
9784 rolI_rReg_imm1(dst, cr);
9785 %}
9786 %}
9788 // Rotate Left by 8-bit immediate
// Predicate checks the two shift counts sum to 0 mod 32, i.e. a true rotate.
9789 instruct rolI_rReg_i8(rRegI dst, immI8 lshift, immI8 rshift, rFlagsReg cr)
9790 %{
9791 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
9792 match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
9794 expand %{
9795 rolI_rReg_imm8(dst, lshift, cr);
9796 %}
9797 %}
9799 // Rotate Left by variable
// (x << s) | (x >>> (0 - s)); ROL's hardware masking makes the count wrap correct.
9800 instruct rolI_rReg_Var_C0(no_rcx_RegI dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
9801 %{
9802 match(Set dst (OrI (LShiftI dst shift) (URShiftI dst (SubI zero shift))));
9804 expand %{
9805 rolI_rReg_CL(dst, shift, cr);
9806 %}
9807 %}
9809 // Rotate Left by variable
// Same, for the (x << s) | (x >>> (32 - s)) spelling.
9810 instruct rolI_rReg_Var_C32(no_rcx_RegI dst, rcx_RegI shift, immI_32 c32, rFlagsReg cr)
9811 %{
9812 match(Set dst (OrI (LShiftI dst shift) (URShiftI dst (SubI c32 shift))));
9814 expand %{
9815 rolI_rReg_CL(dst, shift, cr);
9816 %}
9817 %}
9819 // ROR expand
9820 instruct rorI_rReg_imm1(rRegI dst, rFlagsReg cr)
9821 %{
9822 effect(USE_DEF dst, KILL cr);
9824 format %{ "rorl $dst" %}
9825 opcode(0xD1, 0x1); /* D1 /1 */
9826 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
9827 ins_pipe(ialu_reg);
9828 %}
9830 instruct rorI_rReg_imm8(rRegI dst, immI8 shift, rFlagsReg cr)
9831 %{
9832 effect(USE_DEF dst, USE shift, KILL cr);
9834 format %{ "rorl $dst, $shift" %}
9835 opcode(0xC1, 0x1); /* C1 /1 ib */
9836 ins_encode(reg_opc_imm(dst, shift));
9837 ins_pipe(ialu_reg);
9838 %}
9840 instruct rorI_rReg_CL(no_rcx_RegI dst, rcx_RegI shift, rFlagsReg cr)
9841 %{
9842 effect(USE_DEF dst, USE shift, KILL cr);
9844 format %{ "rorl $dst, $shift" %}
9845 opcode(0xD3, 0x1); /* D3 /1 */
9846 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
9847 ins_pipe(ialu_reg_reg);
9848 %}
9849 // end of ROR expand
9851 // Rotate Right by one
9852 instruct rorI_rReg_i1(rRegI dst, immI1 rshift, immI_M1 lshift, rFlagsReg cr)
9853 %{
9854 match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
9856 expand %{
9857 rorI_rReg_imm1(dst, cr);
9858 %}
9859 %}
9861 // Rotate Right by 8-bit immediate
9862 instruct rorI_rReg_i8(rRegI dst, immI8 rshift, immI8 lshift, rFlagsReg cr)
9863 %{
9864 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
9865 match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
9867 expand %{
9868 rorI_rReg_imm8(dst, rshift, cr);
9869 %}
9870 %}
9872 // Rotate Right by variable
9873 instruct rorI_rReg_Var_C0(no_rcx_RegI dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
9874 %{
9875 match(Set dst (OrI (URShiftI dst shift) (LShiftI dst (SubI zero shift))));
9877 expand %{
9878 rorI_rReg_CL(dst, shift, cr);
9879 %}
9880 %}
9882 // Rotate Right by variable
9883 instruct rorI_rReg_Var_C32(no_rcx_RegI dst, rcx_RegI shift, immI_32 c32, rFlagsReg cr)
9884 %{
9885 match(Set dst (OrI (URShiftI dst shift) (LShiftI dst (SubI c32 shift))));
9887 expand %{
9888 rorI_rReg_CL(dst, shift, cr);
9889 %}
9890 %}
9892 // for long rotate
9893 // ROL expand
9894 instruct rolL_rReg_imm1(rRegL dst, rFlagsReg cr) %{
9895 effect(USE_DEF dst, KILL cr);
9897 format %{ "rolq $dst" %}
9898 opcode(0xD1, 0x0); /* Opcode D1 /0 */
9899 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
9900 ins_pipe(ialu_reg);
9901 %}
9903 instruct rolL_rReg_imm8(rRegL dst, immI8 shift, rFlagsReg cr) %{
9904 effect(USE_DEF dst, USE shift, KILL cr);
9906 format %{ "rolq $dst, $shift" %}
9907 opcode(0xC1, 0x0); /* Opcode C1 /0 ib */
9908 ins_encode( reg_opc_imm_wide(dst, shift) );
9909 ins_pipe(ialu_reg);
9910 %}
9912 instruct rolL_rReg_CL(no_rcx_RegL dst, rcx_RegI shift, rFlagsReg cr)
9913 %{
9914 effect(USE_DEF dst, USE shift, KILL cr);
9916 format %{ "rolq $dst, $shift" %}
9917 opcode(0xD3, 0x0); /* Opcode D3 /0 */
9918 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
9919 ins_pipe(ialu_reg_reg);
9920 %}
9921 // end of ROL expand
9923 // Rotate Left by one
9924 instruct rolL_rReg_i1(rRegL dst, immI1 lshift, immI_M1 rshift, rFlagsReg cr)
9925 %{
9926 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
9928 expand %{
9929 rolL_rReg_imm1(dst, cr);
9930 %}
9931 %}
9933 // Rotate Left by 8-bit immediate
9934 instruct rolL_rReg_i8(rRegL dst, immI8 lshift, immI8 rshift, rFlagsReg cr)
9935 %{
9936 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
9937 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
9939 expand %{
9940 rolL_rReg_imm8(dst, lshift, cr);
9941 %}
9942 %}
9944 // Rotate Left by variable
9945 instruct rolL_rReg_Var_C0(no_rcx_RegL dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
9946 %{
9947 match(Set dst (OrL (LShiftL dst shift) (URShiftL dst (SubI zero shift))));
9949 expand %{
9950 rolL_rReg_CL(dst, shift, cr);
9951 %}
9952 %}
9954 // Rotate Left by variable
9955 instruct rolL_rReg_Var_C64(no_rcx_RegL dst, rcx_RegI shift, immI_64 c64, rFlagsReg cr)
9956 %{
9957 match(Set dst (OrL (LShiftL dst shift) (URShiftL dst (SubI c64 shift))));
9959 expand %{
9960 rolL_rReg_CL(dst, shift, cr);
9961 %}
9962 %}
9964 // ROR expand
// Expand helpers for long rotate-right: no match rules, instantiated only
// by the rorL_rReg_* rules below. All three clobber the flags (KILL cr).
// Rotate right by exactly one bit (rorq dst).
9965 instruct rorL_rReg_imm1(rRegL dst, rFlagsReg cr)
9966 %{
9967 effect(USE_DEF dst, KILL cr);
9969 format %{ "rorq $dst" %}
9970 opcode(0xD1, 0x1); /* D1 /1 */
9971 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
9972 ins_pipe(ialu_reg);
9973 %}
// Rotate right by an 8-bit immediate count (rorq dst, imm8).
9975 instruct rorL_rReg_imm8(rRegL dst, immI8 shift, rFlagsReg cr)
9976 %{
9977 effect(USE_DEF dst, USE shift, KILL cr);
9979 format %{ "rorq $dst, $shift" %}
9980 opcode(0xC1, 0x1); /* C1 /1 ib */
9981 ins_encode(reg_opc_imm_wide(dst, shift));
9982 ins_pipe(ialu_reg);
9983 %}
// Rotate right by a variable count in CL; dst must avoid RCX since RCX
// holds the count.
9985 instruct rorL_rReg_CL(no_rcx_RegL dst, rcx_RegI shift, rFlagsReg cr)
9986 %{
9987 effect(USE_DEF dst, USE shift, KILL cr);
9989 format %{ "rorq $dst, $shift" %}
9990 opcode(0xD3, 0x1); /* D3 /1 */
9991 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
9992 ins_pipe(ialu_reg_reg);
9993 %}
9996 // Rotate Right by one
9997 instruct rorL_rReg_i1(rRegL dst, immI1 rshift, immI_M1 lshift, rFlagsReg cr)
9998 %{
9999 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
10001 expand %{
10002 rorL_rReg_imm1(dst, cr);
10003 %}
10004 %}
10006 // Rotate Right by 8-bit immediate
10007 instruct rorL_rReg_i8(rRegL dst, immI8 rshift, immI8 lshift, rFlagsReg cr)
10008 %{
10009 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
10010 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
10012 expand %{
10013 rorL_rReg_imm8(dst, rshift, cr);
10014 %}
10015 %}
10017 // Rotate Right by variable
10018 instruct rorL_rReg_Var_C0(no_rcx_RegL dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
10019 %{
10020 match(Set dst (OrL (URShiftL dst shift) (LShiftL dst (SubI zero shift))));
10022 expand %{
10023 rorL_rReg_CL(dst, shift, cr);
10024 %}
10025 %}
10027 // Rotate Right by variable
10028 instruct rorL_rReg_Var_C64(no_rcx_RegL dst, rcx_RegI shift, immI_64 c64, rFlagsReg cr)
10029 %{
10030 match(Set dst (OrL (URShiftL dst shift) (LShiftL dst (SubI c64 shift))));
10032 expand %{
10033 rorL_rReg_CL(dst, shift, cr);
10034 %}
10035 %}
10037 // Logical Instructions
10039 // Integer Logical Instructions
10041 // And Instructions
10042 // And Register with Register
// andl dst, src -- flag-writing AND, hence KILL cr.
10043 instruct andI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
10044 %{
10045 match(Set dst (AndI dst src));
10046 effect(KILL cr);
10048 format %{ "andl $dst, $src\t# int" %}
10049 opcode(0x23);
10050 ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
10051 ins_pipe(ialu_reg_reg);
10052 %}
10054 // And Register with Immediate 255
// AND with 0xFF is strength-reduced to movzbl (zero-extend byte), which
// does not write the flags -- note there is no KILL cr here.
10055 instruct andI_rReg_imm255(rRegI dst, immI_255 src)
10056 %{
10057 match(Set dst (AndI dst src));
10059 format %{ "movzbl $dst, $dst\t# int & 0xFF" %}
10060 opcode(0x0F, 0xB6);
10061 ins_encode(REX_reg_breg(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
10062 ins_pipe(ialu_reg);
10063 %}
10065 // And Register with Immediate 255 and promote to long
// (int & 0xFF) widened to long in one movzbl: the zero-extension already
// yields the correct 64-bit value, so no separate ConvI2L is emitted.
10066 instruct andI2L_rReg_imm255(rRegL dst, rRegI src, immI_255 mask)
10067 %{
10068 match(Set dst (ConvI2L (AndI src mask)));
10070 format %{ "movzbl $dst, $src\t# int & 0xFF -> long" %}
10071 opcode(0x0F, 0xB6);
10072 ins_encode(REX_reg_breg(dst, src), OpcP, OpcS, reg_reg(dst, src));
10073 ins_pipe(ialu_reg);
10074 %}
10076 // And Register with Immediate 65535
// AND with 0xFFFF becomes movzwl (zero-extend word); no flag kill.
10077 instruct andI_rReg_imm65535(rRegI dst, immI_65535 src)
10078 %{
10079 match(Set dst (AndI dst src));
10081 format %{ "movzwl $dst, $dst\t# int & 0xFFFF" %}
10082 opcode(0x0F, 0xB7);
10083 ins_encode(REX_reg_reg(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
10084 ins_pipe(ialu_reg);
10085 %}
10087 // And Register with Immediate 65535 and promote to long
10088 instruct andI2L_rReg_imm65535(rRegL dst, rRegI src, immI_65535 mask)
10089 %{
10090 match(Set dst (ConvI2L (AndI src mask)));
10092 format %{ "movzwl $dst, $src\t# int & 0xFFFF -> long" %}
10093 opcode(0x0F, 0xB7);
10094 ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
10095 ins_pipe(ialu_reg);
10096 %}
10098 // And Register with Immediate
// General andl with a 32-bit immediate; OpcSErm/Con8or32 pick the short
// sign-extended imm8 form when the constant fits in 8 bits.
10099 instruct andI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
10100 %{
10101 match(Set dst (AndI dst src));
10102 effect(KILL cr);
10104 format %{ "andl $dst, $src\t# int" %}
10105 opcode(0x81, 0x04); /* Opcode 81 /4 */
10106 ins_encode(OpcSErm(dst, src), Con8or32(src));
10107 ins_pipe(ialu_reg);
10108 %}
10110 // And Register with Memory
10111 instruct andI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
10112 %{
10113 match(Set dst (AndI dst (LoadI src)));
10114 effect(KILL cr);
10116 ins_cost(125);
10117 format %{ "andl $dst, $src\t# int" %}
10118 opcode(0x23);
10119 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
10120 ins_pipe(ialu_reg_mem);
10121 %}
10123 // And Memory with Register
// Read-modify-write form: matches a StoreI of the AND of a LoadI from the
// same address.
10124 instruct andI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
10125 %{
10126 match(Set dst (StoreI dst (AndI (LoadI dst) src)));
10127 effect(KILL cr);
10129 ins_cost(150);
10130 format %{ "andl $dst, $src\t# int" %}
10131 opcode(0x21); /* Opcode 21 /r */
10132 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
10133 ins_pipe(ialu_mem_reg);
10134 %}
10136 // And Memory with Immediate
10137 instruct andI_mem_imm(memory dst, immI src, rFlagsReg cr)
10138 %{
10139 match(Set dst (StoreI dst (AndI (LoadI dst) src)));
10140 effect(KILL cr);
10142 ins_cost(125);
10143 format %{ "andl $dst, $src\t# int" %}
10144 opcode(0x81, 0x4); /* Opcode 81 /4 id */
10145 ins_encode(REX_mem(dst), OpcSE(src),
10146 RM_opc_mem(secondary, dst), Con8or32(src));
10147 ins_pipe(ialu_mem_imm);
10148 %}
10150 // Or Instructions
// Integer OR family: same five shapes as the AND family above
// (reg/reg, reg/imm, reg/mem, mem/reg, mem/imm); all write the flags.
10151 // Or Register with Register
10152 instruct orI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
10153 %{
10154 match(Set dst (OrI dst src));
10155 effect(KILL cr);
10157 format %{ "orl $dst, $src\t# int" %}
10158 opcode(0x0B);
10159 ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
10160 ins_pipe(ialu_reg_reg);
10161 %}
10163 // Or Register with Immediate
10164 instruct orI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
10165 %{
10166 match(Set dst (OrI dst src));
10167 effect(KILL cr);
10169 format %{ "orl $dst, $src\t# int" %}
10170 opcode(0x81, 0x01); /* Opcode 81 /1 id */
10171 ins_encode(OpcSErm(dst, src), Con8or32(src));
10172 ins_pipe(ialu_reg);
10173 %}
10175 // Or Register with Memory
10176 instruct orI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
10177 %{
10178 match(Set dst (OrI dst (LoadI src)));
10179 effect(KILL cr);
10181 ins_cost(125);
10182 format %{ "orl $dst, $src\t# int" %}
10183 opcode(0x0B);
10184 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
10185 ins_pipe(ialu_reg_mem);
10186 %}
10188 // Or Memory with Register
// Read-modify-write: StoreI of (LoadI dst | src) back to the same address.
10189 instruct orI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
10190 %{
10191 match(Set dst (StoreI dst (OrI (LoadI dst) src)));
10192 effect(KILL cr);
10194 ins_cost(150);
10195 format %{ "orl $dst, $src\t# int" %}
10196 opcode(0x09); /* Opcode 09 /r */
10197 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
10198 ins_pipe(ialu_mem_reg);
10199 %}
10201 // Or Memory with Immediate
10202 instruct orI_mem_imm(memory dst, immI src, rFlagsReg cr)
10203 %{
10204 match(Set dst (StoreI dst (OrI (LoadI dst) src)));
10205 effect(KILL cr);
10207 ins_cost(125);
10208 format %{ "orl $dst, $src\t# int" %}
10209 opcode(0x81, 0x1); /* Opcode 81 /1 id */
10210 ins_encode(REX_mem(dst), OpcSE(src),
10211 RM_opc_mem(secondary, dst), Con8or32(src));
10212 ins_pipe(ialu_mem_imm);
10213 %}
10215 // Xor Instructions
// Integer XOR family, mirroring the AND/OR families above.
10216 // Xor Register with Register
10217 instruct xorI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
10218 %{
10219 match(Set dst (XorI dst src));
10220 effect(KILL cr);
10222 format %{ "xorl $dst, $src\t# int" %}
10223 opcode(0x33);
10224 ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
10225 ins_pipe(ialu_reg_reg);
10226 %}
10228 // Xor Register with Immediate -1
// x ^ -1 is strength-reduced to notl, which does not write the flags --
// note there is no cr operand or KILL effect here.
10229 instruct xorI_rReg_im1(rRegI dst, immI_M1 imm) %{
10230 match(Set dst (XorI dst imm));
10232 format %{ "not $dst" %}
10233 ins_encode %{
10234 __ notl($dst$$Register);
10235 %}
10236 ins_pipe(ialu_reg);
10237 %}
10239 // Xor Register with Immediate
10240 instruct xorI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
10241 %{
10242 match(Set dst (XorI dst src));
10243 effect(KILL cr);
10245 format %{ "xorl $dst, $src\t# int" %}
10246 opcode(0x81, 0x06); /* Opcode 81 /6 id */
10247 ins_encode(OpcSErm(dst, src), Con8or32(src));
10248 ins_pipe(ialu_reg);
10249 %}
10251 // Xor Register with Memory
10252 instruct xorI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
10253 %{
10254 match(Set dst (XorI dst (LoadI src)));
10255 effect(KILL cr);
10257 ins_cost(125);
10258 format %{ "xorl $dst, $src\t# int" %}
10259 opcode(0x33);
10260 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
10261 ins_pipe(ialu_reg_mem);
10262 %}
10264 // Xor Memory with Register
// Read-modify-write: StoreI of (LoadI dst ^ src) back to the same address.
10265 instruct xorI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
10266 %{
10267 match(Set dst (StoreI dst (XorI (LoadI dst) src)));
10268 effect(KILL cr);
10270 ins_cost(150);
10271 format %{ "xorl $dst, $src\t# int" %}
10272 opcode(0x31); /* Opcode 31 /r */
10273 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
10274 ins_pipe(ialu_mem_reg);
10275 %}
10277 // Xor Memory with Immediate
10278 instruct xorI_mem_imm(memory dst, immI src, rFlagsReg cr)
10279 %{
10280 match(Set dst (StoreI dst (XorI (LoadI dst) src)));
10281 effect(KILL cr);
10283 ins_cost(125);
10284 format %{ "xorl $dst, $src\t# int" %}
10285 opcode(0x81, 0x6); /* Opcode 81 /6 id */
10286 ins_encode(REX_mem(dst), OpcSE(src),
10287 RM_opc_mem(secondary, dst), Con8or32(src));
10288 ins_pipe(ialu_mem_imm);
10289 %}
10292 // Long Logical Instructions
// 64-bit counterparts of the integer logical rules: the *_wide encoding
// classes emit REX.W so the ALU operates on the full 64-bit register.
10294 // And Instructions
10295 // And Register with Register
10296 instruct andL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
10297 %{
10298 match(Set dst (AndL dst src));
10299 effect(KILL cr);
10301 format %{ "andq $dst, $src\t# long" %}
10302 opcode(0x23);
10303 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
10304 ins_pipe(ialu_reg_reg);
10305 %}
10307 // And Register with Immediate 255
// long & 0xFF becomes a zero-extending byte move; no flag kill needed.
10308 instruct andL_rReg_imm255(rRegL dst, immL_255 src)
10309 %{
10310 match(Set dst (AndL dst src));
10312 format %{ "movzbq $dst, $dst\t# long & 0xFF" %}
10313 opcode(0x0F, 0xB6);
10314 ins_encode(REX_reg_reg_wide(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
10315 ins_pipe(ialu_reg);
10316 %}
10318 // And Register with Immediate 65535
// long & 0xFFFF becomes a zero-extending word move; no flag kill needed.
10319 instruct andL_rReg_imm65535(rRegL dst, immL_65535 src)
10320 %{
10321 match(Set dst (AndL dst src));
10323 format %{ "movzwq $dst, $dst\t# long & 0xFFFF" %}
10324 opcode(0x0F, 0xB7);
10325 ins_encode(REX_reg_reg_wide(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
10326 ins_pipe(ialu_reg);
10327 %}
10329 // And Register with Immediate
// Immediate is immL32: only constants representable as a sign-extended
// 32-bit value can be encoded directly in the instruction.
10330 instruct andL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
10331 %{
10332 match(Set dst (AndL dst src));
10333 effect(KILL cr);
10335 format %{ "andq $dst, $src\t# long" %}
10336 opcode(0x81, 0x04); /* Opcode 81 /4 */
10337 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
10338 ins_pipe(ialu_reg);
10339 %}
10341 // And Register with Memory
10342 instruct andL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
10343 %{
10344 match(Set dst (AndL dst (LoadL src)));
10345 effect(KILL cr);
10347 ins_cost(125);
10348 format %{ "andq $dst, $src\t# long" %}
10349 opcode(0x23);
10350 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
10351 ins_pipe(ialu_reg_mem);
10352 %}
10354 // And Memory with Register
10355 instruct andL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
10356 %{
10357 match(Set dst (StoreL dst (AndL (LoadL dst) src)));
10358 effect(KILL cr);
10360 ins_cost(150);
10361 format %{ "andq $dst, $src\t# long" %}
10362 opcode(0x21); /* Opcode 21 /r */
10363 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
10364 ins_pipe(ialu_mem_reg);
10365 %}
10367 // And Memory with Immediate
10368 instruct andL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
10369 %{
10370 match(Set dst (StoreL dst (AndL (LoadL dst) src)));
10371 effect(KILL cr);
10373 ins_cost(125);
10374 format %{ "andq $dst, $src\t# long" %}
10375 opcode(0x81, 0x4); /* Opcode 81 /4 id */
10376 ins_encode(REX_mem_wide(dst), OpcSE(src),
10377 RM_opc_mem(secondary, dst), Con8or32(src));
10378 ins_pipe(ialu_mem_imm);
10379 %}
10381 // Or Instructions
// Long OR family (64-bit, REX.W via the *_wide encodings).
10382 // Or Register with Register
10383 instruct orL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
10384 %{
10385 match(Set dst (OrL dst src));
10386 effect(KILL cr);
10388 format %{ "orq $dst, $src\t# long" %}
10389 opcode(0x0B);
10390 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
10391 ins_pipe(ialu_reg_reg);
10392 %}
10394 // Use any_RegP to match R15 (TLS register) without spilling.
// Same orq encoding as orL_rReg, but the src operand is a pointer routed
// through CastP2X; any_RegP lets the TLS register participate directly.
10395 instruct orL_rReg_castP2X(rRegL dst, any_RegP src, rFlagsReg cr) %{
10396 match(Set dst (OrL dst (CastP2X src)));
10397 effect(KILL cr);
10399 format %{ "orq $dst, $src\t# long" %}
10400 opcode(0x0B);
10401 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
10402 ins_pipe(ialu_reg_reg);
10403 %}
10406 // Or Register with Immediate
10407 instruct orL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
10408 %{
10409 match(Set dst (OrL dst src));
10410 effect(KILL cr);
10412 format %{ "orq $dst, $src\t# long" %}
10413 opcode(0x81, 0x01); /* Opcode 81 /1 id */
10414 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
10415 ins_pipe(ialu_reg);
10416 %}
10418 // Or Register with Memory
10419 instruct orL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
10420 %{
10421 match(Set dst (OrL dst (LoadL src)));
10422 effect(KILL cr);
10424 ins_cost(125);
10425 format %{ "orq $dst, $src\t# long" %}
10426 opcode(0x0B);
10427 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
10428 ins_pipe(ialu_reg_mem);
10429 %}
10431 // Or Memory with Register
10432 instruct orL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
10433 %{
10434 match(Set dst (StoreL dst (OrL (LoadL dst) src)));
10435 effect(KILL cr);
10437 ins_cost(150);
10438 format %{ "orq $dst, $src\t# long" %}
10439 opcode(0x09); /* Opcode 09 /r */
10440 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
10441 ins_pipe(ialu_mem_reg);
10442 %}
10444 // Or Memory with Immediate
10445 instruct orL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
10446 %{
10447 match(Set dst (StoreL dst (OrL (LoadL dst) src)));
10448 effect(KILL cr);
10450 ins_cost(125);
10451 format %{ "orq $dst, $src\t# long" %}
10452 opcode(0x81, 0x1); /* Opcode 81 /1 id */
10453 ins_encode(REX_mem_wide(dst), OpcSE(src),
10454 RM_opc_mem(secondary, dst), Con8or32(src));
10455 ins_pipe(ialu_mem_imm);
10456 %}
10458 // Xor Instructions
// Long XOR family (64-bit, REX.W via the *_wide encodings).
10459 // Xor Register with Register
10460 instruct xorL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
10461 %{
10462 match(Set dst (XorL dst src));
10463 effect(KILL cr);
10465 format %{ "xorq $dst, $src\t# long" %}
10466 opcode(0x33);
10467 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
10468 ins_pipe(ialu_reg_reg);
10469 %}
10471 // Xor Register with Immediate -1
// x ^ -1L becomes notq; not writes no flags, so no cr operand here.
10472 instruct xorL_rReg_im1(rRegL dst, immL_M1 imm) %{
10473 match(Set dst (XorL dst imm));
10475 format %{ "notq $dst" %}
10476 ins_encode %{
10477 __ notq($dst$$Register);
10478 %}
10479 ins_pipe(ialu_reg);
10480 %}
10482 // Xor Register with Immediate
10483 instruct xorL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
10484 %{
10485 match(Set dst (XorL dst src));
10486 effect(KILL cr);
10488 format %{ "xorq $dst, $src\t# long" %}
10489 opcode(0x81, 0x06); /* Opcode 81 /6 id */
10490 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
10491 ins_pipe(ialu_reg);
10492 %}
10494 // Xor Register with Memory
10495 instruct xorL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
10496 %{
10497 match(Set dst (XorL dst (LoadL src)));
10498 effect(KILL cr);
10500 ins_cost(125);
10501 format %{ "xorq $dst, $src\t# long" %}
10502 opcode(0x33);
10503 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
10504 ins_pipe(ialu_reg_mem);
10505 %}
10507 // Xor Memory with Register
10508 instruct xorL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
10509 %{
10510 match(Set dst (StoreL dst (XorL (LoadL dst) src)));
10511 effect(KILL cr);
10513 ins_cost(150);
10514 format %{ "xorq $dst, $src\t# long" %}
10515 opcode(0x31); /* Opcode 31 /r */
10516 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
10517 ins_pipe(ialu_mem_reg);
10518 %}
10520 // Xor Memory with Immediate
10521 instruct xorL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
10522 %{
10523 match(Set dst (StoreL dst (XorL (LoadL dst) src)));
10524 effect(KILL cr);
10526 ins_cost(125);
10527 format %{ "xorq $dst, $src\t# long" %}
10528 opcode(0x81, 0x6); /* Opcode 81 /6 id */
10529 ins_encode(REX_mem_wide(dst), OpcSE(src),
10530 RM_opc_mem(secondary, dst), Con8or32(src));
10531 ins_pipe(ialu_mem_imm);
10532 %}
10534 // Convert Int to Boolean
// dst = (src != 0) ? 1 : 0, via testl / setnz / movzbl. The three-uop
// sequence is why the pipe is pipe_slow.
10535 instruct convI2B(rRegI dst, rRegI src, rFlagsReg cr)
10536 %{
10537 match(Set dst (Conv2B src));
10538 effect(KILL cr);
10540 format %{ "testl $src, $src\t# ci2b\n\t"
10541 "setnz $dst\n\t"
10542 "movzbl $dst, $dst" %}
10543 ins_encode(REX_reg_reg(src, src), opc_reg_reg(0x85, src, src), // testl
10544 setNZ_reg(dst),
10545 REX_reg_breg(dst, dst), // movzbl
10546 Opcode(0x0F), Opcode(0xB6), reg_reg(dst, dst));
10547 ins_pipe(pipe_slow); // XXX
10548 %}
10550 // Convert Pointer to Boolean
// Same sequence as convI2B but with a 64-bit testq, since the source is
// a pointer.
10551 instruct convP2B(rRegI dst, rRegP src, rFlagsReg cr)
10552 %{
10553 match(Set dst (Conv2B src));
10554 effect(KILL cr);
10556 format %{ "testq $src, $src\t# cp2b\n\t"
10557 "setnz $dst\n\t"
10558 "movzbl $dst, $dst" %}
10559 ins_encode(REX_reg_reg_wide(src, src), opc_reg_reg(0x85, src, src), // testq
10560 setNZ_reg(dst),
10561 REX_reg_breg(dst, dst), // movzbl
10562 Opcode(0x0F), Opcode(0xB6), reg_reg(dst, dst));
10563 ins_pipe(pipe_slow); // XXX
10564 %}
// CmpLTMask: produce -1 if p < q (signed), else 0, as a branch-free mask.
// General form: cmpl / setlt / movzbl / negl.
10566 instruct cmpLTMask(rRegI dst, rRegI p, rRegI q, rFlagsReg cr)
10567 %{
10568 match(Set dst (CmpLTMask p q));
10569 effect(KILL cr);
10571 ins_cost(400); // XXX
10572 format %{ "cmpl $p, $q\t# cmpLTMask\n\t"
10573 "setlt $dst\n\t"
10574 "movzbl $dst, $dst\n\t"
10575 "negl $dst" %}
10576 ins_encode(REX_reg_reg(p, q), opc_reg_reg(0x3B, p, q), // cmpl
10577 setLT_reg(dst),
10578 REX_reg_breg(dst, dst), // movzbl
10579 Opcode(0x0F), Opcode(0xB6), reg_reg(dst, dst),
10580 neg_reg(dst));
10581 ins_pipe(pipe_slow);
10582 %}
// Special case: comparing against zero reduces to an arithmetic shift of
// the sign bit across the register (sarl dst, 31).
10584 instruct cmpLTMask0(rRegI dst, immI0 zero, rFlagsReg cr)
10585 %{
10586 match(Set dst (CmpLTMask dst zero));
10587 effect(KILL cr);
10589 ins_cost(100); // XXX
10590 format %{ "sarl $dst, #31\t# cmpLTMask0" %}
10591 opcode(0xC1, 0x7); /* C1 /7 ib */
10592 ins_encode(reg_opc_imm(dst, 0x1F));
10593 ins_pipe(ialu_reg);
10594 %}
// Fused pattern p += (p < q ? y : 0) computed branch-free: subl sets the
// carry, sbbl materializes the mask in tmp, then andl/addl apply it.
10597 instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y,
10598 rRegI tmp,
10599 rFlagsReg cr)
10600 %{
10601 match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)));
10602 effect(TEMP tmp, KILL cr);
10604 ins_cost(400); // XXX
10605 format %{ "subl $p, $q\t# cadd_cmpLTMask1\n\t"
10606 "sbbl $tmp, $tmp\n\t"
10607 "andl $tmp, $y\n\t"
10608 "addl $p, $tmp" %}
10609 ins_encode(enc_cmpLTP(p, q, y, tmp));
10610 ins_pipe(pipe_cmplt);
10611 %}
// Deliberately disabled variant -- see the author's note in the comment.
10613 /* If I enable this, I encourage spilling in the inner loop of compress.
10614 instruct cadd_cmpLTMask_mem( rRegI p, rRegI q, memory y, rRegI tmp, rFlagsReg cr )
10615 %{
10616 match(Set p (AddI (AndI (CmpLTMask p q) (LoadI y)) (SubI p q)));
10617 effect( TEMP tmp, KILL cr );
10618 ins_cost(400);
10620 format %{ "SUB $p,$q\n\t"
10621 "SBB RCX,RCX\n\t"
10622 "AND RCX,$y\n\t"
10623 "ADD $p,RCX" %}
10624 ins_encode( enc_cmpLTP_mem(p,q,y,tmp) );
10625 %}
10626 */
10628 //---------- FP Instructions------------------------------------------------
// Flag-setting FP compares. The full rFlagsRegU variants append the
// cmpfp_fixup sequence (pushfq/andq/popfq) to patch the flags when the
// compare saw a NaN; the rFlagsRegUCF variants omit it and emit a bare
// ucomiss/ucomisd, at a lower ins_cost.
10630 instruct cmpF_cc_reg(rFlagsRegU cr, regF src1, regF src2)
10631 %{
10632 match(Set cr (CmpF src1 src2));
10634 ins_cost(145);
10635 format %{ "ucomiss $src1, $src2\n\t"
10636 "jnp,s exit\n\t"
10637 "pushfq\t# saw NaN, set CF\n\t"
10638 "andq [rsp], #0xffffff2b\n\t"
10639 "popfq\n"
10640 "exit: nop\t# avoid branch to branch" %}
10641 opcode(0x0F, 0x2E);
10642 ins_encode(REX_reg_reg(src1, src2), OpcP, OpcS, reg_reg(src1, src2),
10643 cmpfp_fixup);
10644 ins_pipe(pipe_slow);
10645 %}
// UCF variant: plain ucomiss, no NaN fix-up.
10647 instruct cmpF_cc_reg_CF(rFlagsRegUCF cr, regF src1, regF src2) %{
10648 match(Set cr (CmpF src1 src2));
10650 ins_cost(145);
10651 format %{ "ucomiss $src1, $src2" %}
10652 ins_encode %{
10653 __ ucomiss($src1$$XMMRegister, $src2$$XMMRegister);
10654 %}
10655 ins_pipe(pipe_slow);
10656 %}
10658 instruct cmpF_cc_mem(rFlagsRegU cr, regF src1, memory src2)
10659 %{
10660 match(Set cr (CmpF src1 (LoadF src2)));
10662 ins_cost(145);
10663 format %{ "ucomiss $src1, $src2\n\t"
10664 "jnp,s exit\n\t"
10665 "pushfq\t# saw NaN, set CF\n\t"
10666 "andq [rsp], #0xffffff2b\n\t"
10667 "popfq\n"
10668 "exit: nop\t# avoid branch to branch" %}
10669 opcode(0x0F, 0x2E);
10670 ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, reg_mem(src1, src2),
10671 cmpfp_fixup);
10672 ins_pipe(pipe_slow);
10673 %}
10675 instruct cmpF_cc_memCF(rFlagsRegUCF cr, regF src1, memory src2) %{
10676 match(Set cr (CmpF src1 (LoadF src2)));
10678 ins_cost(100);
10679 format %{ "ucomiss $src1, $src2" %}
10680 opcode(0x0F, 0x2E);
10681 ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, reg_mem(src1, src2));
10682 ins_pipe(pipe_slow);
10683 %}
// Immediate operand is materialized from the constant table via load_immF.
10685 instruct cmpF_cc_imm(rFlagsRegU cr, regF src1, immF src2)
10686 %{
10687 match(Set cr (CmpF src1 src2));
10689 ins_cost(145);
10690 format %{ "ucomiss $src1, $src2\n\t"
10691 "jnp,s exit\n\t"
10692 "pushfq\t# saw NaN, set CF\n\t"
10693 "andq [rsp], #0xffffff2b\n\t"
10694 "popfq\n"
10695 "exit: nop\t# avoid branch to branch" %}
10696 opcode(0x0F, 0x2E);
10697 ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2),
10698 cmpfp_fixup);
10699 ins_pipe(pipe_slow);
10700 %}
10702 instruct cmpF_cc_immCF(rFlagsRegUCF cr, regF src1, immF src2) %{
10703 match(Set cr (CmpF src1 src2));
10705 ins_cost(100);
10706 format %{ "ucomiss $src1, $src2" %}
10707 opcode(0x0F, 0x2E);
10708 ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2));
10709 ins_pipe(pipe_slow);
10710 %}
// Double versions: identical structure, ucomisd (66 0F 2E) instead of
// ucomiss.
10712 instruct cmpD_cc_reg(rFlagsRegU cr, regD src1, regD src2)
10713 %{
10714 match(Set cr (CmpD src1 src2));
10716 ins_cost(145);
10717 format %{ "ucomisd $src1, $src2\n\t"
10718 "jnp,s exit\n\t"
10719 "pushfq\t# saw NaN, set CF\n\t"
10720 "andq [rsp], #0xffffff2b\n\t"
10721 "popfq\n"
10722 "exit: nop\t# avoid branch to branch" %}
10723 opcode(0x66, 0x0F, 0x2E);
10724 ins_encode(OpcP, REX_reg_reg(src1, src2), OpcS, OpcT, reg_reg(src1, src2),
10725 cmpfp_fixup);
10726 ins_pipe(pipe_slow);
10727 %}
// UCF variant of cmpD_cc_reg: emits a bare ucomisd without the NaN
// fix-up sequence (compare cmpD_cc_reg above). Fix: the format string
// carried a stray trailing " test" token, which polluted disassembly
// output; removed for consistency with cmpF_cc_reg_CF.
10729 instruct cmpD_cc_reg_CF(rFlagsRegUCF cr, regD src1, regD src2) %{
10730 match(Set cr (CmpD src1 src2));
10732 ins_cost(100);
10733 format %{ "ucomisd $src1, $src2" %}
10734 ins_encode %{
10735 __ ucomisd($src1$$XMMRegister, $src2$$XMMRegister);
10736 %}
10737 ins_pipe(pipe_slow);
10738 %}
// Double compares against memory and constant-table operands; same
// full-fixup vs. UCF split as the float versions above.
10740 instruct cmpD_cc_mem(rFlagsRegU cr, regD src1, memory src2)
10741 %{
10742 match(Set cr (CmpD src1 (LoadD src2)));
10744 ins_cost(145);
10745 format %{ "ucomisd $src1, $src2\n\t"
10746 "jnp,s exit\n\t"
10747 "pushfq\t# saw NaN, set CF\n\t"
10748 "andq [rsp], #0xffffff2b\n\t"
10749 "popfq\n"
10750 "exit: nop\t# avoid branch to branch" %}
10751 opcode(0x66, 0x0F, 0x2E);
10752 ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, reg_mem(src1, src2),
10753 cmpfp_fixup);
10754 ins_pipe(pipe_slow);
10755 %}
10757 instruct cmpD_cc_memCF(rFlagsRegUCF cr, regD src1, memory src2) %{
10758 match(Set cr (CmpD src1 (LoadD src2)));
10760 ins_cost(100);
10761 format %{ "ucomisd $src1, $src2" %}
10762 opcode(0x66, 0x0F, 0x2E);
10763 ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, reg_mem(src1, src2));
10764 ins_pipe(pipe_slow);
10765 %}
10767 instruct cmpD_cc_imm(rFlagsRegU cr, regD src1, immD src2)
10768 %{
10769 match(Set cr (CmpD src1 src2));
10771 ins_cost(145);
10772 format %{ "ucomisd $src1, [$src2]\n\t"
10773 "jnp,s exit\n\t"
10774 "pushfq\t# saw NaN, set CF\n\t"
10775 "andq [rsp], #0xffffff2b\n\t"
10776 "popfq\n"
10777 "exit: nop\t# avoid branch to branch" %}
10778 opcode(0x66, 0x0F, 0x2E);
10779 ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2),
10780 cmpfp_fixup);
10781 ins_pipe(pipe_slow);
10782 %}
10784 instruct cmpD_cc_immCF(rFlagsRegUCF cr, regD src1, immD src2) %{
10785 match(Set cr (CmpD src1 src2));
10787 ins_cost(100);
10788 format %{ "ucomisd $src1, [$src2]" %}
10789 opcode(0x66, 0x0F, 0x2E);
10790 ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2));
10791 ins_pipe(pipe_slow);
10792 %}
10794 // Compare into -1,0,1
// Three-way FP compares (CmpF3/CmpD3): ucomiss/ucomisd followed by the
// cmpfp3 sequence that maps below/equal/above (and NaN, via jp) onto the
// integer result -1/0/1 in dst. All variants kill the flags.
10795 instruct cmpF_reg(rRegI dst, regF src1, regF src2, rFlagsReg cr)
10796 %{
10797 match(Set dst (CmpF3 src1 src2));
10798 effect(KILL cr);
10800 ins_cost(275);
10801 format %{ "ucomiss $src1, $src2\n\t"
10802 "movl $dst, #-1\n\t"
10803 "jp,s done\n\t"
10804 "jb,s done\n\t"
10805 "setne $dst\n\t"
10806 "movzbl $dst, $dst\n"
10807 "done:" %}
10809 opcode(0x0F, 0x2E);
10810 ins_encode(REX_reg_reg(src1, src2), OpcP, OpcS, reg_reg(src1, src2),
10811 cmpfp3(dst));
10812 ins_pipe(pipe_slow);
10813 %}
10815 // Compare into -1,0,1
10816 instruct cmpF_mem(rRegI dst, regF src1, memory src2, rFlagsReg cr)
10817 %{
10818 match(Set dst (CmpF3 src1 (LoadF src2)));
10819 effect(KILL cr);
10821 ins_cost(275);
10822 format %{ "ucomiss $src1, $src2\n\t"
10823 "movl $dst, #-1\n\t"
10824 "jp,s done\n\t"
10825 "jb,s done\n\t"
10826 "setne $dst\n\t"
10827 "movzbl $dst, $dst\n"
10828 "done:" %}
10830 opcode(0x0F, 0x2E);
10831 ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, reg_mem(src1, src2),
10832 cmpfp3(dst));
10833 ins_pipe(pipe_slow);
10834 %}
10836 // Compare into -1,0,1
10837 instruct cmpF_imm(rRegI dst, regF src1, immF src2, rFlagsReg cr)
10838 %{
10839 match(Set dst (CmpF3 src1 src2));
10840 effect(KILL cr);
10842 ins_cost(275);
10843 format %{ "ucomiss $src1, [$src2]\n\t"
10844 "movl $dst, #-1\n\t"
10845 "jp,s done\n\t"
10846 "jb,s done\n\t"
10847 "setne $dst\n\t"
10848 "movzbl $dst, $dst\n"
10849 "done:" %}
10851 opcode(0x0F, 0x2E);
10852 ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2),
10853 cmpfp3(dst));
10854 ins_pipe(pipe_slow);
10855 %}
10857 // Compare into -1,0,1
// Double versions: 66 0F 2E (ucomisd) instead of 0F 2E (ucomiss).
10858 instruct cmpD_reg(rRegI dst, regD src1, regD src2, rFlagsReg cr)
10859 %{
10860 match(Set dst (CmpD3 src1 src2));
10861 effect(KILL cr);
10863 ins_cost(275);
10864 format %{ "ucomisd $src1, $src2\n\t"
10865 "movl $dst, #-1\n\t"
10866 "jp,s done\n\t"
10867 "jb,s done\n\t"
10868 "setne $dst\n\t"
10869 "movzbl $dst, $dst\n"
10870 "done:" %}
10872 opcode(0x66, 0x0F, 0x2E);
10873 ins_encode(OpcP, REX_reg_reg(src1, src2), OpcS, OpcT, reg_reg(src1, src2),
10874 cmpfp3(dst));
10875 ins_pipe(pipe_slow);
10876 %}
10878 // Compare into -1,0,1
10879 instruct cmpD_mem(rRegI dst, regD src1, memory src2, rFlagsReg cr)
10880 %{
10881 match(Set dst (CmpD3 src1 (LoadD src2)));
10882 effect(KILL cr);
10884 ins_cost(275);
10885 format %{ "ucomisd $src1, $src2\n\t"
10886 "movl $dst, #-1\n\t"
10887 "jp,s done\n\t"
10888 "jb,s done\n\t"
10889 "setne $dst\n\t"
10890 "movzbl $dst, $dst\n"
10891 "done:" %}
10893 opcode(0x66, 0x0F, 0x2E);
10894 ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, reg_mem(src1, src2),
10895 cmpfp3(dst));
10896 ins_pipe(pipe_slow);
10897 %}
10899 // Compare into -1,0,1
10900 instruct cmpD_imm(rRegI dst, regD src1, immD src2, rFlagsReg cr)
10901 %{
10902 match(Set dst (CmpD3 src1 src2));
10903 effect(KILL cr);
10905 ins_cost(275);
10906 format %{ "ucomisd $src1, [$src2]\n\t"
10907 "movl $dst, #-1\n\t"
10908 "jp,s done\n\t"
10909 "jb,s done\n\t"
10910 "setne $dst\n\t"
10911 "movzbl $dst, $dst\n"
10912 "done:" %}
10914 opcode(0x66, 0x0F, 0x2E);
10915 ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2),
10916 cmpfp3(dst));
10917 ins_pipe(pipe_slow);
10918 %}
// Scalar SSE FP arithmetic: add/sub/mul for float (F3-prefixed addss/
// subss/mulss) and double (F2-prefixed addsd/subsd/mulsd). Each op comes
// in three operand shapes: reg/reg, reg/mem (folded LoadF/LoadD), and
// reg/imm (constant loaded from the constant table via load_immF/D).
// None of these touch the integer flags.
10920 instruct addF_reg(regF dst, regF src)
10921 %{
10922 match(Set dst (AddF dst src));
10924 format %{ "addss $dst, $src" %}
10925 ins_cost(150); // XXX
10926 opcode(0xF3, 0x0F, 0x58);
10927 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10928 ins_pipe(pipe_slow);
10929 %}
10931 instruct addF_mem(regF dst, memory src)
10932 %{
10933 match(Set dst (AddF dst (LoadF src)));
10935 format %{ "addss $dst, $src" %}
10936 ins_cost(150); // XXX
10937 opcode(0xF3, 0x0F, 0x58);
10938 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10939 ins_pipe(pipe_slow);
10940 %}
10942 instruct addF_imm(regF dst, immF src)
10943 %{
10944 match(Set dst (AddF dst src));
10946 format %{ "addss $dst, [$src]" %}
10947 ins_cost(150); // XXX
10948 opcode(0xF3, 0x0F, 0x58);
10949 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
10950 ins_pipe(pipe_slow);
10951 %}
10953 instruct addD_reg(regD dst, regD src)
10954 %{
10955 match(Set dst (AddD dst src));
10957 format %{ "addsd $dst, $src" %}
10958 ins_cost(150); // XXX
10959 opcode(0xF2, 0x0F, 0x58);
10960 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10961 ins_pipe(pipe_slow);
10962 %}
10964 instruct addD_mem(regD dst, memory src)
10965 %{
10966 match(Set dst (AddD dst (LoadD src)));
10968 format %{ "addsd $dst, $src" %}
10969 ins_cost(150); // XXX
10970 opcode(0xF2, 0x0F, 0x58);
10971 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10972 ins_pipe(pipe_slow);
10973 %}
10975 instruct addD_imm(regD dst, immD src)
10976 %{
10977 match(Set dst (AddD dst src));
10979 format %{ "addsd $dst, [$src]" %}
10980 ins_cost(150); // XXX
10981 opcode(0xF2, 0x0F, 0x58);
10982 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
10983 ins_pipe(pipe_slow);
10984 %}
10986 instruct subF_reg(regF dst, regF src)
10987 %{
10988 match(Set dst (SubF dst src));
10990 format %{ "subss $dst, $src" %}
10991 ins_cost(150); // XXX
10992 opcode(0xF3, 0x0F, 0x5C);
10993 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10994 ins_pipe(pipe_slow);
10995 %}
10997 instruct subF_mem(regF dst, memory src)
10998 %{
10999 match(Set dst (SubF dst (LoadF src)));
11001 format %{ "subss $dst, $src" %}
11002 ins_cost(150); // XXX
11003 opcode(0xF3, 0x0F, 0x5C);
11004 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
11005 ins_pipe(pipe_slow);
11006 %}
11008 instruct subF_imm(regF dst, immF src)
11009 %{
11010 match(Set dst (SubF dst src));
11012 format %{ "subss $dst, [$src]" %}
11013 ins_cost(150); // XXX
11014 opcode(0xF3, 0x0F, 0x5C);
11015 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
11016 ins_pipe(pipe_slow);
11017 %}
11019 instruct subD_reg(regD dst, regD src)
11020 %{
11021 match(Set dst (SubD dst src));
11023 format %{ "subsd $dst, $src" %}
11024 ins_cost(150); // XXX
11025 opcode(0xF2, 0x0F, 0x5C);
11026 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
11027 ins_pipe(pipe_slow);
11028 %}
11030 instruct subD_mem(regD dst, memory src)
11031 %{
11032 match(Set dst (SubD dst (LoadD src)));
11034 format %{ "subsd $dst, $src" %}
11035 ins_cost(150); // XXX
11036 opcode(0xF2, 0x0F, 0x5C);
11037 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
11038 ins_pipe(pipe_slow);
11039 %}
11041 instruct subD_imm(regD dst, immD src)
11042 %{
11043 match(Set dst (SubD dst src));
11045 format %{ "subsd $dst, [$src]" %}
11046 ins_cost(150); // XXX
11047 opcode(0xF2, 0x0F, 0x5C);
11048 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
11049 ins_pipe(pipe_slow);
11050 %}
11052 instruct mulF_reg(regF dst, regF src)
11053 %{
11054 match(Set dst (MulF dst src));
11056 format %{ "mulss $dst, $src" %}
11057 ins_cost(150); // XXX
11058 opcode(0xF3, 0x0F, 0x59);
11059 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
11060 ins_pipe(pipe_slow);
11061 %}
11063 instruct mulF_mem(regF dst, memory src)
11064 %{
11065 match(Set dst (MulF dst (LoadF src)));
11067 format %{ "mulss $dst, $src" %}
11068 ins_cost(150); // XXX
11069 opcode(0xF3, 0x0F, 0x59);
11070 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
11071 ins_pipe(pipe_slow);
11072 %}
11074 instruct mulF_imm(regF dst, immF src)
11075 %{
11076 match(Set dst (MulF dst src));
11078 format %{ "mulss $dst, [$src]" %}
11079 ins_cost(150); // XXX
11080 opcode(0xF3, 0x0F, 0x59);
11081 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
11082 ins_pipe(pipe_slow);
11083 %}
11085 instruct mulD_reg(regD dst, regD src)
11086 %{
11087 match(Set dst (MulD dst src));
11089 format %{ "mulsd $dst, $src" %}
11090 ins_cost(150); // XXX
11091 opcode(0xF2, 0x0F, 0x59);
11092 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
11093 ins_pipe(pipe_slow);
11094 %}
11096 instruct mulD_mem(regD dst, memory src)
11097 %{
11098 match(Set dst (MulD dst (LoadD src)));
11100 format %{ "mulsd $dst, $src" %}
11101 ins_cost(150); // XXX
11102 opcode(0xF2, 0x0F, 0x59);
11103 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
11104 ins_pipe(pipe_slow);
11105 %}
11107 instruct mulD_imm(regD dst, immD src)
11108 %{
11109 match(Set dst (MulD dst src));
11111 format %{ "mulsd $dst, [$src]" %}
11112 ins_cost(150); // XXX
11113 opcode(0xF2, 0x0F, 0x59);
11114 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
11115 ins_pipe(pipe_slow);
11116 %}
// Single-precision divide, register-register (SSE divss).
instruct divF_reg(regF dst, regF src)
%{
  match(Set dst (DivF dst src));

  format %{ "divss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x5E);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

// Single-precision divide, memory operand.
instruct divF_mem(regF dst, memory src)
%{
  match(Set dst (DivF dst (LoadF src)));

  format %{ "divss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x5E);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// Single-precision divide, constant operand (loaded from memory).
instruct divF_imm(regF dst, immF src)
%{
  match(Set dst (DivF dst src));

  format %{ "divss $dst, [$src]" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x5E);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
  ins_pipe(pipe_slow);
%}

// Double-precision divide, register-register (SSE2 divsd).
instruct divD_reg(regD dst, regD src)
%{
  match(Set dst (DivD dst src));

  format %{ "divsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x5E);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

// Double-precision divide, memory operand.
instruct divD_mem(regD dst, memory src)
%{
  match(Set dst (DivD dst (LoadD src)));

  format %{ "divsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x5E);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// Double-precision divide, constant operand (loaded from memory).
instruct divD_imm(regD dst, immD src)
%{
  match(Set dst (DivD dst src));

  format %{ "divsd $dst, [$src]" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x5E);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
  ins_pipe(pipe_slow);
%}
// Single-precision square root.  Matches the ConvD2F(SqrtD(ConvF2D x)) shape
// the ideal graph produces for (float)Math.sqrt((double)x), collapsing it to
// one sqrtss -- valid because sqrt is exactly representable in this narrowing.
instruct sqrtF_reg(regF dst, regF src)
%{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  format %{ "sqrtss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x51);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

// As sqrtF_reg, with the source loaded from memory.
instruct sqrtF_mem(regF dst, memory src)
%{
  match(Set dst (ConvD2F (SqrtD (ConvF2D (LoadF src)))));

  format %{ "sqrtss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x51);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// As sqrtF_reg, with a float constant source (loaded from memory).
instruct sqrtF_imm(regF dst, immF src)
%{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  format %{ "sqrtss $dst, [$src]" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x51);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
  ins_pipe(pipe_slow);
%}

// Double-precision square root, register-register (SSE2 sqrtsd).
instruct sqrtD_reg(regD dst, regD src)
%{
  match(Set dst (SqrtD src));

  format %{ "sqrtsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x51);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

// Double-precision square root, memory operand.
instruct sqrtD_mem(regD dst, memory src)
%{
  match(Set dst (SqrtD (LoadD src)));

  format %{ "sqrtsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x51);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// Double-precision square root, constant operand (loaded from memory).
instruct sqrtD_imm(regD dst, immD src)
%{
  match(Set dst (SqrtD src));

  format %{ "sqrtsd $dst, [$src]" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x51);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
  ins_pipe(pipe_slow);
%}
// Float absolute value: clear the sign bit by ANDing with 0x7fffffff.
instruct absF_reg(regF dst)
%{
  match(Set dst (AbsF dst));

  format %{ "andps $dst, [0x7fffffff]\t# abs float by sign masking" %}
  ins_encode(absF_encoding(dst));
  ins_pipe(pipe_slow);
%}

// Double absolute value: clear the sign bit by ANDing with 0x7fffffffffffffff.
instruct absD_reg(regD dst)
%{
  match(Set dst (AbsD dst));

  format %{ "andpd $dst, [0x7fffffffffffffff]\t"
            "# abs double by sign masking" %}
  ins_encode(absD_encoding(dst));
  ins_pipe(pipe_slow);
%}

// Float negate: flip the sign bit by XORing with 0x80000000.
instruct negF_reg(regF dst)
%{
  match(Set dst (NegF dst));

  format %{ "xorps $dst, [0x80000000]\t# neg float by sign flipping" %}
  ins_encode(negF_encoding(dst));
  ins_pipe(pipe_slow);
%}

// Double negate: flip the sign bit by XORing with 0x8000000000000000.
instruct negD_reg(regD dst)
%{
  match(Set dst (NegD dst));

  format %{ "xorpd $dst, [0x8000000000000000]\t"
            "# neg double by sign flipping" %}
  ins_encode(negD_encoding(dst));
  ins_pipe(pipe_slow);
%}
// -----------Trig and Trancendental Instructions------------------------------
// These route the XMM value through the x87 FPU stack (Push_SrcXD /
// Push_ResultXD) because the transcendental opcodes exist only in x87.

// Double cosine via x87 fcos (D9 FF).
instruct cosD_reg(regD dst) %{
  match(Set dst (CosD dst));

  format %{ "dcos $dst\n\t" %}
  opcode(0xD9, 0xFF);
  ins_encode( Push_SrcXD(dst), OpcP, OpcS, Push_ResultXD(dst) );
  ins_pipe( pipe_slow );
%}

// Double sine via x87 fsin (D9 FE).
instruct sinD_reg(regD dst) %{
  match(Set dst (SinD dst));

  format %{ "dsin $dst\n\t" %}
  opcode(0xD9, 0xFE);
  ins_encode( Push_SrcXD(dst), OpcP, OpcS, Push_ResultXD(dst) );
  ins_pipe( pipe_slow );
%}

// Double tangent via x87 fptan; the extra fstp pops the 1.0 that fptan
// pushes on top of the result.
instruct tanD_reg(regD dst) %{
  match(Set dst (TanD dst));

  format %{ "dtan $dst\n\t" %}
  ins_encode( Push_SrcXD(dst),
              Opcode(0xD9), Opcode(0xF2),   //fptan
              Opcode(0xDD), Opcode(0xD8),   //fstp st
              Push_ResultXD(dst) );
  ins_pipe( pipe_slow );
%}

// Base-10 logarithm: log10(x) = log10(2) * log2(x) via fldlg2 + fyl2x.
instruct log10D_reg(regD dst) %{
  // The source and result Double operands in XMM registers
  match(Set dst (Log10D dst));
  // fldlg2       ; push log_10(2) on the FPU stack; full 80-bit number
  // fyl2x        ; compute log_10(2) * log_2(x)
  format %{ "fldlg2\t\t\t#Log10\n\t"
            "fyl2x\t\t\t# Q=Log10*Log_2(x)\n\t"
         %}
  ins_encode(Opcode(0xD9), Opcode(0xEC),   // fldlg2
             Push_SrcXD(dst),
             Opcode(0xD9), Opcode(0xF1),   // fyl2x
             Push_ResultXD(dst));

  ins_pipe( pipe_slow );
%}

// Natural logarithm: ln(x) = ln(2) * log2(x) via fldln2 + fyl2x.
instruct logD_reg(regD dst) %{
  // The source and result Double operands in XMM registers
  match(Set dst (LogD dst));
  // fldln2       ; push log_e(2) on the FPU stack; full 80-bit number
  // fyl2x        ; compute log_e(2) * log_2(x)
  format %{ "fldln2\t\t\t#Log_e\n\t"
            "fyl2x\t\t\t# Q=Log_e*Log_2(x)\n\t"
         %}
  ins_encode( Opcode(0xD9), Opcode(0xED),   // fldln2
              Push_SrcXD(dst),
              Opcode(0xD9), Opcode(0xF1),   // fyl2x
              Push_ResultXD(dst));
  ins_pipe( pipe_slow );
%}
//----------Arithmetic Conversion Instructions---------------------------------

// RoundFloat is a no-op on amd64: SSE arithmetic already produces values
// rounded to float precision, so no code is emitted (empty encoding, cost 0).
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}

// RoundDouble is likewise a no-op on amd64.
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
// Float-to-double widening, register source (SSE cvtss2sd).
instruct convF2D_reg_reg(regD dst, regF src)
%{
  match(Set dst (ConvF2D src));

  format %{ "cvtss2sd $dst, $src" %}
  opcode(0xF3, 0x0F, 0x5A);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

// Float-to-double widening, memory source.
instruct convF2D_reg_mem(regD dst, memory src)
%{
  match(Set dst (ConvF2D (LoadF src)));

  format %{ "cvtss2sd $dst, $src" %}
  opcode(0xF3, 0x0F, 0x5A);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

// Double-to-float narrowing, register source (SSE2 cvtsd2ss).
instruct convD2F_reg_reg(regF dst, regD src)
%{
  match(Set dst (ConvD2F src));

  format %{ "cvtsd2ss $dst, $src" %}
  opcode(0xF2, 0x0F, 0x5A);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

// Double-to-float narrowing, memory source.
instruct convD2F_reg_mem(regF dst, memory src)
%{
  match(Set dst (ConvD2F (LoadD src)));

  format %{ "cvtsd2ss $dst, $src" %}
  opcode(0xF2, 0x0F, 0x5A);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow); // XXX
%}
// XXX do mem variants
// FP-to-integer truncating conversions.  cvttss2si/cvttsd2si return the
// "integer indefinite" value (0x80000000 / 0x8000000000000000) for NaN and
// out-of-range inputs; when that sentinel is seen, a fixup stub is called to
// produce the Java-mandated saturation result.  The compare clobbers flags,
// hence KILL cr.
instruct convF2I_reg_reg(rRegI dst, regF src, rFlagsReg cr)
%{
  match(Set dst (ConvF2I src));
  effect(KILL cr);

  format %{ "cvttss2sil $dst, $src\t# f2i\n\t"
            "cmpl $dst, #0x80000000\n\t"
            "jne,s done\n\t"
            "subq rsp, #8\n\t"
            "movss [rsp], $src\n\t"
            "call f2i_fixup\n\t"
            "popq $dst\n"
            "done: "%}
  opcode(0xF3, 0x0F, 0x2C);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src),
             f2i_fixup(dst, src));
  ins_pipe(pipe_slow);
%}

// Float-to-long; same sentinel-and-fixup scheme with a 64-bit (REX.W) convert.
instruct convF2L_reg_reg(rRegL dst, regF src, rFlagsReg cr)
%{
  match(Set dst (ConvF2L src));
  effect(KILL cr);

  format %{ "cvttss2siq $dst, $src\t# f2l\n\t"
            "cmpq $dst, [0x8000000000000000]\n\t"
            "jne,s done\n\t"
            "subq rsp, #8\n\t"
            "movss [rsp], $src\n\t"
            "call f2l_fixup\n\t"
            "popq $dst\n"
            "done: "%}
  opcode(0xF3, 0x0F, 0x2C);
  ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src),
             f2l_fixup(dst, src));
  ins_pipe(pipe_slow);
%}

// Double-to-int; sentinel-and-fixup as above using cvttsd2sil.
instruct convD2I_reg_reg(rRegI dst, regD src, rFlagsReg cr)
%{
  match(Set dst (ConvD2I src));
  effect(KILL cr);

  format %{ "cvttsd2sil $dst, $src\t# d2i\n\t"
            "cmpl $dst, #0x80000000\n\t"
            "jne,s done\n\t"
            "subq rsp, #8\n\t"
            "movsd [rsp], $src\n\t"
            "call d2i_fixup\n\t"
            "popq $dst\n"
            "done: "%}
  opcode(0xF2, 0x0F, 0x2C);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src),
             d2i_fixup(dst, src));
  ins_pipe(pipe_slow);
%}

// Double-to-long; sentinel-and-fixup with a 64-bit (REX.W) convert.
instruct convD2L_reg_reg(rRegL dst, regD src, rFlagsReg cr)
%{
  match(Set dst (ConvD2L src));
  effect(KILL cr);

  format %{ "cvttsd2siq $dst, $src\t# d2l\n\t"
            "cmpq $dst, [0x8000000000000000]\n\t"
            "jne,s done\n\t"
            "subq rsp, #8\n\t"
            "movsd [rsp], $src\n\t"
            "call d2l_fixup\n\t"
            "popq $dst\n"
            "done: "%}
  opcode(0xF2, 0x0F, 0x2C);
  ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src),
             d2l_fixup(dst, src));
  ins_pipe(pipe_slow);
%}
// Int-to-float via cvtsi2ssl; used when the XMM-based variant is disabled
// (see convXI2F_reg below, guarded by the complementary predicate).
instruct convI2F_reg_reg(regF dst, rRegI src)
%{
  predicate(!UseXmmI2F);
  match(Set dst (ConvI2F src));

  format %{ "cvtsi2ssl $dst, $src\t# i2f" %}
  opcode(0xF3, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

// Int-to-float with a memory source.
instruct convI2F_reg_mem(regF dst, memory src)
%{
  match(Set dst (ConvI2F (LoadI src)));

  format %{ "cvtsi2ssl $dst, $src\t# i2f" %}
  opcode(0xF3, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

// Int-to-double via cvtsi2sdl; complementary to convXI2D_reg below.
instruct convI2D_reg_reg(regD dst, rRegI src)
%{
  predicate(!UseXmmI2D);
  match(Set dst (ConvI2D src));

  format %{ "cvtsi2sdl $dst, $src\t# i2d" %}
  opcode(0xF2, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

// Int-to-double with a memory source.
instruct convI2D_reg_mem(regD dst, memory src)
%{
  match(Set dst (ConvI2D (LoadI src)));

  format %{ "cvtsi2sdl $dst, $src\t# i2d" %}
  opcode(0xF2, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

// Int-to-float done entirely in XMM (movd + cvtdq2ps), enabled by UseXmmI2F;
// avoids a GPR->XMM cvtsi2ss dependency on some CPUs.
instruct convXI2F_reg(regF dst, rRegI src)
%{
  predicate(UseXmmI2F);
  match(Set dst (ConvI2F src));

  format %{ "movdl $dst, $src\n\t"
            "cvtdq2psl $dst, $dst\t# i2f" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ cvtdq2ps($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe(pipe_slow); // XXX
%}

// Int-to-double done entirely in XMM (movd + cvtdq2pd), enabled by UseXmmI2D.
instruct convXI2D_reg(regD dst, rRegI src)
%{
  predicate(UseXmmI2D);
  match(Set dst (ConvI2D src));

  format %{ "movdl $dst, $src\n\t"
            "cvtdq2pdl $dst, $dst\t# i2d" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ cvtdq2pd($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe(pipe_slow); // XXX
%}
// Long-to-float, register source (cvtsi2ssq, REX.W form).
instruct convL2F_reg_reg(regF dst, rRegL src)
%{
  match(Set dst (ConvL2F src));

  format %{ "cvtsi2ssq $dst, $src\t# l2f" %}
  opcode(0xF3, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

// Long-to-float, memory source.
instruct convL2F_reg_mem(regF dst, memory src)
%{
  match(Set dst (ConvL2F (LoadL src)));

  format %{ "cvtsi2ssq $dst, $src\t# l2f" %}
  opcode(0xF3, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_mem_wide(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

// Long-to-double, register source (cvtsi2sdq, REX.W form).
instruct convL2D_reg_reg(regD dst, rRegL src)
%{
  match(Set dst (ConvL2D src));

  format %{ "cvtsi2sdq $dst, $src\t# l2d" %}
  opcode(0xF2, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

// Long-to-double, memory source.
instruct convL2D_reg_mem(regD dst, memory src)
%{
  match(Set dst (ConvL2D (LoadL src)));

  format %{ "cvtsi2sdq $dst, $src\t# l2d" %}
  opcode(0xF2, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_mem_wide(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow); // XXX
%}
// Sign-extend int to long (movslq).
instruct convI2L_reg_reg(rRegL dst, rRegI src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(125);
  format %{ "movslq $dst, $src\t# i2l" %}
  opcode(0x63); // needs REX.W
  ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst,src));
  ins_pipe(ialu_reg_reg);
%}

// instruct convI2L_reg_reg_foo(rRegL dst, rRegI src)
// %{
//   match(Set dst (ConvI2L src));
// //   predicate(_kids[0]->_leaf->as_Type()->type()->is_int()->_lo >= 0 &&
// //             _kids[0]->_leaf->as_Type()->type()->is_int()->_hi >= 0);
//   predicate(((const TypeNode*) n)->type()->is_long()->_hi ==
//             (unsigned int) ((const TypeNode*) n)->type()->is_long()->_hi &&
//             ((const TypeNode*) n)->type()->is_long()->_lo ==
//             (unsigned int) ((const TypeNode*) n)->type()->is_long()->_lo);
//
//   format %{ "movl $dst, $src\t# unsigned i2l" %}
//   ins_encode(enc_copy(dst, src));
// //   opcode(0x63); // needs REX.W
// //   ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst,src));
//   ins_pipe(ialu_reg_reg);
// %}

// Zero-extend convert int to long
// Matches (AndL (ConvI2L x) 0xFFFFFFFF); a 32-bit movl already clears the
// upper half on amd64, so a plain copy suffices.
instruct convI2L_reg_reg_zex(rRegL dst, rRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend\n\t" %}
  ins_encode(enc_copy(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// Zero-extend convert int to long
// Memory-source form: a 32-bit load (opcode 8B) zero-extends implicitly.
instruct convI2L_reg_mem_zex(rRegL dst, memory src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI src)) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend\n\t" %}
  opcode(0x8B);
  ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// Zero out the high 32 bits of a long (AndL with 0xFFFFFFFF) via a 32-bit move.
instruct zerox_long_reg_reg(rRegL dst, rRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl $dst, $src\t# zero-extend long" %}
  ins_encode(enc_copy_always(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// Long-to-int truncation: a 32-bit register copy discards the upper half.
instruct convL2I_reg_reg(rRegI dst, rRegL src)
%{
  match(Set dst (ConvL2I src));

  format %{ "movl $dst, $src\t# l2i" %}
  ins_encode(enc_copy_always(dst, src));
  ins_pipe(ialu_reg_reg);
%}
// Raw-bits moves (Float.floatToRawIntBits et al.) routed through a stack slot.

// Reinterpret float stack slot as int: plain 32-bit load.
instruct MoveF2I_stack_reg(rRegI dst, stackSlotF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);

  ins_cost(125);
  format %{ "movl $dst, $src\t# MoveF2I_stack_reg" %}
  opcode(0x8B);
  ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// Reinterpret int stack slot as float: movss load into XMM.
instruct MoveI2F_stack_reg(regF dst, stackSlotI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);

  ins_cost(125);
  format %{ "movss $dst, $src\t# MoveI2F_stack_reg" %}
  opcode(0xF3, 0x0F, 0x10);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// Reinterpret double stack slot as long: plain 64-bit load.
instruct MoveD2L_stack_reg(rRegL dst, stackSlotD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);

  ins_cost(125);
  format %{ "movq $dst, $src\t# MoveD2L_stack_reg" %}
  opcode(0x8B);
  ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// Reinterpret long stack slot as double using movlpd; leaves the upper half
// of the XMM register unchanged, hence only used when
// UseXmmLoadAndClearUpper is off.
instruct MoveL2D_stack_reg_partial(regD dst, stackSlotL src) %{
  predicate(!UseXmmLoadAndClearUpper);
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);

  ins_cost(125);
  format %{ "movlpd $dst, $src\t# MoveL2D_stack_reg" %}
  opcode(0x66, 0x0F, 0x12);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// Reinterpret long stack slot as double using movsd, which clears the upper
// half of the XMM register; preferred when UseXmmLoadAndClearUpper is on.
instruct MoveL2D_stack_reg(regD dst, stackSlotL src) %{
  predicate(UseXmmLoadAndClearUpper);
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);

  ins_cost(125);
  format %{ "movsd $dst, $src\t# MoveL2D_stack_reg" %}
  opcode(0xF2, 0x0F, 0x10);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}
// Store float register bits to an int stack slot (movss store).
instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);

  ins_cost(95); // XXX
  format %{ "movss $dst, $src\t# MoveF2I_reg_stack" %}
  opcode(0xF3, 0x0F, 0x11);
  ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
  ins_pipe(pipe_slow);
%}

// Store int register bits to a float stack slot (32-bit store, opcode 89).
instruct MoveI2F_reg_stack(stackSlotF dst, rRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);

  ins_cost(100);
  format %{ "movl $dst, $src\t# MoveI2F_reg_stack" %}
  opcode(0x89);
  ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe( ialu_mem_reg );
%}
// Store double register bits to a long stack slot (movsd store).
// Fix: the format string previously said "MoveL2D_reg_stack", mislabeling
// this instruction in disassembly/debug output; it now matches the rule name.
instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);

  ins_cost(95); // XXX
  format %{ "movsd $dst, $src\t# MoveD2L_reg_stack" %}
  opcode(0xF2, 0x0F, 0x11);
  ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
  ins_pipe(pipe_slow);
%}
// Store long register bits to a double stack slot (64-bit store, opcode 89).
instruct MoveL2D_reg_stack(stackSlotD dst, rRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);

  ins_cost(100);
  format %{ "movq $dst, $src\t# MoveL2D_reg_stack" %}
  opcode(0x89);
  ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}
// Direct register-to-register raw-bits moves via movd/movdq (no stack slot).

// Float bits -> int register.
instruct MoveF2I_reg_reg(rRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "movd $dst,$src\t# MoveF2I" %}
  ins_encode %{ __ movdl($dst$$Register, $src$$XMMRegister); %}
  ins_pipe( pipe_slow );
%}

// Double bits -> long register.
instruct MoveD2L_reg_reg(rRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "movd $dst,$src\t# MoveD2L" %}
  ins_encode %{ __ movdq($dst$$Register, $src$$XMMRegister); %}
  ins_pipe( pipe_slow );
%}

// The next instructions have long latency and use Int unit. Set high cost.
// Int bits -> float register.
instruct MoveI2F_reg_reg(regF dst, rRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(300);
  format %{ "movd $dst,$src\t# MoveI2F" %}
  ins_encode %{ __ movdl($dst$$XMMRegister, $src$$Register); %}
  ins_pipe( pipe_slow );
%}

// Long bits -> double register.
instruct MoveL2D_reg_reg(regD dst, rRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(300);
  format %{ "movd $dst,$src\t# MoveL2D" %}
  ins_encode %{ __ movdq($dst$$XMMRegister, $src$$Register); %}
  ins_pipe( pipe_slow );
%}
// Replicate scalar to packed byte (1 byte) values in xmm
instruct Repl8B_reg(regD dst, regD src) %{
  match(Set dst (Replicate8B src));
  format %{ "MOVDQA $dst,$src\n\t"
            "PUNPCKLBW $dst,$dst\n\t"
            "PSHUFLW $dst,$dst,0x00\t! replicate8B" %}
  ins_encode( pshufd_8x8(dst, src));
  ins_pipe( pipe_slow );
%}

// Replicate scalar to packed byte (1 byte) values in xmm
instruct Repl8B_rRegI(regD dst, rRegI src) %{
  match(Set dst (Replicate8B src));
  format %{ "MOVD $dst,$src\n\t"
            "PUNPCKLBW $dst,$dst\n\t"
            "PSHUFLW $dst,$dst,0x00\t! replicate8B" %}
  ins_encode( mov_i2x(dst, src), pshufd_8x8(dst, dst));
  ins_pipe( pipe_slow );
%}

// Replicate scalar zero to packed byte (1 byte) values in xmm
// PXOR reg,reg is the canonical zero idiom.
instruct Repl8B_immI0(regD dst, immI0 zero) %{
  match(Set dst (Replicate8B zero));
  format %{ "PXOR $dst,$dst\t! replicate8B" %}
  ins_encode( pxor(dst, dst));
  ins_pipe( fpu_reg_reg );
%}
// Replicate scalar to packed short (2 byte) values in xmm
instruct Repl4S_reg(regD dst, regD src) %{
  match(Set dst (Replicate4S src));
  format %{ "PSHUFLW $dst,$src,0x00\t! replicate4S" %}
  ins_encode( pshufd_4x16(dst, src));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar to packed short (2 byte) values in xmm
instruct Repl4S_rRegI(regD dst, rRegI src) %{
  match(Set dst (Replicate4S src));
  format %{ "MOVD $dst,$src\n\t"
            "PSHUFLW $dst,$dst,0x00\t! replicate4S" %}
  ins_encode( mov_i2x(dst, src), pshufd_4x16(dst, dst));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar zero to packed short (2 byte) values in xmm
instruct Repl4S_immI0(regD dst, immI0 zero) %{
  match(Set dst (Replicate4S zero));
  format %{ "PXOR $dst,$dst\t! replicate4S" %}
  ins_encode( pxor(dst, dst));
  ins_pipe( fpu_reg_reg );
%}
// Replicate scalar to packed char (2 byte) values in xmm
instruct Repl4C_reg(regD dst, regD src) %{
  match(Set dst (Replicate4C src));
  format %{ "PSHUFLW $dst,$src,0x00\t! replicate4C" %}
  ins_encode( pshufd_4x16(dst, src));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar to packed char (2 byte) values in xmm
instruct Repl4C_rRegI(regD dst, rRegI src) %{
  match(Set dst (Replicate4C src));
  format %{ "MOVD $dst,$src\n\t"
            "PSHUFLW $dst,$dst,0x00\t! replicate4C" %}
  ins_encode( mov_i2x(dst, src), pshufd_4x16(dst, dst));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar zero to packed char (2 byte) values in xmm
instruct Repl4C_immI0(regD dst, immI0 zero) %{
  match(Set dst (Replicate4C zero));
  format %{ "PXOR $dst,$dst\t! replicate4C" %}
  ins_encode( pxor(dst, dst));
  ins_pipe( fpu_reg_reg );
%}
// Replicate scalar to packed integer (4 byte) values in xmm
instruct Repl2I_reg(regD dst, regD src) %{
  match(Set dst (Replicate2I src));
  format %{ "PSHUFD $dst,$src,0x00\t! replicate2I" %}
  ins_encode( pshufd(dst, src, 0x00));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar to packed integer (4 byte) values in xmm
instruct Repl2I_rRegI(regD dst, rRegI src) %{
  match(Set dst (Replicate2I src));
  format %{ "MOVD $dst,$src\n\t"
            "PSHUFD $dst,$dst,0x00\t! replicate2I" %}
  ins_encode( mov_i2x(dst, src), pshufd(dst, dst, 0x00));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar zero to packed integer (4 byte) values in xmm
instruct Repl2I_immI0(regD dst, immI0 zero) %{
  match(Set dst (Replicate2I zero));
  format %{ "PXOR $dst,$dst\t! replicate2I" %}
  ins_encode( pxor(dst, dst));
  ins_pipe( fpu_reg_reg );
%}
// Replicate scalar to packed single precision floating point values in xmm
instruct Repl2F_reg(regD dst, regD src) %{
  match(Set dst (Replicate2F src));
  format %{ "PSHUFD $dst,$src,0xe0\t! replicate2F" %}
  ins_encode( pshufd(dst, src, 0xe0));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar to packed single precision floating point values in xmm
instruct Repl2F_regF(regD dst, regF src) %{
  match(Set dst (Replicate2F src));
  format %{ "PSHUFD $dst,$src,0xe0\t! replicate2F" %}
  ins_encode( pshufd(dst, src, 0xe0));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar zero to packed single precision floating point values in xmm
instruct Repl2F_immF0(regD dst, immF0 zero) %{
  match(Set dst (Replicate2F zero));
  format %{ "PXOR $dst,$dst\t! replicate2F" %}
  ins_encode( pxor(dst, dst));
  ins_pipe( fpu_reg_reg );
%}
// =======================================================================
// fast clearing of an array
// Zeroes cnt quadwords starting at base via "rep stosq"; rcx/rdi are
// consumed by the string instruction and rax is zeroed, hence the KILLs.
instruct rep_stos(rcx_RegL cnt, rdi_RegP base, rax_RegI zero, Universe dummy,
                  rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);

  format %{ "xorl rax, rax\t# ClearArray:\n\t"
            "rep stosq\t# Store rax to *rdi++ while rcx--" %}
  ins_encode(opc_reg_reg(0x33, RAX, RAX), // xorl %eax, %eax
             Opcode(0xF3), Opcode(0x48), Opcode(0xAB)); // rep REX_W stos
  ins_pipe(pipe_slow);
%}
// String.compareTo intrinsic; fixed-register calling shape with the
// comparison result in rcx.  tmp1/tmp2 are scratch XMM registers.
instruct string_compare(rdi_RegP str1, rsi_RegP str2, regD tmp1, regD tmp2,
                        rax_RegI tmp3, rbx_RegI tmp4, rcx_RegI result, rFlagsReg cr)
%{
  match(Set result (StrComp str1 str2));
  effect(TEMP tmp1, TEMP tmp2, USE_KILL str1, USE_KILL str2, KILL tmp3, KILL tmp4, KILL cr);
  //ins_cost(300);

  format %{ "String Compare $str1, $str2 -> $result // XXX KILL RAX, RBX" %}
  ins_encode( enc_String_Compare(str1, str2, tmp1, tmp2, tmp3, tmp4, result) );
  ins_pipe( pipe_slow );
%}

// String.indexOf intrinsic; only generated when SSE4.2 string instructions
// are available (UseSSE42Intrinsics).
instruct string_indexof(rsi_RegP str1, rdi_RegP str2, regD tmp1, rax_RegI tmp2,
                        rcx_RegI tmp3, rdx_RegI tmp4, rbx_RegI result, rFlagsReg cr)
%{
  predicate(UseSSE42Intrinsics);
  match(Set result (StrIndexOf str1 str2));
  effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, KILL tmp2, KILL tmp3, KILL tmp4, KILL cr);

  format %{ "String IndexOf $str1,$str2 -> $result // KILL RAX, RCX, RDX" %}
  ins_encode( enc_String_IndexOf(str1, str2, tmp1, tmp2, tmp3, tmp4, result) );
  ins_pipe( pipe_slow );
%}

// fast string equals
instruct string_equals(rdi_RegP str1, rsi_RegP str2, regD tmp1, regD tmp2, rbx_RegI tmp3,
                       rcx_RegI tmp4, rax_RegI result, rFlagsReg cr)
%{
  match(Set result (StrEquals str1 str2));
  effect(TEMP tmp1, TEMP tmp2, USE_KILL str1, USE_KILL str2, KILL tmp3, KILL tmp4, KILL cr);

  format %{ "String Equals $str1,$str2 -> $result // KILL RBX, RCX" %}
  ins_encode( enc_String_Equals(str1, str2, tmp1, tmp2, tmp3, tmp4, result) );
  ins_pipe( pipe_slow );
%}

// fast array equals
instruct array_equals(rdi_RegP ary1, rsi_RegP ary2, regD tmp1, regD tmp2, rax_RegI tmp3,
                      rbx_RegI tmp4, rcx_RegI result, rFlagsReg cr)
%{
  match(Set result (AryEq ary1 ary2));
  effect(TEMP tmp1, TEMP tmp2, USE_KILL ary1, USE_KILL ary2, KILL tmp3, KILL tmp4, KILL cr);
  //ins_cost(300);

  format %{ "Array Equals $ary1,$ary2 -> $result // KILL RAX, RBX" %}
  ins_encode( enc_Array_Equals(ary1, ary2, tmp1, tmp2, tmp3, tmp4, result) );
  ins_pipe( pipe_slow );
%}
//----------Control Flow Instructions------------------------------------------
// Signed compare Instructions

// XXX more variants!!

// Signed int compare, register-register (cmpl, opcode 3B /r).
instruct compI_rReg(rFlagsReg cr, rRegI op1, rRegI op2)
%{
  match(Set cr (CmpI op1 op2));
  effect(DEF cr, USE op1, USE op2);

  format %{ "cmpl $op1, $op2" %}
  opcode(0x3B);  /* Opcode 3B /r */
  ins_encode(REX_reg_reg(op1, op2), OpcP, reg_reg(op1, op2));
  ins_pipe(ialu_cr_reg_reg);
%}

// Signed int compare with immediate (opcode 81 /7; OpcSErm may shorten
// small constants to the 8-bit form -- see Con8or32).
instruct compI_rReg_imm(rFlagsReg cr, rRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  format %{ "cmpl $op1, $op2" %}
  opcode(0x81, 0x07); /* Opcode 81 /7 */
  ins_encode(OpcSErm(op1, op2), Con8or32(op2));
  ins_pipe(ialu_cr_reg_imm);
%}

// Signed int compare with memory operand.
instruct compI_rReg_mem(rFlagsReg cr, rRegI op1, memory op2)
%{
  match(Set cr (CmpI op1 (LoadI op2)));

  ins_cost(500); // XXX
  format %{ "cmpl $op1, $op2" %}
  opcode(0x3B); /* Opcode 3B /r */
  ins_encode(REX_reg_mem(op1, op2), OpcP, reg_mem(op1, op2));
  ins_pipe(ialu_cr_reg_mem);
%}

// Int compare against zero: testl reg,reg is shorter than cmpl reg,0.
instruct testI_reg(rFlagsReg cr, rRegI src, immI0 zero)
%{
  match(Set cr (CmpI src zero));

  format %{ "testl $src, $src" %}
  opcode(0x85);
  ins_encode(REX_reg_reg(src, src), OpcP, reg_reg(src, src));
  ins_pipe(ialu_cr_reg_imm);
%}

// (src & con) compared against zero: single testl with immediate.
instruct testI_reg_imm(rFlagsReg cr, rRegI src, immI con, immI0 zero)
%{
  match(Set cr (CmpI (AndI src con) zero));

  format %{ "testl $src, $con" %}
  opcode(0xF7, 0x00);
  ins_encode(REX_reg(src), OpcP, reg_opc(src), Con32(con));
  ins_pipe(ialu_cr_reg_imm);
%}

// (src & [mem]) compared against zero: testl with a memory operand.
instruct testI_reg_mem(rFlagsReg cr, rRegI src, memory mem, immI0 zero)
%{
  match(Set cr (CmpI (AndI src (LoadI mem)) zero));

  format %{ "testl $src, $mem" %}
  opcode(0x85);
  ins_encode(REX_reg_mem(src, mem), OpcP, reg_mem(src, mem));
  ins_pipe(ialu_cr_reg_mem);
%}
// Unsigned compare Instructions; really, same as signed except they
// produce an rFlagsRegU instead of rFlagsReg.
instruct compU_rReg(rFlagsRegU cr, rRegI op1, rRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  format %{ "cmpl $op1, $op2\t# unsigned" %}
  opcode(0x3B); /* Opcode 3B /r */
  ins_encode(REX_reg_reg(op1, op2), OpcP, reg_reg(op1, op2));
  ins_pipe(ialu_cr_reg_reg);
%}

// Unsigned int compare with immediate.
instruct compU_rReg_imm(rFlagsRegU cr, rRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  format %{ "cmpl $op1, $op2\t# unsigned" %}
  opcode(0x81,0x07); /* Opcode 81 /7 */
  ins_encode(OpcSErm(op1, op2), Con8or32(op2));
  ins_pipe(ialu_cr_reg_imm);
%}

// Unsigned int compare with memory operand.
instruct compU_rReg_mem(rFlagsRegU cr, rRegI op1, memory op2)
%{
  match(Set cr (CmpU op1 (LoadI op2)));

  ins_cost(500); // XXX
  format %{ "cmpl $op1, $op2\t# unsigned" %}
  opcode(0x3B); /* Opcode 3B /r */
  ins_encode(REX_reg_mem(op1, op2), OpcP, reg_mem(op1, op2));
  ins_pipe(ialu_cr_reg_mem);
%}

// // // Cisc-spilled version of cmpU_rReg
// //instruct compU_mem_rReg(rFlagsRegU cr, memory op1, rRegI op2)
// //%{
// //  match(Set cr (CmpU (LoadI op1) op2));
// //
// //  format %{ "CMPu $op1,$op2" %}
// //  ins_cost(500);
// //  opcode(0x39);  /* Opcode 39 /r */
// //  ins_encode( OpcP, reg_mem( op1, op2) );
// //%}

// Unsigned compare against zero via testl.
instruct testU_reg(rFlagsRegU cr, rRegI src, immI0 zero)
%{
  match(Set cr (CmpU src zero));

  format %{ "testl $src, $src\t# unsigned" %}
  opcode(0x85);
  ins_encode(REX_reg_reg(src, src), OpcP, reg_reg(src, src));
  ins_pipe(ialu_cr_reg_imm);
%}
// Pointer (64-bit) compare, register-register. Produces unsigned flags:
// pointer comparisons are inherently unsigned.
12117 instruct compP_rReg(rFlagsRegU cr, rRegP op1, rRegP op2)
12118 %{
12119 match(Set cr (CmpP op1 op2));
12121 format %{ "cmpq $op1, $op2\t# ptr" %}
12122 opcode(0x3B); /* Opcode 3B /r */
12123 ins_encode(REX_reg_reg_wide(op1, op2), OpcP, reg_reg(op1, op2));
12124 ins_pipe(ialu_cr_reg_reg);
12125 %}
// Pointer compare, register against pointer loaded from memory.
12127 instruct compP_rReg_mem(rFlagsRegU cr, rRegP op1, memory op2)
12128 %{
12129 match(Set cr (CmpP op1 (LoadP op2)));
12131 ins_cost(500); // XXX
12132 format %{ "cmpq $op1, $op2\t# ptr" %}
12133 opcode(0x3B); /* Opcode 3B /r */
12134 ins_encode(REX_reg_mem_wide(op1, op2), OpcP, reg_mem(op1, op2));
12135 ins_pipe(ialu_cr_reg_mem);
12136 %}
12138 // // // Cisc-spilled version of cmpP_rReg
12139 // //instruct compP_mem_rReg(rFlagsRegU cr, memory op1, rRegP op2)
12140 // //%{
12141 // //  match(Set cr (CmpP (LoadP op1) op2));
12142 // //
12143 // //  format %{ "CMPu   $op1,$op2" %}
12144 // //  ins_cost(500);
12145 // //  opcode(0x39); /* Opcode 39 /r */
12146 // //  ins_encode( OpcP, reg_mem( op1, op2) );
12147 // //%}
12149 // XXX this is generalized by compP_rReg_mem???
12150 // Compare raw pointer (used in out-of-heap check).
12151 // Only works because non-oop pointers must be raw pointers
12152 // and raw pointers have no anti-dependencies.
// Predicate restricts this rule to loads whose type is not an oop pointer
// (n->in(2)->in(2) is the LoadP's address input).
12153 instruct compP_mem_rReg(rFlagsRegU cr, rRegP op1, memory op2)
12154 %{
12155 predicate(!n->in(2)->in(2)->bottom_type()->isa_oop_ptr());
12156 match(Set cr (CmpP op1 (LoadP op2)));
12158 format %{ "cmpq $op1, $op2\t# raw ptr" %}
12159 opcode(0x3B); /* Opcode 3B /r */
12160 ins_encode(REX_reg_mem_wide(op1, op2), OpcP, reg_mem(op1, op2));
12161 ins_pipe(ialu_cr_reg_mem);
12162 %}
12164 // This will generate a signed flags result. This should be OK since
12165 // any compare to a zero should be eq/neq.
// Null-test of a pointer in a register (TEST reg,reg).
12166 instruct testP_reg(rFlagsReg cr, rRegP src, immP0 zero)
12167 %{
12168 match(Set cr (CmpP src zero));
12170 format %{ "testq $src, $src\t# ptr" %}
12171 opcode(0x85);
12172 ins_encode(REX_reg_reg_wide(src, src), OpcP, reg_reg(src, src));
12173 ins_pipe(ialu_cr_reg_imm);
12174 %}
12176 // This will generate a signed flags result. This should be OK since
12177 // any compare to a zero should be eq/neq.
// Null-test of a pointer in memory; excluded when compressed oops run with a
// NULL heap base (the cheaper R12 form below applies there instead).
12178 instruct testP_mem(rFlagsReg cr, memory op, immP0 zero)
12179 %{
12180 predicate(!UseCompressedOops || (Universe::narrow_oop_base() != NULL));
12181 match(Set cr (CmpP (LoadP op) zero));
12183 ins_cost(500); // XXX
12184 format %{ "testq $op, 0xffffffffffffffff\t# ptr" %}
12185 opcode(0xF7); /* Opcode F7 /0 */
12186 ins_encode(REX_mem_wide(op),
12187 OpcP, RM_opc_mem(0x00, op), Con_d32(0xFFFFFFFF));
12188 ins_pipe(ialu_cr_reg_imm);
12189 %}
// Null-test via compare against R12: when the compressed-oop heap base is
// NULL, R12 (r12_heapbase) holds zero, so cmpq r12, [mem] tests for NULL.
12191 instruct testP_mem_reg0(rFlagsReg cr, memory mem, immP0 zero)
12192 %{
12193 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
12194 match(Set cr (CmpP (LoadP mem) zero));
12196 format %{ "cmpq R12, $mem\t# ptr (R12_heapbase==0)" %}
12197 ins_encode %{
12198 __ cmpq(r12, $mem$$Address);
12199 %}
12200 ins_pipe(ialu_cr_reg_mem);
12201 %}
// Compressed (narrow) oop compare, register-register: 32-bit cmpl suffices.
12203 instruct compN_rReg(rFlagsRegU cr, rRegN op1, rRegN op2)
12204 %{
12205 match(Set cr (CmpN op1 op2));
12207 format %{ "cmpl $op1, $op2\t# compressed ptr" %}
12208 ins_encode %{ __ cmpl($op1$$Register, $op2$$Register); %}
12209 ins_pipe(ialu_cr_reg_reg);
12210 %}
// Compressed oop compare, register against narrow oop loaded from memory.
12212 instruct compN_rReg_mem(rFlagsRegU cr, rRegN src, memory mem)
12213 %{
12214 match(Set cr (CmpN src (LoadN mem)));
12216 format %{ "cmpl $src, $mem\t# compressed ptr" %}
12217 ins_encode %{
12218 __ cmpl($src$$Register, $mem$$Address);
12219 %}
12220 ins_pipe(ialu_cr_reg_mem);
12221 %}
// Compressed oop compare against a narrow-oop constant (needs relocation,
// hence the cmp_narrow_oop macro-assembler call).
12223 instruct compN_rReg_imm(rFlagsRegU cr, rRegN op1, immN op2) %{
12224 match(Set cr (CmpN op1 op2));
12226 format %{ "cmpl $op1, $op2\t# compressed ptr" %}
12227 ins_encode %{
12228 __ cmp_narrow_oop($op1$$Register, (jobject)$op2$$constant);
12229 %}
12230 ins_pipe(ialu_cr_reg_imm);
12231 %}
// Compressed oop in memory compared against a narrow-oop constant.
12233 instruct compN_mem_imm(rFlagsRegU cr, memory mem, immN src)
12234 %{
12235 match(Set cr (CmpN src (LoadN mem)));
12237 format %{ "cmpl $mem, $src\t# compressed ptr" %}
12238 ins_encode %{
12239 __ cmp_narrow_oop($mem$$Address, (jobject)$src$$constant);
12240 %}
12241 ins_pipe(ialu_cr_reg_mem);
12242 %}
// Null-test of a narrow oop in a register.
12244 instruct testN_reg(rFlagsReg cr, rRegN src, immN0 zero) %{
12245 match(Set cr (CmpN src zero));
12247 format %{ "testl $src, $src\t# compressed ptr" %}
12248 ins_encode %{ __ testl($src$$Register, $src$$Register); %}
12249 ins_pipe(ialu_cr_reg_imm);
12250 %}
// Null-test of a narrow oop in memory (heap base non-NULL case).
12252 instruct testN_mem(rFlagsReg cr, memory mem, immN0 zero)
12253 %{
12254 predicate(Universe::narrow_oop_base() != NULL);
12255 match(Set cr (CmpN (LoadN mem) zero));
12257 ins_cost(500); // XXX
12258 format %{ "testl $mem, 0xffffffff\t# compressed ptr" %}
12259 ins_encode %{
12260 __ cmpl($mem$$Address, (int)0xFFFFFFFF);
12261 %}
12262 ins_pipe(ialu_cr_reg_mem);
12263 %}
// Null-test of a narrow oop via R12: with a NULL heap base R12 holds zero.
12265 instruct testN_mem_reg0(rFlagsReg cr, memory mem, immN0 zero)
12266 %{
12267 predicate(Universe::narrow_oop_base() == NULL);
12268 match(Set cr (CmpN (LoadN mem) zero));
12270 format %{ "cmpl R12, $mem\t# compressed ptr (R12_heapbase==0)" %}
12271 ins_encode %{
12272 __ cmpl(r12, $mem$$Address);
12273 %}
12274 ins_pipe(ialu_cr_reg_mem);
12275 %}
12277 // Yanked all unsigned pointer compare operations.
12278 // Pointer compares are done with CmpP which is already unsigned.
// Signed long compare, register-register.
12280 instruct compL_rReg(rFlagsReg cr, rRegL op1, rRegL op2)
12281 %{
12282 match(Set cr (CmpL op1 op2));
12284 format %{ "cmpq $op1, $op2" %}
12285 opcode(0x3B); /* Opcode 3B /r */
12286 ins_encode(REX_reg_reg_wide(op1, op2), OpcP, reg_reg(op1, op2));
12287 ins_pipe(ialu_cr_reg_reg);
12288 %}
// Long compare against a 32-bit sign-extended immediate.
12290 instruct compL_rReg_imm(rFlagsReg cr, rRegL op1, immL32 op2)
12291 %{
12292 match(Set cr (CmpL op1 op2));
12294 format %{ "cmpq $op1, $op2" %}
12295 opcode(0x81, 0x07); /* Opcode 81 /7 */
12296 ins_encode(OpcSErm_wide(op1, op2), Con8or32(op2));
12297 ins_pipe(ialu_cr_reg_imm);
12298 %}
// Long compare, register against memory operand.
12300 instruct compL_rReg_mem(rFlagsReg cr, rRegL op1, memory op2)
12301 %{
12302 match(Set cr (CmpL op1 (LoadL op2)));
12304 format %{ "cmpq $op1, $op2" %}
12305 opcode(0x3B); /* Opcode 3B /r */
12306 ins_encode(REX_reg_mem_wide(op1, op2), OpcP, reg_mem(op1, op2));
12307 ins_pipe(ialu_cr_reg_mem);
12308 %}
// Zero-test of a long register.
12310 instruct testL_reg(rFlagsReg cr, rRegL src, immL0 zero)
12311 %{
12312 match(Set cr (CmpL src zero));
12314 format %{ "testq $src, $src" %}
12315 opcode(0x85);
12316 ins_encode(REX_reg_reg_wide(src, src), OpcP, reg_reg(src, src));
12317 ins_pipe(ialu_cr_reg_imm);
12318 %}
// Zero-test of (long AND 32-bit-immediate) folded into a single TESTQ.
12320 instruct testL_reg_imm(rFlagsReg cr, rRegL src, immL32 con, immL0 zero)
12321 %{
12322 match(Set cr (CmpL (AndL src con) zero));
12324 format %{ "testq $src, $con\t# long" %}
12325 opcode(0xF7, 0x00);
12326 ins_encode(REX_reg_wide(src), OpcP, reg_opc(src), Con32(con));
12327 ins_pipe(ialu_cr_reg_imm);
12328 %}
// Zero-test of (long AND long-from-memory) folded into a single TESTQ.
12330 instruct testL_reg_mem(rFlagsReg cr, rRegL src, memory mem, immL0 zero)
12331 %{
12332 match(Set cr (CmpL (AndL src (LoadL mem)) zero));
12334 format %{ "testq $src, $mem" %}
12335 opcode(0x85);
12336 ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
12337 ins_pipe(ialu_cr_reg_mem);
12338 %}
12340 // Manifest a CmpL result in an integer register. Very painful.
12341 // This is the test to avoid.
// Produces -1/0/1 in dst; kills flags. Encoded by the cmpl3_flag helper,
// whose emitted sequence matches the multi-line format below.
12342 instruct cmpL3_reg_reg(rRegI dst, rRegL src1, rRegL src2, rFlagsReg flags)
12343 %{
12344 match(Set dst (CmpL3 src1 src2));
12345 effect(KILL flags);
12347 ins_cost(275); // XXX
12348 format %{ "cmpq $src1, $src2\t# CmpL3\n\t"
12349 "movl $dst, -1\n\t"
12350 "jl,s done\n\t"
12351 "setne $dst\n\t"
12352 "movzbl $dst, $dst\n\t"
12353 "done:" %}
12354 ins_encode(cmpl3_flag(src1, src2, dst));
12355 ins_pipe(pipe_slow);
12356 %}
12358 //----------Max and Min--------------------------------------------------------
12359 // Min Instructions
// Helper for MinI: conditional move taken when flags say "greater" (CMOVG,
// opcode 0F 4F). Not matched directly; used only by the expand below.
12361 instruct cmovI_reg_g(rRegI dst, rRegI src, rFlagsReg cr)
12362 %{
12363 effect(USE_DEF dst, USE src, USE cr);
12365 format %{ "cmovlgt $dst, $src\t# min" %}
12366 opcode(0x0F, 0x4F);
12367 ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
12368 ins_pipe(pipe_cmov_reg);
12369 %}
// MinI: compare, then cmovg replaces dst with src when dst > src.
12372 instruct minI_rReg(rRegI dst, rRegI src)
12373 %{
12374 match(Set dst (MinI dst src));
12376 ins_cost(200);
12377 expand %{
12378 rFlagsReg cr;
12379 compI_rReg(cr, dst, src);
12380 cmovI_reg_g(dst, src, cr);
12381 %}
12382 %}
// Helper for MaxI: conditional move taken when flags say "less" (CMOVL,
// opcode 0F 4C). Not matched directly; used only by the expand below.
12384 instruct cmovI_reg_l(rRegI dst, rRegI src, rFlagsReg cr)
12385 %{
12386 effect(USE_DEF dst, USE src, USE cr);
12388 format %{ "cmovllt $dst, $src\t# max" %}
12389 opcode(0x0F, 0x4C);
12390 ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
12391 ins_pipe(pipe_cmov_reg);
12392 %}
// MaxI: compare, then cmovl replaces dst with src when dst < src.
12395 instruct maxI_rReg(rRegI dst, rRegI src)
12396 %{
12397 match(Set dst (MaxI dst src));
12399 ins_cost(200);
12400 expand %{
12401 rFlagsReg cr;
12402 compI_rReg(cr, dst, src);
12403 cmovI_reg_l(dst, src, cr);
12404 %}
12405 %}
12407 // ============================================================================
12408 // Branch Instructions
12410 // Jump Direct - Label defines a relative address from JMP+1
// Unconditional jump, 32-bit displacement form (opcode E9, 5 bytes total).
12411 instruct jmpDir(label labl)
12412 %{
12413 match(Goto);
12414 effect(USE labl);
12416 ins_cost(300);
12417 format %{ "jmp $labl" %}
12418 size(5);
12419 opcode(0xE9);
12420 ins_encode(OpcP, Lbl(labl));
12421 ins_pipe(pipe_jmp);
12422 ins_pc_relative(1);
12423 %}
12425 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Conditional branch on signed flags, long form (0F 8x, 6 bytes).
12426 instruct jmpCon(cmpOp cop, rFlagsReg cr, label labl)
12427 %{
12428 match(If cop cr);
12429 effect(USE labl);
12431 ins_cost(300);
12432 format %{ "j$cop $labl" %}
12433 size(6);
12434 opcode(0x0F, 0x80);
12435 ins_encode(Jcc(cop, labl));
12436 ins_pipe(pipe_jcc);
12437 ins_pc_relative(1);
12438 %}
12440 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Back-branch of a counted loop, signed flags.
12441 instruct jmpLoopEnd(cmpOp cop, rFlagsReg cr, label labl)
12442 %{
12443 match(CountedLoopEnd cop cr);
12444 effect(USE labl);
12446 ins_cost(300);
12447 format %{ "j$cop $labl\t# loop end" %}
12448 size(6);
12449 opcode(0x0F, 0x80);
12450 ins_encode(Jcc(cop, labl));
12451 ins_pipe(pipe_jcc);
12452 ins_pc_relative(1);
12453 %}
12455 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Back-branch of a counted loop, unsigned flags.
12456 instruct jmpLoopEndU(cmpOpU cop, rFlagsRegU cmp, label labl) %{
12457 match(CountedLoopEnd cop cmp);
12458 effect(USE labl);
12460 ins_cost(300);
12461 format %{ "j$cop,u $labl\t# loop end" %}
12462 size(6);
12463 opcode(0x0F, 0x80);
12464 ins_encode(Jcc(cop, labl));
12465 ins_pipe(pipe_jcc);
12466 ins_pc_relative(1);
12467 %}
// Back-branch of a counted loop on carry-free unsigned flags (UCF variants
// come from float compares); cheaper cost makes it preferred when legal.
12469 instruct jmpLoopEndUCF(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
12470 match(CountedLoopEnd cop cmp);
12471 effect(USE labl);
12473 ins_cost(200);
12474 format %{ "j$cop,u $labl\t# loop end" %}
12475 size(6);
12476 opcode(0x0F, 0x80);
12477 ins_encode(Jcc(cop, labl));
12478 ins_pipe(pipe_jcc);
12479 ins_pc_relative(1);
12480 %}
12482 // Jump Direct Conditional - using unsigned comparison
12483 instruct jmpConU(cmpOpU cop, rFlagsRegU cmp, label labl) %{
12484 match(If cop cmp);
12485 effect(USE labl);
12487 ins_cost(300);
12488 format %{ "j$cop,u $labl" %}
12489 size(6);
12490 opcode(0x0F, 0x80);
12491 ins_encode(Jcc(cop, labl));
12492 ins_pipe(pipe_jcc);
12493 ins_pc_relative(1);
12494 %}
// Conditional branch on carry-free unsigned flags.
12496 instruct jmpConUCF(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
12497 match(If cop cmp);
12498 effect(USE labl);
12500 ins_cost(200);
12501 format %{ "j$cop,u $labl" %}
12502 size(6);
12503 opcode(0x0F, 0x80);
12504 ins_encode(Jcc(cop, labl));
12505 ins_pipe(pipe_jcc);
12506 ins_pc_relative(1);
12507 %}
// Conditional branch where a parity check is also required (unordered float
// compare results): emits jp + jcc, 12 bytes total. The notEqual case sends
// both jumps to the target; the equal case jumps the parity branch past the
// jcc (parity_disp == 6, the size of the second jump).
12509 instruct jmpConUCF2(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{
12510 match(If cop cmp);
12511 effect(USE labl);
12513 ins_cost(200);
12514 format %{ $$template
12515 if ($cop$$cmpcode == Assembler::notEqual) {
12516 $$emit$$"jp,u $labl\n\t"
12517 $$emit$$"j$cop,u $labl"
12518 } else {
12519 $$emit$$"jp,u done\n\t"
12520 $$emit$$"j$cop,u $labl\n\t"
12521 $$emit$$"done:"
12522 }
12523 %}
12524 size(12);
12525 opcode(0x0F, 0x80);
12526 ins_encode %{
12527 Label* l = $labl$$label;
12528 $$$emit8$primary;
12529 emit_cc(cbuf, $secondary, Assembler::parity);
12530 int parity_disp = -1;
12531 if ($cop$$cmpcode == Assembler::notEqual) {
12532 // the two jumps 6 bytes apart so the jump distances are too
12533 parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0;
12534 } else if ($cop$$cmpcode == Assembler::equal) {
12535 parity_disp = 6;
12536 } else {
12537 ShouldNotReachHere();
12538 }
12539 emit_d32(cbuf, parity_disp);
12540 $$$emit8$primary;
12541 emit_cc(cbuf, $secondary, $cop$$cmpcode);
12542 int disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0;
12543 emit_d32(cbuf, disp);
12544 %}
12545 ins_pipe(pipe_jcc);
12546 ins_pc_relative(1);
12547 %}
12549 // ============================================================================
12550 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary
12551 // superklass array for an instance of the superklass. Set a hidden
12552 // internal cache on a hit (cache is checked with exposed code in
12553 // gen_subtype_check()). Return NZ for a miss or zero for a hit. The
12554 // encoding ALSO sets flags.
// Result register rdi is zeroed on a hit (opcode(0x1) tells the shared
// enc_PartialSubtypeCheck encoding to emit the XOR of RDI); rcx and flags
// are clobbered by the repne scasq scan.
12556 instruct partialSubtypeCheck(rdi_RegP result,
12557 rsi_RegP sub, rax_RegP super, rcx_RegI rcx,
12558 rFlagsReg cr)
12559 %{
12560 match(Set result (PartialSubtypeCheck sub super));
12561 effect(KILL rcx, KILL cr);
12563 ins_cost(1100); // slightly larger than the next version
12564 format %{ "movq rdi, [$sub + (sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())]\n\t"
12565 "movl rcx, [rdi + arrayOopDesc::length_offset_in_bytes()]\t# length to scan\n\t"
12566 "addq rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)\t# Skip to start of data; set NZ in case count is zero\n\t"
12567 "repne scasq\t# Scan *rdi++ for a match with rax while rcx--\n\t"
12568 "jne,s miss\t\t# Missed: rdi not-zero\n\t"
12569 "movq [$sub + (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())], $super\t# Hit: update cache\n\t"
12570 "xorq $result, $result\t\t# Hit: rdi zero\n\t"
12571 "miss:\t" %}
12573 opcode(0x1); // Force a XOR of RDI
12574 ins_encode(enc_PartialSubtypeCheck());
12575 ins_pipe(pipe_slow);
12576 %}
// Flags-only variant matched when the PartialSubtypeCheck result is compared
// against NULL: skips the XOR of RDI (opcode(0x0)), so it is slightly
// cheaper; rcx and rdi are still clobbered by the scan.
12578 instruct partialSubtypeCheck_vs_Zero(rFlagsReg cr,
12579 rsi_RegP sub, rax_RegP super, rcx_RegI rcx,
12580 immP0 zero,
12581 rdi_RegP result)
12582 %{
12583 match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
12584 effect(KILL rcx, KILL result);
12586 ins_cost(1000);
12587 format %{ "movq rdi, [$sub + (sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())]\n\t"
12588 "movl rcx, [rdi + arrayOopDesc::length_offset_in_bytes()]\t# length to scan\n\t"
12589 "addq rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)\t# Skip to start of data; set NZ in case count is zero\n\t"
12590 "repne scasq\t# Scan *rdi++ for a match with rax while cx-- != 0\n\t"
12591 "jne,s miss\t\t# Missed: flags nz\n\t"
12592 "movq [$sub + (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())], $super\t# Hit: update cache\n\t"
12593 "miss:\t" %}
12595 opcode(0x0); // No need to XOR RDI
12596 ins_encode(enc_PartialSubtypeCheck());
12597 ins_pipe(pipe_slow);
12598 %}
12600 // ============================================================================
12601 // Branch Instructions -- short offset versions
12602 //
12603 // These instructions are used to replace jumps of a long offset (the default
12604 // match) with jumps of a shorter offset. These instructions are all tagged
12605 // with the ins_short_branch attribute, which causes the ADLC to suppress the
12606 // match rules in general matching. Instead, the ADLC generates a conversion
12607 // method in the MachNode which can be used to do in-place replacement of the
12608 // long variant with the shorter variant. The compiler will determine if a
12609 // branch can be taken by the is_short_branch_offset() predicate in the machine
12610 // specific code section of the file.
12612 // Jump Direct - Label defines a relative address from JMP+1
// Short unconditional jump (opcode EB, 8-bit displacement, 2 bytes).
12613 instruct jmpDir_short(label labl) %{
12614 match(Goto);
12615 effect(USE labl);
12617 ins_cost(300);
12618 format %{ "jmp,s $labl" %}
12619 size(2);
12620 opcode(0xEB);
12621 ins_encode(OpcP, LblShort(labl));
12622 ins_pipe(pipe_jmp);
12623 ins_pc_relative(1);
12624 ins_short_branch(1);
12625 %}
12627 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Short conditional branch, signed flags (opcode 7x, 2 bytes).
12628 instruct jmpCon_short(cmpOp cop, rFlagsReg cr, label labl) %{
12629 match(If cop cr);
12630 effect(USE labl);
12632 ins_cost(300);
12633 format %{ "j$cop,s $labl" %}
12634 size(2);
12635 opcode(0x70);
12636 ins_encode(JccShort(cop, labl));
12637 ins_pipe(pipe_jcc);
12638 ins_pc_relative(1);
12639 ins_short_branch(1);
12640 %}
12642 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12643 instruct jmpLoopEnd_short(cmpOp cop, rFlagsReg cr, label labl) %{
12644 match(CountedLoopEnd cop cr);
12645 effect(USE labl);
12647 ins_cost(300);
12648 format %{ "j$cop,s $labl\t# loop end" %}
12649 size(2);
12650 opcode(0x70);
12651 ins_encode(JccShort(cop, labl));
12652 ins_pipe(pipe_jcc);
12653 ins_pc_relative(1);
12654 ins_short_branch(1);
12655 %}
12657 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12658 instruct jmpLoopEndU_short(cmpOpU cop, rFlagsRegU cmp, label labl) %{
12659 match(CountedLoopEnd cop cmp);
12660 effect(USE labl);
12662 ins_cost(300);
12663 format %{ "j$cop,us $labl\t# loop end" %}
12664 size(2);
12665 opcode(0x70);
12666 ins_encode(JccShort(cop, labl));
12667 ins_pipe(pipe_jcc);
12668 ins_pc_relative(1);
12669 ins_short_branch(1);
12670 %}
12672 instruct jmpLoopEndUCF_short(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
12673 match(CountedLoopEnd cop cmp);
12674 effect(USE labl);
12676 ins_cost(300);
12677 format %{ "j$cop,us $labl\t# loop end" %}
12678 size(2);
12679 opcode(0x70);
12680 ins_encode(JccShort(cop, labl));
12681 ins_pipe(pipe_jcc);
12682 ins_pc_relative(1);
12683 ins_short_branch(1);
12684 %}
12686 // Jump Direct Conditional - using unsigned comparison
12687 instruct jmpConU_short(cmpOpU cop, rFlagsRegU cmp, label labl) %{
12688 match(If cop cmp);
12689 effect(USE labl);
12691 ins_cost(300);
12692 format %{ "j$cop,us $labl" %}
12693 size(2);
12694 opcode(0x70);
12695 ins_encode(JccShort(cop, labl));
12696 ins_pipe(pipe_jcc);
12697 ins_pc_relative(1);
12698 ins_short_branch(1);
12699 %}
12701 instruct jmpConUCF_short(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
12702 match(If cop cmp);
12703 effect(USE labl);
12705 ins_cost(300);
12706 format %{ "j$cop,us $labl" %}
12707 size(2);
12708 opcode(0x70);
12709 ins_encode(JccShort(cop, labl));
12710 ins_pipe(pipe_jcc);
12711 ins_pc_relative(1);
12712 ins_short_branch(1);
12713 %}
// Short form of jmpConUCF2: jp + jcc with 8-bit displacements (4 bytes).
// The asserts verify both displacements actually fit in a signed byte.
12715 instruct jmpConUCF2_short(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{
12716 match(If cop cmp);
12717 effect(USE labl);
12719 ins_cost(300);
12720 format %{ $$template
12721 if ($cop$$cmpcode == Assembler::notEqual) {
12722 $$emit$$"jp,u,s $labl\n\t"
12723 $$emit$$"j$cop,u,s $labl"
12724 } else {
12725 $$emit$$"jp,u,s done\n\t"
12726 $$emit$$"j$cop,u,s $labl\n\t"
12727 $$emit$$"done:"
12728 }
12729 %}
12730 size(4);
12731 opcode(0x70);
12732 ins_encode %{
12733 Label* l = $labl$$label;
12734 emit_cc(cbuf, $primary, Assembler::parity);
12735 int parity_disp = -1;
12736 if ($cop$$cmpcode == Assembler::notEqual) {
12737 parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
12738 } else if ($cop$$cmpcode == Assembler::equal) {
12739 parity_disp = 2;
12740 } else {
12741 ShouldNotReachHere();
12742 }
12743 emit_d8(cbuf, parity_disp);
12744 emit_cc(cbuf, $primary, $cop$$cmpcode);
12745 int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
12746 emit_d8(cbuf, disp);
12747 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
12748 assert(-128 <= parity_disp && parity_disp <= 127, "Displacement too large for short jmp");
12749 %}
12750 ins_pipe(pipe_jcc);
12751 ins_pc_relative(1);
12752 ins_short_branch(1);
12753 %}
12755 // ============================================================================
12756 // inlined locking and unlocking
// FastLock: inline monitor-enter; tmp is pinned to rax and scr is a scratch
// register, both declared TEMP so the allocator keeps them free.
12758 instruct cmpFastLock(rFlagsReg cr,
12759 rRegP object, rRegP box, rax_RegI tmp, rRegP scr)
12760 %{
12761 match(Set cr (FastLock object box));
12762 effect(TEMP tmp, TEMP scr);
12764 ins_cost(300);
12765 format %{ "fastlock $object,$box,$tmp,$scr" %}
12766 ins_encode(Fast_Lock(object, box, tmp, scr));
12767 ins_pipe(pipe_slow);
12768 ins_pc_relative(1);
12769 %}
// FastUnlock: inline monitor-exit; box is pinned to rax.
12771 instruct cmpFastUnlock(rFlagsReg cr,
12772 rRegP object, rax_RegP box, rRegP tmp)
12773 %{
12774 match(Set cr (FastUnlock object box));
12775 effect(TEMP tmp);
12777 ins_cost(300);
12778 format %{ "fastunlock $object, $box, $tmp" %}
12779 ins_encode(Fast_Unlock(object, box, tmp));
12780 ins_pipe(pipe_slow);
12781 ins_pc_relative(1);
12782 %}
12785 // ============================================================================
12786 // Safepoint Instructions
// Safepoint poll: a read of the polling page (rip-relative) which faults
// when the VM protects the page, forcing the thread to the safepoint.
12787 instruct safePoint_poll(rFlagsReg cr)
12788 %{
12789 match(SafePoint);
12790 effect(KILL cr);
12792 format %{ "testl rax, [rip + #offset_to_poll_page]\t"
12793 "# Safepoint: poll for GC" %}
12794 size(6); // Opcode + ModRM + Disp32 == 6 bytes
12795 ins_cost(125);
12796 ins_encode(enc_safepoint_poll);
12797 ins_pipe(ialu_reg_mem);
12798 %}
12800 // ============================================================================
12801 // Procedure Call/Return Instructions
12802 // Call Java Static Instruction
12803 // Note: If this code changes, the corresponding ret_addr_offset() and
12804 // compute_padding() functions will have to be adjusted.
// Static Java call; ins_alignment(4) keeps the displacement patchable
// without crossing a cache-line in an unsafe way.
12805 instruct CallStaticJavaDirect(method meth)
12806 %{
12807 match(CallStaticJava);
12808 effect(USE meth);
12810 ins_cost(300);
12811 format %{ "call,static " %}
12812 opcode(0xE8); /* E8 cd */
12813 ins_encode(Java_Static_Call(meth), call_epilog);
12814 ins_pipe(pipe_slow);
12815 ins_pc_relative(1);
12816 ins_alignment(4);
12817 %}
12819 // Call Java Dynamic Instruction
12820 // Note: If this code changes, the corresponding ret_addr_offset() and
12821 // compute_padding() functions will have to be adjusted.
// Dynamic (inline-cache) call: loads the IC sentinel into rax before the
// call, as shown in the format string.
12822 instruct CallDynamicJavaDirect(method meth)
12823 %{
12824 match(CallDynamicJava);
12825 effect(USE meth);
12827 ins_cost(300);
12828 format %{ "movq rax, #Universe::non_oop_word()\n\t"
12829 "call,dynamic " %}
12830 opcode(0xE8); /* E8 cd */
12831 ins_encode(Java_Dynamic_Call(meth), call_epilog);
12832 ins_pipe(pipe_slow);
12833 ins_pc_relative(1);
12834 ins_alignment(4);
12835 %}
12837 // Call Runtime Instruction
12838 instruct CallRuntimeDirect(method meth)
12839 %{
12840 match(CallRuntime);
12841 effect(USE meth);
12843 ins_cost(300);
12844 format %{ "call,runtime " %}
12845 opcode(0xE8); /* E8 cd */
12846 ins_encode(Java_To_Runtime(meth));
12847 ins_pipe(pipe_slow);
12848 ins_pc_relative(1);
12849 %}
12851 // Call runtime without safepoint
12852 instruct CallLeafDirect(method meth)
12853 %{
12854 match(CallLeaf);
12855 effect(USE meth);
12857 ins_cost(300);
12858 format %{ "call_leaf,runtime " %}
12859 opcode(0xE8); /* E8 cd */
12860 ins_encode(Java_To_Runtime(meth));
12861 ins_pipe(pipe_slow);
12862 ins_pc_relative(1);
12863 %}
12865 // Call runtime without safepoint
12866 instruct CallLeafNoFPDirect(method meth)
12867 %{
12868 match(CallLeafNoFP);
12869 effect(USE meth);
12871 ins_cost(300);
12872 format %{ "call_leaf_nofp,runtime " %}
12873 opcode(0xE8); /* E8 cd */
12874 ins_encode(Java_To_Runtime(meth));
12875 ins_pipe(pipe_slow);
12876 ins_pc_relative(1);
12877 %}
12879 // Return Instruction
12880 // Remove the return address & jump to it.
12881 // Notice: We always emit a nop after a ret to make sure there is room
12882 // for safepoint patching
12883 instruct Ret()
12884 %{
12885 match(Return);
12887 format %{ "ret" %}
12888 opcode(0xC3);
12889 ins_encode(OpcP);
12890 ins_pipe(pipe_jmp);
12891 %}
12893 // Tail Call; Jump from runtime stub to Java code.
12894 // Also known as an 'interprocedural jump'.
12895 // Target of jump will eventually return to caller.
12896 // TailJump below removes the return address.
// Indirect jump through jump_target; method oop is passed in rbx by
// convention (see format comment). rbp is excluded as a target register.
12897 instruct TailCalljmpInd(no_rbp_RegP jump_target, rbx_RegP method_oop)
12898 %{
12899 match(TailCall jump_target method_oop);
12901 ins_cost(300);
12902 format %{ "jmp $jump_target\t# rbx holds method oop" %}
12903 opcode(0xFF, 0x4); /* Opcode FF /4 */
12904 ins_encode(REX_reg(jump_target), OpcP, reg_opc(jump_target));
12905 ins_pipe(pipe_jmp);
12906 %}
12908 // Tail Jump; remove the return address; jump to target.
12909 // TailCall above leaves the return address around.
// Pops the return address into rdx (raw opcode 0x5a) before the indirect
// jump; exception oop travels in rax.
12910 instruct tailjmpInd(no_rbp_RegP jump_target, rax_RegP ex_oop)
12911 %{
12912 match(TailJump jump_target ex_oop);
12914 ins_cost(300);
12915 format %{ "popq rdx\t# pop return address\n\t"
12916 "jmp $jump_target" %}
12917 opcode(0xFF, 0x4); /* Opcode FF /4 */
12918 ins_encode(Opcode(0x5a), // popq rdx
12919 REX_reg(jump_target), OpcP, reg_opc(jump_target));
12920 ins_pipe(pipe_jmp);
12921 %}
12923 // Create exception oop: created by stack-crawling runtime code.
12924 // Created exception is now available to this handler, and is setup
12925 // just prior to jumping to this handler. No code emitted.
12926 instruct CreateException(rax_RegP ex_oop)
12927 %{
12928 match(Set ex_oop (CreateEx));
12930 size(0);
12931 // use the following format syntax
12932 format %{ "# exception oop is in rax; no code emitted" %}
12933 ins_encode();
12934 ins_pipe(empty);
12935 %}
12937 // Rethrow exception:
12938 // The exception oop will come in the first argument position.
12939 // Then JUMP (not call) to the rethrow stub code.
12940 instruct RethrowException()
12941 %{
12942 match(Rethrow);
12944 // use the following format syntax
12945 format %{ "jmp rethrow_stub" %}
12946 ins_encode(enc_rethrow);
12947 ins_pipe(pipe_jmp);
12948 %}
12951 //----------PEEPHOLE RULES-----------------------------------------------------
12952 // These must follow all instruction definitions as they use the names
12953 // defined in the instructions definitions.
12954 //
12955 // peepmatch ( root_instr_name [preceding_instruction]* );
12956 //
12957 // peepconstraint %{
12958 // (instruction_number.operand_name relational_op instruction_number.operand_name
12959 // [, ...] );
12960 // // instruction numbers are zero-based using left to right order in peepmatch
12961 //
12962 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
12963 // // provide an instruction_number.operand_name for each operand that appears
12964 // // in the replacement instruction's match rule
12965 //
12966 // ---------VM FLAGS---------------------------------------------------------
12967 //
12968 // All peephole optimizations can be turned off using -XX:-OptoPeephole
12969 //
12970 // Each peephole rule is given an identifying number starting with zero and
12971 // increasing by one in the order seen by the parser. An individual peephole
12972 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
12973 // on the command-line.
12974 //
12975 // ---------CURRENT LIMITATIONS----------------------------------------------
12976 //
12977 // Only match adjacent instructions in same basic block
12978 // Only equality constraints
12979 // Only constraints between operands, not (0.dest_reg == RAX_enc)
12980 // Only one replacement instruction
12981 //
12982 // ---------EXAMPLE----------------------------------------------------------
12983 //
12984 // // pertinent parts of existing instructions in architecture description
12985 // instruct movI(rRegI dst, rRegI src)
12986 // %{
12987 // match(Set dst (CopyI src));
12988 // %}
12989 //
12990 // instruct incI_rReg(rRegI dst, immI1 src, rFlagsReg cr)
12991 // %{
12992 // match(Set dst (AddI dst src));
12993 // effect(KILL cr);
12994 // %}
12995 //
12996 // // Change (inc mov) to lea
12997 // peephole %{
12998 // // increment preceded by register-register move
12999 // peepmatch ( incI_rReg movI );
13000 // // require that the destination register of the increment
13001 // // match the destination register of the move
13002 // peepconstraint ( 0.dst == 1.dst );
13003 // // construct a replacement instruction that sets
13004 // // the destination to ( move's source register + one )
13005 // peepreplace ( leaI_rReg_immI( 0.dst 1.src 0.src ) );
13006 // %}
13007 //
13009 // Implementation no longer uses movX instructions since
13010 // machine-independent system no longer uses CopyX nodes.
13011 //
13012 // peephole
13013 // %{
13014 // peepmatch (incI_rReg movI);
13015 // peepconstraint (0.dst == 1.dst);
13016 // peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
13017 // %}
13019 // peephole
13020 // %{
13021 // peepmatch (decI_rReg movI);
13022 // peepconstraint (0.dst == 1.dst);
13023 // peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
13024 // %}
13026 // peephole
13027 // %{
13028 // peepmatch (addI_rReg_imm movI);
13029 // peepconstraint (0.dst == 1.dst);
13030 // peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
13031 // %}
13033 // peephole
13034 // %{
13035 // peepmatch (incL_rReg movL);
13036 // peepconstraint (0.dst == 1.dst);
13037 // peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
13038 // %}
13040 // peephole
13041 // %{
13042 // peepmatch (decL_rReg movL);
13043 // peepconstraint (0.dst == 1.dst);
13044 // peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
13045 // %}
13047 // peephole
13048 // %{
13049 // peepmatch (addL_rReg_imm movL);
13050 // peepconstraint (0.dst == 1.dst);
13051 // peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
13052 // %}
13054 // peephole
13055 // %{
13056 // peepmatch (addP_rReg_imm movP);
13057 // peepconstraint (0.dst == 1.dst);
13058 // peepreplace (leaP_rReg_imm(0.dst 1.src 0.src));
13059 // %}
13061 // // Change load of spilled value to only a spill
13062 // instruct storeI(memory mem, rRegI src)
13063 // %{
13064 // match(Set mem (StoreI mem src));
13065 // %}
13066 //
13067 // instruct loadI(rRegI dst, memory mem)
13068 // %{
13069 // match(Set dst (LoadI mem));
13070 // %}
13071 //
// Peephole: a load of a value just stored to the same address is redundant;
// replace the (storeI, loadI) pair with the store alone.
13073 peephole
13074 %{
13075 peepmatch (loadI storeI);
13076 peepconstraint (1.src == 0.dst, 1.mem == 0.mem);
13077 peepreplace (storeI(1.mem 1.mem 1.src));
13078 %}
// Peephole: same store-forwarding elimination as above, for longs.
13080 peephole
13081 %{
13082 peepmatch (loadL storeL);
13083 peepconstraint (1.src == 0.dst, 1.mem == 0.mem);
13084 peepreplace (storeL(1.mem 1.mem 1.src));
13085 %}
13087 //----------SMARTSPILL RULES---------------------------------------------------
13088 // These must follow all instruction definitions as they use the names
13089 // defined in the instructions definitions.