Thu, 08 Sep 2011 10:12:25 +0200
7087445: Improve platform independence of JSR292 shared code
Summary: changes necessary for some JSR292 ports
Reviewed-by: jrose, dholmes
1 //
2 // Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 //
5 // This code is free software; you can redistribute it and/or modify it
6 // under the terms of the GNU General Public License version 2 only, as
7 // published by the Free Software Foundation.
8 //
9 // This code is distributed in the hope that it will be useful, but WITHOUT
10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 // version 2 for more details (a copy is included in the LICENSE file that
13 // accompanied this code).
14 //
15 // You should have received a copy of the GNU General Public License version
16 // 2 along with this work; if not, write to the Free Software Foundation,
17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 //
19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 // or visit www.oracle.com if you need additional information or have any
21 // questions.
22 //
23 //
25 // AMD64 Architecture Description File
27 //----------REGISTER DEFINITION BLOCK------------------------------------------
28 // This information is used by the matcher and the register allocator to
29 // describe individual registers and classes of registers within the target
30 // architecture.
32 register %{
33 //----------Architecture Description Register Definitions----------------------
34 // General Registers
35 // "reg_def" name ( register save type, C convention save type,
36 // ideal register type, encoding );
37 // Register Save Types:
38 //
39 // NS = No-Save: The register allocator assumes that these registers
40 // can be used without saving upon entry to the method, &
41 // that they do not need to be saved at call sites.
42 //
43 // SOC = Save-On-Call: The register allocator assumes that these registers
44 // can be used without saving upon entry to the method,
45 // but that they must be saved at call sites.
46 //
47 // SOE = Save-On-Entry: The register allocator assumes that these registers
48 // must be saved before using them upon entry to the
49 // method, but they do not need to be saved at call
50 // sites.
51 //
52 // AS = Always-Save: The register allocator assumes that these registers
53 // must be saved before using them upon entry to the
54 // method, & that they must be saved at call sites.
55 //
56 // Ideal Register Type is used to determine how to save & restore a
57 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
58 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
59 //
60 // The encoding number is the actual bit-pattern placed into the opcodes.
62 // General Registers
63 // R8-R15 must be encoded with REX. (RSP, RBP, RSI, RDI need REX when
64 // used as byte registers)
66 // Previously set RBX, RSI, and RDI as save-on-entry for java code
67 // Turn off SOE in java-code due to frequent use of uncommon-traps.
68 // Now that allocator is better, turn on RSI and RDI as SOE registers.
70 reg_def RAX (SOC, SOC, Op_RegI, 0, rax->as_VMReg());
71 reg_def RAX_H(SOC, SOC, Op_RegI, 0, rax->as_VMReg()->next());
73 reg_def RCX (SOC, SOC, Op_RegI, 1, rcx->as_VMReg());
74 reg_def RCX_H(SOC, SOC, Op_RegI, 1, rcx->as_VMReg()->next());
76 reg_def RDX (SOC, SOC, Op_RegI, 2, rdx->as_VMReg());
77 reg_def RDX_H(SOC, SOC, Op_RegI, 2, rdx->as_VMReg()->next());
79 reg_def RBX (SOC, SOE, Op_RegI, 3, rbx->as_VMReg());
80 reg_def RBX_H(SOC, SOE, Op_RegI, 3, rbx->as_VMReg()->next());
82 reg_def RSP (NS, NS, Op_RegI, 4, rsp->as_VMReg());
83 reg_def RSP_H(NS, NS, Op_RegI, 4, rsp->as_VMReg()->next());
85 // now that adapter frames are gone RBP is always saved and restored by the prolog/epilog code
86 reg_def RBP (NS, SOE, Op_RegI, 5, rbp->as_VMReg());
87 reg_def RBP_H(NS, SOE, Op_RegI, 5, rbp->as_VMReg()->next());
89 #ifdef _WIN64
91 reg_def RSI (SOC, SOE, Op_RegI, 6, rsi->as_VMReg());
92 reg_def RSI_H(SOC, SOE, Op_RegI, 6, rsi->as_VMReg()->next());
94 reg_def RDI (SOC, SOE, Op_RegI, 7, rdi->as_VMReg());
95 reg_def RDI_H(SOC, SOE, Op_RegI, 7, rdi->as_VMReg()->next());
97 #else
99 reg_def RSI (SOC, SOC, Op_RegI, 6, rsi->as_VMReg());
100 reg_def RSI_H(SOC, SOC, Op_RegI, 6, rsi->as_VMReg()->next());
102 reg_def RDI (SOC, SOC, Op_RegI, 7, rdi->as_VMReg());
103 reg_def RDI_H(SOC, SOC, Op_RegI, 7, rdi->as_VMReg()->next());
105 #endif
107 reg_def R8 (SOC, SOC, Op_RegI, 8, r8->as_VMReg());
108 reg_def R8_H (SOC, SOC, Op_RegI, 8, r8->as_VMReg()->next());
110 reg_def R9 (SOC, SOC, Op_RegI, 9, r9->as_VMReg());
111 reg_def R9_H (SOC, SOC, Op_RegI, 9, r9->as_VMReg()->next());
113 reg_def R10 (SOC, SOC, Op_RegI, 10, r10->as_VMReg());
114 reg_def R10_H(SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
116 reg_def R11 (SOC, SOC, Op_RegI, 11, r11->as_VMReg());
117 reg_def R11_H(SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
119 reg_def R12 (SOC, SOE, Op_RegI, 12, r12->as_VMReg());
120 reg_def R12_H(SOC, SOE, Op_RegI, 12, r12->as_VMReg()->next());
122 reg_def R13 (SOC, SOE, Op_RegI, 13, r13->as_VMReg());
123 reg_def R13_H(SOC, SOE, Op_RegI, 13, r13->as_VMReg()->next());
125 reg_def R14 (SOC, SOE, Op_RegI, 14, r14->as_VMReg());
126 reg_def R14_H(SOC, SOE, Op_RegI, 14, r14->as_VMReg()->next());
128 reg_def R15 (SOC, SOE, Op_RegI, 15, r15->as_VMReg());
129 reg_def R15_H(SOC, SOE, Op_RegI, 15, r15->as_VMReg()->next());
132 // Floating Point Registers
134 // XMM registers. 128-bit registers or 4 words each, labeled (a)-d.
135 // Word a in each register holds a Float, words ab hold a Double. We
136 // currently do not use the SIMD capabilities, so registers cd are
137 // unused at the moment.
138 // XMM8-XMM15 must be encoded with REX.
139 // Linux ABI: No register preserved across function calls
140 // XMM0-XMM7 might hold parameters
141 // Windows ABI: XMM6-XMM15 preserved across function calls
142 // XMM0-XMM3 might hold parameters
144 reg_def XMM0 (SOC, SOC, Op_RegF, 0, xmm0->as_VMReg());
145 reg_def XMM0_H (SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next());
147 reg_def XMM1 (SOC, SOC, Op_RegF, 1, xmm1->as_VMReg());
148 reg_def XMM1_H (SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next());
150 reg_def XMM2 (SOC, SOC, Op_RegF, 2, xmm2->as_VMReg());
151 reg_def XMM2_H (SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next());
153 reg_def XMM3 (SOC, SOC, Op_RegF, 3, xmm3->as_VMReg());
154 reg_def XMM3_H (SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next());
156 reg_def XMM4 (SOC, SOC, Op_RegF, 4, xmm4->as_VMReg());
157 reg_def XMM4_H (SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next());
159 reg_def XMM5 (SOC, SOC, Op_RegF, 5, xmm5->as_VMReg());
160 reg_def XMM5_H (SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next());
162 #ifdef _WIN64
164 reg_def XMM6 (SOC, SOE, Op_RegF, 6, xmm6->as_VMReg());
165 reg_def XMM6_H (SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next());
167 reg_def XMM7 (SOC, SOE, Op_RegF, 7, xmm7->as_VMReg());
168 reg_def XMM7_H (SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next());
170 reg_def XMM8 (SOC, SOE, Op_RegF, 8, xmm8->as_VMReg());
171 reg_def XMM8_H (SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next());
173 reg_def XMM9 (SOC, SOE, Op_RegF, 9, xmm9->as_VMReg());
174 reg_def XMM9_H (SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next());
176 reg_def XMM10 (SOC, SOE, Op_RegF, 10, xmm10->as_VMReg());
177 reg_def XMM10_H(SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next());
179 reg_def XMM11 (SOC, SOE, Op_RegF, 11, xmm11->as_VMReg());
180 reg_def XMM11_H(SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next());
182 reg_def XMM12 (SOC, SOE, Op_RegF, 12, xmm12->as_VMReg());
183 reg_def XMM12_H(SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next());
185 reg_def XMM13 (SOC, SOE, Op_RegF, 13, xmm13->as_VMReg());
186 reg_def XMM13_H(SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next());
188 reg_def XMM14 (SOC, SOE, Op_RegF, 14, xmm14->as_VMReg());
189 reg_def XMM14_H(SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next());
191 reg_def XMM15 (SOC, SOE, Op_RegF, 15, xmm15->as_VMReg());
192 reg_def XMM15_H(SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next());
194 #else
196 reg_def XMM6 (SOC, SOC, Op_RegF, 6, xmm6->as_VMReg());
197 reg_def XMM6_H (SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next());
199 reg_def XMM7 (SOC, SOC, Op_RegF, 7, xmm7->as_VMReg());
200 reg_def XMM7_H (SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next());
202 reg_def XMM8 (SOC, SOC, Op_RegF, 8, xmm8->as_VMReg());
203 reg_def XMM8_H (SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next());
205 reg_def XMM9 (SOC, SOC, Op_RegF, 9, xmm9->as_VMReg());
206 reg_def XMM9_H (SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next());
208 reg_def XMM10 (SOC, SOC, Op_RegF, 10, xmm10->as_VMReg());
209 reg_def XMM10_H(SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next());
211 reg_def XMM11 (SOC, SOC, Op_RegF, 11, xmm11->as_VMReg());
212 reg_def XMM11_H(SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next());
214 reg_def XMM12 (SOC, SOC, Op_RegF, 12, xmm12->as_VMReg());
215 reg_def XMM12_H(SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next());
217 reg_def XMM13 (SOC, SOC, Op_RegF, 13, xmm13->as_VMReg());
218 reg_def XMM13_H(SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next());
220 reg_def XMM14 (SOC, SOC, Op_RegF, 14, xmm14->as_VMReg());
221 reg_def XMM14_H(SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next());
223 reg_def XMM15 (SOC, SOC, Op_RegF, 15, xmm15->as_VMReg());
224 reg_def XMM15_H(SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next());
226 #endif // _WIN64
228 reg_def RFLAGS(SOC, SOC, 0, 16, VMRegImpl::Bad());
230 // Specify priority of register selection within phases of register
231 // allocation. Highest priority is first. A useful heuristic is to
232 // give registers a low priority when they are required by machine
233 // instructions, like EAX and EDX on I486, and choose no-save registers
234 // before save-on-call, & save-on-call before save-on-entry. Registers
235 // which participate in fixed calling sequences should come last.
236 // Registers which are used as pairs must fall on an even boundary.
238 alloc_class chunk0(R10, R10_H,
239 R11, R11_H,
240 R8, R8_H,
241 R9, R9_H,
242 R12, R12_H,
243 RCX, RCX_H,
244 RBX, RBX_H,
245 RDI, RDI_H,
246 RDX, RDX_H,
247 RSI, RSI_H,
248 RAX, RAX_H,
249 RBP, RBP_H,
250 R13, R13_H,
251 R14, R14_H,
252 R15, R15_H,
253 RSP, RSP_H);
255 // XXX probably use 8-15 first on Linux
256 alloc_class chunk1(XMM0, XMM0_H,
257 XMM1, XMM1_H,
258 XMM2, XMM2_H,
259 XMM3, XMM3_H,
260 XMM4, XMM4_H,
261 XMM5, XMM5_H,
262 XMM6, XMM6_H,
263 XMM7, XMM7_H,
264 XMM8, XMM8_H,
265 XMM9, XMM9_H,
266 XMM10, XMM10_H,
267 XMM11, XMM11_H,
268 XMM12, XMM12_H,
269 XMM13, XMM13_H,
270 XMM14, XMM14_H,
271 XMM15, XMM15_H);
273 alloc_class chunk2(RFLAGS);
276 //----------Architecture Description Register Classes--------------------------
277 // Several register classes are automatically defined based upon information in
278 // this architecture description.
279 // 1) reg_class inline_cache_reg ( /* as def'd in frame section */ )
280 // 2) reg_class compiler_method_oop_reg ( /* as def'd in frame section */ )
281 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
282 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
283 //
285 // Class for all pointer registers (including RSP)
286 reg_class any_reg(RAX, RAX_H,
287 RDX, RDX_H,
288 RBP, RBP_H,
289 RDI, RDI_H,
290 RSI, RSI_H,
291 RCX, RCX_H,
292 RBX, RBX_H,
293 RSP, RSP_H,
294 R8, R8_H,
295 R9, R9_H,
296 R10, R10_H,
297 R11, R11_H,
298 R12, R12_H,
299 R13, R13_H,
300 R14, R14_H,
301 R15, R15_H);
303 // Class for all pointer registers except RSP
304 reg_class ptr_reg(RAX, RAX_H,
305 RDX, RDX_H,
306 RBP, RBP_H,
307 RDI, RDI_H,
308 RSI, RSI_H,
309 RCX, RCX_H,
310 RBX, RBX_H,
311 R8, R8_H,
312 R9, R9_H,
313 R10, R10_H,
314 R11, R11_H,
315 R13, R13_H,
316 R14, R14_H);
318 // Class for all pointer registers except RAX and RSP
319 reg_class ptr_no_rax_reg(RDX, RDX_H,
320 RBP, RBP_H,
321 RDI, RDI_H,
322 RSI, RSI_H,
323 RCX, RCX_H,
324 RBX, RBX_H,
325 R8, R8_H,
326 R9, R9_H,
327 R10, R10_H,
328 R11, R11_H,
329 R13, R13_H,
330 R14, R14_H);
332 reg_class ptr_no_rbp_reg(RDX, RDX_H,
333 RAX, RAX_H,
334 RDI, RDI_H,
335 RSI, RSI_H,
336 RCX, RCX_H,
337 RBX, RBX_H,
338 R8, R8_H,
339 R9, R9_H,
340 R10, R10_H,
341 R11, R11_H,
342 R13, R13_H,
343 R14, R14_H);
345 // Class for all pointer registers except RAX, RBX and RSP
346 reg_class ptr_no_rax_rbx_reg(RDX, RDX_H,
347 RBP, RBP_H,
348 RDI, RDI_H,
349 RSI, RSI_H,
350 RCX, RCX_H,
351 R8, R8_H,
352 R9, R9_H,
353 R10, R10_H,
354 R11, R11_H,
355 R13, R13_H,
356 R14, R14_H);
358 // Singleton class for RAX pointer register
359 reg_class ptr_rax_reg(RAX, RAX_H);
361 // Singleton class for RBX pointer register
362 reg_class ptr_rbx_reg(RBX, RBX_H);
364 // Singleton class for RSI pointer register
365 reg_class ptr_rsi_reg(RSI, RSI_H);
367 // Singleton class for RDI pointer register
368 reg_class ptr_rdi_reg(RDI, RDI_H);
370 // Singleton class for RBP pointer register
371 reg_class ptr_rbp_reg(RBP, RBP_H);
373 // Singleton class for stack pointer
374 reg_class ptr_rsp_reg(RSP, RSP_H);
376 // Singleton class for TLS pointer
377 reg_class ptr_r15_reg(R15, R15_H);
379 // Class for all long registers (except RSP)
380 reg_class long_reg(RAX, RAX_H,
381 RDX, RDX_H,
382 RBP, RBP_H,
383 RDI, RDI_H,
384 RSI, RSI_H,
385 RCX, RCX_H,
386 RBX, RBX_H,
387 R8, R8_H,
388 R9, R9_H,
389 R10, R10_H,
390 R11, R11_H,
391 R13, R13_H,
392 R14, R14_H);
394 // Class for all long registers except RAX, RDX (and RSP)
395 reg_class long_no_rax_rdx_reg(RBP, RBP_H,
396 RDI, RDI_H,
397 RSI, RSI_H,
398 RCX, RCX_H,
399 RBX, RBX_H,
400 R8, R8_H,
401 R9, R9_H,
402 R10, R10_H,
403 R11, R11_H,
404 R13, R13_H,
405 R14, R14_H);
407 // Class for all long registers except RCX (and RSP)
408 reg_class long_no_rcx_reg(RBP, RBP_H,
409 RDI, RDI_H,
410 RSI, RSI_H,
411 RAX, RAX_H,
412 RDX, RDX_H,
413 RBX, RBX_H,
414 R8, R8_H,
415 R9, R9_H,
416 R10, R10_H,
417 R11, R11_H,
418 R13, R13_H,
419 R14, R14_H);
421 // Class for all long registers except RAX (and RSP)
422 reg_class long_no_rax_reg(RBP, RBP_H,
423 RDX, RDX_H,
424 RDI, RDI_H,
425 RSI, RSI_H,
426 RCX, RCX_H,
427 RBX, RBX_H,
428 R8, R8_H,
429 R9, R9_H,
430 R10, R10_H,
431 R11, R11_H,
432 R13, R13_H,
433 R14, R14_H);
435 // Singleton class for RAX long register
436 reg_class long_rax_reg(RAX, RAX_H);
438 // Singleton class for RCX long register
439 reg_class long_rcx_reg(RCX, RCX_H);
441 // Singleton class for RDX long register
442 reg_class long_rdx_reg(RDX, RDX_H);
444 // Class for all int registers (except RSP)
445 reg_class int_reg(RAX,
446 RDX,
447 RBP,
448 RDI,
449 RSI,
450 RCX,
451 RBX,
452 R8,
453 R9,
454 R10,
455 R11,
456 R13,
457 R14);
459 // Class for all int registers except RCX (and RSP)
460 reg_class int_no_rcx_reg(RAX,
461 RDX,
462 RBP,
463 RDI,
464 RSI,
465 RBX,
466 R8,
467 R9,
468 R10,
469 R11,
470 R13,
471 R14);
473 // Class for all int registers except RAX, RDX (and RSP)
474 reg_class int_no_rax_rdx_reg(RBP,
475 RDI,
476 RSI,
477 RCX,
478 RBX,
479 R8,
480 R9,
481 R10,
482 R11,
483 R13,
484 R14);
486 // Singleton class for RAX int register
487 reg_class int_rax_reg(RAX);
489 // Singleton class for RBX int register
490 reg_class int_rbx_reg(RBX);
492 // Singleton class for RCX int register
493 reg_class int_rcx_reg(RCX);
495 // Singleton class for RDX int register
496 reg_class int_rdx_reg(RDX);
498 // Singleton class for RDI int register
499 reg_class int_rdi_reg(RDI);
501 // Singleton class for instruction pointer
502 // reg_class ip_reg(RIP);
504 // Singleton class for condition codes
505 reg_class int_flags(RFLAGS);
507 // Class for all float registers
508 reg_class float_reg(XMM0,
509 XMM1,
510 XMM2,
511 XMM3,
512 XMM4,
513 XMM5,
514 XMM6,
515 XMM7,
516 XMM8,
517 XMM9,
518 XMM10,
519 XMM11,
520 XMM12,
521 XMM13,
522 XMM14,
523 XMM15);
525 // Class for all double registers
526 reg_class double_reg(XMM0, XMM0_H,
527 XMM1, XMM1_H,
528 XMM2, XMM2_H,
529 XMM3, XMM3_H,
530 XMM4, XMM4_H,
531 XMM5, XMM5_H,
532 XMM6, XMM6_H,
533 XMM7, XMM7_H,
534 XMM8, XMM8_H,
535 XMM9, XMM9_H,
536 XMM10, XMM10_H,
537 XMM11, XMM11_H,
538 XMM12, XMM12_H,
539 XMM13, XMM13_H,
540 XMM14, XMM14_H,
541 XMM15, XMM15_H);
542 %}
545 //----------SOURCE BLOCK-------------------------------------------------------
546 // This is a block of C++ code which provides values, functions, and
547 // definitions necessary in the rest of the architecture description
548 source %{
549 #define RELOC_IMM64 Assembler::imm_operand
550 #define RELOC_DISP32 Assembler::disp32_operand
552 #define __ _masm.
// Size in bytes of the "movq rbp, rsp" emitted before a method handle
// invoke to preserve the caller SP: optional REX.W prefix (LP64 only)
// plus opcode and ModRM byte.
554 static int preserve_SP_size() {
555 return LP64_ONLY(1 +) 2; // [rex,] op, rm(reg/reg)
556 }
558 // !!!!! Special hack to get all types of calls to specify the byte offset
559 // from the start of the call to the point where the return address
560 // will point.
561 int MachCallStaticJavaNode::ret_addr_offset()
562 {
563 int offset = 5; // 5 bytes from start of call to where return address points
564 if (_method_handle_invoke)
565 offset += preserve_SP_size();
566 return offset;
567 }
569 int MachCallDynamicJavaNode::ret_addr_offset()
570 {
571 return 15; // 15 bytes from start of call to where return address points
572 }
574 // In os_cpu .ad file
575 // int MachCallRuntimeNode::ret_addr_offset()
577 // Indicate if the safepoint node needs the polling page as an input,
578 // it does if the polling page is more than disp32 away.
579 bool SafePointNode::needs_polling_address_input()
580 {
// A far polling page cannot be reached with a rip-relative disp32
// test, so its address must be materialized into a register input.
581 return Assembler::is_polling_page_far();
582 }
584 //
585 // Compute padding required for nodes which need alignment
586 //
588 // The address of the call instruction needs to be 4-byte aligned to
589 // ensure that it does not span a cache line so that it can be patched.
590 int CallStaticJavaDirectNode::compute_padding(int current_offset) const
591 {
592 current_offset += 1; // skip call opcode byte
593 return round_to(current_offset, alignment_required()) - current_offset;
594 }
596 // The address of the call instruction needs to be 4-byte aligned to
597 // ensure that it does not span a cache line so that it can be patched.
598 int CallStaticJavaHandleNode::compute_padding(int current_offset) const
599 {
600 current_offset += preserve_SP_size(); // skip mov rbp, rsp
601 current_offset += 1; // skip call opcode byte
602 return round_to(current_offset, alignment_required()) - current_offset;
603 }
605 // The address of the call instruction needs to be 4-byte aligned to
606 // ensure that it does not span a cache line so that it can be patched.
607 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
608 {
609 current_offset += 11; // skip movq instruction + call opcode byte
610 return round_to(current_offset, alignment_required()) - current_offset;
611 }
613 #ifndef PRODUCT
// Debug-only pretty printer: a MachBreakpointNode assembles to the
// one-byte INT3 software-breakpoint instruction.
614 void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const
615 {
616 st->print("INT3");
617 }
618 #endif
620 // EMIT_RM()
// Emit a ModRM (or SIB) byte: mod=f1 (2 bits), reg/opcode=f2 (3 bits),
// r/m=f3 (3 bits).  Callers pass already-truncated 3-bit encodings.
621 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) {
622 unsigned char c = (unsigned char) ((f1 << 6) | (f2 << 3) | f3);
623 cbuf.insts()->emit_int8(c);
624 }
626 // EMIT_CC()
// Emit a conditional opcode: base opcode f1 OR'ed with condition code f2.
627 void emit_cc(CodeBuffer &cbuf, int f1, int f2) {
628 unsigned char c = (unsigned char) (f1 | f2);
629 cbuf.insts()->emit_int8(c);
630 }
632 // EMIT_OPCODE()
// Emit a single opcode byte into the instruction stream.
633 void emit_opcode(CodeBuffer &cbuf, int code) {
634 cbuf.insts()->emit_int8((unsigned char) code);
635 }
637 // EMIT_OPCODE() w/ relocation information
// Records a relocation at (current mark + offset) before emitting the byte.
638 void emit_opcode(CodeBuffer &cbuf,
639 int code, relocInfo::relocType reloc, int offset, int format)
640 {
641 cbuf.relocate(cbuf.insts_mark() + offset, reloc, format);
642 emit_opcode(cbuf, code);
643 }
645 // EMIT_D8()
// Emit an 8-bit immediate/displacement.
646 void emit_d8(CodeBuffer &cbuf, int d8) {
647 cbuf.insts()->emit_int8((unsigned char) d8);
648 }
650 // EMIT_D16()
// Emit a 16-bit immediate/displacement.
651 void emit_d16(CodeBuffer &cbuf, int d16) {
652 cbuf.insts()->emit_int16(d16);
653 }
655 // EMIT_D32()
// Emit a 32-bit immediate/displacement.
656 void emit_d32(CodeBuffer &cbuf, int d32) {
657 cbuf.insts()->emit_int32(d32);
658 }
660 // EMIT_D64()
// Emit a 64-bit immediate.
661 void emit_d64(CodeBuffer &cbuf, int64_t d64) {
662 cbuf.insts()->emit_int64(d64);
663 }
665 // emit 32 bit value and construct relocation entry from relocInfo::relocType
// The relocation is recorded at the current instruction mark; the caller
// must have set the mark at the start of the instruction being emitted.
666 void emit_d32_reloc(CodeBuffer& cbuf,
667 int d32,
668 relocInfo::relocType reloc,
669 int format)
670 {
671 assert(reloc != relocInfo::external_word_type, "use 2-arg emit_d32_reloc");
672 cbuf.relocate(cbuf.insts_mark(), reloc, format);
673 cbuf.insts()->emit_int32(d32);
674 }
676 // emit 32 bit value and construct relocation entry from RelocationHolder
// Debug builds verify that an embedded oop value is a real, non-scavengable
// oop (scavengable oops may move and cannot be baked into code).
677 void emit_d32_reloc(CodeBuffer& cbuf, int d32, RelocationHolder const& rspec, int format) {
678 #ifdef ASSERT
679 if (rspec.reloc()->type() == relocInfo::oop_type &&
680 d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
681 assert(oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
682 }
683 #endif
684 cbuf.relocate(cbuf.insts_mark(), rspec, format);
685 cbuf.insts()->emit_int32(d32);
686 }
// Emit a rip-relative disp32 to an external word: the displacement is
// computed from the next instruction pointer (current end + 4 bytes).
688 void emit_d32_reloc(CodeBuffer& cbuf, address addr) {
689 address next_ip = cbuf.insts_end() + 4;
690 emit_d32_reloc(cbuf, (int) (addr - next_ip),
691 external_word_Relocation::spec(addr),
692 RELOC_DISP32);
693 }
696 // emit 64 bit value and construct relocation entry from relocInfo::relocType
697 void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, relocInfo::relocType reloc, int format) {
698 cbuf.relocate(cbuf.insts_mark(), reloc, format);
699 cbuf.insts()->emit_int64(d64);
700 }
702 // emit 64 bit value and construct relocation entry from RelocationHolder
// Same scavengable-oop check as the 32-bit variant above.
703 void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, RelocationHolder const& rspec, int format) {
704 #ifdef ASSERT
705 if (rspec.reloc()->type() == relocInfo::oop_type &&
706 d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
707 assert(oop(d64)->is_oop() && (ScavengeRootsInCode || !oop(d64)->is_scavengable()),
708 "cannot embed scavengable oops in code");
709 }
710 #endif
711 cbuf.relocate(cbuf.insts_mark(), rspec, format);
712 cbuf.insts()->emit_int64(d64);
713 }
715 // Access stack slot for load or store
// Emits opcode + [RSP+disp] addressing: RSP as base always requires a
// SIB byte; an 8-bit displacement form (mod=01) is used when disp fits
// in a signed byte, otherwise the 32-bit form (mod=10).
716 void store_to_stackslot(CodeBuffer &cbuf, int opcode, int rm_field, int disp)
717 {
718 emit_opcode(cbuf, opcode); // (e.g., FILD [RSP+src])
719 if (-0x80 <= disp && disp < 0x80) {
720 emit_rm(cbuf, 0x01, rm_field, RSP_enc); // R/M byte
721 emit_rm(cbuf, 0x00, RSP_enc, RSP_enc); // SIB byte
722 emit_d8(cbuf, disp); // Displacement
723 } else {
724 emit_rm(cbuf, 0x02, rm_field, RSP_enc); // R/M byte
725 emit_rm(cbuf, 0x00, RSP_enc, RSP_enc); // SIB byte
726 emit_d32(cbuf, disp); // Displacement
727 }
728 }
730 // rRegI ereg, memory mem) %{ // emit_reg_mem
// Emit the ModRM (and SIB, if needed) bytes plus displacement for a
// reg,[base+index*scale+disp] operand.  Only the low 3 bits of each
// register encoding go into ModRM/SIB; the REX prefix carrying bit 3
// must already have been emitted by the caller.
// NOTE(review): the entry assert forces disp_is_oop == false, so every
// "if (disp_is_oop)" branch below is currently dead code — kept to
// mirror the 32-bit encoder.
731 void encode_RegMem(CodeBuffer &cbuf,
732 int reg,
733 int base, int index, int scale, int disp, bool disp_is_oop)
734 {
735 assert(!disp_is_oop, "cannot have disp");
736 int regenc = reg & 7;
737 int baseenc = base & 7;
738 int indexenc = index & 7;
740 // There is no index & no scale, use form without SIB byte
// (index encoding 0x4 means "no index"; RSP/R12 as base always need SIB)
741 if (index == 0x4 && scale == 0 && base != RSP_enc && base != R12_enc) {
742 // If no displacement, mode is 0x0; unless base is [RBP] or [R13]
743 if (disp == 0 && base != RBP_enc && base != R13_enc) {
744 emit_rm(cbuf, 0x0, regenc, baseenc); // *
745 } else if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
746 // If 8-bit displacement, mode 0x1
747 emit_rm(cbuf, 0x1, regenc, baseenc); // *
748 emit_d8(cbuf, disp);
749 } else {
750 // If 32-bit displacement
751 if (base == -1) { // Special flag for absolute address
752 emit_rm(cbuf, 0x0, regenc, 0x5); // *
753 if (disp_is_oop) {
754 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
755 } else {
756 emit_d32(cbuf, disp);
757 }
758 } else {
759 // Normal base + offset
760 emit_rm(cbuf, 0x2, regenc, baseenc); // *
761 if (disp_is_oop) {
762 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
763 } else {
764 emit_d32(cbuf, disp);
765 }
766 }
767 }
768 } else {
769 // Else, encode with the SIB byte
770 // If no displacement, mode is 0x0; unless base is [RBP] or [R13]
771 if (disp == 0 && base != RBP_enc && base != R13_enc) {
772 // If no displacement
773 emit_rm(cbuf, 0x0, regenc, 0x4); // *
774 emit_rm(cbuf, scale, indexenc, baseenc);
775 } else {
776 if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
777 // If 8-bit displacement, mode 0x1
778 emit_rm(cbuf, 0x1, regenc, 0x4); // *
779 emit_rm(cbuf, scale, indexenc, baseenc);
780 emit_d8(cbuf, disp);
781 } else {
782 // If 32-bit displacement
783 if (base == 0x04 ) {
784 emit_rm(cbuf, 0x2, regenc, 0x4);
785 emit_rm(cbuf, scale, indexenc, 0x04); // XXX is this valid???
786 } else {
787 emit_rm(cbuf, 0x2, regenc, 0x4);
788 emit_rm(cbuf, scale, indexenc, baseenc); // *
789 }
790 if (disp_is_oop) {
791 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
792 } else {
793 emit_d32(cbuf, disp);
794 }
795 }
796 }
797 }
798 }
// Emit an integer register-to-register move (movl dst, src; opcode 0x8B).
// A REX prefix is emitted first when either encoding is >= 8 (R8..R15):
// REX.R extends the destination (ModRM.reg), REX.B the source (ModRM.rm).
// Equal encodings produce no bytes at all (the copy is a no-op).
800 void encode_copy(CodeBuffer &cbuf, int dstenc, int srcenc)
801 {
802 if (dstenc != srcenc) {
803 if (dstenc < 8) {
804 if (srcenc >= 8) {
805 emit_opcode(cbuf, Assembler::REX_B);
806 srcenc -= 8;
807 }
808 } else {
809 if (srcenc < 8) {
810 emit_opcode(cbuf, Assembler::REX_R);
811 } else {
812 emit_opcode(cbuf, Assembler::REX_RB);
813 srcenc -= 8;
814 }
// Destination encoding reduced to its low 3 bits for the ModRM byte.
815 dstenc -= 8;
816 }
818 emit_opcode(cbuf, 0x8B);
819 emit_rm(cbuf, 0x3, dstenc, srcenc);
820 }
821 }
// Copy one XMM register to another via movdqa; identical encodings
// emit nothing (empty encoding for a no-op copy).
823 void encode_CopyXD( CodeBuffer &cbuf, int dst_encoding, int src_encoding ) {
824 if( dst_encoding == src_encoding ) {
825 // reg-reg copy, use an empty encoding
826 } else {
827 MacroAssembler _masm(&cbuf);
829 __ movdqa(as_XMMRegister(dst_encoding), as_XMMRegister(src_encoding));
830 }
831 }
833 // This could be in MacroAssembler but it's fairly C2 specific
// Fix up EFLAGS after an unordered FP compare (PF set): the mask
// 0xffffff2b clears PF (bit 2), AF (bit 4), ZF (bit 6) and SF (bit 7)
// while preserving CF (bit 0), so the unordered result reads as a
// plain "below" outcome.  Ordered results (noParity) skip the fixup.
834 void emit_cmpfp_fixup(MacroAssembler& _masm) {
835 Label exit;
836 __ jccb(Assembler::noParity, exit);
837 __ pushf();
838 __ andq(Address(rsp, 0), 0xffffff2b);
839 __ popf();
840 __ bind(exit);
841 __ nop(); // (target for branch to avoid branch to branch)
842 }
845 //=============================================================================
// On x86_64 the constant table is addressed absolutely, so the constant
// base node needs no register and emits no code.
846 const bool Matcher::constant_table_absolute_addressing = true;
847 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
849 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
850 // Empty encoding
851 }
// Zero bytes: must match the empty encoding above.
853 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
854 return 0;
855 }
857 #ifndef PRODUCT
858 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
859 st->print("# MachConstantBaseNode (empty encoding)");
860 }
861 #endif
864 //=============================================================================
865 #ifndef PRODUCT
// Debug-only pretty printer for the prolog; must mirror the byte
// sequence produced by MachPrologNode::emit below.
866 void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const
867 {
868 Compile* C = ra_->C;
870 int framesize = C->frame_slots() << LogBytesPerInt;
871 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
872 // Remove wordSize for return adr already pushed
873 // and another for the RBP we are going to save
874 framesize -= 2*wordSize;
875 bool need_nop = true;
877 // Calls to C2R adapters often do not accept exceptional returns.
878 // We require that their callers must bang for them. But be
879 // careful, because some VM calls (such as call site linkage) can
880 // use several kilobytes of stack. But the stack safety zone should
881 // account for that. See bugs 4446381, 4468289, 4497237.
882 if (C->need_stack_bang(framesize)) {
883 st->print_cr("# stack bang"); st->print("\t");
884 need_nop = false;
885 }
886 st->print_cr("pushq rbp"); st->print("\t");
888 if (VerifyStackAtCalls) {
889 // Majik cookie to verify stack depth
890 st->print_cr("pushq 0xffffffffbadb100d"
891 "\t# Majik cookie for stack depth check");
892 st->print("\t");
893 framesize -= wordSize; // Remove 2 for cookie
894 need_nop = false;
895 }
897 if (framesize) {
898 st->print("subq rsp, #%d\t# Create frame", framesize);
// A short (8-bit immediate) frame allocation is only 4 bytes, so a nop
// pads the entry to the 5 bytes patch_verified_entry requires.
899 if (framesize < 0x80 && need_nop) {
900 st->print("\n\tnop\t# nop for patch_verified_entry");
901 }
902 }
903 }
904 #endif
// Emit the method prolog: optional stack-bang, push rbp, optional
// stack-depth cookie, then frame allocation (subq rsp, #framesize).
906 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
907 {
908 Compile* C = ra_->C;
910 // WARNING: Initial instruction MUST be 5 bytes or longer so that
911 // NativeJump::patch_verified_entry will be able to patch out the entry
912 // code safely. The fldcw is ok at 6 bytes, the push to verify stack
913 // depth is ok at 5 bytes, the frame allocation can be either 3 or
914 // 6 bytes. So if we don't do the fldcw or the push then we must
915 // use the 6 byte frame allocation even if we have no frame. :-(
916 // If method sets FPU control word do it now
918 int framesize = C->frame_slots() << LogBytesPerInt;
919 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
920 // Remove wordSize for return adr already pushed
921 // and another for the RBP we are going to save
922 framesize -= 2*wordSize;
923 bool need_nop = true;
925 // Calls to C2R adapters often do not accept exceptional returns.
926 // We require that their callers must bang for them. But be
927 // careful, because some VM calls (such as call site linkage) can
928 // use several kilobytes of stack. But the stack safety zone should
929 // account for that. See bugs 4446381, 4468289, 4497237.
930 if (C->need_stack_bang(framesize)) {
931 MacroAssembler masm(&cbuf);
932 masm.generate_stack_overflow_check(framesize);
933 need_nop = false;
934 }
936 // We always push rbp so that on return to interpreter rbp will be
937 // restored correctly and we can correct the stack.
938 emit_opcode(cbuf, 0x50 | RBP_enc);
940 if (VerifyStackAtCalls) {
941 // Majik cookie to verify stack depth
942 emit_opcode(cbuf, 0x68); // pushq (sign-extended) 0xbadb100d
943 emit_d32(cbuf, 0xbadb100d);
944 framesize -= wordSize; // Remove 2 for cookie
945 need_nop = false;
946 }
948 if (framesize) {
// REX.W + 0x83 /5 ib (4 bytes) for small frames, REX.W + 0x81 /5 id
// (7 bytes) for large ones; /5 = sub, ModRM targets RSP.
949 emit_opcode(cbuf, Assembler::REX_W);
950 if (framesize < 0x80) {
951 emit_opcode(cbuf, 0x83); // sub SP,#framesize
952 emit_rm(cbuf, 0x3, 0x05, RSP_enc);
953 emit_d8(cbuf, framesize);
954 if (need_nop) {
955 emit_opcode(cbuf, 0x90); // nop
956 }
957 } else {
958 emit_opcode(cbuf, 0x81); // sub SP,#framesize
959 emit_rm(cbuf, 0x3, 0x05, RSP_enc);
960 emit_d32(cbuf, framesize);
961 }
962 }
964 C->set_frame_complete(cbuf.insts_size());
966 #ifdef ASSERT
// Debug-only runtime check that rsp is properly aligned after the
// prolog (rax is saved/restored around the check).
967 if (VerifyStackAtCalls) {
968 Label L;
969 MacroAssembler masm(&cbuf);
970 masm.push(rax);
971 masm.mov(rax, rsp);
972 masm.andptr(rax, StackAlignmentInBytes-1);
973 masm.cmpptr(rax, StackAlignmentInBytes-wordSize);
974 masm.pop(rax);
975 masm.jcc(Assembler::equal, L);
976 masm.stop("Stack is not properly aligned!");
977 masm.bind(L);
978 }
979 #endif
980 }
// Prolog size varies with framesize/flags, so defer to the generic
// emit-and-measure implementation instead of computing it statically.
982 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
983 {
984 return MachNode::size(ra_); // too many variables; just compute it
985 // the hard way
986 }
// Upper bound on the number of relocation entries the prolog emits.
988 int MachPrologNode::reloc() const
989 {
990 return 0; // a large enough number
991 }
993 //=============================================================================
994 #ifndef PRODUCT
// Debug-only pretty printer for the epilog; must mirror the byte
// sequence produced by MachEpilogNode::emit below.
995 void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
996 {
997 Compile* C = ra_->C;
998 int framesize = C->frame_slots() << LogBytesPerInt;
999 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1000 // Remove word for return adr already pushed
1001 // and RBP
1002 framesize -= 2*wordSize;
1004 if (framesize) {
1005 st->print_cr("addq rsp, %d\t# Destroy frame", framesize);
1006 st->print("\t");
1007 }
1009 st->print_cr("popq rbp");
1010 if (do_polling() && C->is_method_compilation()) {
1011 st->print("\t");
// A far polling page needs its address materialized in rscratch1;
// a near page is reachable rip-relative.
1012 if (Assembler::is_polling_page_far()) {
1013 st->print_cr("movq rscratch1, #polling_page_address\n\t"
1014 "testl rax, [rscratch1]\t"
1015 "# Safepoint: poll for GC");
1016 } else {
1017 st->print_cr("testl rax, [rip + #offset_to_poll_page]\t"
1018 "# Safepoint: poll for GC");
1019 }
1020 }
1021 }
1022 #endif
// Emit the method epilogue: release the fixed frame, restore the caller's
// RBP, and (for method compilations with polling enabled) test the
// safepoint polling page so the VM can stop the thread at the return.
void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
  // Remove word for return adr already pushed
  // and RBP
  framesize -= 2*wordSize;

  // Note that VerifyStackAtCalls' Majik cookie does not change the frame size popped here

  if (framesize) {
    // addq rsp, #framesize -- short imm8 form (0x83) when the size fits.
    emit_opcode(cbuf, Assembler::REX_W);
    if (framesize < 0x80) {
      emit_opcode(cbuf, 0x83); // addq rsp, #framesize
      emit_rm(cbuf, 0x3, 0x00, RSP_enc);
      emit_d8(cbuf, framesize);
    } else {
      emit_opcode(cbuf, 0x81); // addq rsp, #framesize
      emit_rm(cbuf, 0x3, 0x00, RSP_enc);
      emit_d32(cbuf, framesize);
    }
  }

  // popq rbp
  emit_opcode(cbuf, 0x58 | RBP_enc);

  if (do_polling() && C->is_method_compilation()) {
    MacroAssembler _masm(&cbuf);
    AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
    if (Assembler::is_polling_page_far()) {
      // Page is beyond rip-relative reach: materialize its address first,
      // then attach the poll_return relocation to the test itself.
      __ lea(rscratch1, polling_page);
      __ relocate(relocInfo::poll_return_type);
      __ testl(rax, Address(rscratch1, 0));
    } else {
      __ testl(rax, polling_page);
    }
  }
}
1064 uint MachEpilogNode::size(PhaseRegAlloc* ra_) const
1065 {
1066 return MachNode::size(ra_); // too many variables; just compute it
1067 // the hard way
1068 }
1070 int MachEpilogNode::reloc() const
1071 {
1072 return 2; // a large enough number
1073 }
1075 const Pipeline* MachEpilogNode::pipeline() const
1076 {
1077 return MachNode::pipeline_class();
1078 }
1080 int MachEpilogNode::safepoint_offset() const
1081 {
1082 return 0;
1083 }
1085 //=============================================================================
// Register classes used to pick the spill-copy strategy below.
enum RC {
  rc_bad,    // invalid or absent register
  rc_int,    // general-purpose register
  rc_float,  // XMM register
  rc_stack   // stack slot
};
1094 static enum RC rc_class(OptoReg::Name reg)
1095 {
1096 if( !OptoReg::is_valid(reg) ) return rc_bad;
1098 if (OptoReg::is_stack(reg)) return rc_stack;
1100 VMReg r = OptoReg::as_VMReg(reg);
1102 if (r->is_Register()) return rc_int;
1104 assert(r->is_XMMRegister(), "must be");
1105 return rc_float;
1106 }
// Workhorse for spill copies between two machine locations (stack slot,
// GPR, or XMM register).  One routine serves three callers:
//   format(): cbuf == NULL, do_size == false, st != NULL  -> print assembly
//   emit():   cbuf != NULL                                -> emit machine code
//   size():   cbuf == NULL, do_size == true               -> size only
// The return value is always the byte length of the emitted sequence.
// Copies are dispatched on the (src class, dst class) pair, and within each
// pair on whether the value is 64-bit (an even-aligned adjacent register
// pair) or 32-bit.
uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
                                       PhaseRegAlloc* ra_,
                                       bool do_size,
                                       outputStream* st) const
{

  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this);
  OptoReg::Name dst_first = ra_->get_reg_first(this);

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first),
         "must move at least 1 register" );

  if (src_first == dst_first && src_second == dst_second) {
    // Self copy, no move
    return 0;
  } else if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem
      assert(src_second != dst_first, "overlap");
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          // pushq [rsp+src]; popq [rsp+dst] -- no temp register needed.
          // 0xFF /6 is PUSH r/m64 (RSI_enc == 6 supplies the /6 extension).
          emit_opcode(*cbuf, 0xFF);
          encode_RegMem(*cbuf, RSI_enc, RSP_enc, 0x4, 0, src_offset, false);

          // 0x8F /0 is POP r/m64 (RAX_enc == 0 supplies the /0 extension).
          emit_opcode(*cbuf, 0x8F);
          encode_RegMem(*cbuf, RAX_enc, RSP_enc, 0x4, 0, dst_offset, false);

#ifndef PRODUCT
        } else if (!do_size) {
          st->print("pushq [rsp + #%d]\t# 64-bit mem-mem spill\n\t"
                    "popq [rsp + #%d]",
                    src_offset,
                    dst_offset);
#endif
        }
        // Each instruction: opcode + ModRM + SIB (3) plus 0/1/4 disp bytes.
        return
          3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) +
          3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 1 : 4));
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        // spill rax below rsp, bounce the value through it, then restore rax.
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          // movq [rsp - #8], rax
          emit_opcode(*cbuf, Assembler::REX_W);
          emit_opcode(*cbuf, 0x89);
          emit_opcode(*cbuf, 0x44);
          emit_opcode(*cbuf, 0x24);
          emit_opcode(*cbuf, 0xF8);

          // movl rax, [rsp + src_offset]
          emit_opcode(*cbuf, 0x8B);
          encode_RegMem(*cbuf,
                        RAX_enc,
                        RSP_enc, 0x4, 0, src_offset,
                        false);

          // movl [rsp + dst_offset], rax
          emit_opcode(*cbuf, 0x89);
          encode_RegMem(*cbuf,
                        RAX_enc,
                        RSP_enc, 0x4, 0, dst_offset,
                        false);

          // movq rax, [rsp - #8]  (restore the scratch register)
          emit_opcode(*cbuf, Assembler::REX_W);
          emit_opcode(*cbuf, 0x8B);
          emit_opcode(*cbuf, 0x44);
          emit_opcode(*cbuf, 0x24);
          emit_opcode(*cbuf, 0xF8);

#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq [rsp - #8], rax\t# 32-bit mem-mem spill\n\t"
                    "movl rax, [rsp + #%d]\n\t"
                    "movl [rsp + #%d], rax\n\t"
                    "movq rax, [rsp - #8]",
                    src_offset,
                    dst_offset);
#endif
        }
        return
          5 + // movq
          3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) + // movl
          3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 1 : 4)) + // movl
          5; // movq
      }
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          // movq dst, [rsp + offset]; REX.R needed when dst is r8..r15.
          if (Matcher::_regEncode[dst_first] < 8) {
            emit_opcode(*cbuf, Assembler::REX_W);
          } else {
            emit_opcode(*cbuf, Assembler::REX_WR);
          }
          emit_opcode(*cbuf, 0x8B);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq %s, [rsp + #%d]\t# spill",
                    Matcher::regName[dst_first],
                    offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + 4; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          // movl dst, [rsp + offset]; REX prefix only for high registers.
          if (Matcher::_regEncode[dst_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x8B);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movl %s, [rsp + #%d]\t# spill",
                    Matcher::regName[dst_first],
                    offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[dst_first] < 8)
           ? 3
           : 4); // REX
      }
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          // movsd (F2 0F 10) or movlpd (66 0F 12), per UseXmmLoadAndClearUpper.
          emit_opcode(*cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
          if (Matcher::_regEncode[dst_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("%s %s, [rsp + #%d]\t# spill",
                    UseXmmLoadAndClearUpper ? "movsd " : "movlpd",
                    Matcher::regName[dst_first],
                    offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[dst_first] < 8)
           ? 5
           : 6); // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          // movss dst, [rsp + offset]  (F3 0F 10)
          emit_opcode(*cbuf, 0xF3);
          if (Matcher::_regEncode[dst_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x10);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movss %s, [rsp + #%d]\t# spill",
                    Matcher::regName[dst_first],
                    offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[dst_first] < 8)
           ? 5
           : 6); // REX
      }
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          // movq [rsp + offset], src
          if (Matcher::_regEncode[src_first] < 8) {
            emit_opcode(*cbuf, Assembler::REX_W);
          } else {
            emit_opcode(*cbuf, Assembler::REX_WR);
          }
          emit_opcode(*cbuf, 0x89);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq [rsp + #%d], %s\t# spill",
                    offset,
                    Matcher::regName[src_first]);
#endif
        }
        return ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + 4; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          // movl [rsp + offset], src
          if (Matcher::_regEncode[src_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x89);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movl [rsp + #%d], %s\t# spill",
                    offset,
                    Matcher::regName[src_first]);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[src_first] < 8)
           ? 3
           : 4); // REX
      }
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          // movq dst, src; REX.R for high dst, REX.B for high src (0x8B
          // reads the source out of the r/m field).
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_W);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WB);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_WR);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WRB);
            }
          }
          emit_opcode(*cbuf, 0x8B);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq %s, %s\t# spill",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return 3; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          // movl dst, src; REX only when a high register is involved.
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x8B);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movl %s, %s\t# spill",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 2
          : 3; // REX
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          // movdq xmm, gpr  (66 REX.W 0F 6E /r)
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_W);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WB);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_WR);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WRB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x6E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdq %s, %s\t# spill",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return 5; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          // movdl xmm, gpr  (66 0F 6E /r)
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x6E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdl %s, %s\t# spill",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 4
          : 5; // REX
      }
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          // movsd [rsp + offset], src  (F2 0F 11)
          emit_opcode(*cbuf, 0xF2);
          if (Matcher::_regEncode[src_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x11);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movsd [rsp + #%d], %s\t# spill",
                    offset,
                    Matcher::regName[src_first]);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[src_first] < 8)
           ? 5
           : 6); // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          // movss [rsp + offset], src  (F3 0F 11)
          emit_opcode(*cbuf, 0xF3);
          if (Matcher::_regEncode[src_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x11);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movss [rsp + #%d], %s\t# spill",
                    offset,
                    Matcher::regName[src_first]);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[src_first] < 8)
           ? 5
           : 6); // REX
      }
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          // movdq gpr, xmm  (66 REX.W 0F 7E /r).  Note 0x7E swaps the
          // reg/rm roles relative to 0x6E, hence the "attention!" REX picks.
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_W);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WR); // attention!
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_WB); // attention!
            } else {
              emit_opcode(*cbuf, Assembler::REX_WRB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x7E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[src_first] & 7,
                  Matcher::_regEncode[dst_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdq %s, %s\t# spill",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return 5; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          // movdl gpr, xmm  (66 0F 7E /r) -- same operand swap as above.
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_R); // attention!
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_B); // attention!
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x7E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[src_first] & 7,
                  Matcher::_regEncode[dst_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdl %s, %s\t# spill",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 4
          : 5; // REX
      }
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          // movapd (66 0F 28) or movsd (F2 0F 10), per UseXmmRegToRegMoveAll.
          emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x66 : 0xF2);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("%s %s, %s\t# spill",
                    UseXmmRegToRegMoveAll ? "movapd" : "movsd ",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 4
          : 5; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          // movaps (0F 28, no prefix) or movss (F3 0F 10).
          if (!UseXmmRegToRegMoveAll)
            emit_opcode(*cbuf, 0xF3);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("%s %s, %s\t# spill",
                    UseXmmRegToRegMoveAll ? "movaps" : "movss ",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? (UseXmmRegToRegMoveAll ? 3 : 4)
          : (UseXmmRegToRegMoveAll ? 4 : 5); // REX
      }
    }
  }

  // Every legal (src class, dst class) pair is handled above.
  assert(0," foo ");
  Unimplemented();

  return 0;
}
1712 #ifndef PRODUCT
1713 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const
1714 {
1715 implementation(NULL, ra_, false, st);
1716 }
1717 #endif
1719 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
1720 {
1721 implementation(&cbuf, ra_, false, NULL);
1722 }
1724 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const
1725 {
1726 return implementation(NULL, ra_, true, NULL);
1727 }
1729 //=============================================================================
1730 #ifndef PRODUCT
1731 void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const
1732 {
1733 st->print("nop \t# %d bytes pad for loops and calls", _count);
1734 }
1735 #endif
1737 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const
1738 {
1739 MacroAssembler _masm(&cbuf);
1740 __ nop(_count);
1741 }
1743 uint MachNopNode::size(PhaseRegAlloc*) const
1744 {
1745 return _count;
1746 }
1749 //=============================================================================
1750 #ifndef PRODUCT
1751 void BoxLockNode::format(PhaseRegAlloc* ra_, outputStream* st) const
1752 {
1753 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1754 int reg = ra_->get_reg_first(this);
1755 st->print("leaq %s, [rsp + #%d]\t# box lock",
1756 Matcher::regName[reg], offset);
1757 }
1758 #endif
1760 void BoxLockNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
1761 {
1762 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1763 int reg = ra_->get_encode(this);
1764 if (offset >= 0x80) {
1765 emit_opcode(cbuf, reg < 8 ? Assembler::REX_W : Assembler::REX_WR);
1766 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1767 emit_rm(cbuf, 0x2, reg & 7, 0x04);
1768 emit_rm(cbuf, 0x0, 0x04, RSP_enc);
1769 emit_d32(cbuf, offset);
1770 } else {
1771 emit_opcode(cbuf, reg < 8 ? Assembler::REX_W : Assembler::REX_WR);
1772 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1773 emit_rm(cbuf, 0x1, reg & 7, 0x04);
1774 emit_rm(cbuf, 0x0, 0x04, RSP_enc);
1775 emit_d8(cbuf, offset);
1776 }
1777 }
1779 uint BoxLockNode::size(PhaseRegAlloc *ra_) const
1780 {
1781 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1782 return (offset < 0x80) ? 5 : 8; // REX
1783 }
1785 //=============================================================================
// emit call stub, compiled java to interpreter
// The stub lives in the stubs section (not inline with the call) and is
// rewritten when the call site is re-bound.
void emit_java_to_interp(CodeBuffer& cbuf)
{
  // Stub is fixed up when the corresponding call is converted from
  // calling compiled code to calling interpreted code.
  // movq rbx, 0
  // jmp -5 # to self

  address mark = cbuf.insts_mark(); // get mark within main instrs section

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL) return; // CodeBuffer::expand failed
  // static stub relocation stores the instruction address of the call
  __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM64);
  // static stub relocation also tags the methodOop in the code-stream.
  __ movoop(rbx, (jobject) NULL); // method is zapped till fixup time
  // This is recognized as unresolved by relocs/nativeinst/ic code
  __ jump(RuntimeAddress(__ pc()));

  // Update current stubs pointer and restore insts_end.
  __ end_a_stub();
}
1815 // size of call stub, compiled java to interpretor
1816 uint size_java_to_interp()
1817 {
1818 return 15; // movq (1+1+8); jmp (1+4)
1819 }
1821 // relocation entries for call stub, compiled java to interpretor
1822 uint reloc_java_to_interp()
1823 {
1824 return 4; // 3 in emit_java_to_interp + 1 in Java_Static_Call
1825 }
1827 //=============================================================================
1828 #ifndef PRODUCT
// Print the unverified entry point (inline-cache check) for
// -XX:+PrintOptoAssembly.  Mirrors MachUEPNode::emit().
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  if (UseCompressedOops) {
    // Compressed klass must be loaded and (possibly) decoded before compare.
    st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (Universe::narrow_oop_shift() != 0) {
      st->print_cr("\tdecode_heap_oop_not_null rscratch1, rscratch1");
    }
    st->print_cr("\tcmpq rax, rscratch1\t # Inline cache check");
  } else {
    st->print_cr("\tcmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t"
                 "# Inline cache check");
  }
  st->print_cr("\tjne SharedRuntime::_ic_miss_stub");
  st->print_cr("\tnop\t# nops to align entry point");
}
1844 #endif
// Emit the unverified entry point: compare the receiver's klass (in
// j_rarg0) against the expected klass (in rax); on mismatch, jump to the
// shared inline-cache miss stub.  Pads with nops so the verified entry
// point that follows is 4-byte aligned.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  MacroAssembler masm(&cbuf);
  uint insts_size = cbuf.insts_size();
  if (UseCompressedOops) {
    masm.load_klass(rscratch1, j_rarg0);
    masm.cmpptr(rax, rscratch1);
  } else {
    masm.cmpptr(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
  }

  masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  /* WARNING these NOPs are critical so that verified entry point is properly
     4 bytes aligned for patching by NativeJump::patch_verified_entry() */
  int nops_cnt = 4 - ((cbuf.insts_size() - insts_size) & 0x3);
  if (OptoBreakpoint) {
    // Leave space for int3
    nops_cnt -= 1;
  }
  nops_cnt &= 0x3; // Do not add nops if code is aligned.
  if (nops_cnt > 0)
    masm.nop(nops_cnt);
}
1871 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
1872 {
1873 return MachNode::size(ra_); // too many variables; just compute it
1874 // the hard way
1875 }
1878 //=============================================================================
1879 uint size_exception_handler()
1880 {
1881 // NativeCall instruction size is the same as NativeJump.
1882 // Note that this value is also credited (in output.cpp) to
1883 // the size of the code section.
1884 return NativeJump::instruction_size;
1885 }
// Emit exception handler code.
// Returns the handler's start offset within the stub section.
int emit_exception_handler(CodeBuffer& cbuf)
{

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL) return 0; // CodeBuffer::expand failed
  int offset = __ offset();
  // A single jump into the shared exception blob; must fit within the
  // size_exception_handler() budget asserted below.
  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
1904 uint size_deopt_handler()
1905 {
1906 // three 5 byte instructions
1907 return 15;
1908 }
// Emit deopt handler code.
// Pushes the address of the deopt point and jumps to the shared deopt
// blob; returns the handler's start offset within the stub section.
int emit_deopt_handler(CodeBuffer& cbuf)
{

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());
  if (base == NULL) return 0; // CodeBuffer::expand failed
  int offset = __ offset();
  address the_pc = (address) __ pc();
  Label next;
  // push a "the_pc" on the stack without destroying any registers
  // as they all may be live.

  // push address of "next"
  __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
  __ bind(next);
  // adjust it so it matches "the_pc"
  __ subptr(Address(rsp, 0), __ offset() - offset);
  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
1938 const bool Matcher::match_rule_supported(int opcode) {
1939 if (!has_match_rule(opcode))
1940 return false;
1942 return true; // Per default match rules are supported.
1943 }
1945 int Matcher::regnum_to_fpu_offset(int regnum)
1946 {
1947 return regnum - 32; // The FP registers are in the second chunk
1948 }
1950 // This is UltraSparc specific, true just means we have fast l2f conversion
1951 const bool Matcher::convL2FSupported(void) {
1952 return true;
1953 }
1955 // Vector width in bytes
1956 const uint Matcher::vector_width_in_bytes(void) {
1957 return 8;
1958 }
1960 // Vector ideal reg
1961 const uint Matcher::vector_ideal_reg(void) {
1962 return Op_RegD;
1963 }
1965 // Is this branch offset short enough that a short branch can be used?
1966 //
1967 // NOTE: If the platform does not provide any short branch variants, then
1968 // this method should return false for offset 0.
1969 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
1970 // The passed offset is relative to address of the branch.
1971 // On 86 a branch displacement is calculated relative to address
1972 // of a next instruction.
1973 offset -= br_size;
1975 // the short version of jmpConUCF2 contains multiple branches,
1976 // making the reach slightly less
1977 if (rule == jmpConUCF2_rule)
1978 return (-126 <= offset && offset <= 125);
1979 return (-128 <= offset && offset <= 127);
1980 }
1982 const bool Matcher::isSimpleConstant64(jlong value) {
1983 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
1984 //return value == (int) value; // Cf. storeImmL and immL32.
1986 // Probably always true, even if a temp register is required.
1987 return true;
1988 }
// The ecx parameter to rep stosq for the ClearArray node is in words.
const bool Matcher::init_array_count_is_in_bytes = false;

// Threshold size for cleararray: arrays at or below this size are
// cleared with discrete stores rather than rep stosq.
const int Matcher::init_array_short_size = 8 * BytesPerLong;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers? True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = true;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;
2005 bool Matcher::narrow_oop_use_complex_address() {
2006 assert(UseCompressedOops, "only for compressed oops code");
2007 return (LogMinObjAlignmentInBytes <= 3);
2008 }
// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = true; // XXX

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;
// No-op on amd64: implicit null checks need no instruction fix-up here.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {}
// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = true;

// Are floats converted to double when stored to stack during deoptimization?
// On x64 it is stored without conversion so we can use normal access.
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
const bool Matcher::int_in_long = true;
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers will not be
// available to the callee.
// The list matches the x86-64 Java calling convention: the six integer
// argument registers (plus R12) and the eight XMM argument registers,
// each with its high-half pseudo-register.
bool Matcher::can_be_java_arg(int reg)
{
  return
    reg == RDI_num || reg == RDI_H_num ||
    reg == RSI_num || reg == RSI_H_num ||
    reg == RDX_num || reg == RDX_H_num ||
    reg == RCX_num || reg == RCX_H_num ||
    reg == R8_num  || reg == R8_H_num  ||
    reg == R9_num  || reg == R9_H_num  ||
    reg == R12_num || reg == R12_H_num ||
    reg == XMM0_num || reg == XMM0_H_num ||
    reg == XMM1_num || reg == XMM1_H_num ||
    reg == XMM2_num || reg == XMM2_H_num ||
    reg == XMM3_num || reg == XMM3_H_num ||
    reg == XMM4_num || reg == XMM4_H_num ||
    reg == XMM5_num || reg == XMM5_H_num ||
    reg == XMM6_num || reg == XMM6_H_num ||
    reg == XMM7_num || reg == XMM7_H_num;
}
// An argument register may be used as a spill target exactly when it
// can carry a Java argument.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  // In 64-bit mode code which uses a multiply when the
  // divisor is constant is faster than the hardware
  // DIV instruction (it uses MulHiL), so never fall
  // back to the assembler stub.
  return false;
}
// x86 DIV/IDIV fix the quotient in RAX and the remainder in RDX;
// the projection masks below pin the DivMod projections accordingly.

// Register for DIVI projection of divmodI
RegMask Matcher::divI_proj_mask() {
  return INT_RAX_REG_mask;
}

// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
  return INT_RDX_REG_mask;
}

// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
  return LONG_RAX_REG_mask;
}

// Register for MODL projection of divmodL
RegMask Matcher::modL_proj_mask() {
  return LONG_RDX_REG_mask;
}

// RBP holds the saved SP across method-handle invokes (cf. preserve_SP /
// restore_SP enc_classes below).
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return PTR_RBP_REG_mask;
}
2098 static Address build_address(int b, int i, int s, int d) {
2099 Register index = as_Register(i);
2100 Address::ScaleFactor scale = (Address::ScaleFactor)s;
2101 if (index == rsp) {
2102 index = noreg;
2103 scale = Address::no_scale;
2104 }
2105 Address addr(as_Register(b), index, scale, d);
2106 return addr;
2107 }
2109 %}
2111 //----------ENCODING BLOCK-----------------------------------------------------
2112 // This block specifies the encoding classes used by the compiler to
2113 // output byte streams. Encoding classes are parameterized macros
2114 // used by Machine Instruction Nodes in order to generate the bit
2115 // encoding of the instruction. Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
2118 // COND_INTER. REG_INTER causes an operand to generate a function
2119 // which returns its register number when queried. CONST_INTER causes
2120 // an operand to generate a function which returns the value of the
2121 // constant when queried. MEMORY_INTER causes an operand to generate
2122 // four functions which return the Base Register, the Index Register,
2123 // the Scale Value, and the Offset Value of the operand when queried.
2124 // COND_INTER causes an operand to generate six functions which return
2125 // the encoding code (ie - encoding bits for the instruction)
2126 // associated with each basic boolean condition for a conditional
2127 // instruction.
2128 //
2129 // Instructions specify two basic values for encoding. Again, a
2130 // function is available to check if the constant displacement is an
2131 // oop. They use the ins_encode keyword to specify their encoding
2132 // classes (which must be a sequence of enc_class names, and their
2133 // parameters, specified in the encoding block), and they use the
2134 // opcode keyword to specify, in order, their primary, secondary, and
2135 // tertiary opcode. Only the opcode sections which a particular
2136 // instruction needs for encoding need to be specified.
encode %{
  // Build emit functions for each basic byte or larger field in the
  // intel encoding scheme (opcode, rm, sib, immediate), and call them
  // from C++ code in the enc_class source block.  Emit functions will
  // live in the main source block for now.  In future, we can
  // generalize this by adding a syntax that specifies the sizes of
  // fields in an order, so that the adlc can build the emit functions
  // automagically

  // Emit primary opcode
  enc_class OpcP
  %{
    emit_opcode(cbuf, $primary);
  %}

  // Emit secondary opcode
  enc_class OpcS
  %{
    emit_opcode(cbuf, $secondary);
  %}

  // Emit tertiary opcode
  enc_class OpcT
  %{
    emit_opcode(cbuf, $tertiary);
  %}

  // Emit opcode directly (the opcode byte is supplied as an immediate operand)
  enc_class Opcode(immI d8)
  %{
    emit_opcode(cbuf, $d8$$constant);
  %}

  // Emit size prefix (0x66 operand-size override selects 16-bit operands)
  enc_class SizePrefix
  %{
    emit_opcode(cbuf, 0x66);
  %}

  // ModRM byte, register-direct form (mod = 0b11), reg field 0,
  // r/m = low 3 bits of $reg.  Any needed REX.B bit is emitted
  // separately by one of the REX_* enc_classes.
  enc_class reg(rRegI reg)
  %{
    emit_rm(cbuf, 0x3, 0, $reg$$reg & 7);
  %}

  // ModRM byte, register-direct form, for a two-register instruction.
  enc_class reg_reg(rRegI dst, rRegI src)
  %{
    emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
  %}

  // Opcode byte (from an immediate operand) followed by a
  // register-direct ModRM byte.
  enc_class opc_reg_reg(immI opcode, rRegI dst, rRegI src)
  %{
    emit_opcode(cbuf, $opcode$$constant);
    emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
  %}
  // Fix up the unordered case after a floating-point compare
  // (delegates to the shared MacroAssembler helper).
  enc_class cmpfp_fixup() %{
    MacroAssembler _masm(&cbuf);
    emit_cmpfp_fixup(_masm);
  %}

  // Materialize a three-way compare result (-1, 0, 1) in $dst from the
  // flags of a preceding FP compare: preload -1, keep it on parity
  // (unordered) or below, otherwise setne/movzbl yields 0 or 1.
  // The short-jump displacements differ depending on whether the
  // setne/movzbl sequence needs REX prefixes (dstenc >= 4).
  enc_class cmpfp3(rRegI dst)
  %{
    int dstenc = $dst$$reg;

    // movl $dst, -1
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
    }
    emit_opcode(cbuf, 0xB8 | (dstenc & 7));
    emit_d32(cbuf, -1);

    // jp,s done
    emit_opcode(cbuf, 0x7A);
    emit_d8(cbuf, dstenc < 4 ? 0x08 : 0x0A);

    // jb,s done
    emit_opcode(cbuf, 0x72);
    emit_d8(cbuf, dstenc < 4 ? 0x06 : 0x08);

    // setne $dst
    if (dstenc >= 4) {
      emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_B);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0x95);
    emit_opcode(cbuf, 0xC0 | (dstenc & 7));

    // movzbl $dst, $dst
    if (dstenc >= 4) {
      emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_RB);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0xB6);
    emit_rm(cbuf, 0x3, dstenc & 7, dstenc & 7);
  %}
  enc_class cdql_enc(no_rax_rdx_RegI div)
  %{
    // Full implementation of Java idiv and irem; checks for
    // special case as described in JVM spec., p.243 & p.271.
    //
    //         normal case                           special case
    //
    // input : rax: dividend                         min_int
    //         reg: divisor                          -1
    //
    // output: rax: quotient  (= rax idiv reg)       min_int
    //         rdx: remainder (= rax irem reg)       0
    //
    // Code sequence:
    //
    // 0:   3d 00 00 00 80          cmp    $0x80000000,%eax
    // 5:   75 07/08                jne    e <normal>
    // 7:   33 d2                   xor    %edx,%edx
    // [div >= 8 -> offset + 1]
    // [REX_B]
    // 9:   83 f9 ff                cmp    $0xffffffffffffffff,$div
    // c:   74 03/04                je     11 <done>
    // 000000000000000e <normal>:
    // e:   99                      cltd
    // [div >= 8 -> offset + 1]
    // [REX_B]
    // f:   f7 f9                   idiv   $div
    // 0000000000000011 <done>:

    // cmp    $0x80000000,%eax
    emit_opcode(cbuf, 0x3d);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x80);

    // jne    e <normal>
    emit_opcode(cbuf, 0x75);
    emit_d8(cbuf, $div$$reg < 8 ? 0x07 : 0x08);

    // xor    %edx,%edx
    emit_opcode(cbuf, 0x33);
    emit_d8(cbuf, 0xD2);

    // cmp    $0xffffffffffffffff,$div
    if ($div$$reg >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
    }
    emit_opcode(cbuf, 0x83);
    emit_rm(cbuf, 0x3, 0x7, $div$$reg & 7);
    emit_d8(cbuf, 0xFF);

    // je     11 <done>
    emit_opcode(cbuf, 0x74);
    emit_d8(cbuf, $div$$reg < 8 ? 0x03 : 0x04);

    // <normal>
    // cltd
    emit_opcode(cbuf, 0x99);

    // idivl (note: must be emitted by the user of this rule)
    // <done>
  %}
  enc_class cdqq_enc(no_rax_rdx_RegL div)
  %{
    // Full implementation of Java ldiv and lrem; checks for
    // special case as described in JVM spec., p.243 & p.271.
    //
    //         normal case                           special case
    //
    // input : rax: dividend                         min_long
    //         reg: divisor                          -1
    //
    // output: rax: quotient  (= rax idiv reg)       min_long
    //         rdx: remainder (= rax irem reg)       0
    //
    // Code sequence:
    //
    // 0:   48 ba 00 00 00 00 00    mov    $0x8000000000000000,%rdx
    // 7:   00 00 80
    // a:   48 39 d0                cmp    %rdx,%rax
    // d:   75 08                   jne    17 <normal>
    // f:   33 d2                   xor    %edx,%edx
    // 11:  48 83 f9 ff             cmp    $0xffffffffffffffff,$div
    // 15:  74 05                   je     1c <done>
    // 0000000000000017 <normal>:
    // 17:  48 99                   cqto
    // 19:  48 f7 f9                idiv   $div
    // 000000000000001c <done>:

    // mov    $0x8000000000000000,%rdx
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0xBA);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x80);

    // cmp    %rdx,%rax
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0x39);
    emit_d8(cbuf, 0xD0);

    // jne    17 <normal>
    emit_opcode(cbuf, 0x75);
    emit_d8(cbuf, 0x08);

    // xor    %edx,%edx
    emit_opcode(cbuf, 0x33);
    emit_d8(cbuf, 0xD2);

    // cmp    $0xffffffffffffffff,$div
    emit_opcode(cbuf, $div$$reg < 8 ? Assembler::REX_W : Assembler::REX_WB);
    emit_opcode(cbuf, 0x83);
    emit_rm(cbuf, 0x3, 0x7, $div$$reg & 7);
    emit_d8(cbuf, 0xFF);

    // je     1c <done>
    emit_opcode(cbuf, 0x74);
    emit_d8(cbuf, 0x05);

    // <normal>
    // cqto
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0x99);

    // idivq (note: must be emitted by the user of this rule)
    // <done>
  %}
  // Opcode enc_class for 8/32 bit immediate instructions with sign-extension
  enc_class OpcSE(immI imm)
  %{
    // Emit primary opcode and set sign-extend bit
    // Check for 8-bit immediate, and set sign extend bit in opcode
    if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
      emit_opcode(cbuf, $primary | 0x02);
    } else {
      // 32-bit immediate
      emit_opcode(cbuf, $primary);
    }
  %}

  // As OpcSE, but also emits a REX.B prefix for high registers and the
  // ModRM byte with the secondary opcode in the reg field.
  enc_class OpcSErm(rRegI dst, immI imm)
  %{
    // OpcSEr/m
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    // Emit primary opcode and set sign-extend bit
    // Check for 8-bit immediate, and set sign extend bit in opcode
    if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
      emit_opcode(cbuf, $primary | 0x02);
    } else {
      // 32-bit immediate
      emit_opcode(cbuf, $primary);
    }
    // Emit r/m byte with secondary opcode, after primary opcode.
    emit_rm(cbuf, 0x3, $secondary, dstenc);
  %}

  // 64-bit variant of OpcSErm: always emits REX.W (REX.WB for high regs).
  enc_class OpcSErm_wide(rRegL dst, immI imm)
  %{
    // OpcSEr/m
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    // Emit primary opcode and set sign-extend bit
    // Check for 8-bit immediate, and set sign extend bit in opcode
    if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
      emit_opcode(cbuf, $primary | 0x02);
    } else {
      // 32-bit immediate
      emit_opcode(cbuf, $primary);
    }
    // Emit r/m byte with secondary opcode, after primary opcode.
    emit_rm(cbuf, 0x3, $secondary, dstenc);
  %}

  // Emit the immediate itself in the short (8-bit) or long (32-bit)
  // form, matching the opcode choice made by OpcSE/OpcSErm above.
  enc_class Con8or32(immI imm)
  %{
    // Check for 8-bit immediate, and set sign extend bit in opcode
    if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
      $$$emit8$imm$$constant;
    } else {
      // 32-bit immediate
      $$$emit32$imm$$constant;
    }
  %}
  // Fold the register number into the secondary opcode byte (e.g. BSWAP).
  enc_class opc2_reg(rRegI dst)
  %{
    // BSWAP
    emit_cc(cbuf, $secondary, $dst$$reg);
  %}

  // Fold the register number into the tertiary opcode byte (e.g. BSWAP).
  enc_class opc3_reg(rRegI dst)
  %{
    // BSWAP
    emit_cc(cbuf, $tertiary, $dst$$reg);
  %}

  // ModRM byte with the secondary opcode in the reg field and the
  // operand register in r/m.
  enc_class reg_opc(rRegI div)
  %{
    // INC, DEC, IDIV, IMOD, JMP indirect, ...
    emit_rm(cbuf, 0x3, $secondary, $div$$reg & 7);
  %}

  // CMOVcc: primary opcode, then the secondary byte with the condition
  // code folded in.
  enc_class enc_cmov(cmpOp cop)
  %{
    // CMOV
    $$$emit8$primary;
    emit_cc(cbuf, $secondary, $cop$$cmpcode);
  %}
  // Emulate a float cmov with a short branch around a reg-reg move;
  // the branch displacement accounts for the optional 0xF3 prefix and
  // REX byte of the move that follows.
  enc_class enc_cmovf_branch(cmpOp cop, regF dst, regF src)
  %{
    // Invert sense of branch from sense of cmov
    emit_cc(cbuf, 0x70, $cop$$cmpcode ^ 1);
    emit_d8(cbuf, ($dst$$reg < 8 && $src$$reg < 8)
                  ? (UseXmmRegToRegMoveAll ? 3 : 4)
                  : (UseXmmRegToRegMoveAll ? 4 : 5) ); // REX
    // UseXmmRegToRegMoveAll ? movaps(dst, src) : movss(dst, src)
    if (!UseXmmRegToRegMoveAll) emit_opcode(cbuf, 0xF3);
    if ($dst$$reg < 8) {
      if ($src$$reg >= 8) {
        emit_opcode(cbuf, Assembler::REX_B);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
      }
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
    emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
  %}

  // Double variant of enc_cmovf_branch: the move always carries a
  // mandatory prefix (0x66 or 0xF2), so the displacement is 4 or 5.
  enc_class enc_cmovd_branch(cmpOp cop, regD dst, regD src)
  %{
    // Invert sense of branch from sense of cmov
    emit_cc(cbuf, 0x70, $cop$$cmpcode ^ 1);
    emit_d8(cbuf, $dst$$reg < 8 && $src$$reg < 8 ? 4 : 5); // REX

    // UseXmmRegToRegMoveAll ? movapd(dst, src) : movsd(dst, src)
    emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x66 : 0xF2);
    if ($dst$$reg < 8) {
      if ($src$$reg >= 8) {
        emit_opcode(cbuf, Assembler::REX_B);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
      }
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
    emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
  %}
2508 enc_class enc_PartialSubtypeCheck()
2509 %{
2510 Register Rrdi = as_Register(RDI_enc); // result register
2511 Register Rrax = as_Register(RAX_enc); // super class
2512 Register Rrcx = as_Register(RCX_enc); // killed
2513 Register Rrsi = as_Register(RSI_enc); // sub class
2514 Label miss;
2515 const bool set_cond_codes = true;
2517 MacroAssembler _masm(&cbuf);
2518 __ check_klass_subtype_slow_path(Rrsi, Rrax, Rrcx, Rrdi,
2519 NULL, &miss,
2520 /*set_cond_codes:*/ true);
2521 if ($primary) {
2522 __ xorptr(Rrdi, Rrdi);
2523 }
2524 __ bind(miss);
2525 %}
  // Direct call into the interpreter entry, recorded as a runtime call
  // relocation; the displacement is PC-relative from the next instruction.
  enc_class Java_To_Interpreter(method meth)
  %{
    // CALL Java_To_Interpreter
    // This is the instruction starting address for relocation info.
    cbuf.set_insts_mark();
    $$$emit8$primary;
    // CALL directly to the runtime
    emit_d32_reloc(cbuf,
                   (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
                   runtime_call_Relocation::spec(),
                   RELOC_DISP32);
  %}

  enc_class preserve_SP %{
    debug_only(int off0 = cbuf.insts_size());
    MacroAssembler _masm(&cbuf);
    // RBP is preserved across all calls, even compiled calls.
    // Use it to preserve RSP in places where the callee might change the SP.
    __ movptr(rbp_mh_SP_save, rsp);
    debug_only(int off1 = cbuf.insts_size());
    assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
  %}

  // Inverse of preserve_SP: restore RSP from the saved copy in RBP.
  enc_class restore_SP %{
    MacroAssembler _masm(&cbuf);
    __ movptr(rsp, rbp_mh_SP_save);
  %}
  // JAVA STATIC CALL: the relocation type distinguishes runtime stub
  // calls (no _method), optimized-virtual and plain static calls; for
  // real methods a java-to-interpreter stub is emitted alongside.
  enc_class Java_Static_Call(method meth)
  %{
    // JAVA STATIC CALL
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to
    // determine who we intended to call.
    cbuf.set_insts_mark();
    $$$emit8$primary;

    if (!_method) {
      emit_d32_reloc(cbuf,
                     (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
                     runtime_call_Relocation::spec(),
                     RELOC_DISP32);
    } else if (_optimized_virtual) {
      emit_d32_reloc(cbuf,
                     (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
                     opt_virtual_call_Relocation::spec(),
                     RELOC_DISP32);
    } else {
      emit_d32_reloc(cbuf,
                     (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
                     static_call_Relocation::spec(),
                     RELOC_DISP32);
    }
    if (_method) {
      // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
  // JAVA DYNAMIC CALL: first loads a placeholder oop (non_oop_word)
  // into rax with an immediate-oop relocation, then emits the call
  // with a virtual-call relocation pointing back at that load.
  enc_class Java_Dynamic_Call(method meth)
  %{
    // JAVA DYNAMIC CALL
    // !!!!!
    // Generate  "movq rax, -1", placeholder instruction to load oop-info
    // emit_call_dynamic_prologue( cbuf );
    cbuf.set_insts_mark();

    // movq rax, -1
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0xB8 | RAX_enc);
    emit_d64_reloc(cbuf,
                   (int64_t) Universe::non_oop_word(),
                   oop_Relocation::spec_for_immediate(), RELOC_IMM64);
    address virtual_call_oop_addr = cbuf.insts_mark();
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    cbuf.set_insts_mark();
    $$$emit8$primary;
    emit_d32_reloc(cbuf,
                   (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
                   virtual_call_Relocation::spec(virtual_call_oop_addr),
                   RELOC_DISP32);
  %}
  // JAVA COMPILED CALL: callq through the from_compiled entry stored in
  // the methodOop held in rax, using an 8- or 32-bit displacement.
  enc_class Java_Compiled_Call(method meth)
  %{
    // JAVA COMPILED CALL
    int disp = in_bytes(methodOopDesc:: from_compiled_offset());

    // XXX XXX offset is 128 is 1.5 NON-PRODUCT !!!
    // assert(-0x80 <= disp && disp < 0x80, "compiled_code_offset isn't small");

    // callq *disp(%rax)
    cbuf.set_insts_mark();
    $$$emit8$primary;
    if (disp < 0x80) {
      emit_rm(cbuf, 0x01, $secondary, RAX_enc); // R/M byte
      emit_d8(cbuf, disp); // Displacement
    } else {
      emit_rm(cbuf, 0x02, $secondary, RAX_enc); // R/M byte
      emit_d32(cbuf, disp); // Displacement
    }
  %}
  // Shift by an 8-bit immediate: REX.B if needed, primary opcode,
  // ModRM with the secondary opcode in the reg field, then the count.
  enc_class reg_opc_imm(rRegI dst, immI8 shift)
  %{
    // SAL, SAR, SHR
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    $$$emit8$primary;
    emit_rm(cbuf, 0x3, $secondary, dstenc);
    $$$emit8$shift$$constant;
  %}

  // 64-bit variant: always emits REX.W (REX.WB for high registers).
  enc_class reg_opc_imm_wide(rRegL dst, immI8 shift)
  %{
    // SAL, SAR, SHR
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    $$$emit8$primary;
    emit_rm(cbuf, 0x3, $secondary, dstenc);
    $$$emit8$shift$$constant;
  %}
  // movl $dst, imm32 (0xB8+rd form); REX.B for high registers.
  enc_class load_immI(rRegI dst, immI src)
  %{
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    $$$emit32$src$$constant;
  %}

  // movq $dst, imm64 (REX.W + 0xB8+rd form).
  enc_class load_immL(rRegL dst, immL src)
  %{
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    emit_d64(cbuf, $src$$constant);
  %}

  enc_class load_immUL32(rRegL dst, immUL32 src)
  %{
    // same as load_immI, but this time we care about zeroes in the high word
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    $$$emit32$src$$constant;
  %}

  // movq $dst, imm32 sign-extended (REX.W + 0xC7 /0 form).
  enc_class load_immL32(rRegL dst, immL32 src)
  %{
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xC7);
    emit_rm(cbuf, 0x03, 0x00, dstenc);
    $$$emit32$src$$constant;
  %}

  enc_class load_immP31(rRegP dst, immP32 src)
  %{
    // same as load_immI, but this time we care about zeroes in the high word
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    $$$emit32$src$$constant;
  %}

  // movq $dst, imm64 pointer; oop constants get a relocation entry.
  enc_class load_immP(rRegP dst, immP src)
  %{
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    // This next line should be generated from ADLC
    if ($src->constant_is_oop()) {
      emit_d64_reloc(cbuf, $src$$constant, relocInfo::oop_type, RELOC_IMM64);
    } else {
      emit_d64(cbuf, $src$$constant);
    }
  %}
  // Encode a reg-reg copy.  If it is useless, then empty encoding.
  enc_class enc_copy(rRegI dst, rRegI src)
  %{
    encode_copy(cbuf, $dst$$reg, $src$$reg);
  %}

  // Encode xmm reg-reg copy.  If it is useless, then empty encoding.
  enc_class enc_CopyXD( RegD dst, RegD src ) %{
    encode_CopyXD( cbuf, $dst$$reg, $src$$reg );
  %}

  // Unconditional 32-bit reg-reg copy (movl, 0x8B), emitted even when
  // dst == src; REX prefix chosen from the two register encodings.
  enc_class enc_copy_always(rRegI dst, rRegI src)
  %{
    int srcenc = $src$$reg;
    int dstenc = $dst$$reg;

    if (dstenc < 8) {
      if (srcenc >= 8) {
        emit_opcode(cbuf, Assembler::REX_B);
        srcenc -= 8;
      }
    } else {
      if (srcenc < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
        srcenc -= 8;
      }
      dstenc -= 8;
    }

    emit_opcode(cbuf, 0x8B);
    emit_rm(cbuf, 0x3, dstenc, srcenc);
  %}

  // 64-bit reg-reg copy (REX.W + movq); skipped entirely when
  // dst == src.
  enc_class enc_copy_wide(rRegL dst, rRegL src)
  %{
    int srcenc = $src$$reg;
    int dstenc = $dst$$reg;

    if (dstenc != srcenc) {
      if (dstenc < 8) {
        if (srcenc < 8) {
          emit_opcode(cbuf, Assembler::REX_W);
        } else {
          emit_opcode(cbuf, Assembler::REX_WB);
          srcenc -= 8;
        }
      } else {
        if (srcenc < 8) {
          emit_opcode(cbuf, Assembler::REX_WR);
        } else {
          emit_opcode(cbuf, Assembler::REX_WRB);
          srcenc -= 8;
        }
        dstenc -= 8;
      }
      emit_opcode(cbuf, 0x8B);
      emit_rm(cbuf, 0x3, dstenc, srcenc);
    }
  %}
  // Emit a raw 32-bit immediate.
  enc_class Con32(immI src)
  %{
    // Output immediate
    $$$emit32$src$$constant;
  %}
2806 enc_class Con64(immL src)
2807 %{
2808 // Output immediate
2809 emit_d64($src$$constant);
2810 %}
  // Emit the IEEE-754 bit pattern of a float constant as 32 bits.
  enc_class Con32F_as_bits(immF src)
  %{
    // Output Float immediate bits
    jfloat jf = $src$$constant;
    jint jf_as_bits = jint_cast(jf);
    emit_d32(cbuf, jf_as_bits);
  %}

  // Emit a raw 16-bit immediate.
  enc_class Con16(immI src)
  %{
    // Output immediate
    $$$emit16$src$$constant;
  %}

  // How is this different from Con32???  XXX
  enc_class Con_d32(immI src)
  %{
    emit_d32(cbuf,$src$$constant);
  %}

  // ModRM (mod=00, r/m=101: RIP/disp32 form) plus a zero displacement.
  enc_class conmemref (rRegP t1) %{    // Con32(storeImmI)
    // Output immediate memory reference
    emit_rm(cbuf, 0x00, $t1$$reg, 0x05 );
    emit_d32(cbuf, 0x00);
  %}
  // LOCK prefix, emitted only on multiprocessor systems.
  enc_class lock_prefix()
  %{
    if (os::is_MP()) {
      emit_opcode(cbuf, 0xF0); // lock
    }
  %}
  // REX prefix for a memory operand: REX.B for a high base register,
  // REX.X for a high index; omitted entirely when neither is needed.
  enc_class REX_mem(memory mem)
  %{
    if ($mem$$base >= 8) {
      if ($mem$$index < 8) {
        emit_opcode(cbuf, Assembler::REX_B);
      } else {
        emit_opcode(cbuf, Assembler::REX_XB);
      }
    } else {
      if ($mem$$index >= 8) {
        emit_opcode(cbuf, Assembler::REX_X);
      }
    }
  %}

  // 64-bit variant: REX.W is always required, so a prefix is always
  // emitted, with B/X bits added for high base/index registers.
  enc_class REX_mem_wide(memory mem)
  %{
    if ($mem$$base >= 8) {
      if ($mem$$index < 8) {
        emit_opcode(cbuf, Assembler::REX_WB);
      } else {
        emit_opcode(cbuf, Assembler::REX_WXB);
      }
    } else {
      if ($mem$$index < 8) {
        emit_opcode(cbuf, Assembler::REX_W);
      } else {
        emit_opcode(cbuf, Assembler::REX_WX);
      }
    }
  %}
  // for byte regs
  // A byte access to SPL/BPL/SIL/DIL (encodings 4-7) requires a REX
  // prefix even without any extension bit, hence the >= 4 tests below.
  enc_class REX_breg(rRegI reg)
  %{
    if ($reg$$reg >= 4) {
      emit_opcode(cbuf, $reg$$reg < 8 ? Assembler::REX : Assembler::REX_B);
    }
  %}

  // for byte regs
  enc_class REX_reg_breg(rRegI dst, rRegI src)
  %{
    if ($dst$$reg < 8) {
      if ($src$$reg >= 4) {
        emit_opcode(cbuf, $src$$reg < 8 ? Assembler::REX : Assembler::REX_B);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
      }
    }
  %}

  // for byte regs
  enc_class REX_breg_mem(rRegI reg, memory mem)
  %{
    if ($reg$$reg < 8) {
      if ($mem$$base < 8) {
        if ($mem$$index >= 8) {
          emit_opcode(cbuf, Assembler::REX_X);
        } else if ($reg$$reg >= 4) {
          emit_opcode(cbuf, Assembler::REX);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_B);
        } else {
          emit_opcode(cbuf, Assembler::REX_XB);
        }
      }
    } else {
      if ($mem$$base < 8) {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_R);
        } else {
          emit_opcode(cbuf, Assembler::REX_RX);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_RB);
        } else {
          emit_opcode(cbuf, Assembler::REX_RXB);
        }
      }
    }
  %}
  // REX.B for a single high register operand; nothing otherwise.
  enc_class REX_reg(rRegI reg)
  %{
    if ($reg$$reg >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
    }
  %}

  // 64-bit single-register form: always REX.W, plus B for high regs.
  enc_class REX_reg_wide(rRegI reg)
  %{
    if ($reg$$reg < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
    }
  %}

  // REX prefix for a reg/reg pair: R extends the ModRM reg field (dst),
  // B extends the r/m field (src); omitted when neither is needed.
  enc_class REX_reg_reg(rRegI dst, rRegI src)
  %{
    if ($dst$$reg < 8) {
      if ($src$$reg >= 8) {
        emit_opcode(cbuf, Assembler::REX_B);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
      }
    }
  %}

  // 64-bit reg/reg pair: REX.W always emitted, plus R/B as needed.
  enc_class REX_reg_reg_wide(rRegI dst, rRegI src)
  %{
    if ($dst$$reg < 8) {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_W);
      } else {
        emit_opcode(cbuf, Assembler::REX_WB);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_WR);
      } else {
        emit_opcode(cbuf, Assembler::REX_WRB);
      }
    }
  %}
  // REX prefix for a register + memory operand: R extends the register,
  // B the base, X the index; omitted when no bit is needed.
  enc_class REX_reg_mem(rRegI reg, memory mem)
  %{
    if ($reg$$reg < 8) {
      if ($mem$$base < 8) {
        if ($mem$$index >= 8) {
          emit_opcode(cbuf, Assembler::REX_X);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_B);
        } else {
          emit_opcode(cbuf, Assembler::REX_XB);
        }
      }
    } else {
      if ($mem$$base < 8) {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_R);
        } else {
          emit_opcode(cbuf, Assembler::REX_RX);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_RB);
        } else {
          emit_opcode(cbuf, Assembler::REX_RXB);
        }
      }
    }
  %}

  // 64-bit variant: REX.W is always present, with R/X/B added for high
  // register, index and base encodings.
  enc_class REX_reg_mem_wide(rRegL reg, memory mem)
  %{
    if ($reg$$reg < 8) {
      if ($mem$$base < 8) {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_W);
        } else {
          emit_opcode(cbuf, Assembler::REX_WX);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_WB);
        } else {
          emit_opcode(cbuf, Assembler::REX_WXB);
        }
      }
    } else {
      if ($mem$$base < 8) {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_WR);
        } else {
          emit_opcode(cbuf, Assembler::REX_WRX);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_WRB);
        } else {
          emit_opcode(cbuf, Assembler::REX_WRXB);
        }
      }
    }
  %}
  // ModRM/SIB/displacement for a register + memory operand, delegated
  // to the shared encode_RegMem helper.
  enc_class reg_mem(rRegI ereg, memory mem)
  %{
    // High registers handle in encode_RegMem
    int reg = $ereg$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    bool disp_is_oop = $mem->disp_is_oop();

    encode_RegMem(cbuf, reg, base, index, scale, disp, disp_is_oop);
  %}

  // Like reg_mem, but the ModRM reg field carries an opcode extension
  // supplied as an immediate operand.
  enc_class RM_opc_mem(immI rm_opcode, memory mem)
  %{
    int rm_byte_opcode = $rm_opcode$$constant;

    // High registers handle in encode_RegMem
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int displace = $mem$$disp;

    bool disp_is_oop = $mem->disp_is_oop(); // disp-as-oop when
                                            // working with static
                                            // globals
    encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace,
                  disp_is_oop);
  %}

  // LEA-style addressing: base register plus constant displacement,
  // no index and no scale.
  enc_class reg_lea(rRegI dst, rRegI src0, immI src1)
  %{
    int reg_encoding = $dst$$reg;
    int base = $src0$$reg;      // 0xFFFFFFFF indicates no base
    int index = 0x04;           // 0x04 indicates no index
    int scale = 0x00;           // 0x00 indicates no scale
    int displace = $src1$$constant; // 0x00 indicates no displacement
    bool disp_is_oop = false;
    encode_RegMem(cbuf, reg_encoding, base, index, scale, displace,
                  disp_is_oop);
  %}
  // negl $dst (0xF7 /3); REX.B for high registers.
  enc_class neg_reg(rRegI dst)
  %{
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    // NEG $dst
    emit_opcode(cbuf, 0xF7);
    emit_rm(cbuf, 0x3, 0x03, dstenc);
  %}

  // negq $dst: REX.W always emitted (REX.WB for high registers).
  enc_class neg_reg_wide(rRegI dst)
  %{
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    // NEG $dst
    emit_opcode(cbuf, 0xF7);
    emit_rm(cbuf, 0x3, 0x03, dstenc);
  %}
  // setl $dst (0x0F 0x9C); a REX is also required for encodings 4-7
  // so the byte register is SPL/BPL/SIL/DIL rather than AH-DH.
  enc_class setLT_reg(rRegI dst)
  %{
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    } else if (dstenc >= 4) {
      emit_opcode(cbuf, Assembler::REX);
    }
    // SETLT $dst
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0x9C);
    emit_rm(cbuf, 0x3, 0x0, dstenc);
  %}

  // setnz $dst (0x0F 0x95); same REX considerations as setLT_reg.
  enc_class setNZ_reg(rRegI dst)
  %{
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    } else if (dstenc >= 4) {
      emit_opcode(cbuf, Assembler::REX);
    }
    // SETNZ $dst
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0x95);
    emit_rm(cbuf, 0x3, 0x0, dstenc);
  %}
  // Compare the longs and set -1, 0, or 1 into dst: cmpq, preload -1,
  // keep it on "less", otherwise setne/movzbl produces 0 or 1.  The
  // short-jump displacement accounts for the REX prefixes needed when
  // dstenc >= 4.
  enc_class cmpl3_flag(rRegL src1, rRegL src2, rRegI dst)
  %{
    int src1enc = $src1$$reg;
    int src2enc = $src2$$reg;
    int dstenc = $dst$$reg;

    // cmpq $src1, $src2
    if (src1enc < 8) {
      if (src2enc < 8) {
        emit_opcode(cbuf, Assembler::REX_W);
      } else {
        emit_opcode(cbuf, Assembler::REX_WB);
      }
    } else {
      if (src2enc < 8) {
        emit_opcode(cbuf, Assembler::REX_WR);
      } else {
        emit_opcode(cbuf, Assembler::REX_WRB);
      }
    }
    emit_opcode(cbuf, 0x3B);
    emit_rm(cbuf, 0x3, src1enc & 7, src2enc & 7);

    // movl $dst, -1
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
    }
    emit_opcode(cbuf, 0xB8 | (dstenc & 7));
    emit_d32(cbuf, -1);

    // jl,s done
    emit_opcode(cbuf, 0x7C);
    emit_d8(cbuf, dstenc < 4 ? 0x06 : 0x08);

    // setne $dst
    if (dstenc >= 4) {
      emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_B);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0x95);
    emit_opcode(cbuf, 0xC0 | (dstenc & 7));

    // movzbl $dst, $dst
    if (dstenc >= 4) {
      emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_RB);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0xB6);
    emit_rm(cbuf, 0x3, dstenc & 7, dstenc & 7);
  %}
  // Pop the x87 result through the stack into an XMM register:
  // fstp to [rsp], reload into $dst, then free the stack slot.
  enc_class Push_ResultXD(regD dst) %{
    int dstenc = $dst$$reg;

    store_to_stackslot( cbuf, 0xDD, 0x03, 0 ); //FSTP [RSP]

    // UseXmmLoadAndClearUpper ? movsd dst,[rsp] : movlpd dst,[rsp]
    emit_opcode  (cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_R);
    }
    emit_opcode  (cbuf, 0x0F );
    emit_opcode  (cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12 );
    encode_RegMem(cbuf, dstenc, RSP_enc, 0x4, 0, 0, false);

    // add rsp,8
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf,0x83);
    emit_rm(cbuf,0x3, 0x0, RSP_enc);
    emit_d8(cbuf,0x08);
  %}
// Push XMM register $src onto the x87 FPU stack: reserve 8 bytes on the
// stack, MOVSD $src to [rsp], then FLD the value into ST(0). The stack
// slot is intentionally left allocated (callers pop it later).
3219 enc_class Push_SrcXD(regD src) %{
3220 int srcenc = $src$$reg;
3222 // subq rsp,#8
3223 emit_opcode(cbuf, Assembler::REX_W);
3224 emit_opcode(cbuf, 0x83);
3225 emit_rm(cbuf, 0x3, 0x5, RSP_enc);
3226 emit_d8(cbuf, 0x8);
3228 // movsd [rsp],src
3229 emit_opcode(cbuf, 0xF2);
3230 if (srcenc >= 8) {
3231 emit_opcode(cbuf, Assembler::REX_R);
3232 }
3233 emit_opcode(cbuf, 0x0F);
3234 emit_opcode(cbuf, 0x11);
3235 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false);
3237 // fldd [rsp]
3238 emit_opcode(cbuf, 0x66);
3239 emit_opcode(cbuf, 0xDD);
3240 encode_RegMem(cbuf, 0x0, RSP_enc, 0x4, 0, 0, false);
3241 %}
// 64-bit load from memory into an XMM register via the MacroAssembler.
3244 enc_class movq_ld(regD dst, memory mem) %{
3245 MacroAssembler _masm(&cbuf);
3246 __ movq($dst$$XMMRegister, $mem$$Address);
3247 %}
// 64-bit store of an XMM register to memory via the MacroAssembler.
3249 enc_class movq_st(memory mem, regD src) %{
3250 MacroAssembler _masm(&cbuf);
3251 __ movq($mem$$Address, $src$$XMMRegister);
3252 %}
// Replicate the low byte of $src across the low 8 bytes of $dst:
// copy, interleave with itself (punpcklbw), then shuffle word 0 everywhere.
3254 enc_class pshufd_8x8(regF dst, regF src) %{
3255 MacroAssembler _masm(&cbuf);
3257 encode_CopyXD(cbuf, $dst$$reg, $src$$reg);
3258 __ punpcklbw(as_XMMRegister($dst$$reg), as_XMMRegister($dst$$reg));
3259 __ pshuflw(as_XMMRegister($dst$$reg), as_XMMRegister($dst$$reg), 0x00);
3260 %}
// Replicate the low 16-bit word of $src across the low 4 words of $dst.
3262 enc_class pshufd_4x16(regF dst, regF src) %{
3263 MacroAssembler _masm(&cbuf);
3265 __ pshuflw(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg), 0x00);
3266 %}
// PSHUFD $dst, $src with an arbitrary immediate shuffle-control $mode.
3268 enc_class pshufd(regD dst, regD src, int mode) %{
3269 MacroAssembler _masm(&cbuf);
3271 __ pshufd(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg), $mode);
3272 %}
// Bitwise XOR of two XMM registers (PXOR).
3274 enc_class pxor(regD dst, regD src) %{
3275 MacroAssembler _masm(&cbuf);
3277 __ pxor(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg));
3278 %}
// Move a 32-bit GPR into the low doubleword of an XMM register (MOVD).
3280 enc_class mov_i2x(regD dst, rRegI src) %{
3281 MacroAssembler _masm(&cbuf);
3283 __ movdl(as_XMMRegister($dst$$reg), as_Register($src$$reg));
3284 %}
3286 // obj: object to lock
3287 // box: box address (header location) -- killed
3288 // tmp: rax -- killed
3289 // scr: rbx -- killed
3290 //
3291 // What follows is a direct transliteration of fast_lock() and fast_unlock()
3292 // from i486.ad. See that file for comments.
3293 // TODO: where possible switch from movq (r, 0) to movl(r,0) and
3294 // use the shorter encoding. (Movl clears the high-order 32-bits).
// Fast-path monitor enter. Leaves ZF set on success (lock acquired or
// recursively re-entered) and clear on failure, in which case the caller
// falls into the slow path. The low EmitSync bits select diagnostic /
// degraded emission variants; the final "else" arm is the production path.
3297 enc_class Fast_Lock(rRegP obj, rRegP box, rax_RegI tmp, rRegP scr)
3298 %{
3299 Register objReg = as_Register((int)$obj$$reg);
3300 Register boxReg = as_Register((int)$box$$reg);
3301 Register tmpReg = as_Register($tmp$$reg);
3302 Register scrReg = as_Register($scr$$reg);
3303 MacroAssembler masm(&cbuf);
3305 // Verify uniqueness of register assignments -- necessary but not sufficient
3306 assert (objReg != boxReg && objReg != tmpReg &&
3307 objReg != scrReg && tmpReg != scrReg, "invariant") ;
3309 if (_counters != NULL) {
3310 masm.atomic_incl(ExternalAddress((address) _counters->total_entry_count_addr()));
3311 }
3312 if (EmitSync & 1) {
// EmitSync&1: emit a stub that always fails the fast path (forces slow path).
3313 // Without cast to int32_t a movptr will destroy r10 which is typically obj
3314 masm.movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
3315 masm.cmpptr(rsp, (int32_t)NULL_WORD) ;
3316 } else
3317 if (EmitSync & 2) {
// EmitSync&2: classic stack-lock CAS scheme without the inflated-monitor triage.
3318 Label DONE_LABEL;
3319 if (UseBiasedLocking) {
3320 // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
3321 masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
3322 }
3323 // QQQ was movl...
3324 masm.movptr(tmpReg, 0x1);
3325 masm.orptr(tmpReg, Address(objReg, 0));
3326 masm.movptr(Address(boxReg, 0), tmpReg);
3327 if (os::is_MP()) {
3328 masm.lock();
3329 }
3330 masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
3331 masm.jcc(Assembler::equal, DONE_LABEL);
3333 // Recursive locking
3334 masm.subptr(tmpReg, rsp);
3335 masm.andptr(tmpReg, 7 - os::vm_page_size());
3336 masm.movptr(Address(boxReg, 0), tmpReg);
3338 masm.bind(DONE_LABEL);
3339 masm.nop(); // avoid branch to branch
3340 } else {
3341 Label DONE_LABEL, IsInflated, Egress;
3343 masm.movptr(tmpReg, Address(objReg, 0)) ;
3344 masm.testl (tmpReg, 0x02) ; // inflated vs stack-locked|neutral|biased
3345 masm.jcc (Assembler::notZero, IsInflated) ;
3347 // it's stack-locked, biased or neutral
3348 // TODO: optimize markword triage order to reduce the number of
3349 // conditional branches in the most common cases.
3350 // Beware -- there's a subtle invariant that fetch of the markword
3351 // at [FETCH], below, will never observe a biased encoding (*101b).
3352 // If this invariant is not held we'll suffer exclusion (safety) failure.
3354 if (UseBiasedLocking && !UseOptoBiasInlining) {
3355 masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, _counters);
3356 masm.movptr(tmpReg, Address(objReg, 0)) ; // [FETCH]
3357 }
3359 // was q will it destroy high?
3360 masm.orl (tmpReg, 1) ;
3361 masm.movptr(Address(boxReg, 0), tmpReg) ;
3362 if (os::is_MP()) { masm.lock(); }
3363 masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
3364 if (_counters != NULL) {
3365 masm.cond_inc32(Assembler::equal,
3366 ExternalAddress((address) _counters->fast_path_entry_count_addr()));
3367 }
3368 masm.jcc (Assembler::equal, DONE_LABEL);
3370 // Recursive locking
3371 masm.subptr(tmpReg, rsp);
3372 masm.andptr(tmpReg, 7 - os::vm_page_size());
3373 masm.movptr(Address(boxReg, 0), tmpReg);
3374 if (_counters != NULL) {
3375 masm.cond_inc32(Assembler::equal,
3376 ExternalAddress((address) _counters->fast_path_entry_count_addr()));
3377 }
3378 masm.jmp (DONE_LABEL) ;
3380 masm.bind (IsInflated) ;
3381 // It's inflated
3383 // TODO: someday avoid the ST-before-CAS penalty by
3384 // relocating (deferring) the following ST.
3385 // We should also think about trying a CAS without having
3386 // fetched _owner. If the CAS is successful we may
3387 // avoid an RTO->RTS upgrade on the $line.
3388 // Without cast to int32_t a movptr will destroy r10 which is typically obj
3389 masm.movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
3391 masm.mov (boxReg, tmpReg) ;
3392 masm.movptr (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
3393 masm.testptr(tmpReg, tmpReg) ;
3394 masm.jcc (Assembler::notZero, DONE_LABEL) ;
3396 // It's inflated and appears unlocked
3397 if (os::is_MP()) { masm.lock(); }
3398 masm.cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
3399 // Intentional fall-through into DONE_LABEL ...
3401 masm.bind (DONE_LABEL) ;
3402 masm.nop () ; // avoid jmp to jmp
3403 }
3404 %}
3406 // obj: object to unlock
3407 // box: box address (displaced header location), killed
3408 // RBX: killed tmp; cannot be obj nor box
// Fast-path monitor exit; the counterpart of Fast_Lock above. Communicates
// success/failure to the caller through ZF (ZF=1: unlocked on the fast path,
// ZF=0: fall into the slow path). EmitSync bits again select emission variants.
3409 enc_class Fast_Unlock(rRegP obj, rax_RegP box, rRegP tmp)
3410 %{
3412 Register objReg = as_Register($obj$$reg);
3413 Register boxReg = as_Register($box$$reg);
3414 Register tmpReg = as_Register($tmp$$reg);
3415 MacroAssembler masm(&cbuf);
3417 if (EmitSync & 4) {
3418 masm.cmpptr(rsp, 0) ;
3419 } else
3420 if (EmitSync & 8) {
3421 Label DONE_LABEL;
3422 if (UseBiasedLocking) {
3423 masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
3424 }
3426 // Check whether the displaced header is 0
3427 //(=> recursive unlock)
3428 masm.movptr(tmpReg, Address(boxReg, 0));
3429 masm.testptr(tmpReg, tmpReg);
3430 masm.jcc(Assembler::zero, DONE_LABEL);
3432 // If not recursive lock, reset the header to displaced header
3433 if (os::is_MP()) {
3434 masm.lock();
3435 }
3436 masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
3437 masm.bind(DONE_LABEL);
3438 masm.nop(); // avoid branch to branch
3439 } else {
3440 Label DONE_LABEL, Stacked, CheckSucc ;
3442 if (UseBiasedLocking && !UseOptoBiasInlining) {
3443 masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
3444 }
3446 masm.movptr(tmpReg, Address(objReg, 0)) ;
3447 masm.cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD) ;
3448 masm.jcc (Assembler::zero, DONE_LABEL) ;
3449 masm.testl (tmpReg, 0x02) ;
3450 masm.jcc (Assembler::zero, Stacked) ;
3452 // It's inflated
3453 masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
3454 masm.xorptr(boxReg, r15_thread) ;
3455 masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
3456 masm.jcc (Assembler::notZero, DONE_LABEL) ;
3457 masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
3458 masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
3459 masm.jcc (Assembler::notZero, CheckSucc) ;
3460 masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
3461 masm.jmp (DONE_LABEL) ;
3463 if ((EmitSync & 65536) == 0) {
3464 Label LSuccess, LGoSlowPath ;
3465 masm.bind (CheckSucc) ;
3466 masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
3467 masm.jcc (Assembler::zero, LGoSlowPath) ;
3469 // I'd much rather use lock:andl m->_owner, 0 as it's faster than
3470 // the explicit ST;MEMBAR combination, but masm doesn't currently support
3471 // "ANDQ M,IMM". Don't use MFENCE here. lock:add to TOS, xchg, etc
3472 // are all faster when the write buffer is populated.
3473 masm.movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
3474 if (os::is_MP()) {
3475 masm.lock () ; masm.addl (Address(rsp, 0), 0) ;
3476 }
3477 masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
3478 masm.jcc (Assembler::notZero, LSuccess) ;
3480 masm.movptr (boxReg, (int32_t)NULL_WORD) ; // box is really EAX
3481 if (os::is_MP()) { masm.lock(); }
3482 masm.cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
3483 masm.jcc (Assembler::notEqual, LSuccess) ;
3484 // Intentional fall-through into slow-path
3486 masm.bind (LGoSlowPath) ;
3487 masm.orl (boxReg, 1) ; // set ICC.ZF=0 to indicate failure
3488 masm.jmp (DONE_LABEL) ;
3490 masm.bind (LSuccess) ;
3491 masm.testl (boxReg, 0) ; // set ICC.ZF=1 to indicate success
3492 masm.jmp (DONE_LABEL) ;
3493 }
3495 masm.bind (Stacked) ;
3496 masm.movptr(tmpReg, Address (boxReg, 0)) ; // re-fetch
3497 if (os::is_MP()) { masm.lock(); }
3498 masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
3500 if (EmitSync & 65536) {
3501 masm.bind (CheckSucc) ;
3502 }
3503 masm.bind(DONE_LABEL);
3504 if (EmitSync & 32768) {
3505 masm.nop(); // avoid branch to branch
3506 }
3507 }
3508 %}
// Tail-jump to the shared rethrow stub; the displacement is PC-relative
// and registered for relocation (runtime_call_Relocation).
3511 enc_class enc_rethrow()
3512 %{
3513 cbuf.set_insts_mark();
3514 emit_opcode(cbuf, 0xE9); // jmp entry
3515 emit_d32_reloc(cbuf,
3516 (int) (OptoRuntime::rethrow_stub() - cbuf.insts_end() - 4),
3517 runtime_call_Relocation::spec(),
3518 RELOC_DISP32);
3519 %}
// absF: clear the sign bit of $dst with ANDPS against the RIP-relative
// float sign-mask constant from StubRoutines.
3521 enc_class absF_encoding(regF dst)
3522 %{
3523 int dstenc = $dst$$reg;
3524 address signmask_address = (address) StubRoutines::x86::float_sign_mask();
3526 cbuf.set_insts_mark();
3527 if (dstenc >= 8) {
3528 emit_opcode(cbuf, Assembler::REX_R);
3529 dstenc -= 8;
3530 }
3531 // XXX reg_mem doesn't support RIP-relative addressing yet
3532 emit_opcode(cbuf, 0x0F);
3533 emit_opcode(cbuf, 0x54);
3534 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101
3535 emit_d32_reloc(cbuf, signmask_address);
3536 %}
// absD: clear the sign bit of $dst with ANDPD (0x66 prefix) against the
// RIP-relative double sign-mask constant.
3538 enc_class absD_encoding(regD dst)
3539 %{
3540 int dstenc = $dst$$reg;
3541 address signmask_address = (address) StubRoutines::x86::double_sign_mask();
3543 cbuf.set_insts_mark();
3544 emit_opcode(cbuf, 0x66);
3545 if (dstenc >= 8) {
3546 emit_opcode(cbuf, Assembler::REX_R);
3547 dstenc -= 8;
3548 }
3549 // XXX reg_mem doesn't support RIP-relative addressing yet
3550 emit_opcode(cbuf, 0x0F);
3551 emit_opcode(cbuf, 0x54);
3552 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101
3553 emit_d32_reloc(cbuf, signmask_address);
3554 %}
// negF: flip the sign bit of $dst with XORPS against the RIP-relative
// float sign-flip constant.
3556 enc_class negF_encoding(regF dst)
3557 %{
3558 int dstenc = $dst$$reg;
3559 address signflip_address = (address) StubRoutines::x86::float_sign_flip();
3561 cbuf.set_insts_mark();
3562 if (dstenc >= 8) {
3563 emit_opcode(cbuf, Assembler::REX_R);
3564 dstenc -= 8;
3565 }
3566 // XXX reg_mem doesn't support RIP-relative addressing yet
3567 emit_opcode(cbuf, 0x0F);
3568 emit_opcode(cbuf, 0x57);
3569 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101
3570 emit_d32_reloc(cbuf, signflip_address);
3571 %}
// negD: flip the sign bit of $dst with XORPD (0x66 prefix) against the
// RIP-relative double sign-flip constant.
3573 enc_class negD_encoding(regD dst)
3574 %{
3575 int dstenc = $dst$$reg;
3576 address signflip_address = (address) StubRoutines::x86::double_sign_flip();
3578 cbuf.set_insts_mark();
3579 emit_opcode(cbuf, 0x66);
3580 if (dstenc >= 8) {
3581 emit_opcode(cbuf, Assembler::REX_R);
3582 dstenc -= 8;
3583 }
3584 // XXX reg_mem doesn't support RIP-relative addressing yet
3585 emit_opcode(cbuf, 0x0F);
3586 emit_opcode(cbuf, 0x57);
3587 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101
3588 emit_d32_reloc(cbuf, signflip_address);
3589 %}
// Post-CVTTSS2SI fixup for float->int: if the conversion produced the
// 0x80000000 sentinel (overflow/NaN), spill $src and call the f2i_fixup
// stub to compute the correct Java result, popping it into $dst.
// The short-jump displacement depends on how many REX prefixes the
// skipped instructions need (hence the src/dst encoding cases).
3591 enc_class f2i_fixup(rRegI dst, regF src)
3592 %{
3593 int dstenc = $dst$$reg;
3594 int srcenc = $src$$reg;
3596 // cmpl $dst, #0x80000000
3597 if (dstenc >= 8) {
3598 emit_opcode(cbuf, Assembler::REX_B);
3599 }
3600 emit_opcode(cbuf, 0x81);
3601 emit_rm(cbuf, 0x3, 0x7, dstenc & 7);
3602 emit_d32(cbuf, 0x80000000);
3604 // jne,s done
3605 emit_opcode(cbuf, 0x75);
3606 if (srcenc < 8 && dstenc < 8) {
3607 emit_d8(cbuf, 0xF);
3608 } else if (srcenc >= 8 && dstenc >= 8) {
3609 emit_d8(cbuf, 0x11);
3610 } else {
3611 emit_d8(cbuf, 0x10);
3612 }
3614 // subq rsp, #8
3615 emit_opcode(cbuf, Assembler::REX_W);
3616 emit_opcode(cbuf, 0x83);
3617 emit_rm(cbuf, 0x3, 0x5, RSP_enc);
3618 emit_d8(cbuf, 8);
3620 // movss [rsp], $src
3621 emit_opcode(cbuf, 0xF3);
3622 if (srcenc >= 8) {
3623 emit_opcode(cbuf, Assembler::REX_R);
3624 }
3625 emit_opcode(cbuf, 0x0F);
3626 emit_opcode(cbuf, 0x11);
3627 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes
3629 // call f2i_fixup
3630 cbuf.set_insts_mark();
3631 emit_opcode(cbuf, 0xE8);
3632 emit_d32_reloc(cbuf,
3633 (int)
3634 (StubRoutines::x86::f2i_fixup() - cbuf.insts_end() - 4),
3635 runtime_call_Relocation::spec(),
3636 RELOC_DISP32);
3638 // popq $dst
3639 if (dstenc >= 8) {
3640 emit_opcode(cbuf, Assembler::REX_B);
3641 }
3642 emit_opcode(cbuf, 0x58 | (dstenc & 7));
3644 // done:
3645 %}
// Post-CVTTSS2SI fixup for float->long: compare $dst against the
// RIP-relative 0x8000000000000000 sentinel; on a match, spill $src and
// call the f2l_fixup stub, popping the corrected result into $dst.
3647 enc_class f2l_fixup(rRegL dst, regF src)
3648 %{
3649 int dstenc = $dst$$reg;
3650 int srcenc = $src$$reg;
3651 address const_address = (address) StubRoutines::x86::double_sign_flip();
3653 // cmpq $dst, [0x8000000000000000]
3654 cbuf.set_insts_mark();
3655 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX_W : Assembler::REX_WR);
3656 emit_opcode(cbuf, 0x39);
3657 // XXX reg_mem doesn't support RIP-relative addressing yet
3658 emit_rm(cbuf, 0x0, dstenc & 7, 0x5); // 00 reg 101
3659 emit_d32_reloc(cbuf, const_address);
3662 // jne,s done
3663 emit_opcode(cbuf, 0x75);
3664 if (srcenc < 8 && dstenc < 8) {
3665 emit_d8(cbuf, 0xF);
3666 } else if (srcenc >= 8 && dstenc >= 8) {
3667 emit_d8(cbuf, 0x11);
3668 } else {
3669 emit_d8(cbuf, 0x10);
3670 }
3672 // subq rsp, #8
3673 emit_opcode(cbuf, Assembler::REX_W);
3674 emit_opcode(cbuf, 0x83);
3675 emit_rm(cbuf, 0x3, 0x5, RSP_enc);
3676 emit_d8(cbuf, 8);
3678 // movss [rsp], $src
3679 emit_opcode(cbuf, 0xF3);
3680 if (srcenc >= 8) {
3681 emit_opcode(cbuf, Assembler::REX_R);
3682 }
3683 emit_opcode(cbuf, 0x0F);
3684 emit_opcode(cbuf, 0x11);
3685 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes
3687 // call f2l_fixup
3688 cbuf.set_insts_mark();
3689 emit_opcode(cbuf, 0xE8);
3690 emit_d32_reloc(cbuf,
3691 (int)
3692 (StubRoutines::x86::f2l_fixup() - cbuf.insts_end() - 4),
3693 runtime_call_Relocation::spec(),
3694 RELOC_DISP32);
3696 // popq $dst
3697 if (dstenc >= 8) {
3698 emit_opcode(cbuf, Assembler::REX_B);
3699 }
3700 emit_opcode(cbuf, 0x58 | (dstenc & 7));
3702 // done:
3703 %}
// Post-CVTTSD2SI fixup for double->int; same scheme as f2i_fixup above
// but spills with MOVSD (0xF2 prefix) and calls the d2i_fixup stub.
3705 enc_class d2i_fixup(rRegI dst, regD src)
3706 %{
3707 int dstenc = $dst$$reg;
3708 int srcenc = $src$$reg;
3710 // cmpl $dst, #0x80000000
3711 if (dstenc >= 8) {
3712 emit_opcode(cbuf, Assembler::REX_B);
3713 }
3714 emit_opcode(cbuf, 0x81);
3715 emit_rm(cbuf, 0x3, 0x7, dstenc & 7);
3716 emit_d32(cbuf, 0x80000000);
3718 // jne,s done
3719 emit_opcode(cbuf, 0x75);
3720 if (srcenc < 8 && dstenc < 8) {
3721 emit_d8(cbuf, 0xF);
3722 } else if (srcenc >= 8 && dstenc >= 8) {
3723 emit_d8(cbuf, 0x11);
3724 } else {
3725 emit_d8(cbuf, 0x10);
3726 }
3728 // subq rsp, #8
3729 emit_opcode(cbuf, Assembler::REX_W);
3730 emit_opcode(cbuf, 0x83);
3731 emit_rm(cbuf, 0x3, 0x5, RSP_enc);
3732 emit_d8(cbuf, 8);
3734 // movsd [rsp], $src
3735 emit_opcode(cbuf, 0xF2);
3736 if (srcenc >= 8) {
3737 emit_opcode(cbuf, Assembler::REX_R);
3738 }
3739 emit_opcode(cbuf, 0x0F);
3740 emit_opcode(cbuf, 0x11);
3741 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes
3743 // call d2i_fixup
3744 cbuf.set_insts_mark();
3745 emit_opcode(cbuf, 0xE8);
3746 emit_d32_reloc(cbuf,
3747 (int)
3748 (StubRoutines::x86::d2i_fixup() - cbuf.insts_end() - 4),
3749 runtime_call_Relocation::spec(),
3750 RELOC_DISP32);
3752 // popq $dst
3753 if (dstenc >= 8) {
3754 emit_opcode(cbuf, Assembler::REX_B);
3755 }
3756 emit_opcode(cbuf, 0x58 | (dstenc & 7));
3758 // done:
3759 %}
// Post-CVTTSD2SI fixup for double->long; same scheme as f2l_fixup above
// but spills with MOVSD and calls the d2l_fixup stub.
3761 enc_class d2l_fixup(rRegL dst, regD src)
3762 %{
3763 int dstenc = $dst$$reg;
3764 int srcenc = $src$$reg;
3765 address const_address = (address) StubRoutines::x86::double_sign_flip();
3767 // cmpq $dst, [0x8000000000000000]
3768 cbuf.set_insts_mark();
3769 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX_W : Assembler::REX_WR);
3770 emit_opcode(cbuf, 0x39);
3771 // XXX reg_mem doesn't support RIP-relative addressing yet
3772 emit_rm(cbuf, 0x0, dstenc & 7, 0x5); // 00 reg 101
3773 emit_d32_reloc(cbuf, const_address);
3776 // jne,s done
3777 emit_opcode(cbuf, 0x75);
3778 if (srcenc < 8 && dstenc < 8) {
3779 emit_d8(cbuf, 0xF);
3780 } else if (srcenc >= 8 && dstenc >= 8) {
3781 emit_d8(cbuf, 0x11);
3782 } else {
3783 emit_d8(cbuf, 0x10);
3784 }
3786 // subq rsp, #8
3787 emit_opcode(cbuf, Assembler::REX_W);
3788 emit_opcode(cbuf, 0x83);
3789 emit_rm(cbuf, 0x3, 0x5, RSP_enc);
3790 emit_d8(cbuf, 8);
3792 // movsd [rsp], $src
3793 emit_opcode(cbuf, 0xF2);
3794 if (srcenc >= 8) {
3795 emit_opcode(cbuf, Assembler::REX_R);
3796 }
3797 emit_opcode(cbuf, 0x0F);
3798 emit_opcode(cbuf, 0x11);
3799 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes
3801 // call d2l_fixup
3802 cbuf.set_insts_mark();
3803 emit_opcode(cbuf, 0xE8);
3804 emit_d32_reloc(cbuf,
3805 (int)
3806 (StubRoutines::x86::d2l_fixup() - cbuf.insts_end() - 4),
3807 runtime_call_Relocation::spec(),
3808 RELOC_DISP32);
3810 // popq $dst
3811 if (dstenc >= 8) {
3812 emit_opcode(cbuf, Assembler::REX_B);
3813 }
3814 emit_opcode(cbuf, 0x58 | (dstenc & 7));
3816 // done:
3817 %}
3818 %}
3822 //----------FRAME--------------------------------------------------------------
3823 // Definition of frame structure and management information.
3824 //
3825 // S T A C K L A Y O U T Allocators stack-slot number
3826 // | (to get allocators register number
3827 // G Owned by | | v add OptoReg::stack0())
3828 // r CALLER | |
3829 // o | +--------+ pad to even-align allocators stack-slot
3830 // w V | pad0 | numbers; owned by CALLER
3831 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3832 // h ^ | in | 5
3833 // | | args | 4 Holes in incoming args owned by SELF
3834 // | | | | 3
3835 // | | +--------+
3836 // V | | old out| Empty on Intel, window on Sparc
3837 // | old |preserve| Must be even aligned.
3838 // | SP-+--------+----> Matcher::_old_SP, even aligned
3839 // | | in | 3 area for Intel ret address
3840 // Owned by |preserve| Empty on Sparc.
3841 // SELF +--------+
3842 // | | pad2 | 2 pad to align old SP
3843 // | +--------+ 1
3844 // | | locks | 0
3845 // | +--------+----> OptoReg::stack0(), even aligned
3846 // | | pad1 | 11 pad to align new SP
3847 // | +--------+
3848 // | | | 10
3849 // | | spills | 9 spills
3850 // V | | 8 (pad0 slot for callee)
3851 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3852 // ^ | out | 7
3853 // | | args | 6 Holes in outgoing args owned by CALLEE
3854 // Owned by +--------+
3855 // CALLEE | new out| 6 Empty on Intel, window on Sparc
3856 // | new |preserve| Must be even-aligned.
3857 // | SP-+--------+----> Matcher::_new_SP, even aligned
3858 // | | |
3859 //
3860 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3861 // known from SELF's arguments and the Java calling convention.
3862 // Region 6-7 is determined per call site.
3863 // Note 2: If the calling convention leaves holes in the incoming argument
3864 // area, those holes are owned by SELF. Holes in the outgoing area
3865 // are owned by the CALLEE. Holes should not be necessary in the
3866 // incoming area, as the Java calling convention is completely under
3867 // the control of the AD file. Doubles can be sorted and packed to
3868 // avoid holes. Holes in the outgoing arguments may be necessary for
3869 // varargs C calling conventions.
3870 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3871 // even aligned with pad0 as needed.
3872 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3873 // region 6-11 is even aligned; it may be padded out more so that
3874 // the region from SP to FP meets the minimum stack alignment.
3875 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3876 // alignment. Region 11, pad1, may be dynamically extended so that
3877 // SP meets the minimum alignment.
// FRAME block: describes the amd64 compiled-frame layout and calling
// conventions to the matcher/register allocator (see the stack-layout
// diagram above this block).
3879 frame
3880 %{
3881 // What direction does stack grow in (assumed to be same for C & Java)
3882 stack_direction(TOWARDS_LOW);
3884 // These three registers define part of the calling convention
3885 // between compiled code and the interpreter.
3886 inline_cache_reg(RAX); // Inline Cache Register
3887 interpreter_method_oop_reg(RBX); // Method Oop Register when
3888 // calling interpreter
3890 // Optional: name the operand used by cisc-spilling to access
3891 // [stack_pointer + offset]
3892 cisc_spilling_operand_name(indOffset32);
3894 // Number of stack slots consumed by locking an object
3895 sync_stack_slots(2);
3897 // Compiled code's Frame Pointer
3898 frame_pointer(RSP);
3900 // Interpreter stores its frame pointer in a register which is
3901 // stored to the stack by I2CAdaptors.
3902 // I2CAdaptors convert from interpreted java to compiled java.
3903 interpreter_frame_pointer(RBP);
3905 // Stack alignment requirement
3906 stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)
3908 // Number of stack slots between incoming argument block and the start of
3909 // a new frame. The PROLOG must add this many slots to the stack. The
3910 // EPILOG must remove this many slots. amd64 needs two slots for
3911 // return address.
3912 in_preserve_stack_slots(4 + 2 * VerifyStackAtCalls);
3914 // Number of outgoing stack slots killed above the out_preserve_stack_slots
3915 // for calls to C. Supports the var-args backing area for register parms.
3916 varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);
3918 // The after-PROLOG location of the return address. Location of
3919 // return address specifies a type (REG or STACK) and a number
3920 // representing the register number (i.e. - use a register name) or
3921 // stack slot.
3922 // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
3923 // Otherwise, it is above the locks and verification slot and alignment word
3924 return_addr(STACK - 2 +
3925 round_to(2 + 2 * VerifyStackAtCalls +
3926 Compile::current()->fixed_slots(),
3927 WordsPerLong * 2));
3929 // Body of function which returns an integer array locating
3930 // arguments either in registers or in stack slots. Passed an array
3931 // of ideal registers called "sig" and a "length" count. Stack-slot
3932 // offsets are based on outgoing arguments, i.e. a CALLER setting up
3933 // arguments for a CALLEE. Incoming stack arguments are
3934 // automatically biased by the preserve_stack_slots field above.
3936 calling_convention
3937 %{
3938 // No difference between ingoing/outgoing just pass false
3939 SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
3940 %}
3942 c_calling_convention
3943 %{
3944 // This is obviously always outgoing
3945 (void) SharedRuntime::c_calling_convention(sig_bt, regs, length);
3946 %}
3948 // Location of compiled Java return values. Same as C for now.
3949 return_value
3950 %{
3951 assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
3952 "only return normal values");
// Tables are indexed by ideal register type; lo/hi give the two halves
// of the returned register pair (hi is OptoReg::Bad for 32-bit values).
3954 static const int lo[Op_RegL + 1] = {
3955 0,
3956 0,
3957 RAX_num, // Op_RegN
3958 RAX_num, // Op_RegI
3959 RAX_num, // Op_RegP
3960 XMM0_num, // Op_RegF
3961 XMM0_num, // Op_RegD
3962 RAX_num // Op_RegL
3963 };
3964 static const int hi[Op_RegL + 1] = {
3965 0,
3966 0,
3967 OptoReg::Bad, // Op_RegN
3968 OptoReg::Bad, // Op_RegI
3969 RAX_H_num, // Op_RegP
3970 OptoReg::Bad, // Op_RegF
3971 XMM0_H_num, // Op_RegD
3972 RAX_H_num // Op_RegL
3973 };
3974 assert(ARRAY_SIZE(hi) == _last_machine_leaf - 1, "missing type");
3975 return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
3976 %}
3977 %}
3979 //----------ATTRIBUTES---------------------------------------------------------
3980 //----------Operand Attributes-------------------------------------------------
// Default values declared here; individual operand/instruction definitions
// may override them.
3981 op_attrib op_cost(0); // Required cost attribute
3983 //----------Instruction Attributes---------------------------------------------
3984 ins_attrib ins_cost(100); // Required cost attribute
3985 ins_attrib ins_size(8); // Required size attribute (in bits)
3986 ins_attrib ins_short_branch(0); // Required flag: is this instruction
3987 // a non-matching short branch variant
3988 // of some long branch?
3989 ins_attrib ins_alignment(1); // Required alignment attribute (must
3990 // be a power of 2) specifies the
3991 // alignment that some part of the
3992 // instruction (not necessarily the
3993 // start) requires. If > 1, a
3994 // compute_padding() function must be
3995 // provided for the instruction
3997 //----------OPERANDS-----------------------------------------------------------
3998 // Operand definitions must precede instruction definitions for correct parsing
3999 // in the ADLC because operands constitute user defined types which are used in
4000 // instruction definitions.
4002 //----------Simple Operands----------------------------------------------------
4003 // Immediate Operands
4004 // Integer Immediate
// General 32-bit integer immediate.
4005 operand immI()
4006 %{
4007 match(ConI);
4009 op_cost(10);
4010 format %{ %}
4011 interface(CONST_INTER);
4012 %}
4014 // Constant for test vs zero
4015 operand immI0()
4016 %{
4017 predicate(n->get_int() == 0);
4018 match(ConI);
4020 op_cost(0);
4021 format %{ %}
4022 interface(CONST_INTER);
4023 %}
4025 // Constant for increment
4026 operand immI1()
4027 %{
4028 predicate(n->get_int() == 1);
4029 match(ConI);
4031 op_cost(0);
4032 format %{ %}
4033 interface(CONST_INTER);
4034 %}
4036 // Constant for decrement
4037 operand immI_M1()
4038 %{
4039 predicate(n->get_int() == -1);
4040 match(ConI);
4042 op_cost(0);
4043 format %{ %}
4044 interface(CONST_INTER);
4045 %}
4047 // Valid scale values for addressing modes
4048 operand immI2()
4049 %{
4050 predicate(0 <= n->get_int() && (n->get_int() <= 3));
4051 match(ConI);
4053 format %{ %}
4054 interface(CONST_INTER);
4055 %}
// 8-bit signed integer immediate (fits in a sign-extended imm8).
4057 operand immI8()
4058 %{
4059 predicate((-0x80 <= n->get_int()) && (n->get_int() < 0x80));
4060 match(ConI);
4062 op_cost(5);
4063 format %{ %}
4064 interface(CONST_INTER);
4065 %}
// 16-bit signed integer immediate.
4067 operand immI16()
4068 %{
4069 predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
4070 match(ConI);
4072 op_cost(10);
4073 format %{ %}
4074 interface(CONST_INTER);
4075 %}
4077 // Constant for long shifts
4078 operand immI_32()
4079 %{
4080 predicate( n->get_int() == 32 );
4081 match(ConI);
4083 op_cost(0);
4084 format %{ %}
4085 interface(CONST_INTER);
4086 %}
4088 // Constant for long shifts
4089 operand immI_64()
4090 %{
4091 predicate( n->get_int() == 64 );
4092 match(ConI);
4094 op_cost(0);
4095 format %{ %}
4096 interface(CONST_INTER);
4097 %}
4099 // Pointer Immediate
4100 operand immP()
4101 %{
4102 match(ConP);
4104 op_cost(10);
4105 format %{ %}
4106 interface(CONST_INTER);
4107 %}
4109 // NULL Pointer Immediate
4110 operand immP0()
4111 %{
4112 predicate(n->get_ptr() == 0);
4113 match(ConP);
4115 op_cost(5);
4116 format %{ %}
4117 interface(CONST_INTER);
4118 %}
// Address of the VM's safepoint polling page.
4120 operand immP_poll() %{
4121 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
4122 match(ConP);
4124 // formats are generated automatically for constants and base registers
4125 format %{ %}
4126 interface(CONST_INTER);
4127 %}
4129 // Pointer Immediate
// Narrow (compressed) oop constant.
4130 operand immN() %{
4131 match(ConN);
4133 op_cost(10);
4134 format %{ %}
4135 interface(CONST_INTER);
4136 %}
4138 // NULL Pointer Immediate
4139 operand immN0() %{
4140 predicate(n->get_narrowcon() == 0);
4141 match(ConN);
4143 op_cost(5);
4144 format %{ %}
4145 interface(CONST_INTER);
4146 %}
// Non-oop pointer constant that fits in 31 bits (zero-extendable imm32).
4148 operand immP31()
4149 %{
4150 predicate(!n->as_Type()->type()->isa_oopptr()
4151 && (n->get_ptr() >> 31) == 0);
4152 match(ConP);
4154 op_cost(5);
4155 format %{ %}
4156 interface(CONST_INTER);
4157 %}
4160 // Long Immediate
4161 operand immL()
4162 %{
4163 match(ConL);
4165 op_cost(20);
4166 format %{ %}
4167 interface(CONST_INTER);
4168 %}
4170 // Long Immediate 8-bit
4171 operand immL8()
4172 %{
4173 predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
4174 match(ConL);
4176 op_cost(5);
4177 format %{ %}
4178 interface(CONST_INTER);
4179 %}
4181 // Long Immediate 32-bit unsigned
4182 operand immUL32()
4183 %{
4184 predicate(n->get_long() == (unsigned int) (n->get_long()));
4185 match(ConL);
4187 op_cost(10);
4188 format %{ %}
4189 interface(CONST_INTER);
4190 %}
4192 // Long Immediate 32-bit signed
4193 operand immL32()
4194 %{
4195 predicate(n->get_long() == (int) (n->get_long()));
4196 match(ConL);
4198 op_cost(15);
4199 format %{ %}
4200 interface(CONST_INTER);
4201 %}
4203 // Long Immediate zero
4204 operand immL0()
4205 %{
4206 predicate(n->get_long() == 0L);
4207 match(ConL);
4209 op_cost(10);
4210 format %{ %}
4211 interface(CONST_INTER);
4212 %}
4214 // Constant for increment
4215 operand immL1()
4216 %{
4217 predicate(n->get_long() == 1);
4218 match(ConL);
4220 format %{ %}
4221 interface(CONST_INTER);
4222 %}
4224 // Constant for decrement
4225 operand immL_M1()
4226 %{
4227 predicate(n->get_long() == -1);
4228 match(ConL);
4230 format %{ %}
4231 interface(CONST_INTER);
4232 %}
4234 // Long Immediate: the value 10
4235 operand immL10()
4236 %{
4237 predicate(n->get_long() == 10);
4238 match(ConL);
4240 format %{ %}
4241 interface(CONST_INTER);
4242 %}
4244 // Long immediate from 0 to 127.
4245 // Used for a shorter form of long mul by 10.
4246 operand immL_127()
4247 %{
4248 predicate(0 <= n->get_long() && n->get_long() < 0x80);
4249 match(ConL);
4251 op_cost(10);
4252 format %{ %}
4253 interface(CONST_INTER);
4254 %}
4256 // Long Immediate: low 32-bit mask
4257 operand immL_32bits()
4258 %{
4259 predicate(n->get_long() == 0xFFFFFFFFL);
4260 match(ConL);
4261 op_cost(20);
4263 format %{ %}
4264 interface(CONST_INTER);
4265 %}
4267 // Float Immediate zero
// Note: jint_cast compares the raw bits, so -0.0f does NOT match here.
4268 operand immF0()
4269 %{
4270 predicate(jint_cast(n->getf()) == 0);
4271 match(ConF);
4273 op_cost(5);
4274 format %{ %}
4275 interface(CONST_INTER);
4276 %}
4278 // Float Immediate
4279 operand immF()
4280 %{
4281 match(ConF);
4283 op_cost(15);
4284 format %{ %}
4285 interface(CONST_INTER);
4286 %}
4288 // Double Immediate zero
// Note: jlong_cast compares the raw bits, so -0.0 does NOT match here.
4289 operand immD0()
4290 %{
4291 predicate(jlong_cast(n->getd()) == 0);
4292 match(ConD);
4294 op_cost(5);
4295 format %{ %}
4296 interface(CONST_INTER);
4297 %}
4299 // Double Immediate
4300 operand immD()
4301 %{
4302 match(ConD);
4304 op_cost(15);
4305 format %{ %}
4306 interface(CONST_INTER);
4307 %}
4309 // Immediates for special shifts (sign extend)
4311 // Constants for increment
4312 operand immI_16()
4313 %{
4314 predicate(n->get_int() == 16);
4315 match(ConI);
4317 format %{ %}
4318 interface(CONST_INTER);
4319 %}
4321 operand immI_24()
4322 %{
4323 predicate(n->get_int() == 24);
4324 match(ConI);
4326 format %{ %}
4327 interface(CONST_INTER);
4328 %}
4330 // Constant for byte-wide masking
4331 operand immI_255()
4332 %{
4333 predicate(n->get_int() == 255);
4334 match(ConI);
4336 format %{ %}
4337 interface(CONST_INTER);
4338 %}
4340 // Constant for short-wide masking
4341 operand immI_65535()
4342 %{
4343 predicate(n->get_int() == 65535);
4344 match(ConI);
4346 format %{ %}
4347 interface(CONST_INTER);
4348 %}
4350 // Constant for byte-wide masking
4351 operand immL_255()
4352 %{
4353 predicate(n->get_long() == 255);
4354 match(ConL);
4356 format %{ %}
4357 interface(CONST_INTER);
4358 %}
4360 // Constant for short-wide masking
4361 operand immL_65535()
4362 %{
4363 predicate(n->get_long() == 65535);
4364 match(ConL);
4366 format %{ %}
4367 interface(CONST_INTER);
4368 %}
4370 // Register Operands
4371 // Integer Register
4372 operand rRegI()
4373 %{
4374 constraint(ALLOC_IN_RC(int_reg));
4375 match(RegI);
4377 match(rax_RegI);
4378 match(rbx_RegI);
4379 match(rcx_RegI);
4380 match(rdx_RegI);
4381 match(rdi_RegI);
4383 format %{ %}
4384 interface(REG_INTER);
4385 %}
4387 // Special Registers
4388 operand rax_RegI()
4389 %{
4390 constraint(ALLOC_IN_RC(int_rax_reg));
4391 match(RegI);
4392 match(rRegI);
4394 format %{ "RAX" %}
4395 interface(REG_INTER);
4396 %}
4398 // Special Registers
4399 operand rbx_RegI()
4400 %{
4401 constraint(ALLOC_IN_RC(int_rbx_reg));
4402 match(RegI);
4403 match(rRegI);
4405 format %{ "RBX" %}
4406 interface(REG_INTER);
4407 %}
4409 operand rcx_RegI()
4410 %{
4411 constraint(ALLOC_IN_RC(int_rcx_reg));
4412 match(RegI);
4413 match(rRegI);
4415 format %{ "RCX" %}
4416 interface(REG_INTER);
4417 %}
4419 operand rdx_RegI()
4420 %{
4421 constraint(ALLOC_IN_RC(int_rdx_reg));
4422 match(RegI);
4423 match(rRegI);
4425 format %{ "RDX" %}
4426 interface(REG_INTER);
4427 %}
4429 operand rdi_RegI()
4430 %{
4431 constraint(ALLOC_IN_RC(int_rdi_reg));
4432 match(RegI);
4433 match(rRegI);
4435 format %{ "RDI" %}
4436 interface(REG_INTER);
4437 %}
4439 operand no_rcx_RegI()
4440 %{
4441 constraint(ALLOC_IN_RC(int_no_rcx_reg));
4442 match(RegI);
4443 match(rax_RegI);
4444 match(rbx_RegI);
4445 match(rdx_RegI);
4446 match(rdi_RegI);
4448 format %{ %}
4449 interface(REG_INTER);
4450 %}
4452 operand no_rax_rdx_RegI()
4453 %{
4454 constraint(ALLOC_IN_RC(int_no_rax_rdx_reg));
4455 match(RegI);
4456 match(rbx_RegI);
4457 match(rcx_RegI);
4458 match(rdi_RegI);
4460 format %{ %}
4461 interface(REG_INTER);
4462 %}
4464 // Pointer Register
4465 operand any_RegP()
4466 %{
4467 constraint(ALLOC_IN_RC(any_reg));
4468 match(RegP);
4469 match(rax_RegP);
4470 match(rbx_RegP);
4471 match(rdi_RegP);
4472 match(rsi_RegP);
4473 match(rbp_RegP);
4474 match(r15_RegP);
4475 match(rRegP);
4477 format %{ %}
4478 interface(REG_INTER);
4479 %}
4481 operand rRegP()
4482 %{
4483 constraint(ALLOC_IN_RC(ptr_reg));
4484 match(RegP);
4485 match(rax_RegP);
4486 match(rbx_RegP);
4487 match(rdi_RegP);
4488 match(rsi_RegP);
4489 match(rbp_RegP);
4490 match(r15_RegP); // See Q&A below about r15_RegP.
4492 format %{ %}
4493 interface(REG_INTER);
4494 %}
4496 operand rRegN() %{
4497 constraint(ALLOC_IN_RC(int_reg));
4498 match(RegN);
4500 format %{ %}
4501 interface(REG_INTER);
4502 %}
4504 // Question: Why is r15_RegP (the read-only TLS register) a match for rRegP?
4505 // Answer: Operand match rules govern the DFA as it processes instruction inputs.
4506 // It's fine for an instruction input which expects rRegP to match a r15_RegP.
4507 // The output of an instruction is controlled by the allocator, which respects
4508 // register class masks, not match rules. Unless an instruction mentions
4509 // r15_RegP or any_RegP explicitly as its output, r15 will not be considered
4510 // by the allocator as an output.
4512 operand no_rax_RegP()
4513 %{
4514 constraint(ALLOC_IN_RC(ptr_no_rax_reg));
4515 match(RegP);
4516 match(rbx_RegP);
4517 match(rsi_RegP);
4518 match(rdi_RegP);
4520 format %{ %}
4521 interface(REG_INTER);
4522 %}
4524 operand no_rbp_RegP()
4525 %{
4526 constraint(ALLOC_IN_RC(ptr_no_rbp_reg));
4527 match(RegP);
4528 match(rbx_RegP);
4529 match(rsi_RegP);
4530 match(rdi_RegP);
4532 format %{ %}
4533 interface(REG_INTER);
4534 %}
4536 operand no_rax_rbx_RegP()
4537 %{
4538 constraint(ALLOC_IN_RC(ptr_no_rax_rbx_reg));
4539 match(RegP);
4540 match(rsi_RegP);
4541 match(rdi_RegP);
4543 format %{ %}
4544 interface(REG_INTER);
4545 %}
4547 // Special Registers
4548 // Return a pointer value
4549 operand rax_RegP()
4550 %{
4551 constraint(ALLOC_IN_RC(ptr_rax_reg));
4552 match(RegP);
4553 match(rRegP);
4555 format %{ %}
4556 interface(REG_INTER);
4557 %}
4559 // Special Registers
4560 // Return a compressed pointer value
4561 operand rax_RegN()
4562 %{
4563 constraint(ALLOC_IN_RC(int_rax_reg));
4564 match(RegN);
4565 match(rRegN);
4567 format %{ %}
4568 interface(REG_INTER);
4569 %}
4571 // Used in AtomicAdd
4572 operand rbx_RegP()
4573 %{
4574 constraint(ALLOC_IN_RC(ptr_rbx_reg));
4575 match(RegP);
4576 match(rRegP);
4578 format %{ %}
4579 interface(REG_INTER);
4580 %}
4582 operand rsi_RegP()
4583 %{
4584 constraint(ALLOC_IN_RC(ptr_rsi_reg));
4585 match(RegP);
4586 match(rRegP);
4588 format %{ %}
4589 interface(REG_INTER);
4590 %}
4592 // Used in rep stosq
4593 operand rdi_RegP()
4594 %{
4595 constraint(ALLOC_IN_RC(ptr_rdi_reg));
4596 match(RegP);
4597 match(rRegP);
4599 format %{ %}
4600 interface(REG_INTER);
4601 %}
4603 operand rbp_RegP()
4604 %{
4605 constraint(ALLOC_IN_RC(ptr_rbp_reg));
4606 match(RegP);
4607 match(rRegP);
4609 format %{ %}
4610 interface(REG_INTER);
4611 %}
4613 operand r15_RegP()
4614 %{
4615 constraint(ALLOC_IN_RC(ptr_r15_reg));
4616 match(RegP);
4617 match(rRegP);
4619 format %{ %}
4620 interface(REG_INTER);
4621 %}
4623 operand rRegL()
4624 %{
4625 constraint(ALLOC_IN_RC(long_reg));
4626 match(RegL);
4627 match(rax_RegL);
4628 match(rdx_RegL);
4630 format %{ %}
4631 interface(REG_INTER);
4632 %}
4634 // Special Registers
4635 operand no_rax_rdx_RegL()
4636 %{
4637 constraint(ALLOC_IN_RC(long_no_rax_rdx_reg));
4638 match(RegL);
4639 match(rRegL);
4641 format %{ %}
4642 interface(REG_INTER);
4643 %}
4645 operand no_rax_RegL()
4646 %{
4647 constraint(ALLOC_IN_RC(long_no_rax_rdx_reg)); // NOTE(review): this class also excludes RDX for allocation — stricter than the operand name suggests; rdx still matches via the rule below. TODO confirm intended.
4648 match(RegL);
4649 match(rRegL);
4650 match(rdx_RegL);
4652 format %{ %}
4653 interface(REG_INTER);
4654 %}
4656 operand no_rcx_RegL()
4657 %{
4658 constraint(ALLOC_IN_RC(long_no_rcx_reg));
4659 match(RegL);
4660 match(rRegL);
4662 format %{ %}
4663 interface(REG_INTER);
4664 %}
4666 operand rax_RegL()
4667 %{
4668 constraint(ALLOC_IN_RC(long_rax_reg));
4669 match(RegL);
4670 match(rRegL);
4672 format %{ "RAX" %}
4673 interface(REG_INTER);
4674 %}
4676 operand rcx_RegL()
4677 %{
4678 constraint(ALLOC_IN_RC(long_rcx_reg));
4679 match(RegL);
4680 match(rRegL);
4682 format %{ %}
4683 interface(REG_INTER);
4684 %}
4686 operand rdx_RegL()
4687 %{
4688 constraint(ALLOC_IN_RC(long_rdx_reg));
4689 match(RegL);
4690 match(rRegL);
4692 format %{ %}
4693 interface(REG_INTER);
4694 %}
4696 // Flags register, used as output of compare instructions
4697 operand rFlagsReg()
4698 %{
4699 constraint(ALLOC_IN_RC(int_flags));
4700 match(RegFlags);
4702 format %{ "RFLAGS" %}
4703 interface(REG_INTER);
4704 %}
4706 // Flags register, used as output of FLOATING POINT compare instructions
4707 operand rFlagsRegU()
4708 %{
4709 constraint(ALLOC_IN_RC(int_flags));
4710 match(RegFlags);
4712 format %{ "RFLAGS_U" %}
4713 interface(REG_INTER);
4714 %}
4716 operand rFlagsRegUCF() %{
4717 constraint(ALLOC_IN_RC(int_flags));
4718 match(RegFlags);
4719 predicate(false); // never selected by the matcher DFA; presumably installed only by explicit instruction use — TODO confirm
4721 format %{ "RFLAGS_U_CF" %}
4722 interface(REG_INTER);
4723 %}
4725 // Float register operands
4726 operand regF()
4727 %{
4728 constraint(ALLOC_IN_RC(float_reg));
4729 match(RegF);
4731 format %{ %}
4732 interface(REG_INTER);
4733 %}
4735 // Double register operands
4736 operand regD()
4737 %{
4738 constraint(ALLOC_IN_RC(double_reg));
4739 match(RegD);
4741 format %{ %}
4742 interface(REG_INTER);
4743 %}
4746 //----------Memory Operands----------------------------------------------------
4747 // Direct Memory Operand
4748 // operand direct(immP addr)
4749 // %{
4750 //   match(addr);
4752 //   format %{ "[$addr]" %}
4753 //   interface(MEMORY_INTER) %{
4754 //     base(0xFFFFFFFF);
4755 //     index(0x4);
4756 //     scale(0x0);
4757 //     disp($addr);
4758 //   %}
4759 // %}
4761 // Indirect Memory Operand
4762 operand indirect(any_RegP reg)
4763 %{
4764 constraint(ALLOC_IN_RC(ptr_reg));
4765 match(reg);
4767 format %{ "[$reg]" %}
4768 interface(MEMORY_INTER) %{
4769 base($reg);
4770 index(0x4); // 0x4 encodes "no index register"
4771 scale(0x0);
4772 disp(0x0);
4773 %}
4774 %}
4776 // Indirect Memory Plus Short Offset Operand
4777 operand indOffset8(any_RegP reg, immL8 off)
4778 %{
4779 constraint(ALLOC_IN_RC(ptr_reg));
4780 match(AddP reg off);
4782 format %{ "[$reg + $off (8-bit)]" %}
4783 interface(MEMORY_INTER) %{
4784 base($reg);
4785 index(0x4);
4786 scale(0x0);
4787 disp($off);
4788 %}
4789 %}
4791 // Indirect Memory Plus Long Offset Operand
4792 operand indOffset32(any_RegP reg, immL32 off)
4793 %{
4794 constraint(ALLOC_IN_RC(ptr_reg));
4795 match(AddP reg off);
4797 format %{ "[$reg + $off (32-bit)]" %}
4798 interface(MEMORY_INTER) %{
4799 base($reg);
4800 index(0x4);
4801 scale(0x0);
4802 disp($off);
4803 %}
4804 %}
4806 // Indirect Memory Plus Index Register Plus Offset Operand
4807 operand indIndexOffset(any_RegP reg, rRegL lreg, immL32 off)
4808 %{
4809 constraint(ALLOC_IN_RC(ptr_reg));
4810 match(AddP (AddP reg lreg) off);
4812 op_cost(10);
4813 format %{"[$reg + $off + $lreg]" %}
4814 interface(MEMORY_INTER) %{
4815 base($reg);
4816 index($lreg);
4817 scale(0x0);
4818 disp($off);
4819 %}
4820 %}
4822 // Indirect Memory Plus Index Register Operand
4823 operand indIndex(any_RegP reg, rRegL lreg)
4824 %{
4825 constraint(ALLOC_IN_RC(ptr_reg));
4826 match(AddP reg lreg);
4828 op_cost(10);
4829 format %{"[$reg + $lreg]" %}
4830 interface(MEMORY_INTER) %{
4831 base($reg);
4832 index($lreg);
4833 scale(0x0);
4834 disp(0x0);
4835 %}
4836 %}
4838 // Indirect Memory Times Scale Plus Index Register
4839 operand indIndexScale(any_RegP reg, rRegL lreg, immI2 scale)
4840 %{
4841 constraint(ALLOC_IN_RC(ptr_reg));
4842 match(AddP reg (LShiftL lreg scale));
4844 op_cost(10);
4845 format %{"[$reg + $lreg << $scale]" %}
4846 interface(MEMORY_INTER) %{
4847 base($reg);
4848 index($lreg);
4849 scale($scale);
4850 disp(0x0);
4851 %}
4852 %}
4854 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
4855 operand indIndexScaleOffset(any_RegP reg, immL32 off, rRegL lreg, immI2 scale)
4856 %{
4857 constraint(ALLOC_IN_RC(ptr_reg));
4858 match(AddP (AddP reg (LShiftL lreg scale)) off);
4860 op_cost(10);
4861 format %{"[$reg + $off + $lreg << $scale]" %}
4862 interface(MEMORY_INTER) %{
4863 base($reg);
4864 index($lreg);
4865 scale($scale);
4866 disp($off);
4867 %}
4868 %}
4870 // Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand
4871 operand indPosIndexScaleOffset(any_RegP reg, immL32 off, rRegI idx, immI2 scale)
4872 %{
4873 constraint(ALLOC_IN_RC(ptr_reg));
4874 predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0); // idx (ConvI2L input) must be known non-negative
4875 match(AddP (AddP reg (LShiftL (ConvI2L idx) scale)) off);
4877 op_cost(10);
4878 format %{"[$reg + $off + $idx << $scale]" %}
4879 interface(MEMORY_INTER) %{
4880 base($reg);
4881 index($idx);
4882 scale($scale);
4883 disp($off);
4884 %}
4885 %}
4887 // Indirect Narrow Oop Plus Offset Operand
4888 // Note: x86 architecture doesn't support "scale * index + offset" without a base
4889 // so we can't free r12 even with Universe::narrow_oop_base() == NULL.
4890 operand indCompressedOopOffset(rRegN reg, immL32 off) %{
4891 predicate(UseCompressedOops && (Universe::narrow_oop_shift() == Address::times_8));
4892 constraint(ALLOC_IN_RC(ptr_reg));
4893 match(AddP (DecodeN reg) off);
4895 op_cost(10);
4896 format %{"[R12 + $reg << 3 + $off] (compressed oop addressing)" %}
4897 interface(MEMORY_INTER) %{
4898 base(0xc); // R12
4899 index($reg);
4900 scale(0x3);
4901 disp($off);
4902 %}
4903 %}
4905 // Indirect Memory Operand
4906 operand indirectNarrow(rRegN reg)
4907 %{
4908 predicate(Universe::narrow_oop_shift() == 0); // narrow oop is the raw address; no decode shift needed
4909 constraint(ALLOC_IN_RC(ptr_reg));
4910 match(DecodeN reg);
4912 format %{ "[$reg]" %}
4913 interface(MEMORY_INTER) %{
4914 base($reg);
4915 index(0x4);
4916 scale(0x0);
4917 disp(0x0);
4918 %}
4919 %}
4921 // Indirect Memory Plus Short Offset Operand
4922 operand indOffset8Narrow(rRegN reg, immL8 off)
4923 %{
4924 predicate(Universe::narrow_oop_shift() == 0);
4925 constraint(ALLOC_IN_RC(ptr_reg));
4926 match(AddP (DecodeN reg) off);
4928 format %{ "[$reg + $off (8-bit)]" %}
4929 interface(MEMORY_INTER) %{
4930 base($reg);
4931 index(0x4);
4932 scale(0x0);
4933 disp($off);
4934 %}
4935 %}
4937 // Indirect Memory Plus Long Offset Operand
4938 operand indOffset32Narrow(rRegN reg, immL32 off)
4939 %{
4940 predicate(Universe::narrow_oop_shift() == 0);
4941 constraint(ALLOC_IN_RC(ptr_reg));
4942 match(AddP (DecodeN reg) off);
4944 format %{ "[$reg + $off (32-bit)]" %}
4945 interface(MEMORY_INTER) %{
4946 base($reg);
4947 index(0x4);
4948 scale(0x0);
4949 disp($off);
4950 %}
4951 %}
4953 // Indirect Memory Plus Index Register Plus Offset Operand
4954 operand indIndexOffsetNarrow(rRegN reg, rRegL lreg, immL32 off)
4955 %{
4956 predicate(Universe::narrow_oop_shift() == 0);
4957 constraint(ALLOC_IN_RC(ptr_reg));
4958 match(AddP (AddP (DecodeN reg) lreg) off);
4960 op_cost(10);
4961 format %{"[$reg + $off + $lreg]" %}
4962 interface(MEMORY_INTER) %{
4963 base($reg);
4964 index($lreg);
4965 scale(0x0);
4966 disp($off);
4967 %}
4968 %}
4970 // Indirect Memory Plus Index Register Operand
4971 operand indIndexNarrow(rRegN reg, rRegL lreg)
4972 %{
4973 predicate(Universe::narrow_oop_shift() == 0);
4974 constraint(ALLOC_IN_RC(ptr_reg));
4975 match(AddP (DecodeN reg) lreg);
4977 op_cost(10);
4978 format %{"[$reg + $lreg]" %}
4979 interface(MEMORY_INTER) %{
4980 base($reg);
4981 index($lreg);
4982 scale(0x0);
4983 disp(0x0);
4984 %}
4985 %}
4987 // Indirect Memory Times Scale Plus Index Register
4988 operand indIndexScaleNarrow(rRegN reg, rRegL lreg, immI2 scale)
4989 %{
4990 predicate(Universe::narrow_oop_shift() == 0);
4991 constraint(ALLOC_IN_RC(ptr_reg));
4992 match(AddP (DecodeN reg) (LShiftL lreg scale));
4994 op_cost(10);
4995 format %{"[$reg + $lreg << $scale]" %}
4996 interface(MEMORY_INTER) %{
4997 base($reg);
4998 index($lreg);
4999 scale($scale);
5000 disp(0x0);
5001 %}
5002 %}
5004 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5005 operand indIndexScaleOffsetNarrow(rRegN reg, immL32 off, rRegL lreg, immI2 scale)
5006 %{
5007 predicate(Universe::narrow_oop_shift() == 0);
5008 constraint(ALLOC_IN_RC(ptr_reg));
5009 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5011 op_cost(10);
5012 format %{"[$reg + $off + $lreg << $scale]" %}
5013 interface(MEMORY_INTER) %{
5014 base($reg);
5015 index($lreg);
5016 scale($scale);
5017 disp($off);
5018 %}
5019 %}
5021 // Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand
5022 operand indPosIndexScaleOffsetNarrow(rRegN reg, immL32 off, rRegI idx, immI2 scale)
5023 %{
5024 constraint(ALLOC_IN_RC(ptr_reg));
5025 predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5026 match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L idx) scale)) off);
5028 op_cost(10);
5029 format %{"[$reg + $off + $idx << $scale]" %}
5030 interface(MEMORY_INTER) %{
5031 base($reg);
5032 index($idx);
5033 scale($scale);
5034 disp($off);
5035 %}
5036 %}
5039 //----------Special Memory Operands--------------------------------------------
5040 // Stack Slot Operand - This operand is used for loading and storing temporary
5041 //                      values on the stack where a match requires a value to
5042 //                      flow through memory.
5043 operand stackSlotP(sRegP reg)
5044 %{
5045 constraint(ALLOC_IN_RC(stack_slots));
5046 // No match rule because this operand is only generated in matching
5048 format %{ "[$reg]" %}
5049 interface(MEMORY_INTER) %{
5050 base(0x4);   // RSP
5051 index(0x4); // No Index
5052 scale(0x0); // No Scale
5053 disp($reg); // Stack Offset
5054 %}
5055 %}
5057 operand stackSlotI(sRegI reg)
5058 %{
5059 constraint(ALLOC_IN_RC(stack_slots));
5060 // No match rule because this operand is only generated in matching
5062 format %{ "[$reg]" %}
5063 interface(MEMORY_INTER) %{
5064 base(0x4);   // RSP
5065 index(0x4); // No Index
5066 scale(0x0); // No Scale
5067 disp($reg); // Stack Offset
5068 %}
5069 %}
5071 operand stackSlotF(sRegF reg)
5072 %{
5073 constraint(ALLOC_IN_RC(stack_slots));
5074 // No match rule because this operand is only generated in matching
5076 format %{ "[$reg]" %}
5077 interface(MEMORY_INTER) %{
5078 base(0x4);   // RSP
5079 index(0x4); // No Index
5080 scale(0x0); // No Scale
5081 disp($reg); // Stack Offset
5082 %}
5083 %}
5085 operand stackSlotD(sRegD reg)
5086 %{
5087 constraint(ALLOC_IN_RC(stack_slots));
5088 // No match rule because this operand is only generated in matching
5090 format %{ "[$reg]" %}
5091 interface(MEMORY_INTER) %{
5092 base(0x4);   // RSP
5093 index(0x4); // No Index
5094 scale(0x0); // No Scale
5095 disp($reg); // Stack Offset
5096 %}
5097 %}
5098 operand stackSlotL(sRegL reg) // Long stack slot (matcher-generated, like the slots above)
5099 %{
5100 constraint(ALLOC_IN_RC(stack_slots));
5101 // No match rule because this operand is only generated in matching
5103 format %{ "[$reg]" %}
5104 interface(MEMORY_INTER) %{
5105 base(0x4);   // RSP
5106 index(0x4); // No Index
5107 scale(0x0); // No Scale
5108 disp($reg); // Stack Offset
5109 %}
5110 %}
5112 //----------Conditional Branch Operands----------------------------------------
5113 // Comparison Op  - This is the operation of the comparison, and is limited to
5114 //                  the following set of codes:
5115 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5116 //
5117 // Other attributes of the comparison, such as unsignedness, are specified
5118 // by the comparison instruction that sets a condition code flags register.
5119 // That result is represented by a flags operand whose subtype is appropriate
5120 // to the unsignedness (etc.) of the comparison.
5121 //
5122 // Later, the instruction which matches both the Comparison Op (a Bool) and
5123 // the flags (produced by the Cmp) specifies the coding of the comparison op
5124 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5126 // Comparison Code
5127 operand cmpOp()
5128 %{
5129 match(Bool);
5131 format %{ "" %}
5132 interface(COND_INTER) %{
5133 equal(0x4, "e");
5134 not_equal(0x5, "ne");
5135 less(0xC, "l");
5136 greater_equal(0xD, "ge");
5137 less_equal(0xE, "le");
5138 greater(0xF, "g");
5139 %}
5140 %}
5142 // Comparison Code, unsigned compare.  Used by FP also, with
5143 // C2 (unordered) turned into GT or LT already.  The other bits
5144 // C0 and C3 are turned into Carry & Zero flags.
5145 operand cmpOpU()
5146 %{
5147 match(Bool);
5149 format %{ "" %}
5150 interface(COND_INTER) %{
5151 equal(0x4, "e");
5152 not_equal(0x5, "ne");
5153 less(0x2, "b");
5154 greater_equal(0x3, "nb");
5155 less_equal(0x6, "be");
5156 greater(0x7, "nbe");
5157 %}
5158 %}
5161 // Floating comparisons that don't require any fixup for the unordered case
5162 operand cmpOpUCF() %{
5163 match(Bool);
5164 predicate(n->as_Bool()->_test._test == BoolTest::lt ||
5165 n->as_Bool()->_test._test == BoolTest::ge ||
5166 n->as_Bool()->_test._test == BoolTest::le ||
5167 n->as_Bool()->_test._test == BoolTest::gt);
5168 format %{ "" %}
5169 interface(COND_INTER) %{
5170 equal(0x4, "e");
5171 not_equal(0x5, "ne");
5172 less(0x2, "b");
5173 greater_equal(0x3, "nb");
5174 less_equal(0x6, "be");
5175 greater(0x7, "nbe");
5176 %}
5177 %}
5180 // Floating comparisons that can be fixed up with extra conditional jumps
5181 operand cmpOpUCF2() %{
5182 match(Bool);
5183 predicate(n->as_Bool()->_test._test == BoolTest::ne ||
5184 n->as_Bool()->_test._test == BoolTest::eq);
5185 format %{ "" %}
5186 interface(COND_INTER) %{
5187 equal(0x4, "e");
5188 not_equal(0x5, "ne");
5189 less(0x2, "b");
5190 greater_equal(0x3, "nb");
5191 less_equal(0x6, "be");
5192 greater(0x7, "nbe");
5193 %}
5194 %}
5197 //----------OPERAND CLASSES----------------------------------------------------
5198 // Operand Classes are groups of operands that are used to simplify
5199 // instruction definitions by not requiring the AD writer to specify separate
5200 // instructions for every form of operand when the instruction accepts
5201 // multiple operand types with the same basic encoding and format. The classic
5202 // case of this is memory operands.
5204 opclass memory(indirect, indOffset8, indOffset32, indIndexOffset, indIndex,
5205 indIndexScale, indIndexScaleOffset, indPosIndexScaleOffset,
5206 indCompressedOopOffset,
5207 indirectNarrow, indOffset8Narrow, indOffset32Narrow,
5208 indIndexOffsetNarrow, indIndexNarrow, indIndexScaleNarrow,
5209 indIndexScaleOffsetNarrow, indPosIndexScaleOffsetNarrow);
5211 //----------PIPELINE-----------------------------------------------------------
5212 // Rules which define the behavior of the target architecture's pipeline.
5213 pipeline %{
5215 //----------ATTRIBUTES---------------------------------------------------------
5216 attributes %{
5217 variable_size_instructions;        // Instructions are of variable size
5218 max_instructions_per_bundle = 3;   // Up to 3 instructions per bundle
5219 instruction_unit_size = 1;         // An instruction is 1 byte long
5220 instruction_fetch_unit_size = 16;  // The processor fetches one line
5221 instruction_fetch_units = 1;       // of 16 bytes
5223 // List of nop instructions
5224 nops( MachNop );
5225 %}
5227 //----------RESOURCES----------------------------------------------------------
5228 // Resources are the functional units available to the machine
5230 // Generic P2/P3 pipeline
5231 // 3 decoders, only D0 handles big operands; a "bundle" is the limit of
5232 // 3 instructions decoded per cycle.
5233 // 2 load/store ops per cycle, 1 branch, 1 FPU,
5234 // 3 ALU ops, only ALU0 handles mul instructions.
5235 resources( D0, D1, D2, DECODE = D0 | D1 | D2,
5236 MS0, MS1, MS2, MEM = MS0 | MS1 | MS2,
5237 BR, FPU,
5238 ALU0, ALU1, ALU2, ALU = ALU0 | ALU1 | ALU2);
5240 //----------PIPELINE DESCRIPTION-----------------------------------------------
5241 // Pipeline Description specifies the stages in the machine's pipeline
5243 // Generic P2/P3 pipeline
5244 pipe_desc(S0, S1, S2, S3, S4, S5); // six generic stages
5246 //----------PIPELINE CLASSES---------------------------------------------------
5247 // Pipeline Classes describe the stages in which input and output are
5248 // referenced by the hardware pipeline.
5250 // Naming convention: ialu or fpu
5251 // Then: _reg
5252 // Then: _reg if there is a 2nd register
5253 // Then: _long if it's a pair of instructions implementing a long
5254 // Then: _fat if it requires the big decoder
5255 //   Or: _mem if it requires the big decoder and a memory unit.
5257 // Integer ALU reg operation
5258 pipe_class ialu_reg(rRegI dst)
5259 %{
5260 single_instruction;
5261 dst : S4(write);
5262 dst : S3(read);
5263 DECODE : S0; // any decoder
5264 ALU : S3; // any alu
5265 %}
5267 // Long ALU reg operation
5268 pipe_class ialu_reg_long(rRegL dst)
5269 %{
5270 instruction_count(2);
5271 dst : S4(write);
5272 dst : S3(read);
5273 DECODE : S0(2); // any 2 decoders
5274 ALU : S3(2); // any 2 alus
5275 %}
5277 // Integer ALU reg operation using big decoder
5278 pipe_class ialu_reg_fat(rRegI dst)
5279 %{
5280 single_instruction;
5281 dst : S4(write);
5282 dst : S3(read);
5283 D0 : S0; // big decoder only
5284 ALU : S3; // any alu
5285 %}
5287 // Long ALU reg operation using big decoder
5288 pipe_class ialu_reg_long_fat(rRegL dst)
5289 %{
5290 instruction_count(2);
5291 dst : S4(write);
5292 dst : S3(read);
5293 D0 : S0(2); // big decoder only; twice
5294 ALU : S3(2); // any 2 alus
5295 %}
5297 // Integer ALU reg-reg operation
5298 pipe_class ialu_reg_reg(rRegI dst, rRegI src)
5299 %{
5300 single_instruction;
5301 dst : S4(write);
5302 src : S3(read);
5303 DECODE : S0; // any decoder
5304 ALU : S3; // any alu
5305 %}
5307 // Long ALU reg-reg operation
5308 pipe_class ialu_reg_reg_long(rRegL dst, rRegL src)
5309 %{
5310 instruction_count(2);
5311 dst : S4(write);
5312 src : S3(read);
5313 DECODE : S0(2); // any 2 decoders
5314 ALU : S3(2); // any 2 alus
5315 %}
5317 // Integer ALU reg-reg operation
5318 pipe_class ialu_reg_reg_fat(rRegI dst, memory src)
5319 %{
5320 single_instruction;
5321 dst : S4(write);
5322 src : S3(read);
5323 D0 : S0; // big decoder only
5324 ALU : S3; // any alu
5325 %}
5327 // Long ALU reg-reg operation
5328 pipe_class ialu_reg_reg_long_fat(rRegL dst, rRegL src)
5329 %{
5330 instruction_count(2);
5331 dst : S4(write);
5332 src : S3(read);
5333 D0 : S0(2); // big decoder only; twice
5334 ALU : S3(2); // any 2 alus
5335 %}
5337 // Integer ALU reg-mem operation
5338 pipe_class ialu_reg_mem(rRegI dst, memory mem)
5339 %{
5340 single_instruction;
5341 dst : S5(write);
5342 mem : S3(read);
5343 D0 : S0; // big decoder only
5344 ALU : S4; // any alu
5345 MEM : S3; // any mem
5346 %}
5348 // Integer mem operation (prefetch)
5349 pipe_class ialu_mem(memory mem)
5350 %{
5351 single_instruction;
5352 mem : S3(read);
5353 D0 : S0; // big decoder only
5354 MEM : S3; // any mem
5355 %}
5357 // Integer Store to Memory
5358 pipe_class ialu_mem_reg(memory mem, rRegI src)
5359 %{
5360 single_instruction;
5361 mem : S3(read);
5362 src : S5(read);
5363 D0 : S0; // big decoder only
5364 ALU : S4; // any alu
5365 MEM : S3; // any mem
5366 %}
5368 // // Long Store to Memory
5369 // pipe_class ialu_mem_long_reg(memory mem, rRegL src)
5370 // %{
5371 //   instruction_count(2);
5372 //   mem  : S3(read);
5373 //   src  : S5(read);
5374 //   D0   : S0(2); // big decoder only; twice
5375 //   ALU  : S4(2); // any 2 alus
5376 //   MEM  : S3(2); // Both mems
5377 // %}
5379 // Integer Store to Memory
5380 pipe_class ialu_mem_imm(memory mem)
5381 %{
5382 single_instruction;
5383 mem : S3(read);
5384 D0 : S0; // big decoder only
5385 ALU : S4; // any alu
5386 MEM : S3; // any mem
5387 %}
5389 // Integer ALU0 reg-reg operation
5390 pipe_class ialu_reg_reg_alu0(rRegI dst, rRegI src)
5391 %{
5392 single_instruction;
5393 dst : S4(write);
5394 src : S3(read);
5395 D0 : S0; // Big decoder only
5396 ALU0 : S3; // only alu0
5397 %}
5399 // Integer ALU0 reg-mem operation
5400 pipe_class ialu_reg_mem_alu0(rRegI dst, memory mem)
5401 %{
5402 single_instruction;
5403 dst : S5(write);
5404 mem : S3(read);
5405 D0 : S0; // big decoder only
5406 ALU0 : S4; // ALU0 only
5407 MEM : S3; // any mem
5408 %}
5410 // Integer ALU reg-reg operation
5411 pipe_class ialu_cr_reg_reg(rFlagsReg cr, rRegI src1, rRegI src2)
5412 %{
5413 single_instruction;
5414 cr : S4(write);
5415 src1 : S3(read);
5416 src2 : S3(read);
5417 DECODE : S0; // any decoder
5418 ALU : S3; // any alu
5419 %}
5421 // Integer ALU reg-imm operation
5422 pipe_class ialu_cr_reg_imm(rFlagsReg cr, rRegI src1)
5423 %{
5424 single_instruction;
5425 cr : S4(write);
5426 src1 : S3(read);
5427 DECODE : S0; // any decoder
5428 ALU : S3; // any alu
5429 %}
5431 // Integer ALU reg-mem operation
5432 pipe_class ialu_cr_reg_mem(rFlagsReg cr, rRegI src1, memory src2)
5433 %{
5434 single_instruction;
5435 cr : S4(write);
5436 src1 : S3(read);
5437 src2 : S3(read);
5438 D0 : S0; // big decoder only
5439 ALU : S4; // any alu
5440 MEM : S3; // any mem
5441 %}
5443 // Conditional move reg-reg
5444 pipe_class pipe_cmplt( rRegI p, rRegI q, rRegI y)
5445 %{
5446 instruction_count(4);
5447 y : S4(read);
5448 q : S3(read);
5449 p : S3(read);
5450 DECODE : S0(4); // any decoder
5451 %}
5453 // Conditional move reg-reg
5454 pipe_class pipe_cmov_reg( rRegI dst, rRegI src, rFlagsReg cr)
5455 %{
5456 single_instruction;
5457 dst : S4(write);
5458 src : S3(read);
5459 cr : S3(read);
5460 DECODE : S0; // any decoder
5461 %}
5463 // Conditional move reg-mem
5464 pipe_class pipe_cmov_mem( rFlagsReg cr, rRegI dst, memory src)
5465 %{
5466 single_instruction;
5467 dst : S4(write);
5468 src : S3(read);
5469 cr : S3(read);
5470 DECODE : S0; // any decoder
5471 MEM : S3; // any mem
5472 %}
5474 // Conditional move reg-reg long
5475 pipe_class pipe_cmov_reg_long( rFlagsReg cr, rRegL dst, rRegL src)
5476 %{
5477 single_instruction;
5478 dst : S4(write);
5479 src : S3(read);
5480 cr : S3(read);
5481 DECODE : S0(2); // any 2 decoders
5482 %}
5484 // XXX
5485 // // Conditional move double reg-reg
5486 // pipe_class pipe_cmovD_reg( rFlagsReg cr, regDPR1 dst, regD src)
5487 // %{
5488 // single_instruction;
5489 // dst : S4(write);
5490 // src : S3(read);
5491 // cr : S3(read);
5492 // DECODE : S0; // any decoder
5493 // %}
5495 // Float reg-reg operation
5496 pipe_class fpu_reg(regD dst)
5497 %{
5498 instruction_count(2);
5499 dst : S3(read);
5500 DECODE : S0(2); // any 2 decoders
5501 FPU : S3;
5502 %}
5504 // Float reg-reg operation
5505 pipe_class fpu_reg_reg(regD dst, regD src)
5506 %{
5507 instruction_count(2);
5508 dst : S4(write);
5509 src : S3(read);
5510 DECODE : S0(2); // any 2 decoders
5511 FPU : S3;
5512 %}
5514 // Float reg-reg operation
5515 pipe_class fpu_reg_reg_reg(regD dst, regD src1, regD src2)
5516 %{
5517 instruction_count(3);
5518 dst : S4(write);
5519 src1 : S3(read);
5520 src2 : S3(read);
5521 DECODE : S0(3); // any 3 decoders
5522 FPU : S3(2);
5523 %}
5525 // Float reg-reg operation
5526 pipe_class fpu_reg_reg_reg_reg(regD dst, regD src1, regD src2, regD src3)
5527 %{
5528 instruction_count(4);
5529 dst : S4(write);
5530 src1 : S3(read);
5531 src2 : S3(read);
5532 src3 : S3(read);
5533 DECODE : S0(4); // any 3 decoders
5534 FPU : S3(2);
5535 %}
5537 // Float reg-mem-reg-reg operation (one memory source, three register operands)
5538 pipe_class fpu_reg_mem_reg_reg(regD dst, memory src1, regD src2, regD src3)
5539 %{
5540 instruction_count(4);
5541 dst : S4(write);
5542 src1 : S3(read);
5543 src2 : S3(read);
5544 src3 : S3(read);
5545 DECODE : S1(3); // any 3 decoders
5546 D0 : S0; // Big decoder only
5547 FPU : S3(2);
5548 MEM : S3;
5549 %}
5551 // Float reg-mem operation
5552 pipe_class fpu_reg_mem(regD dst, memory mem)
5553 %{
5554 instruction_count(2);
5555 dst : S5(write);
5556 mem : S3(read);
5557 D0 : S0; // big decoder only
5558 DECODE : S1; // any decoder for FPU POP
5559 FPU : S4;
5560 MEM : S3; // any mem
5561 %}
5563 // Float reg-reg-mem operation
5564 pipe_class fpu_reg_reg_mem(regD dst, regD src1, memory mem)
5565 %{
5566 instruction_count(3);
5567 dst : S5(write);
5568 src1 : S3(read);
5569 mem : S3(read);
5570 D0 : S0; // big decoder only
5571 DECODE : S1(2); // any decoder for FPU POP
5572 FPU : S4;
5573 MEM : S3; // any mem
5574 %}
5576 // Float mem-reg operation
5577 pipe_class fpu_mem_reg(memory mem, regD src)
5578 %{
5579 instruction_count(2);
5580 src : S5(read);
5581 mem : S3(read);
5582 DECODE : S0; // any decoder for FPU PUSH
5583 D0 : S1; // big decoder only
5584 FPU : S4;
5585 MEM : S3; // any mem
5586 %}
// Float mem-reg-reg operation
5588 pipe_class fpu_mem_reg_reg(memory mem, regD src1, regD src2)
5589 %{
5590 instruction_count(3);
5591 src1 : S3(read);
5592 src2 : S3(read);
5593 mem : S3(read);
5594 DECODE : S0(2); // any decoder for FPU PUSH
5595 D0 : S1; // big decoder only
5596 FPU : S4;
5597 MEM : S3; // any mem
5598 %}
// Float mem-reg-mem operation
5600 pipe_class fpu_mem_reg_mem(memory mem, regD src1, memory src2)
5601 %{
5602 instruction_count(3);
5603 src1 : S3(read);
5604 src2 : S3(read);
5605 mem : S4(read);
5606 DECODE : S0; // any decoder for FPU PUSH
5607 D0 : S0(2); // big decoder only
5608 FPU : S4;
5609 MEM : S3(2); // any mem
5610 %}
// Float mem-mem operation
5612 pipe_class fpu_mem_mem(memory dst, memory src1)
5613 %{
5614 instruction_count(2);
5615 src1 : S3(read);
5616 dst : S4(read);
5617 D0 : S0(2); // big decoder only
5618 MEM : S3(2); // any mem
5619 %}
// Float mem-mem-mem operation
5621 pipe_class fpu_mem_mem_mem(memory dst, memory src1, memory src2)
5622 %{
5623 instruction_count(3);
5624 src1 : S3(read);
5625 src2 : S3(read);
5626 dst : S4(read);
5627 D0 : S0(3); // big decoder only
5628 FPU : S4;
5629 MEM : S3(3); // any mem
5630 %}
// Float mem-reg operation with a constant operand
5632 pipe_class fpu_mem_reg_con(memory mem, regD src1)
5633 %{
5634 instruction_count(3);
5635 src1 : S4(read);
5636 mem : S4(read);
5637 DECODE : S0; // any decoder for FPU PUSH
5638 D0 : S0(2); // big decoder only
5639 FPU : S4;
5640 MEM : S3(2); // any mem
5641 %}
5643 // Float load constant
5644 pipe_class fpu_reg_con(regD dst)
5645 %{
5646 instruction_count(2);
5647 dst : S5(write);
5648 D0 : S0; // big decoder only for the load
5649 DECODE : S1; // any decoder for FPU POP
5650 FPU : S4;
5651 MEM : S3; // any mem
5652 %}
5654 // Float load constant
5655 pipe_class fpu_reg_reg_con(regD dst, regD src)
5656 %{
5657 instruction_count(3);
5658 dst : S5(write);
5659 src : S3(read);
5660 D0 : S0; // big decoder only for the load
5661 DECODE : S1(2); // any decoder for FPU POP
5662 FPU : S4;
5663 MEM : S3; // any mem
5664 %}
5666 // UnConditional branch
5667 pipe_class pipe_jmp(label labl)
5668 %{
5669 single_instruction;
5670 BR : S3;
5671 %}
5673 // Conditional branch (reads the flags register)
5674 pipe_class pipe_jcc(cmpOp cmp, rFlagsReg cr, label labl)
5675 %{
5676 single_instruction;
5677 cr : S1(read);
5678 BR : S3;
5679 %}
5681 // Allocation idiom (serialized, fixed 6-cycle latency)
5682 pipe_class pipe_cmpxchg(rRegP dst, rRegP heap_ptr)
5683 %{
5684 instruction_count(1); force_serialization;
5685 fixed_latency(6);
5686 heap_ptr : S3(read);
5687 DECODE : S0(3);
5688 D0 : S2;
5689 MEM : S3;
5690 ALU : S3(2);
5691 dst : S5(write);
5692 BR : S5;
5693 %}
5695 // Generic big/slow expanded idiom; catch-all for anything without
5695 // a more precise pipeline description (serialized, latency 100)
5696 pipe_class pipe_slow()
5697 %{
5698 instruction_count(10); multiple_bundles; force_serialization;
5699 fixed_latency(100);
5700 D0 : S0(2);
5701 MEM : S3(2);
5702 %}
5704 // The real do-nothing guy
5705 pipe_class empty()
5706 %{
5707 instruction_count(0);
5708 %}
5710 // Define the class for the Nop node
5711 define
5712 %{
5713 MachNop = empty;
5714 %}
5716 %}
5718 //----------INSTRUCTIONS-------------------------------------------------------
5719 //
5720 // match -- States which machine-independent subtree may be replaced
5721 // by this instruction.
5722 // ins_cost -- The estimated cost of this instruction is used by instruction
5723 // selection to identify a minimum cost tree of machine
5724 // instructions that matches a tree of machine-independent
5725 // instructions.
5726 // format -- A string providing the disassembly for this instruction.
5727 // The value of an instruction's operand may be inserted
5728 // by referring to it with a '$' prefix.
5729 // opcode -- Three instruction opcodes may be provided. These are referred
5730 // to within an encode class as $primary, $secondary, and $tertiary
5731 // respectively. The primary opcode is commonly used to
5732 // indicate the type of machine instruction, while secondary
5733 // and tertiary are often used for prefix options or addressing
5734 // modes.
5735 // ins_encode -- A list of encode classes with parameters. The encode class
5736 // name must have been defined in an 'enc_class' specification
5737 // in the encode section of the architecture description.
5740 //----------Load/Store/Move Instructions---------------------------------------
5741 //----------Load Instructions--------------------------------------------------
5743 // Load Byte (8 bit signed)
5744 instruct loadB(rRegI dst, memory mem)
5745 %{
5746 match(Set dst (LoadB mem));
5748 ins_cost(125);
5749 format %{ "movsbl $dst, $mem\t# byte" %}
5751 ins_encode %{
5752 __ movsbl($dst$$Register, $mem$$Address);
5753 %}
5755 ins_pipe(ialu_reg_mem);
5756 %}
5758 // Load Byte (8 bit signed) into Long Register
5759 instruct loadB2L(rRegL dst, memory mem)
5760 %{
5761 match(Set dst (ConvI2L (LoadB mem)));
5763 ins_cost(125);
5764 format %{ "movsbq $dst, $mem\t# byte -> long" %}
5766 ins_encode %{
5767 __ movsbq($dst$$Register, $mem$$Address);
5768 %}
5770 ins_pipe(ialu_reg_mem);
5771 %}
5773 // Load Unsigned Byte (8 bit UNsigned)
5774 instruct loadUB(rRegI dst, memory mem)
5775 %{
5776 match(Set dst (LoadUB mem));
5778 ins_cost(125);
5779 format %{ "movzbl $dst, $mem\t# ubyte" %}
5781 ins_encode %{
5782 __ movzbl($dst$$Register, $mem$$Address);
5783 %}
5785 ins_pipe(ialu_reg_mem);
5786 %}
5788 // Load Unsigned Byte (8 bit UNsigned) into Long Register
5789 instruct loadUB2L(rRegL dst, memory mem)
5790 %{
5791 match(Set dst (ConvI2L (LoadUB mem)));
5793 ins_cost(125);
5794 format %{ "movzbq $dst, $mem\t# ubyte -> long" %}
5796 ins_encode %{
5797 __ movzbq($dst$$Register, $mem$$Address);
5798 %}
5800 ins_pipe(ialu_reg_mem);
5801 %}
5803 // Load Unsigned Byte (8 bit UNsigned) with a 8-bit mask into Long Register
5804 instruct loadUB2L_immI8(rRegL dst, memory mem, immI8 mask, rFlagsReg cr) %{
5805 match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
5806 effect(KILL cr); // andl sets flags
5808 format %{ "movzbq $dst, $mem\t# ubyte & 8-bit mask -> long\n\t"
5809 "andl $dst, $mask" %}
5810 ins_encode %{
5811 Register Rdst = $dst$$Register;
5812 __ movzbq(Rdst, $mem$$Address);
5813 __ andl(Rdst, $mask$$constant);
5814 %}
5815 ins_pipe(ialu_reg_mem);
5816 %}
5818 // Load Short (16 bit signed)
5819 instruct loadS(rRegI dst, memory mem)
5820 %{
5821 match(Set dst (LoadS mem));
5823 ins_cost(125);
5824 format %{ "movswl $dst, $mem\t# short" %}
5826 ins_encode %{
5827 __ movswl($dst$$Register, $mem$$Address);
5828 %}
5830 ins_pipe(ialu_reg_mem);
5831 %}
5833 // Load Short (16 bit signed) to Byte (8 bit signed)
5834 instruct loadS2B(rRegI dst, memory mem, immI_24 twentyfour) %{
5835 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
5837 ins_cost(125);
5838 format %{ "movsbl $dst, $mem\t# short -> byte" %}
5839 ins_encode %{
5840 __ movsbl($dst$$Register, $mem$$Address);
5841 %}
5842 ins_pipe(ialu_reg_mem);
5843 %}
5845 // Load Short (16 bit signed) into Long Register
5846 instruct loadS2L(rRegL dst, memory mem)
5847 %{
5848 match(Set dst (ConvI2L (LoadS mem)));
5850 ins_cost(125);
5851 format %{ "movswq $dst, $mem\t# short -> long" %}
5853 ins_encode %{
5854 __ movswq($dst$$Register, $mem$$Address);
5855 %}
5857 ins_pipe(ialu_reg_mem);
5858 %}
5860 // Load Unsigned Short/Char (16 bit UNsigned)
5861 instruct loadUS(rRegI dst, memory mem)
5862 %{
5863 match(Set dst (LoadUS mem));
5865 ins_cost(125);
5866 format %{ "movzwl $dst, $mem\t# ushort/char" %}
5868 ins_encode %{
5869 __ movzwl($dst$$Register, $mem$$Address);
5870 %}
5872 ins_pipe(ialu_reg_mem);
5873 %}
5875 // Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed)
5876 instruct loadUS2B(rRegI dst, memory mem, immI_24 twentyfour) %{
5877 match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour));
5879 ins_cost(125);
5880 format %{ "movsbl $dst, $mem\t# ushort -> byte" %}
5881 ins_encode %{
5882 __ movsbl($dst$$Register, $mem$$Address);
5883 %}
5884 ins_pipe(ialu_reg_mem);
5885 %}
5887 // Load Unsigned Short/Char (16 bit UNsigned) into Long Register
5888 instruct loadUS2L(rRegL dst, memory mem)
5889 %{
5890 match(Set dst (ConvI2L (LoadUS mem)));
5892 ins_cost(125);
5893 format %{ "movzwq $dst, $mem\t# ushort/char -> long" %}
5895 ins_encode %{
5896 __ movzwq($dst$$Register, $mem$$Address);
5897 %}
5899 ins_pipe(ialu_reg_mem);
5900 %}
5902 // Load Unsigned Short/Char (16 bit UNsigned) with mask 0xFF into Long Register
// Mask 0xFF keeps only the low byte, so one zero-extending byte load suffices.
5903 instruct loadUS2L_immI_255(rRegL dst, memory mem, immI_255 mask) %{
5904 match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
5906 format %{ "movzbq $dst, $mem\t# ushort/char & 0xFF -> long" %}
5907 ins_encode %{
5908 __ movzbq($dst$$Register, $mem$$Address);
5909 %}
5910 ins_pipe(ialu_reg_mem);
5911 %}
5913 // Load Unsigned Short/Char (16 bit UNsigned) with mask into Long Register
5914 instruct loadUS2L_immI16(rRegL dst, memory mem, immI16 mask, rFlagsReg cr) %{
5915 match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
5916 effect(KILL cr); // andl sets flags
5918 format %{ "movzwq $dst, $mem\t# ushort/char & 16-bit mask -> long\n\t"
5919 "andl $dst, $mask" %}
5920 ins_encode %{
5921 Register Rdst = $dst$$Register;
5922 __ movzwq(Rdst, $mem$$Address);
5923 __ andl(Rdst, $mask$$constant);
5924 %}
5925 ins_pipe(ialu_reg_mem);
5926 %}
5928 // Load Integer
5929 instruct loadI(rRegI dst, memory mem)
5930 %{
5931 match(Set dst (LoadI mem));
5933 ins_cost(125);
5934 format %{ "movl $dst, $mem\t# int" %}
5936 ins_encode %{
5937 __ movl($dst$$Register, $mem$$Address);
5938 %}
5940 ins_pipe(ialu_reg_mem);
5941 %}
5943 // Load Integer (32 bit signed) to Byte (8 bit signed)
5944 instruct loadI2B(rRegI dst, memory mem, immI_24 twentyfour) %{
5945 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
5947 ins_cost(125);
5948 format %{ "movsbl $dst, $mem\t# int -> byte" %}
5949 ins_encode %{
5950 __ movsbl($dst$$Register, $mem$$Address);
5951 %}
5952 ins_pipe(ialu_reg_mem);
5953 %}
5955 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
5956 instruct loadI2UB(rRegI dst, memory mem, immI_255 mask) %{
5957 match(Set dst (AndI (LoadI mem) mask));
5959 ins_cost(125);
5960 format %{ "movzbl $dst, $mem\t# int -> ubyte" %}
5961 ins_encode %{
5962 __ movzbl($dst$$Register, $mem$$Address);
5963 %}
5964 ins_pipe(ialu_reg_mem);
5965 %}
5967 // Load Integer (32 bit signed) to Short (16 bit signed)
5968 instruct loadI2S(rRegI dst, memory mem, immI_16 sixteen) %{
5969 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
5971 ins_cost(125);
5972 format %{ "movswl $dst, $mem\t# int -> short" %}
5973 ins_encode %{
5974 __ movswl($dst$$Register, $mem$$Address);
5975 %}
5976 ins_pipe(ialu_reg_mem);
5977 %}
5979 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
5980 instruct loadI2US(rRegI dst, memory mem, immI_65535 mask) %{
5981 match(Set dst (AndI (LoadI mem) mask));
5983 ins_cost(125);
5984 format %{ "movzwl $dst, $mem\t# int -> ushort/char" %}
5985 ins_encode %{
5986 __ movzwl($dst$$Register, $mem$$Address);
5987 %}
5988 ins_pipe(ialu_reg_mem);
5989 %}
5991 // Load Integer into Long Register
5992 instruct loadI2L(rRegL dst, memory mem)
5993 %{
5994 match(Set dst (ConvI2L (LoadI mem)));
5996 ins_cost(125);
5997 format %{ "movslq $dst, $mem\t# int -> long" %}
5999 ins_encode %{
6000 __ movslq($dst$$Register, $mem$$Address);
6001 %}
6003 ins_pipe(ialu_reg_mem);
6004 %}
6006 // Load Integer with mask 0xFF into Long Register
6007 instruct loadI2L_immI_255(rRegL dst, memory mem, immI_255 mask) %{
6008 match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
6010 format %{ "movzbq $dst, $mem\t# int & 0xFF -> long" %}
6011 ins_encode %{
6012 __ movzbq($dst$$Register, $mem$$Address);
6013 %}
6014 ins_pipe(ialu_reg_mem);
6015 %}
6017 // Load Integer with mask 0xFFFF into Long Register
6018 instruct loadI2L_immI_65535(rRegL dst, memory mem, immI_65535 mask) %{
6019 match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
6021 format %{ "movzwq $dst, $mem\t# int & 0xFFFF -> long" %}
6022 ins_encode %{
6023 __ movzwq($dst$$Register, $mem$$Address);
6024 %}
6025 ins_pipe(ialu_reg_mem);
6026 %}
6028 // Load Integer with a 32-bit mask into Long Register
6029 instruct loadI2L_immI(rRegL dst, memory mem, immI mask, rFlagsReg cr) %{
6030 match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
6031 effect(KILL cr); // andl sets flags
6033 format %{ "movl $dst, $mem\t# int & 32-bit mask -> long\n\t"
6034 "andl $dst, $mask" %}
6035 ins_encode %{
6036 Register Rdst = $dst$$Register;
6037 __ movl(Rdst, $mem$$Address);
6038 __ andl(Rdst, $mask$$constant);
6039 %}
6040 ins_pipe(ialu_reg_mem);
6041 %}
6043 // Load Unsigned Integer into Long Register
// (32-bit movl implicitly zeroes the upper half of the 64-bit register)
6044 instruct loadUI2L(rRegL dst, memory mem)
6045 %{
6046 match(Set dst (LoadUI2L mem));
6048 ins_cost(125);
6049 format %{ "movl $dst, $mem\t# uint -> long" %}
6051 ins_encode %{
6052 __ movl($dst$$Register, $mem$$Address);
6053 %}
6055 ins_pipe(ialu_reg_mem);
6056 %}
6058 // Load Long
6059 instruct loadL(rRegL dst, memory mem)
6060 %{
6061 match(Set dst (LoadL mem));
6063 ins_cost(125);
6064 format %{ "movq $dst, $mem\t# long" %}
6066 ins_encode %{
6067 __ movq($dst$$Register, $mem$$Address);
6068 %}
6070 ins_pipe(ialu_reg_mem); // XXX
6071 %}
6073 // Load Range (array length)
6074 instruct loadRange(rRegI dst, memory mem)
6075 %{
6076 match(Set dst (LoadRange mem));
6078 ins_cost(125); // XXX
6079 format %{ "movl $dst, $mem\t# range" %}
6080 opcode(0x8B);
6081 ins_encode(REX_reg_mem(dst, mem), OpcP, reg_mem(dst, mem));
6082 ins_pipe(ialu_reg_mem);
6083 %}
6085 // Load Pointer
6086 instruct loadP(rRegP dst, memory mem)
6087 %{
6088 match(Set dst (LoadP mem));
6090 ins_cost(125); // XXX
6091 format %{ "movq $dst, $mem\t# ptr" %}
6092 opcode(0x8B);
6093 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6094 ins_pipe(ialu_reg_mem); // XXX
6095 %}
6097 // Load Compressed Pointer (32-bit narrow oop)
6098 instruct loadN(rRegN dst, memory mem)
6099 %{
6100 match(Set dst (LoadN mem));
6102 ins_cost(125); // XXX
6103 format %{ "movl $dst, $mem\t# compressed ptr" %}
6104 ins_encode %{
6105 __ movl($dst$$Register, $mem$$Address);
6106 %}
6107 ins_pipe(ialu_reg_mem); // XXX
6108 %}
6111 // Load Klass Pointer
6112 instruct loadKlass(rRegP dst, memory mem)
6113 %{
6114 match(Set dst (LoadKlass mem));
6116 ins_cost(125); // XXX
6117 format %{ "movq $dst, $mem\t# class" %}
6118 opcode(0x8B);
6119 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6120 ins_pipe(ialu_reg_mem); // XXX
6121 %}
6123 // Load narrow Klass Pointer
6124 instruct loadNKlass(rRegN dst, memory mem)
6125 %{
6126 match(Set dst (LoadNKlass mem));
6128 ins_cost(125); // XXX
6129 format %{ "movl $dst, $mem\t# compressed klass ptr" %}
6130 ins_encode %{
6131 __ movl($dst$$Register, $mem$$Address);
6132 %}
6133 ins_pipe(ialu_reg_mem); // XXX
6134 %}
6136 // Load Float
6137 instruct loadF(regF dst, memory mem)
6138 %{
6139 match(Set dst (LoadF mem));
6141 ins_cost(145); // XXX
6142 format %{ "movss $dst, $mem\t# float" %}
6143 opcode(0xF3, 0x0F, 0x10);
6144 ins_encode(OpcP, REX_reg_mem(dst, mem), OpcS, OpcT, reg_mem(dst, mem));
6145 ins_pipe(pipe_slow); // XXX
6146 %}
6148 // Load Double (when movsd's upper-clearing behavior is not wanted/available)
6149 instruct loadD_partial(regD dst, memory mem)
6150 %{
6151 predicate(!UseXmmLoadAndClearUpper);
6152 match(Set dst (LoadD mem));
6154 ins_cost(145); // XXX
6155 format %{ "movlpd $dst, $mem\t# double" %}
6156 opcode(0x66, 0x0F, 0x12);
6157 ins_encode(OpcP, REX_reg_mem(dst, mem), OpcS, OpcT, reg_mem(dst, mem));
6158 ins_pipe(pipe_slow); // XXX
6159 %}
// Load Double; movsd clears the upper half of the XMM register
6161 instruct loadD(regD dst, memory mem)
6162 %{
6163 predicate(UseXmmLoadAndClearUpper);
6164 match(Set dst (LoadD mem));
6166 ins_cost(145); // XXX
6167 format %{ "movsd $dst, $mem\t# double" %}
6168 opcode(0xF2, 0x0F, 0x10);
6169 ins_encode(OpcP, REX_reg_mem(dst, mem), OpcS, OpcT, reg_mem(dst, mem));
6170 ins_pipe(pipe_slow); // XXX
6171 %}
6173 // Load Aligned Packed Byte to XMM register
6174 instruct loadA8B(regD dst, memory mem) %{
6175 match(Set dst (Load8B mem));
6176 ins_cost(125);
6177 format %{ "MOVQ $dst,$mem\t! packed8B" %}
6178 ins_encode( movq_ld(dst, mem));
6179 ins_pipe( pipe_slow );
6180 %}
6182 // Load Aligned Packed Short to XMM register
6183 instruct loadA4S(regD dst, memory mem) %{
6184 match(Set dst (Load4S mem));
6185 ins_cost(125);
6186 format %{ "MOVQ $dst,$mem\t! packed4S" %}
6187 ins_encode( movq_ld(dst, mem));
6188 ins_pipe( pipe_slow );
6189 %}
6191 // Load Aligned Packed Char to XMM register
6192 instruct loadA4C(regD dst, memory mem) %{
6193 match(Set dst (Load4C mem));
6194 ins_cost(125);
6195 format %{ "MOVQ $dst,$mem\t! packed4C" %}
6196 ins_encode( movq_ld(dst, mem));
6197 ins_pipe( pipe_slow );
6198 %}
6200 // Load Aligned Packed Integer to XMM register
6201 instruct load2IU(regD dst, memory mem) %{
6202 match(Set dst (Load2I mem));
6203 ins_cost(125);
6204 format %{ "MOVQ $dst,$mem\t! packed2I" %}
6205 ins_encode( movq_ld(dst, mem));
6206 ins_pipe( pipe_slow );
6207 %}
6209 // Load Aligned Packed Single to XMM
6210 instruct loadA2F(regD dst, memory mem) %{
6211 match(Set dst (Load2F mem));
6212 ins_cost(145);
6213 format %{ "MOVQ $dst,$mem\t! packed2F" %}
6214 ins_encode( movq_ld(dst, mem));
6215 ins_pipe( pipe_slow );
6216 %}
6218 // Load Effective Address
// All lea variants below use opcode 0x8D (leaq); they differ only in the
// addressing-mode operand they accept.
6219 instruct leaP8(rRegP dst, indOffset8 mem)
6220 %{
6221 match(Set dst mem);
6223 ins_cost(110); // XXX
6224 format %{ "leaq $dst, $mem\t# ptr 8" %}
6225 opcode(0x8D);
6226 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6227 ins_pipe(ialu_reg_reg_fat);
6228 %}
6230 instruct leaP32(rRegP dst, indOffset32 mem)
6231 %{
6232 match(Set dst mem);
6234 ins_cost(110);
6235 format %{ "leaq $dst, $mem\t# ptr 32" %}
6236 opcode(0x8D);
6237 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6238 ins_pipe(ialu_reg_reg_fat);
6239 %}
6241 // instruct leaPIdx(rRegP dst, indIndex mem)
6242 // %{
6243 // match(Set dst mem);
6245 // ins_cost(110);
6246 // format %{ "leaq $dst, $mem\t# ptr idx" %}
6247 // opcode(0x8D);
6248 // ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6249 // ins_pipe(ialu_reg_reg_fat);
6250 // %}
6252 instruct leaPIdxOff(rRegP dst, indIndexOffset mem)
6253 %{
6254 match(Set dst mem);
6256 ins_cost(110);
6257 format %{ "leaq $dst, $mem\t# ptr idxoff" %}
6258 opcode(0x8D);
6259 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6260 ins_pipe(ialu_reg_reg_fat);
6261 %}
6263 instruct leaPIdxScale(rRegP dst, indIndexScale mem)
6264 %{
6265 match(Set dst mem);
6267 ins_cost(110);
6268 format %{ "leaq $dst, $mem\t# ptr idxscale" %}
6269 opcode(0x8D);
6270 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6271 ins_pipe(ialu_reg_reg_fat);
6272 %}
6274 instruct leaPIdxScaleOff(rRegP dst, indIndexScaleOffset mem)
6275 %{
6276 match(Set dst mem);
6278 ins_cost(110);
6279 format %{ "leaq $dst, $mem\t# ptr idxscaleoff" %}
6280 opcode(0x8D);
6281 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6282 ins_pipe(ialu_reg_reg_fat);
6283 %}
6285 instruct leaPPosIdxScaleOff(rRegP dst, indPosIndexScaleOffset mem)
6286 %{
6287 match(Set dst mem);
6289 ins_cost(110);
6290 format %{ "leaq $dst, $mem\t# ptr posidxscaleoff" %}
6291 opcode(0x8D);
6292 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6293 ins_pipe(ialu_reg_reg_fat);
6294 %}
6296 // Load Effective Address which uses Narrow (32-bits) oop
6297 instruct leaPCompressedOopOffset(rRegP dst, indCompressedOopOffset mem)
6298 %{
6299 predicate(UseCompressedOops && (Universe::narrow_oop_shift() != 0));
6300 match(Set dst mem);
6302 ins_cost(110);
6303 format %{ "leaq $dst, $mem\t# ptr compressedoopoff32" %}
6304 opcode(0x8D);
6305 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6306 ins_pipe(ialu_reg_reg_fat);
6307 %}
// The remaining narrow variants apply only when the narrow oop shift is zero.
6309 instruct leaP8Narrow(rRegP dst, indOffset8Narrow mem)
6310 %{
6311 predicate(Universe::narrow_oop_shift() == 0);
6312 match(Set dst mem);
6314 ins_cost(110); // XXX
6315 format %{ "leaq $dst, $mem\t# ptr off8narrow" %}
6316 opcode(0x8D);
6317 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6318 ins_pipe(ialu_reg_reg_fat);
6319 %}
6321 instruct leaP32Narrow(rRegP dst, indOffset32Narrow mem)
6322 %{
6323 predicate(Universe::narrow_oop_shift() == 0);
6324 match(Set dst mem);
6326 ins_cost(110);
6327 format %{ "leaq $dst, $mem\t# ptr off32narrow" %}
6328 opcode(0x8D);
6329 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6330 ins_pipe(ialu_reg_reg_fat);
6331 %}
6333 instruct leaPIdxOffNarrow(rRegP dst, indIndexOffsetNarrow mem)
6334 %{
6335 predicate(Universe::narrow_oop_shift() == 0);
6336 match(Set dst mem);
6338 ins_cost(110);
6339 format %{ "leaq $dst, $mem\t# ptr idxoffnarrow" %}
6340 opcode(0x8D);
6341 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6342 ins_pipe(ialu_reg_reg_fat);
6343 %}
6345 instruct leaPIdxScaleNarrow(rRegP dst, indIndexScaleNarrow mem)
6346 %{
6347 predicate(Universe::narrow_oop_shift() == 0);
6348 match(Set dst mem);
6350 ins_cost(110);
6351 format %{ "leaq $dst, $mem\t# ptr idxscalenarrow" %}
6352 opcode(0x8D);
6353 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6354 ins_pipe(ialu_reg_reg_fat);
6355 %}
6357 instruct leaPIdxScaleOffNarrow(rRegP dst, indIndexScaleOffsetNarrow mem)
6358 %{
6359 predicate(Universe::narrow_oop_shift() == 0);
6360 match(Set dst mem);
6362 ins_cost(110);
6363 format %{ "leaq $dst, $mem\t# ptr idxscaleoffnarrow" %}
6364 opcode(0x8D);
6365 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6366 ins_pipe(ialu_reg_reg_fat);
6367 %}
6369 instruct leaPPosIdxScaleOffNarrow(rRegP dst, indPosIndexScaleOffsetNarrow mem)
6370 %{
6371 predicate(Universe::narrow_oop_shift() == 0);
6372 match(Set dst mem);
6374 ins_cost(110);
6375 format %{ "leaq $dst, $mem\t# ptr posidxscaleoffnarrow" %}
6376 opcode(0x8D);
6377 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6378 ins_pipe(ialu_reg_reg_fat);
6379 %}
// Load integer constant
6381 instruct loadConI(rRegI dst, immI src)
6382 %{
6383 match(Set dst src);
6385 format %{ "movl $dst, $src\t# int" %}
6386 ins_encode(load_immI(dst, src));
6387 ins_pipe(ialu_reg_fat); // XXX
6388 %}
// Load integer zero via the xor-with-self idiom (shorter encoding; kills flags)
6390 instruct loadConI0(rRegI dst, immI0 src, rFlagsReg cr)
6391 %{
6392 match(Set dst src);
6393 effect(KILL cr);
6395 ins_cost(50);
6396 format %{ "xorl $dst, $dst\t# int" %}
6397 opcode(0x33); /* + rd */
6398 ins_encode(REX_reg_reg(dst, dst), OpcP, reg_reg(dst, dst));
6399 ins_pipe(ialu_reg);
6400 %}
6402 instruct loadConL(rRegL dst, immL src)
6403 %{
6404 match(Set dst src);
6406 ins_cost(150);
6407 format %{ "movq $dst, $src\t# long" %}
6408 ins_encode(load_immL(dst, src));
6409 ins_pipe(ialu_reg);
6410 %}
6412 instruct loadConL0(rRegL dst, immL0 src, rFlagsReg cr)
6413 %{
6414 match(Set dst src);
6415 effect(KILL cr);
6417 ins_cost(50);
6418 format %{ "xorl $dst, $dst\t# long" %}
6419 opcode(0x33); /* + rd */
6420 ins_encode(REX_reg_reg(dst, dst), OpcP, reg_reg(dst, dst));
6421 ins_pipe(ialu_reg); // XXX
6422 %}
6424 instruct loadConUL32(rRegL dst, immUL32 src)
6425 %{
6426 match(Set dst src);
6428 ins_cost(60);
6429 format %{ "movl $dst, $src\t# long (unsigned 32-bit)" %}
6430 ins_encode(load_immUL32(dst, src));
6431 ins_pipe(ialu_reg);
6432 %}
6434 instruct loadConL32(rRegL dst, immL32 src)
6435 %{
6436 match(Set dst src);
6438 ins_cost(70);
6439 format %{ "movq $dst, $src\t# long (32-bit)" %}
6440 ins_encode(load_immL32(dst, src));
6441 ins_pipe(ialu_reg);
6442 %}
6444 instruct loadConP(rRegP dst, immP con) %{
6445 match(Set dst con);
6447 format %{ "movq $dst, $con\t# ptr" %}
6448 ins_encode(load_immP(dst, con));
6449 ins_pipe(ialu_reg_fat); // XXX
6450 %}
6452 instruct loadConP0(rRegP dst, immP0 src, rFlagsReg cr)
6453 %{
6454 match(Set dst src);
6455 effect(KILL cr);
6457 ins_cost(50);
6458 format %{ "xorl $dst, $dst\t# ptr" %}
6459 opcode(0x33); /* + rd */
6460 ins_encode(REX_reg_reg(dst, dst), OpcP, reg_reg(dst, dst));
6461 ins_pipe(ialu_reg);
6462 %}
// Load the polling page address (lea of an AddressLiteral with poll reloc)
6464 instruct loadConP_poll(rRegP dst, immP_poll src) %{
6465 match(Set dst src);
6466 format %{ "movq $dst, $src\t!ptr" %}
6467 ins_encode %{
6468 AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_type);
6469 __ lea($dst$$Register, polling_page);
6470 %}
6471 ins_pipe(ialu_reg_fat);
6472 %}
6474 instruct loadConP31(rRegP dst, immP31 src, rFlagsReg cr)
6475 %{
6476 match(Set dst src);
6477 effect(KILL cr);
6479 ins_cost(60);
6480 format %{ "movl $dst, $src\t# ptr (positive 32-bit)" %}
6481 ins_encode(load_immP31(dst, src));
6482 ins_pipe(ialu_reg);
6483 %}
// Load float constant from the constant table
6485 instruct loadConF(regF dst, immF con) %{
6486 match(Set dst con);
6487 ins_cost(125);
6488 format %{ "movss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
6489 ins_encode %{
6490 __ movflt($dst$$XMMRegister, $constantaddress($con));
6491 %}
6492 ins_pipe(pipe_slow);
6493 %}
// Load compressed NULL: xor dst with itself.
// NOTE(review): format shows "$src" but the encoding xors $dst with itself
// ($src is the immediate 0) — display-only discrepancy; verify if reworking.
6495 instruct loadConN0(rRegN dst, immN0 src, rFlagsReg cr) %{
6496 match(Set dst src);
6497 effect(KILL cr);
6498 format %{ "xorq $dst, $src\t# compressed NULL ptr" %}
6499 ins_encode %{
6500 __ xorq($dst$$Register, $dst$$Register);
6501 %}
6502 ins_pipe(ialu_reg);
6503 %}
// Load non-NULL compressed oop constant
6505 instruct loadConN(rRegN dst, immN src) %{
6506 match(Set dst src);
6508 ins_cost(125);
6509 format %{ "movl $dst, $src\t# compressed ptr" %}
6510 ins_encode %{
6511 address con = (address)$src$$constant;
6512 if (con == NULL) {
6513 ShouldNotReachHere(); // NULL is handled by loadConN0
6514 } else {
6515 __ set_narrow_oop($dst$$Register, (jobject)$src$$constant);
6516 }
6517 %}
6518 ins_pipe(ialu_reg_fat); // XXX
6519 %}
// Float 0.0 via xorps self (no constant-table load needed)
6521 instruct loadConF0(regF dst, immF0 src)
6522 %{
6523 match(Set dst src);
6524 ins_cost(100);
6526 format %{ "xorps $dst, $dst\t# float 0.0" %}
6527 opcode(0x0F, 0x57);
6528 ins_encode(REX_reg_reg(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
6529 ins_pipe(pipe_slow);
6530 %}
6532 // Use the same format since predicate() can not be used here.
6533 instruct loadConD(regD dst, immD con) %{
6534 match(Set dst con);
6535 ins_cost(125);
6536 format %{ "movsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
6537 ins_encode %{
6538 __ movdbl($dst$$XMMRegister, $constantaddress($con));
6539 %}
6540 ins_pipe(pipe_slow);
6541 %}
// Double 0.0 via xorpd self (no constant-table load needed)
6543 instruct loadConD0(regD dst, immD0 src)
6544 %{
6545 match(Set dst src);
6546 ins_cost(100);
6548 format %{ "xorpd $dst, $dst\t# double 0.0" %}
6549 opcode(0x66, 0x0F, 0x57);
6550 ins_encode(OpcP, REX_reg_reg(dst, dst), OpcS, OpcT, reg_reg(dst, dst));
6551 ins_pipe(pipe_slow);
6552 %}
// Load int from a stack slot
6554 instruct loadSSI(rRegI dst, stackSlotI src)
6555 %{
6556 match(Set dst src);
6558 ins_cost(125);
6559 format %{ "movl $dst, $src\t# int stk" %}
6560 opcode(0x8B);
6561 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
6562 ins_pipe(ialu_reg_mem);
6563 %}
// Load long from a stack slot
6565 instruct loadSSL(rRegL dst, stackSlotL src)
6566 %{
6567 match(Set dst src);
6569 ins_cost(125);
6570 format %{ "movq $dst, $src\t# long stk" %}
6571 opcode(0x8B);
6572 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
6573 ins_pipe(ialu_reg_mem);
6574 %}
// Load pointer from a stack slot
6576 instruct loadSSP(rRegP dst, stackSlotP src)
6577 %{
6578 match(Set dst src);
6580 ins_cost(125);
6581 format %{ "movq $dst, $src\t# ptr stk" %}
6582 opcode(0x8B);
6583 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
6584 ins_pipe(ialu_reg_mem);
6585 %}
// Load float from a stack slot
6587 instruct loadSSF(regF dst, stackSlotF src)
6588 %{
6589 match(Set dst src);
6591 ins_cost(125);
6592 format %{ "movss $dst, $src\t# float stk" %}
6593 opcode(0xF3, 0x0F, 0x10);
6594 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
6595 ins_pipe(pipe_slow); // XXX
6596 %}
6598 // Use the same format since predicate() can not be used here.
6599 instruct loadSSD(regD dst, stackSlotD src)
6600 %{
6601 match(Set dst src);
6603 ins_cost(125);
6604 format %{ "movsd $dst, $src\t# double stk" %}
6605 ins_encode %{
6606 __ movdbl($dst$$XMMRegister, Address(rsp, $src$$disp));
6607 %}
6608 ins_pipe(pipe_slow); // XXX
6609 %}
6611 // Prefetch instructions.
6612 // Must be safe to execute with invalid address (cannot fault).
// Read-prefetch variant is selected by the ReadPrefetchInstr flag (0..3).
6614 instruct prefetchr( memory mem ) %{
6615 predicate(ReadPrefetchInstr==3);
6616 match(PrefetchRead mem);
6617 ins_cost(125);
6619 format %{ "PREFETCHR $mem\t# Prefetch into level 1 cache" %}
6620 ins_encode %{
6621 __ prefetchr($mem$$Address);
6622 %}
6623 ins_pipe(ialu_mem);
6624 %}
6626 instruct prefetchrNTA( memory mem ) %{
6627 predicate(ReadPrefetchInstr==0);
6628 match(PrefetchRead mem);
6629 ins_cost(125);
6631 format %{ "PREFETCHNTA $mem\t# Prefetch into non-temporal cache for read" %}
6632 ins_encode %{
6633 __ prefetchnta($mem$$Address);
6634 %}
6635 ins_pipe(ialu_mem);
6636 %}
6638 instruct prefetchrT0( memory mem ) %{
6639 predicate(ReadPrefetchInstr==1);
6640 match(PrefetchRead mem);
6641 ins_cost(125);
6643 format %{ "PREFETCHT0 $mem\t# prefetch into L1 and L2 caches for read" %}
6644 ins_encode %{
6645 __ prefetcht0($mem$$Address);
6646 %}
6647 ins_pipe(ialu_mem);
6648 %}
6650 instruct prefetchrT2( memory mem ) %{
6651 predicate(ReadPrefetchInstr==2);
6652 match(PrefetchRead mem);
6653 ins_cost(125);
6655 format %{ "PREFETCHT2 $mem\t# prefetch into L2 caches for read" %}
6656 ins_encode %{
6657 __ prefetcht2($mem$$Address);
6658 %}
6659 ins_pipe(ialu_mem);
6660 %}
// Write prefetch: only the NTA form (no flag-based selection here)
6662 instruct prefetchwNTA( memory mem ) %{
6663 match(PrefetchWrite mem);
6664 ins_cost(125);
6666 format %{ "PREFETCHNTA $mem\t# Prefetch to non-temporal cache for write" %}
6667 ins_encode %{
6668 __ prefetchnta($mem$$Address);
6669 %}
6670 ins_pipe(ialu_mem);
6671 %}
6673 // Prefetch instructions for allocation.
// Allocation-prefetch variant is selected by AllocatePrefetchInstr (0..3).
6675 instruct prefetchAlloc( memory mem ) %{
6676 predicate(AllocatePrefetchInstr==3);
6677 match(PrefetchAllocation mem);
6678 ins_cost(125);
6680 format %{ "PREFETCHW $mem\t# Prefetch allocation into level 1 cache and mark modified" %}
6681 ins_encode %{
6682 __ prefetchw($mem$$Address);
6683 %}
6684 ins_pipe(ialu_mem);
6685 %}
6687 instruct prefetchAllocNTA( memory mem ) %{
6688 predicate(AllocatePrefetchInstr==0);
6689 match(PrefetchAllocation mem);
6690 ins_cost(125);
6692 format %{ "PREFETCHNTA $mem\t# Prefetch allocation to non-temporal cache for write" %}
6693 ins_encode %{
6694 __ prefetchnta($mem$$Address);
6695 %}
6696 ins_pipe(ialu_mem);
6697 %}
6699 instruct prefetchAllocT0( memory mem ) %{
6700 predicate(AllocatePrefetchInstr==1);
6701 match(PrefetchAllocation mem);
6702 ins_cost(125);
6704 format %{ "PREFETCHT0 $mem\t# Prefetch allocation to level 1 and 2 caches for write" %}
6705 ins_encode %{
6706 __ prefetcht0($mem$$Address);
6707 %}
6708 ins_pipe(ialu_mem);
6709 %}
6711 instruct prefetchAllocT2( memory mem ) %{
6712 predicate(AllocatePrefetchInstr==2);
6713 match(PrefetchAllocation mem);
6714 ins_cost(125);
6716 format %{ "PREFETCHT2 $mem\t# Prefetch allocation to level 2 cache for write" %}
6717 ins_encode %{
6718 __ prefetcht2($mem$$Address);
6719 %}
6720 ins_pipe(ialu_mem);
6721 %}
//----------Store Instructions-------------------------------------------------

// Store Byte
instruct storeB(memory mem, rRegI src)
%{
  match(Set mem (StoreB mem src));

  ins_cost(125); // XXX
  format %{ "movb $mem, $src\t# byte" %}
  opcode(0x88);
  ins_encode(REX_breg_mem(src, mem), OpcP, reg_mem(src, mem));
  ins_pipe(ialu_mem_reg);
%}

// Store Char/Short
instruct storeC(memory mem, rRegI src)
%{
  match(Set mem (StoreC mem src));

  ins_cost(125); // XXX
  format %{ "movw $mem, $src\t# char/short" %}
  opcode(0x89);
  // 0x66 operand-size prefix turns the 32-bit mov into a 16-bit store.
  ins_encode(SizePrefix, REX_reg_mem(src, mem), OpcP, reg_mem(src, mem));
  ins_pipe(ialu_mem_reg);
%}

// Store Integer
instruct storeI(memory mem, rRegI src)
%{
  match(Set mem (StoreI mem src));

  ins_cost(125); // XXX
  format %{ "movl $mem, $src\t# int" %}
  opcode(0x89);
  ins_encode(REX_reg_mem(src, mem), OpcP, reg_mem(src, mem));
  ins_pipe(ialu_mem_reg);
%}

// Store Long
instruct storeL(memory mem, rRegL src)
%{
  match(Set mem (StoreL mem src));

  ins_cost(125); // XXX
  format %{ "movq $mem, $src\t# long" %}
  opcode(0x89);
  // REX.W-wide encoding selects the 64-bit form of the store.
  ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
  ins_pipe(ialu_mem_reg); // XXX
%}

// Store Pointer
instruct storeP(memory mem, any_RegP src)
%{
  match(Set mem (StoreP mem src));

  ins_cost(125); // XXX
  format %{ "movq $mem, $src\t# ptr" %}
  opcode(0x89);
  ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
  ins_pipe(ialu_mem_reg);
%}

// Store a NULL pointer by storing R12: with compressed oops and a NULL
// heap base, R12 (the heap-base register) always holds zero, so a
// register store is cheaper than materializing the immediate.
instruct storeImmP0(memory mem, immP0 zero)
%{
  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
  match(Set mem (StoreP mem zero));

  ins_cost(125); // XXX
  format %{ "movq $mem, R12\t# ptr (R12_heapbase==0)" %}
  ins_encode %{
    __ movq($mem$$Address, r12);
  %}
  ins_pipe(ialu_mem_reg);
%}

// Store NULL Pointer, mark word, or other simple pointer constant.
instruct storeImmP(memory mem, immP31 src)
%{
  match(Set mem (StoreP mem src));

  ins_cost(150); // XXX
  format %{ "movq $mem, $src\t# ptr" %}
  opcode(0xC7); /* C7 /0 */
  // 32-bit immediate is sign-extended by the wide (REX.W) store.
  ins_encode(REX_mem_wide(mem), OpcP, RM_opc_mem(0x00, mem), Con32(src));
  ins_pipe(ialu_mem_imm);
%}

// Store Compressed Pointer
instruct storeN(memory mem, rRegN src)
%{
  match(Set mem (StoreN mem src));

  ins_cost(125); // XXX
  format %{ "movl $mem, $src\t# compressed ptr" %}
  ins_encode %{
    __ movl($mem$$Address, $src$$Register);
  %}
  ins_pipe(ialu_mem_reg);
%}

// Store compressed NULL via R12 (zero when the heap base is NULL).
instruct storeImmN0(memory mem, immN0 zero)
%{
  predicate(Universe::narrow_oop_base() == NULL);
  match(Set mem (StoreN mem zero));

  ins_cost(125); // XXX
  format %{ "movl $mem, R12\t# compressed ptr (R12_heapbase==0)" %}
  ins_encode %{
    __ movl($mem$$Address, r12);
  %}
  ins_pipe(ialu_mem_reg);
%}
// Store compressed oop constant.  A NULL constant degenerates to a plain
// 32-bit zero store; non-NULL constants go through set_narrow_oop so the
// oop is recorded for relocation.
instruct storeImmN(memory mem, immN src)
%{
  match(Set mem (StoreN mem src));

  ins_cost(150); // XXX
  format %{ "movl $mem, $src\t# compressed ptr" %}
  ins_encode %{
    address con = (address)$src$$constant;
    if (con == NULL) {
      __ movl($mem$$Address, (int32_t)0);
    } else {
      __ set_narrow_oop($mem$$Address, (jobject)$src$$constant);
    }
  %}
  ins_pipe(ialu_mem_imm);
%}

// Store Integer Immediate
// Zero stores reuse R12 (known zero when compressed oops have a NULL base).
instruct storeImmI0(memory mem, immI0 zero)
%{
  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
  match(Set mem (StoreI mem zero));

  ins_cost(125); // XXX
  format %{ "movl $mem, R12\t# int (R12_heapbase==0)" %}
  ins_encode %{
    __ movl($mem$$Address, r12);
  %}
  ins_pipe(ialu_mem_reg);
%}

instruct storeImmI(memory mem, immI src)
%{
  match(Set mem (StoreI mem src));

  ins_cost(150);
  format %{ "movl $mem, $src\t# int" %}
  opcode(0xC7); /* C7 /0 */
  ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con32(src));
  ins_pipe(ialu_mem_imm);
%}

// Store Long Immediate
instruct storeImmL0(memory mem, immL0 zero)
%{
  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
  match(Set mem (StoreL mem zero));

  ins_cost(125); // XXX
  format %{ "movq $mem, R12\t# long (R12_heapbase==0)" %}
  ins_encode %{
    __ movq($mem$$Address, r12);
  %}
  ins_pipe(ialu_mem_reg);
%}

// Only immL32 qualifies: the C7 encoding sign-extends a 32-bit immediate.
instruct storeImmL(memory mem, immL32 src)
%{
  match(Set mem (StoreL mem src));

  ins_cost(150);
  format %{ "movq $mem, $src\t# long" %}
  opcode(0xC7); /* C7 /0 */
  ins_encode(REX_mem_wide(mem), OpcP, RM_opc_mem(0x00, mem), Con32(src));
  ins_pipe(ialu_mem_imm);
%}

// Store Short/Char Immediate
instruct storeImmC0(memory mem, immI0 zero)
%{
  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
  match(Set mem (StoreC mem zero));

  ins_cost(125); // XXX
  format %{ "movw $mem, R12\t# short/char (R12_heapbase==0)" %}
  ins_encode %{
    __ movw($mem$$Address, r12);
  %}
  ins_pipe(ialu_mem_reg);
%}

// Guarded by UseStoreImmI16: 16-bit immediate stores can stall the
// store-to-load forwarding path on some CPUs.
instruct storeImmI16(memory mem, immI16 src)
%{
  predicate(UseStoreImmI16);
  match(Set mem (StoreC mem src));

  ins_cost(150);
  format %{ "movw $mem, $src\t# short/char" %}
  opcode(0xC7); /* C7 /0 Same as 32 store immediate with prefix */
  ins_encode(SizePrefix, REX_mem(mem), OpcP, RM_opc_mem(0x00, mem),Con16(src));
  ins_pipe(ialu_mem_imm);
%}
// Store Byte Immediate
// Store a zero byte via R12, which is known to hold zero when compressed
// oops are in use with a NULL heap base.
instruct storeImmB0(memory mem, immI0 zero)
%{
  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
  match(Set mem (StoreB mem zero));

  ins_cost(125); // XXX
  // Fixed debug listing: this is a byte store, not short/char (the old
  // comment text was copied from storeImmC0).
  format %{ "movb $mem, R12\t# byte (R12_heapbase==0)" %}
  ins_encode %{
    __ movb($mem$$Address, r12);
  %}
  ins_pipe(ialu_mem_reg);
%}
// Store a byte immediate (C6 /0 ib encoding).
instruct storeImmB(memory mem, immI8 src)
%{
  match(Set mem (StoreB mem src));

  ins_cost(150); // XXX
  format %{ "movb $mem, $src\t# byte" %}
  opcode(0xC6); /* C6 /0 */
  ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con8or32(src));
  ins_pipe(ialu_mem_imm);
%}
// Store Aligned Packed Byte XMM register to memory
instruct storeA8B(memory mem, regD src) %{
  match(Set mem (Store8B mem src));
  ins_cost(145);
  format %{ "MOVQ $mem,$src\t! packed8B" %}
  ins_encode( movq_st(mem, src));
  ins_pipe( pipe_slow );
%}

// Store Aligned Packed Char/Short XMM register to memory
instruct storeA4C(memory mem, regD src) %{
  match(Set mem (Store4C mem src));
  ins_cost(145);
  format %{ "MOVQ $mem,$src\t! packed4C" %}
  ins_encode( movq_st(mem, src));
  ins_pipe( pipe_slow );
%}

// Store Aligned Packed Integer XMM register to memory
instruct storeA2I(memory mem, regD src) %{
  match(Set mem (Store2I mem src));
  ins_cost(145);
  format %{ "MOVQ $mem,$src\t! packed2I" %}
  ins_encode( movq_st(mem, src));
  ins_pipe( pipe_slow );
%}

// Store CMS card-mark Immediate
// Card-mark is a single zero byte; reuse R12 when it is known to be zero.
instruct storeImmCM0_reg(memory mem, immI0 zero)
%{
  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
  match(Set mem (StoreCM mem zero));

  ins_cost(125); // XXX
  format %{ "movb $mem, R12\t# CMS card-mark byte 0 (R12_heapbase==0)" %}
  ins_encode %{
    __ movb($mem$$Address, r12);
  %}
  ins_pipe(ialu_mem_reg);
%}

// Fallback card-mark store: byte-immediate zero (C6 /0 ib).
instruct storeImmCM0(memory mem, immI0 src)
%{
  match(Set mem (StoreCM mem src));

  ins_cost(150); // XXX
  format %{ "movb $mem, $src\t# CMS card-mark byte 0" %}
  opcode(0xC6); /* C6 /0 */
  ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con8or32(src));
  ins_pipe(ialu_mem_imm);
%}

// Store Aligned Packed Single Float XMM register to memory
instruct storeA2F(memory mem, regD src) %{
  match(Set mem (Store2F mem src));
  ins_cost(145);
  format %{ "MOVQ $mem,$src\t! packed2F" %}
  ins_encode( movq_st(mem, src));
  ins_pipe( pipe_slow );
%}
// Store Float
instruct storeF(memory mem, regF src)
%{
  match(Set mem (StoreF mem src));

  ins_cost(95); // XXX
  format %{ "movss $mem, $src\t# float" %}
  // F3 0F 11: MOVSS xmm -> m32
  opcode(0xF3, 0x0F, 0x11);
  ins_encode(OpcP, REX_reg_mem(src, mem), OpcS, OpcT, reg_mem(src, mem));
  ins_pipe(pipe_slow); // XXX
%}

// Store immediate Float value (it is faster than store from XMM register)
instruct storeF0(memory mem, immF0 zero)
%{
  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
  match(Set mem (StoreF mem zero));

  ins_cost(25); // XXX
  // Float 0.0 has an all-zero bit pattern, so the R12-is-zero trick applies.
  format %{ "movl $mem, R12\t# float 0. (R12_heapbase==0)" %}
  ins_encode %{
    __ movl($mem$$Address, r12);
  %}
  ins_pipe(ialu_mem_reg);
%}

// Store the raw 32-bit pattern of the float constant with an integer move.
instruct storeF_imm(memory mem, immF src)
%{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "movl $mem, $src\t# float" %}
  opcode(0xC7); /* C7 /0 */
  ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con32F_as_bits(src));
  ins_pipe(ialu_mem_imm);
%}

// Store Double
instruct storeD(memory mem, regD src)
%{
  match(Set mem (StoreD mem src));

  ins_cost(95); // XXX
  format %{ "movsd $mem, $src\t# double" %}
  // F2 0F 11: MOVSD xmm -> m64
  opcode(0xF2, 0x0F, 0x11);
  ins_encode(OpcP, REX_reg_mem(src, mem), OpcS, OpcT, reg_mem(src, mem));
  ins_pipe(pipe_slow); // XXX
%}

// Store immediate double 0.0 (it is faster than store from XMM register)
// Used when the R12-zero variant below is not available.
instruct storeD0_imm(memory mem, immD0 src)
%{
  predicate(!UseCompressedOops || (Universe::narrow_oop_base() != NULL));
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "movq $mem, $src\t# double 0." %}
  opcode(0xC7); /* C7 /0 */
  // Sign-extended 32-bit zero fills all 64 bits with zero.
  ins_encode(REX_mem_wide(mem), OpcP, RM_opc_mem(0x00, mem), Con32F_as_bits(src));
  ins_pipe(ialu_mem_imm);
%}

instruct storeD0(memory mem, immD0 zero)
%{
  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
  match(Set mem (StoreD mem zero));

  ins_cost(25); // XXX
  format %{ "movq $mem, R12\t# double 0. (R12_heapbase==0)" %}
  ins_encode %{
    __ movq($mem$$Address, r12);
  %}
  ins_pipe(ialu_mem_reg);
%}
// Spill/stack-slot stores: register -> stack slot, matched on plain
// Set dst src (register allocator spill copies).

instruct storeSSI(stackSlotI dst, rRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "movl $dst, $src\t# int stk" %}
  opcode(0x89);
  ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe( ialu_mem_reg );
%}

instruct storeSSL(stackSlotL dst, rRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "movq $dst, $src\t# long stk" %}
  opcode(0x89);
  ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

instruct storeSSP(stackSlotP dst, rRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "movq $dst, $src\t# ptr stk" %}
  opcode(0x89);
  ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(95); // XXX
  format %{ "movss $dst, $src\t# float stk" %}
  opcode(0xF3, 0x0F, 0x11);
  ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
  ins_pipe(pipe_slow); // XXX
%}

instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(95); // XXX
  format %{ "movsd $dst, $src\t# double stk" %}
  opcode(0xF2, 0x0F, 0x11);
  ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
  ins_pipe(pipe_slow); // XXX
%}
//----------BSWAP Instructions-------------------------------------------------
instruct bytes_reverse_int(rRegI dst) %{
  match(Set dst (ReverseBytesI dst));

  format %{ "bswapl $dst" %}
  opcode(0x0F, 0xC8); /*Opcode 0F /C8 */
  ins_encode( REX_reg(dst), OpcP, opc2_reg(dst) );
  ins_pipe( ialu_reg );
%}

instruct bytes_reverse_long(rRegL dst) %{
  match(Set dst (ReverseBytesL dst));

  format %{ "bswapq $dst" %}
  opcode(0x0F, 0xC8); /* Opcode 0F /C8 */
  ins_encode( REX_reg_wide(dst), OpcP, opc2_reg(dst) );
  ins_pipe( ialu_reg);
%}

// Reverse the two low bytes: bswap the 32-bit register, then logical
// shift right 16 to move them back down (zero-extended result).
instruct bytes_reverse_unsigned_short(rRegI dst) %{
  match(Set dst (ReverseBytesUS dst));

  format %{ "bswapl $dst\n\t"
            "shrl $dst,16\n\t" %}
  ins_encode %{
    __ bswapl($dst$$Register);
    __ shrl($dst$$Register, 16);
  %}
  ins_pipe( ialu_reg );
%}
// Reverse the two low bytes of a short: bswap the 32-bit register, then
// arithmetic shift right 16 so the result is sign-extended.
instruct bytes_reverse_short(rRegI dst) %{
  match(Set dst (ReverseBytesS dst));

  // Listing fixed to "sarl" to match the emitted instruction (the
  // unsigned variant above already prints the suffixed "shrl" form).
  format %{ "bswapl $dst\n\t"
            "sarl $dst,16\n\t" %}
  ins_encode %{
    __ bswapl($dst$$Register);
    __ sarl($dst$$Register, 16);
  %}
  ins_pipe( ialu_reg );
%}
//---------- Zeros Count Instructions ------------------------------------------

instruct countLeadingZerosI(rRegI dst, rRegI src, rFlagsReg cr) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));
  effect(KILL cr);

  format %{ "lzcntl $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ lzcntl($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Fallback when LZCNT is unavailable.  BSR yields the index of the
// highest set bit; leading zeros = 31 - index.  BSR leaves dst
// undefined for zero input, so that case is patched to -1 first,
// making the final result 31 - (-1) = 32.
instruct countLeadingZerosI_bsr(rRegI dst, rRegI src, rFlagsReg cr) %{
  predicate(!UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));
  effect(KILL cr);

  format %{ "bsrl $dst, $src\t# count leading zeros (int)\n\t"
            "jnz skip\n\t"
            "movl $dst, -1\n"
            "skip:\n\t"
            "negl $dst\n\t"
            "addl $dst, 31" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    Label skip;
    __ bsrl(Rdst, Rsrc);
    __ jccb(Assembler::notZero, skip);
    __ movl(Rdst, -1);
    __ bind(skip);
    __ negl(Rdst);
    __ addl(Rdst, BitsPerInt - 1);
  %}
  ins_pipe(ialu_reg);
%}

instruct countLeadingZerosL(rRegI dst, rRegL src, rFlagsReg cr) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));
  effect(KILL cr);

  format %{ "lzcntq $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ lzcntq($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// 64-bit BSR fallback; same scheme as the int variant with 63 - index.
instruct countLeadingZerosL_bsr(rRegI dst, rRegL src, rFlagsReg cr) %{
  predicate(!UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));
  effect(KILL cr);

  format %{ "bsrq $dst, $src\t# count leading zeros (long)\n\t"
            "jnz skip\n\t"
            "movl $dst, -1\n"
            "skip:\n\t"
            "negl $dst\n\t"
            "addl $dst, 63" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    Label skip;
    __ bsrq(Rdst, Rsrc);
    __ jccb(Assembler::notZero, skip);
    __ movl(Rdst, -1);
    __ bind(skip);
    __ negl(Rdst);
    __ addl(Rdst, BitsPerLong - 1);
  %}
  ins_pipe(ialu_reg);
%}

// BSF gives the lowest set bit index directly; only the zero-input
// case (dst undefined, ZF set) needs the explicit 32 result.
instruct countTrailingZerosI(rRegI dst, rRegI src, rFlagsReg cr) %{
  match(Set dst (CountTrailingZerosI src));
  effect(KILL cr);

  format %{ "bsfl $dst, $src\t# count trailing zeros (int)\n\t"
            "jnz done\n\t"
            "movl $dst, 32\n"
            "done:" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Label done;
    __ bsfl(Rdst, $src$$Register);
    __ jccb(Assembler::notZero, done);
    __ movl(Rdst, BitsPerInt);
    __ bind(done);
  %}
  ins_pipe(ialu_reg);
%}

instruct countTrailingZerosL(rRegI dst, rRegL src, rFlagsReg cr) %{
  match(Set dst (CountTrailingZerosL src));
  effect(KILL cr);

  format %{ "bsfq $dst, $src\t# count trailing zeros (long)\n\t"
            "jnz done\n\t"
            "movl $dst, 64\n"
            "done:" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Label done;
    __ bsfq(Rdst, $src$$Register);
    __ jccb(Assembler::notZero, done);
    __ movl(Rdst, BitsPerLong);
    __ bind(done);
  %}
  ins_pipe(ialu_reg);
%}
//---------- Population Count Instructions -------------------------------------

instruct popCountI(rRegI dst, rRegI src) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));

  format %{ "popcnt $dst, $src" %}
  ins_encode %{
    __ popcntl($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Popcount directly from memory, folding the load.
instruct popCountI_mem(rRegI dst, memory mem) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));

  format %{ "popcnt $dst, $mem" %}
  ins_encode %{
    __ popcntl($dst$$Register, $mem$$Address);
  %}
  ins_pipe(ialu_reg);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(rRegI dst, rRegL src) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));

  format %{ "popcnt $dst, $src" %}
  ins_encode %{
    __ popcntq($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL_mem(rRegI dst, memory mem) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));

  format %{ "popcnt $dst, $mem" %}
  ins_encode %{
    __ popcntq($dst$$Register, $mem$$Address);
  %}
  ins_pipe(ialu_reg);
%}
//----------MemBar Instructions-----------------------------------------------
// Memory barrier flavors
// On x86's TSO memory model, acquire and release barriers need no code;
// only StoreLoad (volatile) requires an actual fence.

instruct membar_acquire()
%{
  match(MemBarAcquire);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire ! (empty encoding)" %}
  ins_encode();
  ins_pipe(empty);
%}

instruct membar_acquire_lock()
%{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (prior CMPXCHG in FastLock so empty encoding)" %}
  ins_encode();
  ins_pipe(empty);
%}

instruct membar_release()
%{
  match(MemBarRelease);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release ! (empty encoding)" %}
  ins_encode();
  ins_pipe(empty);
%}

instruct membar_release_lock()
%{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release (a FastUnlock follows so empty encoding)" %}
  ins_encode();
  ins_pipe(empty);
%}

// StoreLoad barrier; implemented as a locked add to the stack (see
// MacroAssembler::membar), cheaper than mfence on most CPUs.
instruct membar_volatile(rFlagsReg cr) %{
  match(MemBarVolatile);
  effect(KILL cr);
  ins_cost(400);

  format %{
    $$template
    if (os::is_MP()) {
      $$emit$$"lock addl [rsp + #0], 0\t! membar_volatile"
    } else {
      $$emit$$"MEMBAR-volatile ! (empty encoding)"
    }
  %}
  ins_encode %{
    __ membar(Assembler::StoreLoad);
  %}
  ins_pipe(pipe_slow);
%}

// Elided when a preceding instruction already provides the barrier.
instruct unnecessary_membar_volatile()
%{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-volatile (unnecessary so empty encoding)" %}
  ins_encode();
  ins_pipe(empty);
%}
//----------Move Instructions--------------------------------------------------

// Reinterpret a long as a pointer (register copy only).
instruct castX2P(rRegP dst, rRegL src)
%{
  match(Set dst (CastX2P src));

  format %{ "movq $dst, $src\t# long->ptr" %}
  ins_encode(enc_copy_wide(dst, src));
  ins_pipe(ialu_reg_reg); // XXX
%}

// Reinterpret a pointer as a long (register copy only).
instruct castP2X(rRegL dst, rRegP src)
%{
  match(Set dst (CastP2X src));

  format %{ "movq $dst, $src\t# ptr -> long" %}
  ins_encode(enc_copy_wide(dst, src));
  ins_pipe(ialu_reg_reg); // XXX
%}
// Convert oop pointer into compressed form
// General case: the oop may be NULL, so encode_heap_oop must handle it.
instruct encodeHeapOop(rRegN dst, rRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  format %{ "encode_heap_oop $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    // encode_heap_oop works in place, so copy src into dst first.
    if (s != d) {
      __ movq(d, s);
    }
    __ encode_heap_oop(d);
  %}
  ins_pipe(ialu_reg_long);
%}

// The type system proved the oop non-NULL, allowing the cheaper encoding.
instruct encodeHeapOop_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  format %{ "encode_heap_oop_not_null $dst,$src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg_long);
%}

instruct decodeHeapOop(rRegP dst, rRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  effect(KILL cr);
  format %{ "decode_heap_oop $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    // decode_heap_oop works in place, so copy src into dst first.
    if (s != d) {
      __ movq(d, s);
    }
    __ decode_heap_oop(d);
  %}
  ins_pipe(ialu_reg_long);
%}

instruct decodeHeapOop_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  effect(KILL cr);
  format %{ "decode_heap_oop_not_null $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    // Two-register form avoids the preliminary copy when src != dst.
    if (s != d) {
      __ decode_heap_oop_not_null(d, s);
    } else {
      __ decode_heap_oop_not_null(d);
    }
  %}
  ins_pipe(ialu_reg_long);
%}
//----------Conditional Move---------------------------------------------------
// Jump
// dummy instruction for generating temp registers
// Table-switch dispatch: load the jump-table base from the constant
// section into a temp, then do an indexed indirect jump.
instruct jumpXtnd_offset(rRegL switch_val, immI2 shift, rRegI dest) %{
  match(Jump (LShiftL switch_val shift));
  ins_cost(350);
  predicate(false);
  effect(TEMP dest);

  format %{ "leaq $dest, [$constantaddress]\n\t"
            "jmp [$dest + $switch_val << $shift]\n\t" %}
  ins_encode %{
    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    // to do that and the compiler is using that register as one it can allocate.
    // So we build it all by hand.
    // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant);
    // ArrayAddress dispatch(table, index);
    Address dispatch($dest$$Register, $switch_val$$Register, (Address::ScaleFactor) $shift$$constant);
    __ lea($dest$$Register, $constantaddress);
    __ jmp(dispatch);
  %}
  ins_pipe(pipe_jmp);
%}

// As above, with an additional constant displacement folded in.
instruct jumpXtnd_addr(rRegL switch_val, immI2 shift, immL32 offset, rRegI dest) %{
  match(Jump (AddL (LShiftL switch_val shift) offset));
  ins_cost(350);
  effect(TEMP dest);

  format %{ "leaq $dest, [$constantaddress]\n\t"
            "jmp [$dest + $switch_val << $shift + $offset]\n\t" %}
  ins_encode %{
    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    // to do that and the compiler is using that register as one it can allocate.
    // So we build it all by hand.
    // Address index(noreg, switch_reg, (Address::ScaleFactor) $shift$$constant, (int) $offset$$constant);
    // ArrayAddress dispatch(table, index);
    Address dispatch($dest$$Register, $switch_val$$Register, (Address::ScaleFactor) $shift$$constant, (int) $offset$$constant);
    __ lea($dest$$Register, $constantaddress);
    __ jmp(dispatch);
  %}
  ins_pipe(pipe_jmp);
%}

// Unscaled variant: switch_val is already a byte offset into the table.
instruct jumpXtnd(rRegL switch_val, rRegI dest) %{
  match(Jump switch_val);
  ins_cost(350);
  effect(TEMP dest);

  format %{ "leaq $dest, [$constantaddress]\n\t"
            "jmp [$dest + $switch_val]\n\t" %}
  ins_encode %{
    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    // to do that and the compiler is using that register as one it can allocate.
    // So we build it all by hand.
    // Address index(noreg, switch_reg, Address::times_1);
    // ArrayAddress dispatch(table, index);
    Address dispatch($dest$$Register, $switch_val$$Register, Address::times_1);
    __ lea($dest$$Register, $constantaddress);
    __ jmp(dispatch);
  %}
  ins_pipe(pipe_jmp);
%}
// Conditional move
instruct cmovI_reg(rRegI dst, rRegI src, rFlagsReg cr, cmpOp cop)
%{
  match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "cmovl$cop $dst, $src\t# signed, int" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg);
%}

instruct cmovI_regU(cmpOpU cop, rFlagsRegU cr, rRegI dst, rRegI src) %{
  match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "cmovl$cop $dst, $src\t# unsigned, int" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg);
%}

// UCF flags variant expands to the plain unsigned form.
instruct cmovI_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegI dst, rRegI src) %{
  match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
  ins_cost(200);
  expand %{
    cmovI_regU(cop, cr, dst, src);
  %}
%}

// Conditional move
instruct cmovI_mem(cmpOp cop, rFlagsReg cr, rRegI dst, memory src) %{
  match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));

  ins_cost(250); // XXX
  format %{ "cmovl$cop $dst, $src\t# signed, int" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_mem(dst, src), enc_cmov(cop), reg_mem(dst, src));
  ins_pipe(pipe_cmov_mem);
%}

// Conditional move
instruct cmovI_memU(cmpOpU cop, rFlagsRegU cr, rRegI dst, memory src)
%{
  match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));

  ins_cost(250); // XXX
  format %{ "cmovl$cop $dst, $src\t# unsigned, int" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_mem(dst, src), enc_cmov(cop), reg_mem(dst, src));
  ins_pipe(pipe_cmov_mem);
%}

instruct cmovI_memUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegI dst, memory src) %{
  match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
  ins_cost(250);
  expand %{
    cmovI_memU(cop, cr, dst, src);
  %}
%}

// Conditional move
instruct cmovN_reg(rRegN dst, rRegN src, rFlagsReg cr, cmpOp cop)
%{
  match(Set dst (CMoveN (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "cmovl$cop $dst, $src\t# signed, compressed ptr" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg);
%}

// Conditional move
instruct cmovN_regU(cmpOpU cop, rFlagsRegU cr, rRegN dst, rRegN src)
%{
  match(Set dst (CMoveN (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "cmovl$cop $dst, $src\t# unsigned, compressed ptr" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg);
%}

instruct cmovN_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegN dst, rRegN src) %{
  match(Set dst (CMoveN (Binary cop cr) (Binary dst src)));
  ins_cost(200);
  expand %{
    cmovN_regU(cop, cr, dst, src);
  %}
%}

// Conditional move
instruct cmovP_reg(rRegP dst, rRegP src, rFlagsReg cr, cmpOp cop)
%{
  match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "cmovq$cop $dst, $src\t# signed, ptr" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg); // XXX
%}

// Conditional move
instruct cmovP_regU(cmpOpU cop, rFlagsRegU cr, rRegP dst, rRegP src)
%{
  match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "cmovq$cop $dst, $src\t# unsigned, ptr" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg); // XXX
%}

instruct cmovP_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegP dst, rRegP src) %{
  match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
  ins_cost(200);
  expand %{
    cmovP_regU(cop, cr, dst, src);
  %}
%}

// DISABLED: Requires the ADLC to emit a bottom_type call that
// correctly meets the two pointer arguments; one is an incoming
// register but the other is a memory operand.  ALSO appears to
// be buggy with implicit null checks.
//
//// Conditional move
//instruct cmovP_mem(cmpOp cop, rFlagsReg cr, rRegP dst, memory src)
//%{
//  match(Set dst (CMoveP (Binary cop cr) (Binary dst (LoadP src))));
//  ins_cost(250);
//  format %{ "CMOV$cop $dst,$src\t# ptr" %}
//  opcode(0x0F,0x40);
//  ins_encode( enc_cmov(cop), reg_mem( dst, src ) );
//  ins_pipe( pipe_cmov_mem );
//%}
//
//// Conditional move
//instruct cmovP_memU(cmpOpU cop, rFlagsRegU cr, rRegP dst, memory src)
//%{
//  match(Set dst (CMoveP (Binary cop cr) (Binary dst (LoadP src))));
//  ins_cost(250);
//  format %{ "CMOV$cop $dst,$src\t# ptr" %}
//  opcode(0x0F,0x40);
//  ins_encode( enc_cmov(cop), reg_mem( dst, src ) );
//  ins_pipe( pipe_cmov_mem );
//%}
instruct cmovL_reg(cmpOp cop, rFlagsReg cr, rRegL dst, rRegL src)
%{
  match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "cmovq$cop $dst, $src\t# signed, long" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg); // XXX
%}

instruct cmovL_mem(cmpOp cop, rFlagsReg cr, rRegL dst, memory src)
%{
  match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src))));

  ins_cost(200); // XXX
  format %{ "cmovq$cop $dst, $src\t# signed, long" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_mem_wide(dst, src), enc_cmov(cop), reg_mem(dst, src));
  ins_pipe(pipe_cmov_mem); // XXX
%}

instruct cmovL_regU(cmpOpU cop, rFlagsRegU cr, rRegL dst, rRegL src)
%{
  match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "cmovq$cop $dst, $src\t# unsigned, long" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg); // XXX
%}

instruct cmovL_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegL dst, rRegL src) %{
  match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
  ins_cost(200);
  expand %{
    cmovL_regU(cop, cr, dst, src);
  %}
%}

instruct cmovL_memU(cmpOpU cop, rFlagsRegU cr, rRegL dst, memory src)
%{
  match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src))));

  ins_cost(200); // XXX
  format %{ "cmovq$cop $dst, $src\t# unsigned, long" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_mem_wide(dst, src), enc_cmov(cop), reg_mem(dst, src));
  ins_pipe(pipe_cmov_mem); // XXX
%}

instruct cmovL_memUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegL dst, memory src) %{
  match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src))));
  ins_cost(200);
  expand %{
    cmovL_memU(cop, cr, dst, src);
  %}
%}

// No CMOV exists for XMM registers: float/double conditional moves are
// implemented as a conditional branch around a register move.
instruct cmovF_reg(cmpOp cop, rFlagsReg cr, regF dst, regF src)
%{
  match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "jn$cop skip\t# signed cmove float\n\t"
            "movss $dst, $src\n"
            "skip:" %}
  ins_encode(enc_cmovf_branch(cop, dst, src));
  ins_pipe(pipe_slow);
%}

// instruct cmovF_mem(cmpOp cop, rFlagsReg cr, regF dst, memory src)
// %{
//   match(Set dst (CMoveF (Binary cop cr) (Binary dst (LoadL src))));
//   ins_cost(200); // XXX
//   format %{ "jn$cop skip\t# signed cmove float\n\t"
//             "movss $dst, $src\n"
//             "skip:" %}
//   ins_encode(enc_cmovf_mem_branch(cop, dst, src));
//   ins_pipe(pipe_slow);
// %}

instruct cmovF_regU(cmpOpU cop, rFlagsRegU cr, regF dst, regF src)
%{
  match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "jn$cop skip\t# unsigned cmove float\n\t"
            "movss $dst, $src\n"
            "skip:" %}
  ins_encode(enc_cmovf_branch(cop, dst, src));
  ins_pipe(pipe_slow);
%}

instruct cmovF_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
  ins_cost(200);
  expand %{
    cmovF_regU(cop, cr, dst, src);
  %}
%}

instruct cmovD_reg(cmpOp cop, rFlagsReg cr, regD dst, regD src)
%{
  match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "jn$cop skip\t# signed cmove double\n\t"
            "movsd $dst, $src\n"
            "skip:" %}
  ins_encode(enc_cmovd_branch(cop, dst, src));
  ins_pipe(pipe_slow);
%}

instruct cmovD_regU(cmpOpU cop, rFlagsRegU cr, regD dst, regD src)
%{
  match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "jn$cop skip\t# unsigned cmove double\n\t"
            "movsd $dst, $src\n"
            "skip:" %}
  ins_encode(enc_cmovd_branch(cop, dst, src));
  ins_pipe(pipe_slow);
%}

instruct cmovD_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
  ins_cost(200);
  expand %{
    cmovD_regU(cop, cr, dst, src);
  %}
%}
7868 //----------Arithmetic Instructions--------------------------------------------
7869 //----------Addition Instructions----------------------------------------------
// 32-bit integer ADD.  All forms clobber the condition codes (KILL cr).
// Opcode map: 0x03 = ADD r32, r/m32; 0x01 = ADD r/m32, r32;
// 0x81 /0 = ADD r/m32, imm32 (OpcSE/Con8or32 pick the sign-extended
// 8-bit short form when the immediate fits).
7871 instruct addI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
7872 %{
7873 match(Set dst (AddI dst src));
7874 effect(KILL cr);
7876 format %{ "addl $dst, $src\t# int" %}
7877 opcode(0x03);
7878 ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
7879 ins_pipe(ialu_reg_reg);
7880 %}
// Register += immediate.
7882 instruct addI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
7883 %{
7884 match(Set dst (AddI dst src));
7885 effect(KILL cr);
7887 format %{ "addl $dst, $src\t# int" %}
7888 opcode(0x81, 0x00); /* /0 id */
7889 ins_encode(OpcSErm(dst, src), Con8or32(src));
7890 ins_pipe( ialu_reg );
7891 %}
// Register += memory (load folded into the add).
7893 instruct addI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
7894 %{
7895 match(Set dst (AddI dst (LoadI src)));
7896 effect(KILL cr);
7898 ins_cost(125); // XXX
7899 format %{ "addl $dst, $src\t# int" %}
7900 opcode(0x03);
7901 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
7902 ins_pipe(ialu_reg_mem);
7903 %}
// Memory += register: read-modify-write form, matching the full
// Store(Add(Load, src)) tree so the RMW collapses to one instruction.
7905 instruct addI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
7906 %{
7907 match(Set dst (StoreI dst (AddI (LoadI dst) src)));
7908 effect(KILL cr);
7910 ins_cost(150); // XXX
7911 format %{ "addl $dst, $src\t# int" %}
7912 opcode(0x01); /* Opcode 01 /r */
7913 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
7914 ins_pipe(ialu_mem_reg);
7915 %}
// Memory += immediate (RMW).
7917 instruct addI_mem_imm(memory dst, immI src, rFlagsReg cr)
7918 %{
7919 match(Set dst (StoreI dst (AddI (LoadI dst) src)));
7920 effect(KILL cr);
7922 ins_cost(125); // XXX
7923 format %{ "addl $dst, $src\t# int" %}
7924 opcode(0x81); /* Opcode 81 /0 id */
7925 ins_encode(REX_mem(dst), OpcSE(src), RM_opc_mem(0x00, dst), Con8or32(src));
7926 ins_pipe(ialu_mem_imm);
7927 %}
// INC/DEC forms of AddI with immediate +1/-1, only selected when the
// UseIncDec flag is on (INC/DEC only update a subset of the flags, which
// some microarchitectures penalize; the flag lets that be avoided).
7929 instruct incI_rReg(rRegI dst, immI1 src, rFlagsReg cr)
7930 %{
7931 predicate(UseIncDec);
7932 match(Set dst (AddI dst src));
7933 effect(KILL cr);
7935 format %{ "incl $dst\t# int" %}
7936 opcode(0xFF, 0x00); // FF /0
7937 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
7938 ins_pipe(ialu_reg);
7939 %}
// INC directly on a memory operand (RMW).
7941 instruct incI_mem(memory dst, immI1 src, rFlagsReg cr)
7942 %{
7943 predicate(UseIncDec);
7944 match(Set dst (StoreI dst (AddI (LoadI dst) src)));
7945 effect(KILL cr);
7947 ins_cost(125); // XXX
7948 format %{ "incl $dst\t# int" %}
7949 opcode(0xFF); /* Opcode FF /0 */
7950 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(0x00, dst));
7951 ins_pipe(ialu_mem_imm);
7952 %}
7954 // XXX why does that use AddI
// (Answer per the match rule: ideal SubI-by-constant is canonicalized as
// AddI of -1, hence DEC matches AddI with an immI_M1 operand.)
7955 instruct decI_rReg(rRegI dst, immI_M1 src, rFlagsReg cr)
7956 %{
7957 predicate(UseIncDec);
7958 match(Set dst (AddI dst src));
7959 effect(KILL cr);
7961 format %{ "decl $dst\t# int" %}
7962 opcode(0xFF, 0x01); // FF /1
7963 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
7964 ins_pipe(ialu_reg);
7965 %}
7967 // XXX why does that use AddI
// DEC directly on a memory operand (RMW).
7968 instruct decI_mem(memory dst, immI_M1 src, rFlagsReg cr)
7969 %{
7970 predicate(UseIncDec);
7971 match(Set dst (StoreI dst (AddI (LoadI dst) src)));
7972 effect(KILL cr);
7974 ins_cost(125); // XXX
7975 format %{ "decl $dst\t# int" %}
7976 opcode(0xFF); /* Opcode FF /1 */
7977 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(0x01, dst));
7978 ins_pipe(ialu_mem_imm);
7979 %}
// Three-operand add via LEA: dst = src0 + imm without touching flags
// (note: no KILL cr here).  The 0x67 addr32 prefix forces 32-bit address
// arithmetic so the result wraps like a 32-bit add.
7981 instruct leaI_rReg_immI(rRegI dst, rRegI src0, immI src1)
7982 %{
7983 match(Set dst (AddI src0 src1));
7985 ins_cost(110);
7986 format %{ "addr32 leal $dst, [$src0 + $src1]\t# int" %}
7987 opcode(0x8D); /* 0x8D /r */
7988 ins_encode(Opcode(0x67), REX_reg_reg(dst, src0), OpcP, reg_lea(dst, src0, src1)); // XXX
7989 ins_pipe(ialu_reg_reg);
7990 %}
// 64-bit long ADD: same opcode map as the int forms, but the *_wide REX
// emitters set REX.W for a 64-bit operation.  immL32 restricts immediates
// to values representable as a sign-extended 32-bit field.
7992 instruct addL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
7993 %{
7994 match(Set dst (AddL dst src));
7995 effect(KILL cr);
7997 format %{ "addq $dst, $src\t# long" %}
7998 opcode(0x03);
7999 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
8000 ins_pipe(ialu_reg_reg);
8001 %}
// Register += 32-bit sign-extended immediate.
8003 instruct addL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
8004 %{
8005 match(Set dst (AddL dst src));
8006 effect(KILL cr);
8008 format %{ "addq $dst, $src\t# long" %}
8009 opcode(0x81, 0x00); /* /0 id */
8010 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
8011 ins_pipe( ialu_reg );
8012 %}
// Register += memory (load folded).
8014 instruct addL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
8015 %{
8016 match(Set dst (AddL dst (LoadL src)));
8017 effect(KILL cr);
8019 ins_cost(125); // XXX
8020 format %{ "addq $dst, $src\t# long" %}
8021 opcode(0x03);
8022 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
8023 ins_pipe(ialu_reg_mem);
8024 %}
// Memory += register (RMW).
8026 instruct addL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
8027 %{
8028 match(Set dst (StoreL dst (AddL (LoadL dst) src)));
8029 effect(KILL cr);
8031 ins_cost(150); // XXX
8032 format %{ "addq $dst, $src\t# long" %}
8033 opcode(0x01); /* Opcode 01 /r */
8034 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
8035 ins_pipe(ialu_mem_reg);
8036 %}
// Memory += immediate (RMW).
8038 instruct addL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
8039 %{
8040 match(Set dst (StoreL dst (AddL (LoadL dst) src)));
8041 effect(KILL cr);
8043 ins_cost(125); // XXX
8044 format %{ "addq $dst, $src\t# long" %}
8045 opcode(0x81); /* Opcode 81 /0 id */
8046 ins_encode(REX_mem_wide(dst),
8047 OpcSE(src), RM_opc_mem(0x00, dst), Con8or32(src));
8048 ins_pipe(ialu_mem_imm);
8049 %}
// INC/DEC forms of AddL (+1/-1), gated on UseIncDec like the int versions.
// NOTE(review): dst is declared rRegI although this matches AddL and the
// encoding uses REX_reg_wide; presumably harmless because rRegI and rRegL
// cover the same GPRs and the ideal type comes from the match rule — but
// it is inconsistent with decL_rReg below; confirm against ADLC behavior.
8051 instruct incL_rReg(rRegI dst, immL1 src, rFlagsReg cr)
8052 %{
8053 predicate(UseIncDec);
8054 match(Set dst (AddL dst src));
8055 effect(KILL cr);
8057 format %{ "incq $dst\t# long" %}
8058 opcode(0xFF, 0x00); // FF /0
8059 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
8060 ins_pipe(ialu_reg);
8061 %}
// INC on a 64-bit memory operand (RMW).
8063 instruct incL_mem(memory dst, immL1 src, rFlagsReg cr)
8064 %{
8065 predicate(UseIncDec);
8066 match(Set dst (StoreL dst (AddL (LoadL dst) src)));
8067 effect(KILL cr);
8069 ins_cost(125); // XXX
8070 format %{ "incq $dst\t# long" %}
8071 opcode(0xFF); /* Opcode FF /0 */
8072 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(0x00, dst));
8073 ins_pipe(ialu_mem_imm);
8074 %}
8076 // XXX why does that use AddL
// (SubL-by-constant is canonicalized as AddL of -1, hence the immL_M1.)
8077 instruct decL_rReg(rRegL dst, immL_M1 src, rFlagsReg cr)
8078 %{
8079 predicate(UseIncDec);
8080 match(Set dst (AddL dst src));
8081 effect(KILL cr);
8083 format %{ "decq $dst\t# long" %}
8084 opcode(0xFF, 0x01); // FF /1
8085 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
8086 ins_pipe(ialu_reg);
8087 %}
8089 // XXX why does that use AddL
// DEC on a 64-bit memory operand (RMW).
8090 instruct decL_mem(memory dst, immL_M1 src, rFlagsReg cr)
8091 %{
8092 predicate(UseIncDec);
8093 match(Set dst (StoreL dst (AddL (LoadL dst) src)));
8094 effect(KILL cr);
8096 ins_cost(125); // XXX
8097 format %{ "decq $dst\t# long" %}
8098 opcode(0xFF); /* Opcode FF /1 */
8099 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(0x01, dst));
8100 ins_pipe(ialu_mem_imm);
8101 %}
// Three-operand 64-bit add via LEA: dst = src0 + imm32, flags untouched.
8103 instruct leaL_rReg_immL(rRegL dst, rRegL src0, immL32 src1)
8104 %{
8105 match(Set dst (AddL src0 src1));
8107 ins_cost(110);
8108 format %{ "leaq $dst, [$src0 + $src1]\t# long" %}
8109 opcode(0x8D); /* 0x8D /r */
8110 ins_encode(REX_reg_reg_wide(dst, src0), OpcP, reg_lea(dst, src0, src1)); // XXX
8111 ins_pipe(ialu_reg_reg);
8112 %}
// Pointer arithmetic: AddP of a 64-bit offset uses the same ADD/LEA
// encodings as the long forms (pointers are 64-bit on amd64).
8114 instruct addP_rReg(rRegP dst, rRegL src, rFlagsReg cr)
8115 %{
8116 match(Set dst (AddP dst src));
8117 effect(KILL cr);
8119 format %{ "addq $dst, $src\t# ptr" %}
8120 opcode(0x03);
8121 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
8122 ins_pipe(ialu_reg_reg);
8123 %}
// Pointer += 32-bit sign-extended immediate offset.
8125 instruct addP_rReg_imm(rRegP dst, immL32 src, rFlagsReg cr)
8126 %{
8127 match(Set dst (AddP dst src));
8128 effect(KILL cr);
8130 format %{ "addq $dst, $src\t# ptr" %}
8131 opcode(0x81, 0x00); /* /0 id */
8132 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
8133 ins_pipe( ialu_reg );
8134 %}
8136 // XXX addP mem ops ????
// Three-operand pointer add via LEA: dst = src0 + imm32, flags untouched.
8138 instruct leaP_rReg_imm(rRegP dst, rRegP src0, immL32 src1)
8139 %{
8140 match(Set dst (AddP src0 src1));
8142 ins_cost(110);
8143 format %{ "leaq $dst, [$src0 + $src1]\t# ptr" %}
8144 opcode(0x8D); /* 0x8D /r */
8145 ins_encode(REX_reg_reg_wide(dst, src0), OpcP, reg_lea(dst, src0, src1));// XXX
8146 ins_pipe(ialu_reg_reg);
8147 %}
// Compiler-internal cast nodes: these exist only to carry type information
// in the ideal graph and emit no machine code at all (size(0), empty
// encoding, empty pipe).
8149 instruct checkCastPP(rRegP dst)
8150 %{
8151 match(Set dst (CheckCastPP dst));
8153 size(0);
8154 format %{ "# checkcastPP of $dst" %}
8155 ins_encode(/* empty encoding */);
8156 ins_pipe(empty);
8157 %}
// CastPP: zero-size type-only cast of a pointer.
8159 instruct castPP(rRegP dst)
8160 %{
8161 match(Set dst (CastPP dst));
8163 size(0);
8164 format %{ "# castPP of $dst" %}
8165 ins_encode(/* empty encoding */);
8166 ins_pipe(empty);
8167 %}
// CastII: zero-size type-only cast of an int (also zero cost).
8169 instruct castII(rRegI dst)
8170 %{
8171 match(Set dst (CastII dst));
8173 size(0);
8174 format %{ "# castII of $dst" %}
8175 ins_encode(/* empty encoding */);
8176 ins_cost(0);
8177 ins_pipe(empty);
8178 %}
8180 // LoadP-locked same as a regular LoadP when used with compare-swap
// (Plain MOV 0x8B with REX.W — the "locked" flavor matters only to the
// ideal graph, which pairs it with a StoreConditional.)
8181 instruct loadPLocked(rRegP dst, memory mem)
8182 %{
8183 match(Set dst (LoadPLocked mem));
8185 ins_cost(125); // XXX
8186 format %{ "movq $dst, $mem\t# ptr locked" %}
8187 opcode(0x8B);
8188 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
8189 ins_pipe(ialu_reg_mem); // XXX
8190 %}
8192 // LoadL-locked - same as a regular LoadL when used with compare-swap
8193 instruct loadLLocked(rRegL dst, memory mem)
8194 %{
8195 match(Set dst (LoadLLocked mem));
8197 ins_cost(125); // XXX
8198 format %{ "movq $dst, $mem\t# long locked" %}
8199 opcode(0x8B);
8200 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
8201 ins_pipe(ialu_reg_mem); // XXX
8202 %}
8204 // Conditional-store of the updated heap-top.
8205 // Used during allocation of the shared heap.
8206 // Sets flags (EQ) on success. Implemented with a CMPXCHG on Intel.
// LOCK CMPXCHG [heap_top_ptr], newval with oldval implicitly in RAX:
// ZF is set iff RAX matched memory and the store happened; the match rule
// produces cr, not a value.
// NOTE(review): unlike storeI/LConditional below, oldval (rax) is not
// marked KILL here even though CMPXCHG writes the memory value into RAX on
// failure — presumably the allocation retry loop reloads the heap top
// anyway; confirm before relying on rax after this node.
8208 instruct storePConditional(memory heap_top_ptr,
8209 rax_RegP oldval, rRegP newval,
8210 rFlagsReg cr)
8211 %{
8212 match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));
8214 format %{ "cmpxchgq $heap_top_ptr, $newval\t# (ptr) "
8215 "If rax == $heap_top_ptr then store $newval into $heap_top_ptr" %}
8216 opcode(0x0F, 0xB1);
8217 ins_encode(lock_prefix,
8218 REX_reg_mem_wide(newval, heap_top_ptr),
8219 OpcP, OpcS,
8220 reg_mem(newval, heap_top_ptr));
8221 ins_pipe(pipe_cmpxchg);
8222 %}
8224 // Conditional-store of an int value.
8225 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
// oldval lives in RAX and is KILLed: on failure CMPXCHG overwrites RAX
// with the current memory contents.
8226 instruct storeIConditional(memory mem, rax_RegI oldval, rRegI newval, rFlagsReg cr)
8227 %{
8228 match(Set cr (StoreIConditional mem (Binary oldval newval)));
8229 effect(KILL oldval);
8231 format %{ "cmpxchgl $mem, $newval\t# If rax == $mem then store $newval into $mem" %}
8232 opcode(0x0F, 0xB1);
8233 ins_encode(lock_prefix,
8234 REX_reg_mem(newval, mem),
8235 OpcP, OpcS,
8236 reg_mem(newval, mem));
8237 ins_pipe(pipe_cmpxchg);
8238 %}
8240 // Conditional-store of a long value.
8241 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
8242 instruct storeLConditional(memory mem, rax_RegL oldval, rRegL newval, rFlagsReg cr)
8243 %{
8244 match(Set cr (StoreLConditional mem (Binary oldval newval)));
8245 effect(KILL oldval);
8247 format %{ "cmpxchgq $mem, $newval\t# If rax == $mem then store $newval into $mem" %}
8248 opcode(0x0F, 0xB1);
8249 ins_encode(lock_prefix,
8250 REX_reg_mem_wide(newval, mem),
8251 OpcP, OpcS,
8252 reg_mem(newval, mem));
8253 ins_pipe(pipe_cmpxchg);
8254 %}
8257 // XXX No flag versions for CompareAndSwap{P,I,L} because matcher can't match them
// CAS producing a boolean result in a general register:
//   lock cmpxchg [mem_ptr], newval   (expected value implicitly in RAX)
//   sete res                         (res = 1 if the swap succeeded)
//   movzbl res, res                  (zero-extend the byte to 32 bits)
// Both cr and oldval (RAX) are clobbered — CMPXCHG writes RAX on failure.
8258 instruct compareAndSwapP(rRegI res,
8259 memory mem_ptr,
8260 rax_RegP oldval, rRegP newval,
8261 rFlagsReg cr)
8262 %{
8263 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
8264 effect(KILL cr, KILL oldval);
8266 format %{ "cmpxchgq $mem_ptr,$newval\t# "
8267 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
8268 "sete $res\n\t"
8269 "movzbl $res, $res" %}
8270 opcode(0x0F, 0xB1);
8271 ins_encode(lock_prefix,
8272 REX_reg_mem_wide(newval, mem_ptr),
8273 OpcP, OpcS,
8274 reg_mem(newval, mem_ptr),
8275 REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
8276 REX_reg_breg(res, res), // movzbl
8277 Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
8278 ins_pipe( pipe_cmpxchg );
8279 %}
// 64-bit long CAS; identical sequence with REX.W cmpxchgq.
8281 instruct compareAndSwapL(rRegI res,
8282 memory mem_ptr,
8283 rax_RegL oldval, rRegL newval,
8284 rFlagsReg cr)
8285 %{
8286 match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
8287 effect(KILL cr, KILL oldval);
8289 format %{ "cmpxchgq $mem_ptr,$newval\t# "
8290 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
8291 "sete $res\n\t"
8292 "movzbl $res, $res" %}
8293 opcode(0x0F, 0xB1);
8294 ins_encode(lock_prefix,
8295 REX_reg_mem_wide(newval, mem_ptr),
8296 OpcP, OpcS,
8297 reg_mem(newval, mem_ptr),
8298 REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
8299 REX_reg_breg(res, res), // movzbl
8300 Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
8301 ins_pipe( pipe_cmpxchg );
8302 %}
// 32-bit int CAS; same sequence without the REX.W prefix.
8304 instruct compareAndSwapI(rRegI res,
8305 memory mem_ptr,
8306 rax_RegI oldval, rRegI newval,
8307 rFlagsReg cr)
8308 %{
8309 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
8310 effect(KILL cr, KILL oldval);
8312 format %{ "cmpxchgl $mem_ptr,$newval\t# "
8313 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
8314 "sete $res\n\t"
8315 "movzbl $res, $res" %}
8316 opcode(0x0F, 0xB1);
8317 ins_encode(lock_prefix,
8318 REX_reg_mem(newval, mem_ptr),
8319 OpcP, OpcS,
8320 reg_mem(newval, mem_ptr),
8321 REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
8322 REX_reg_breg(res, res), // movzbl
8323 Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
8324 ins_pipe( pipe_cmpxchg );
8325 %}
// CAS of a compressed (narrow) oop — 32-bit cmpxchgl on the encoded form.
8328 instruct compareAndSwapN(rRegI res,
8329 memory mem_ptr,
8330 rax_RegN oldval, rRegN newval,
8331 rFlagsReg cr) %{
8332 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
8333 effect(KILL cr, KILL oldval);
8335 format %{ "cmpxchgl $mem_ptr,$newval\t# "
8336 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
8337 "sete $res\n\t"
8338 "movzbl $res, $res" %}
8339 opcode(0x0F, 0xB1);
8340 ins_encode(lock_prefix,
8341 REX_reg_mem(newval, mem_ptr),
8342 OpcP, OpcS,
8343 reg_mem(newval, mem_ptr),
8344 REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
8345 REX_reg_breg(res, res), // movzbl
8346 Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
8347 ins_pipe( pipe_cmpxchg );
8348 %}
8350 //----------Subtraction Instructions-------------------------------------------
8352 // Integer Subtraction Instructions
// 32-bit SUB.  Opcode map: 0x2B = SUB r32, r/m32; 0x29 = SUB r/m32, r32;
// 0x81 /5 = SUB r/m32, imm32.  All forms clobber flags.
8353 instruct subI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
8354 %{
8355 match(Set dst (SubI dst src));
8356 effect(KILL cr);
8358 format %{ "subl $dst, $src\t# int" %}
8359 opcode(0x2B);
8360 ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
8361 ins_pipe(ialu_reg_reg);
8362 %}
// Register -= immediate.
8364 instruct subI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
8365 %{
8366 match(Set dst (SubI dst src));
8367 effect(KILL cr);
8369 format %{ "subl $dst, $src\t# int" %}
8370 opcode(0x81, 0x05); /* Opcode 81 /5 */
8371 ins_encode(OpcSErm(dst, src), Con8or32(src));
8372 ins_pipe(ialu_reg);
8373 %}
// Register -= memory (load folded).
8375 instruct subI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
8376 %{
8377 match(Set dst (SubI dst (LoadI src)));
8378 effect(KILL cr);
8380 ins_cost(125);
8381 format %{ "subl $dst, $src\t# int" %}
8382 opcode(0x2B);
8383 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
8384 ins_pipe(ialu_reg_mem);
8385 %}
// Memory -= register (RMW).
8387 instruct subI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
8388 %{
8389 match(Set dst (StoreI dst (SubI (LoadI dst) src)));
8390 effect(KILL cr);
8392 ins_cost(150);
8393 format %{ "subl $dst, $src\t# int" %}
8394 opcode(0x29); /* Opcode 29 /r */
8395 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
8396 ins_pipe(ialu_mem_reg);
8397 %}
// Memory -= immediate (RMW).
8399 instruct subI_mem_imm(memory dst, immI src, rFlagsReg cr)
8400 %{
8401 match(Set dst (StoreI dst (SubI (LoadI dst) src)));
8402 effect(KILL cr);
8404 ins_cost(125); // XXX
8405 format %{ "subl $dst, $src\t# int" %}
8406 opcode(0x81); /* Opcode 81 /5 id */
8407 ins_encode(REX_mem(dst), OpcSE(src), RM_opc_mem(0x05, dst), Con8or32(src));
8408 ins_pipe(ialu_mem_imm);
8409 %}
// 64-bit long SUB: same opcode map as the int forms with REX.W via the
// *_wide emitters.
8411 instruct subL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
8412 %{
8413 match(Set dst (SubL dst src));
8414 effect(KILL cr);
8416 format %{ "subq $dst, $src\t# long" %}
8417 opcode(0x2B);
8418 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
8419 ins_pipe(ialu_reg_reg);
8420 %}
// Register -= 32-bit sign-extended immediate.
// NOTE(review): dst is declared rRegI although this matches SubL and uses
// the wide (REX.W) encoder — inconsistent with subL_rReg above; presumably
// benign since rRegI/rRegL name the same registers, but confirm.
8422 instruct subL_rReg_imm(rRegI dst, immL32 src, rFlagsReg cr)
8423 %{
8424 match(Set dst (SubL dst src));
8425 effect(KILL cr);
8427 format %{ "subq $dst, $src\t# long" %}
8428 opcode(0x81, 0x05); /* Opcode 81 /5 */
8429 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
8430 ins_pipe(ialu_reg);
8431 %}
// Register -= memory (load folded).
8433 instruct subL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
8434 %{
8435 match(Set dst (SubL dst (LoadL src)));
8436 effect(KILL cr);
8438 ins_cost(125);
8439 format %{ "subq $dst, $src\t# long" %}
8440 opcode(0x2B);
8441 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
8442 ins_pipe(ialu_reg_mem);
8443 %}
// Memory -= register (RMW).
8445 instruct subL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
8446 %{
8447 match(Set dst (StoreL dst (SubL (LoadL dst) src)));
8448 effect(KILL cr);
8450 ins_cost(150);
8451 format %{ "subq $dst, $src\t# long" %}
8452 opcode(0x29); /* Opcode 29 /r */
8453 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
8454 ins_pipe(ialu_mem_reg);
8455 %}
// Memory -= immediate (RMW).
8457 instruct subL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
8458 %{
8459 match(Set dst (StoreL dst (SubL (LoadL dst) src)));
8460 effect(KILL cr);
8462 ins_cost(125); // XXX
8463 format %{ "subq $dst, $src\t# long" %}
8464 opcode(0x81); /* Opcode 81 /5 id */
8465 ins_encode(REX_mem_wide(dst),
8466 OpcSE(src), RM_opc_mem(0x05, dst), Con8or32(src));
8467 ins_pipe(ialu_mem_imm);
8468 %}
8470 // Subtract from a pointer
8471 // XXX hmpf???
// Matches ptr + (0 - int) and emits a 64-bit subq of the int register.
// NOTE(review): src is a 32-bit int register subtracted with a 64-bit op —
// presumably the matcher only forms this when src's upper bits are clean;
// confirm before touching.
8472 instruct subP_rReg(rRegP dst, rRegI src, immI0 zero, rFlagsReg cr)
8473 %{
8474 match(Set dst (AddP dst (SubI zero src)));
8475 effect(KILL cr);
8477 format %{ "subq $dst, $src\t# ptr - int" %}
8478 opcode(0x2B);
8479 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
8480 ins_pipe(ialu_reg_reg);
8481 %}
// Negation: 0 - x matched as NEG (F7 /3).  The "secondary" opcode byte
// (0x03) is the /3 reg field of the ModRM byte.
8483 instruct negI_rReg(rRegI dst, immI0 zero, rFlagsReg cr)
8484 %{
8485 match(Set dst (SubI zero dst));
8486 effect(KILL cr);
8488 format %{ "negl $dst\t# int" %}
8489 opcode(0xF7, 0x03); // Opcode F7 /3
8490 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8491 ins_pipe(ialu_reg);
8492 %}
// NEG directly on a memory operand (RMW).
8494 instruct negI_mem(memory dst, immI0 zero, rFlagsReg cr)
8495 %{
8496 match(Set dst (StoreI dst (SubI zero (LoadI dst))));
8497 effect(KILL cr);
8499 format %{ "negl $dst\t# int" %}
8500 opcode(0xF7, 0x03); // Opcode F7 /3
8501 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
8502 ins_pipe(ialu_reg);
8503 %}
// 64-bit negation (negq).
8505 instruct negL_rReg(rRegL dst, immL0 zero, rFlagsReg cr)
8506 %{
8507 match(Set dst (SubL zero dst));
8508 effect(KILL cr);
8510 format %{ "negq $dst\t# long" %}
8511 opcode(0xF7, 0x03); // Opcode F7 /3
8512 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
8513 ins_pipe(ialu_reg);
8514 %}
// 64-bit NEG on a memory operand (RMW).
8516 instruct negL_mem(memory dst, immL0 zero, rFlagsReg cr)
8517 %{
8518 match(Set dst (StoreL dst (SubL zero (LoadL dst))));
8519 effect(KILL cr);
8521 format %{ "negq $dst\t# long" %}
8522 opcode(0xF7, 0x03); // Opcode F7 /3
8523 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
8524 ins_pipe(ialu_reg);
8525 %}
8528 //----------Multiplication/Division Instructions-------------------------------
8529 // Integer Multiplication Instructions
8530 // Multiply Register
// Two-operand IMUL (0F AF) and three-operand IMUL-with-immediate (69 /r id,
// or the 6B short form selected by OpcSE/Con8or32 when the immediate fits
// in 8 bits).  All forms clobber flags and run on the alu0 pipe.
8532 instruct mulI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
8533 %{
8534 match(Set dst (MulI dst src));
8535 effect(KILL cr);
8537 ins_cost(300);
8538 format %{ "imull $dst, $src\t# int" %}
8539 opcode(0x0F, 0xAF);
8540 ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
8541 ins_pipe(ialu_reg_reg_alu0);
8542 %}
// dst = src * imm, three-operand form (src need not equal dst).
8544 instruct mulI_rReg_imm(rRegI dst, rRegI src, immI imm, rFlagsReg cr)
8545 %{
8546 match(Set dst (MulI src imm));
8547 effect(KILL cr);
8549 ins_cost(300);
8550 format %{ "imull $dst, $src, $imm\t# int" %}
8551 opcode(0x69); /* 69 /r id */
8552 ins_encode(REX_reg_reg(dst, src),
8553 OpcSE(imm), reg_reg(dst, src), Con8or32(imm));
8554 ins_pipe(ialu_reg_reg_alu0);
8555 %}
// dst *= memory (load folded).
8557 instruct mulI_mem(rRegI dst, memory src, rFlagsReg cr)
8558 %{
8559 match(Set dst (MulI dst (LoadI src)));
8560 effect(KILL cr);
8562 ins_cost(350);
8563 format %{ "imull $dst, $src\t# int" %}
8564 opcode(0x0F, 0xAF);
8565 ins_encode(REX_reg_mem(dst, src), OpcP, OpcS, reg_mem(dst, src));
8566 ins_pipe(ialu_reg_mem_alu0);
8567 %}
// dst = [src] * imm, three-operand form with a memory source.
8569 instruct mulI_mem_imm(rRegI dst, memory src, immI imm, rFlagsReg cr)
8570 %{
8571 match(Set dst (MulI (LoadI src) imm));
8572 effect(KILL cr);
8574 ins_cost(300);
8575 format %{ "imull $dst, $src, $imm\t# int" %}
8576 opcode(0x69); /* 69 /r id */
8577 ins_encode(REX_reg_mem(dst, src),
8578 OpcSE(imm), reg_mem(dst, src), Con8or32(imm));
8579 ins_pipe(ialu_reg_mem_alu0);
8580 %}
// 64-bit counterparts of the four int multiplies (REX.W via *_wide).
8582 instruct mulL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
8583 %{
8584 match(Set dst (MulL dst src));
8585 effect(KILL cr);
8587 ins_cost(300);
8588 format %{ "imulq $dst, $src\t# long" %}
8589 opcode(0x0F, 0xAF);
8590 ins_encode(REX_reg_reg_wide(dst, src), OpcP, OpcS, reg_reg(dst, src));
8591 ins_pipe(ialu_reg_reg_alu0);
8592 %}
8594 instruct mulL_rReg_imm(rRegL dst, rRegL src, immL32 imm, rFlagsReg cr)
8595 %{
8596 match(Set dst (MulL src imm));
8597 effect(KILL cr);
8599 ins_cost(300);
8600 format %{ "imulq $dst, $src, $imm\t# long" %}
8601 opcode(0x69); /* 69 /r id */
8602 ins_encode(REX_reg_reg_wide(dst, src),
8603 OpcSE(imm), reg_reg(dst, src), Con8or32(imm));
8604 ins_pipe(ialu_reg_reg_alu0);
8605 %}
8607 instruct mulL_mem(rRegL dst, memory src, rFlagsReg cr)
8608 %{
8609 match(Set dst (MulL dst (LoadL src)));
8610 effect(KILL cr);
8612 ins_cost(350);
8613 format %{ "imulq $dst, $src\t# long" %}
8614 opcode(0x0F, 0xAF);
8615 ins_encode(REX_reg_mem_wide(dst, src), OpcP, OpcS, reg_mem(dst, src));
8616 ins_pipe(ialu_reg_mem_alu0);
8617 %}
8619 instruct mulL_mem_imm(rRegL dst, memory src, immL32 imm, rFlagsReg cr)
8620 %{
8621 match(Set dst (MulL (LoadL src) imm));
8622 effect(KILL cr);
8624 ins_cost(300);
8625 format %{ "imulq $dst, $src, $imm\t# long" %}
8626 opcode(0x69); /* 69 /r id */
8627 ins_encode(REX_reg_mem_wide(dst, src),
8628 OpcSE(imm), reg_mem(dst, src), Con8or32(imm));
8629 ins_pipe(ialu_reg_mem_alu0);
8630 %}
// High 64 bits of a 128-bit signed product: one-operand IMUL (F7 /5) with
// RAX as the implicit multiplicand; the high half lands in RDX (dst).
// RAX is consumed and destroyed (USE_KILL).
8632 instruct mulHiL_rReg(rdx_RegL dst, no_rax_RegL src, rax_RegL rax, rFlagsReg cr)
8633 %{
8634 match(Set dst (MulHiL src rax));
8635 effect(USE_KILL rax, KILL cr);
8637 ins_cost(300);
8638 format %{ "imulq RDX:RAX, RAX, $src\t# mulhi" %}
8639 opcode(0xF7, 0x5); /* Opcode F7 /5 */
8640 ins_encode(REX_reg_wide(src), OpcP, reg_opc(src));
8641 ins_pipe(ialu_reg_reg_alu0);
8642 %}
// Signed 32-bit divide.  The cdql_enc prologue (see format) special-cases
// dividend == 0x80000000 with divisor == -1: IDIV would raise #DE on that
// overflow, so the sequence produces the wrapped result directly and skips
// the divide.  Quotient lands in RAX; RDX (remainder) and flags are killed.
8644 instruct divI_rReg(rax_RegI rax, rdx_RegI rdx, no_rax_rdx_RegI div,
8645 rFlagsReg cr)
8646 %{
8647 match(Set rax (DivI rax div));
8648 effect(KILL rdx, KILL cr);
8650 ins_cost(30*100+10*100); // XXX
8651 format %{ "cmpl rax, 0x80000000\t# idiv\n\t"
8652 "jne,s normal\n\t"
8653 "xorl rdx, rdx\n\t"
8654 "cmpl $div, -1\n\t"
8655 "je,s done\n"
8656 "normal: cdql\n\t"
8657 "idivl $div\n"
8658 "done:" %}
8659 opcode(0xF7, 0x7); /* Opcode F7 /7 */
8660 ins_encode(cdql_enc(div), REX_reg(div), OpcP, reg_opc(div));
8661 ins_pipe(ialu_reg_reg_alu0);
8662 %}
// Signed 64-bit divide, same min-long / -1 overflow guard via cdqq_enc.
8664 instruct divL_rReg(rax_RegL rax, rdx_RegL rdx, no_rax_rdx_RegL div,
8665 rFlagsReg cr)
8666 %{
8667 match(Set rax (DivL rax div));
8668 effect(KILL rdx, KILL cr);
8670 ins_cost(30*100+10*100); // XXX
8671 format %{ "movq rdx, 0x8000000000000000\t# ldiv\n\t"
8672 "cmpq rax, rdx\n\t"
8673 "jne,s normal\n\t"
8674 "xorl rdx, rdx\n\t"
8675 "cmpq $div, -1\n\t"
8676 "je,s done\n"
8677 "normal: cdqq\n\t"
8678 "idivq $div\n"
8679 "done:" %}
8680 opcode(0xF7, 0x7); /* Opcode F7 /7 */
8681 ins_encode(cdqq_enc(div), REX_reg_wide(div), OpcP, reg_opc(div));
8682 ins_pipe(ialu_reg_reg_alu0);
8683 %}
8685 // Integer DIVMOD with Register, both quotient and mod results
// IDIV naturally produces both: quotient in RAX, remainder in RDX, so the
// fused DivModI node defines both registers and kills only the flags.
8686 instruct divModI_rReg_divmod(rax_RegI rax, rdx_RegI rdx, no_rax_rdx_RegI div,
8687 rFlagsReg cr)
8688 %{
8689 match(DivModI rax div);
8690 effect(KILL cr);
8692 ins_cost(30*100+10*100); // XXX
8693 format %{ "cmpl rax, 0x80000000\t# idiv\n\t"
8694 "jne,s normal\n\t"
8695 "xorl rdx, rdx\n\t"
8696 "cmpl $div, -1\n\t"
8697 "je,s done\n"
8698 "normal: cdql\n\t"
8699 "idivl $div\n"
8700 "done:" %}
8701 opcode(0xF7, 0x7); /* Opcode F7 /7 */
8702 ins_encode(cdql_enc(div), REX_reg(div), OpcP, reg_opc(div));
8703 ins_pipe(pipe_slow);
8704 %}
8706 // Long DIVMOD with Register, both quotient and mod results
8707 instruct divModL_rReg_divmod(rax_RegL rax, rdx_RegL rdx, no_rax_rdx_RegL div,
8708 rFlagsReg cr)
8709 %{
8710 match(DivModL rax div);
8711 effect(KILL cr);
8713 ins_cost(30*100+10*100); // XXX
8714 format %{ "movq rdx, 0x8000000000000000\t# ldiv\n\t"
8715 "cmpq rax, rdx\n\t"
8716 "jne,s normal\n\t"
8717 "xorl rdx, rdx\n\t"
8718 "cmpq $div, -1\n\t"
8719 "je,s done\n"
8720 "normal: cdqq\n\t"
8721 "idivq $div\n"
8722 "done:" %}
8723 opcode(0xF7, 0x7); /* Opcode F7 /7 */
8724 ins_encode(cdqq_enc(div), REX_reg_wide(div), OpcP, reg_opc(div));
8725 ins_pipe(pipe_slow);
8726 %}
8728 //----------- DivL-By-Constant-Expansions--------------------------------------
8729 // DivI cases are handled by the compiler
8731 // Magic constant, reciprocal of 10
// Helper: load the magic multiplier 0x6666666666666667 (the fixed-point
// reciprocal of 10 for 64-bit signed magic-number division).
// NOTE(review): the format string prints "#0x666666666666667" — one digit
// short of the constant actually encoded on the next line; display-only.
8732 instruct loadConL_0x6666666666666667(rRegL dst)
8733 %{
8734 effect(DEF dst);
8736 format %{ "movq $dst, #0x666666666666667\t# Used in div-by-10" %}
8737 ins_encode(load_immL(dst, 0x6666666666666667));
8738 ins_pipe(ialu_reg);
8739 %}
// Helper: one-operand IMUL (F7 /5) — high half of rax * src into rdx (dst).
8741 instruct mul_hi(rdx_RegL dst, no_rax_RegL src, rax_RegL rax, rFlagsReg cr)
8742 %{
8743 effect(DEF dst, USE src, USE_KILL rax, KILL cr);
8745 format %{ "imulq rdx:rax, rax, $src\t# Used in div-by-10" %}
8746 opcode(0xF7, 0x5); /* Opcode F7 /5 */
8747 ins_encode(REX_reg_wide(src), OpcP, reg_opc(src));
8748 ins_pipe(ialu_reg_reg_alu0);
8749 %}
// Helper: arithmetic shift right by 63 — extracts the sign bit in place.
8751 instruct sarL_rReg_63(rRegL dst, rFlagsReg cr)
8752 %{
8753 effect(USE_DEF dst, KILL cr);
8755 format %{ "sarq $dst, #63\t# Used in div-by-10" %}
8756 opcode(0xC1, 0x7); /* C1 /7 ib */
8757 ins_encode(reg_opc_imm_wide(dst, 0x3F));
8758 ins_pipe(ialu_reg);
8759 %}
// Helper: arithmetic shift right by 2 — the final scaling for divisor 10.
8761 instruct sarL_rReg_2(rRegL dst, rFlagsReg cr)
8762 %{
8763 effect(USE_DEF dst, KILL cr);
8765 format %{ "sarq $dst, #2\t# Used in div-by-10" %}
8766 opcode(0xC1, 0x7); /* C1 /7 ib */
8767 ins_encode(reg_opc_imm_wide(dst, 0x2));
8768 ins_pipe(ialu_reg);
8769 %}
// src / 10 via magic-number multiplication:
//   dst = hi64(src * 0x6666666666666667) >> 2  -  (src >> 63)
// The final subtraction corrects rounding for negative dividends.
// Note: sarL_rReg_63 is USE_DEF, so this expansion also clobbers src.
8771 instruct divL_10(rdx_RegL dst, no_rax_RegL src, immL10 div)
8772 %{
8773 match(Set dst (DivL src div));
8775 ins_cost((5+8)*100);
8776 expand %{
8777 rax_RegL rax; // Killed temp
8778 rFlagsReg cr; // Killed
8779 loadConL_0x6666666666666667(rax); // movq rax, 0x6666666666666667
8780 mul_hi(dst, src, rax, cr); // mulq rdx:rax <= rax * $src
8781 sarL_rReg_63(src, cr); // sarq src, 63
8782 sarL_rReg_2(dst, cr); // sarq rdx, 2
8783 subL_rReg(dst, src, cr); // subl rdx, src
8784 %}
8785 %}
8787 //-----------------------------------------------------------------------------
// Signed remainder: same guarded IDIV sequence as divI/divL, but the match
// rule takes the result from RDX (the remainder register) and kills RAX.
// The min-value / -1 guard yields remainder 0 instead of an #DE trap.
8789 instruct modI_rReg(rdx_RegI rdx, rax_RegI rax, no_rax_rdx_RegI div,
8790 rFlagsReg cr)
8791 %{
8792 match(Set rdx (ModI rax div));
8793 effect(KILL rax, KILL cr);
8795 ins_cost(300); // XXX
8796 format %{ "cmpl rax, 0x80000000\t# irem\n\t"
8797 "jne,s normal\n\t"
8798 "xorl rdx, rdx\n\t"
8799 "cmpl $div, -1\n\t"
8800 "je,s done\n"
8801 "normal: cdql\n\t"
8802 "idivl $div\n"
8803 "done:" %}
8804 opcode(0xF7, 0x7); /* Opcode F7 /7 */
8805 ins_encode(cdql_enc(div), REX_reg(div), OpcP, reg_opc(div));
8806 ins_pipe(ialu_reg_reg_alu0);
8807 %}
// 64-bit signed remainder with the min-long / -1 guard.
8809 instruct modL_rReg(rdx_RegL rdx, rax_RegL rax, no_rax_rdx_RegL div,
8810 rFlagsReg cr)
8811 %{
8812 match(Set rdx (ModL rax div));
8813 effect(KILL rax, KILL cr);
8815 ins_cost(300); // XXX
8816 format %{ "movq rdx, 0x8000000000000000\t# lrem\n\t"
8817 "cmpq rax, rdx\n\t"
8818 "jne,s normal\n\t"
8819 "xorl rdx, rdx\n\t"
8820 "cmpq $div, -1\n\t"
8821 "je,s done\n"
8822 "normal: cdqq\n\t"
8823 "idivq $div\n"
8824 "done:" %}
8825 opcode(0xF7, 0x7); /* Opcode F7 /7 */
8826 ins_encode(cdqq_enc(div), REX_reg_wide(div), OpcP, reg_opc(div));
8827 ins_pipe(ialu_reg_reg_alu0);
8828 %}
8830 // Integer Shift Instructions
// Shift opcode map used throughout: D1 /n = shift by 1, C1 /n ib = shift
// by imm8, D3 /n = shift by CL.  /4 = SAL, /7 = SAR, /5 = SHR.  Variable
// shift counts must already be in RCX (rcx_RegI), matching the ISA.
8831 // Shift Left by one
8832 instruct salI_rReg_1(rRegI dst, immI1 shift, rFlagsReg cr)
8833 %{
8834 match(Set dst (LShiftI dst shift));
8835 effect(KILL cr);
8837 format %{ "sall $dst, $shift" %}
8838 opcode(0xD1, 0x4); /* D1 /4 */
8839 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8840 ins_pipe(ialu_reg);
8841 %}
8843 // Shift Left by one
8844 instruct salI_mem_1(memory dst, immI1 shift, rFlagsReg cr)
8845 %{
8846 match(Set dst (StoreI dst (LShiftI (LoadI dst) shift)));
8847 effect(KILL cr);
8849 format %{ "sall $dst, $shift\t" %}
8850 opcode(0xD1, 0x4); /* D1 /4 */
8851 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
8852 ins_pipe(ialu_mem_imm);
8853 %}
8855 // Shift Left by 8-bit immediate
8856 instruct salI_rReg_imm(rRegI dst, immI8 shift, rFlagsReg cr)
8857 %{
8858 match(Set dst (LShiftI dst shift));
8859 effect(KILL cr);
8861 format %{ "sall $dst, $shift" %}
8862 opcode(0xC1, 0x4); /* C1 /4 ib */
8863 ins_encode(reg_opc_imm(dst, shift));
8864 ins_pipe(ialu_reg);
8865 %}
8867 // Shift Left by 8-bit immediate
8868 instruct salI_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
8869 %{
8870 match(Set dst (StoreI dst (LShiftI (LoadI dst) shift)));
8871 effect(KILL cr);
8873 format %{ "sall $dst, $shift" %}
8874 opcode(0xC1, 0x4); /* C1 /4 ib */
8875 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst), Con8or32(shift));
8876 ins_pipe(ialu_mem_imm);
8877 %}
8879 // Shift Left by variable
8880 instruct salI_rReg_CL(rRegI dst, rcx_RegI shift, rFlagsReg cr)
8881 %{
8882 match(Set dst (LShiftI dst shift));
8883 effect(KILL cr);
8885 format %{ "sall $dst, $shift" %}
8886 opcode(0xD3, 0x4); /* D3 /4 */
8887 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8888 ins_pipe(ialu_reg_reg);
8889 %}
8891 // Shift Left by variable
8892 instruct salI_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
8893 %{
8894 match(Set dst (StoreI dst (LShiftI (LoadI dst) shift)));
8895 effect(KILL cr);
8897 format %{ "sall $dst, $shift" %}
8898 opcode(0xD3, 0x4); /* D3 /4 */
8899 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
8900 ins_pipe(ialu_mem_reg);
8901 %}
8903 // Arithmetic shift right by one
8904 instruct sarI_rReg_1(rRegI dst, immI1 shift, rFlagsReg cr)
8905 %{
8906 match(Set dst (RShiftI dst shift));
8907 effect(KILL cr);
8909 format %{ "sarl $dst, $shift" %}
8910 opcode(0xD1, 0x7); /* D1 /7 */
8911 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8912 ins_pipe(ialu_reg);
8913 %}
8915 // Arithmetic shift right by one
8916 instruct sarI_mem_1(memory dst, immI1 shift, rFlagsReg cr)
8917 %{
8918 match(Set dst (StoreI dst (RShiftI (LoadI dst) shift)));
8919 effect(KILL cr);
8921 format %{ "sarl $dst, $shift" %}
8922 opcode(0xD1, 0x7); /* D1 /7 */
8923 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
8924 ins_pipe(ialu_mem_imm);
8925 %}
8927 // Arithmetic Shift Right by 8-bit immediate
// NOTE(review): pipe class is ialu_mem_imm although this is the register
// form (salI_rReg_imm uses ialu_reg) — looks copy-pasted; affects only the
// scheduler's cost model, not correctness.
8928 instruct sarI_rReg_imm(rRegI dst, immI8 shift, rFlagsReg cr)
8929 %{
8930 match(Set dst (RShiftI dst shift));
8931 effect(KILL cr);
8933 format %{ "sarl $dst, $shift" %}
8934 opcode(0xC1, 0x7); /* C1 /7 ib */
8935 ins_encode(reg_opc_imm(dst, shift));
8936 ins_pipe(ialu_mem_imm);
8937 %}
8939 // Arithmetic Shift Right by 8-bit immediate
8940 instruct sarI_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
8941 %{
8942 match(Set dst (StoreI dst (RShiftI (LoadI dst) shift)));
8943 effect(KILL cr);
8945 format %{ "sarl $dst, $shift" %}
8946 opcode(0xC1, 0x7); /* C1 /7 ib */
8947 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst), Con8or32(shift));
8948 ins_pipe(ialu_mem_imm);
8949 %}
8951 // Arithmetic Shift Right by variable
8952 instruct sarI_rReg_CL(rRegI dst, rcx_RegI shift, rFlagsReg cr)
8953 %{
8954 match(Set dst (RShiftI dst shift));
8955 effect(KILL cr);
8957 format %{ "sarl $dst, $shift" %}
8958 opcode(0xD3, 0x7); /* D3 /7 */
8959 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8960 ins_pipe(ialu_reg_reg);
8961 %}
8963 // Arithmetic Shift Right by variable
8964 instruct sarI_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
8965 %{
8966 match(Set dst (StoreI dst (RShiftI (LoadI dst) shift)));
8967 effect(KILL cr);
8969 format %{ "sarl $dst, $shift" %}
8970 opcode(0xD3, 0x7); /* D3 /7 */
8971 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
8972 ins_pipe(ialu_mem_reg);
8973 %}
// ---- SHRL: logical (zero-filling) right shift of a 32-bit int ----
// Same six-variant pattern as SARL above, using /5 instead of /7.
// All forms clobber the condition codes (effect KILL cr).
8975 // Logical shift right by one
8976 instruct shrI_rReg_1(rRegI dst, immI1 shift, rFlagsReg cr)
8977 %{
8978 match(Set dst (URShiftI dst shift));
8979 effect(KILL cr);
8981 format %{ "shrl $dst, $shift" %}
8982 opcode(0xD1, 0x5); /* D1 /5 */
8983 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8984 ins_pipe(ialu_reg);
8985 %}
8987 // Logical shift right by one
8988 instruct shrI_mem_1(memory dst, immI1 shift, rFlagsReg cr)
8989 %{
8990 match(Set dst (StoreI dst (URShiftI (LoadI dst) shift)));
8991 effect(KILL cr);
8993 format %{ "shrl $dst, $shift" %}
8994 opcode(0xD1, 0x5); /* D1 /5 */
8995 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
8996 ins_pipe(ialu_mem_imm);
8997 %}
8999 // Logical Shift Right by 8-bit immediate
9000 instruct shrI_rReg_imm(rRegI dst, immI8 shift, rFlagsReg cr)
9001 %{
9002 match(Set dst (URShiftI dst shift));
9003 effect(KILL cr);
9005 format %{ "shrl $dst, $shift" %}
9006 opcode(0xC1, 0x5); /* C1 /5 ib */
9007 ins_encode(reg_opc_imm(dst, shift));
9008 ins_pipe(ialu_reg);
9009 %}
9011 // Logical Shift Right by 8-bit immediate
9012 instruct shrI_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
9013 %{
9014 match(Set dst (StoreI dst (URShiftI (LoadI dst) shift)));
9015 effect(KILL cr);
9017 format %{ "shrl $dst, $shift" %}
9018 opcode(0xC1, 0x5); /* C1 /5 ib */
9019 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst), Con8or32(shift));
9020 ins_pipe(ialu_mem_imm);
9021 %}
9023 // Logical Shift Right by variable
// Count must live in RCX: the CL-count encoding is fixed by the ISA.
9024 instruct shrI_rReg_CL(rRegI dst, rcx_RegI shift, rFlagsReg cr)
9025 %{
9026 match(Set dst (URShiftI dst shift));
9027 effect(KILL cr);
9029 format %{ "shrl $dst, $shift" %}
9030 opcode(0xD3, 0x5); /* D3 /5 */
9031 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
9032 ins_pipe(ialu_reg_reg);
9033 %}
9035 // Logical Shift Right by variable
9036 instruct shrI_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
9037 %{
9038 match(Set dst (StoreI dst (URShiftI (LoadI dst) shift)));
9039 effect(KILL cr);
9041 format %{ "shrl $dst, $shift" %}
9042 opcode(0xD3, 0x5); /* D3 /5 */
9043 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
9044 ins_pipe(ialu_mem_reg);
9045 %}
9047 // Long Shift Instructions
// ---- SALQ: left shift of a 64-bit long (D1/C1/D3 with /4) ----
// The *_wide REX encoders emit REX.W for the 64-bit operand size.
9048 // Shift Left by one
9049 instruct salL_rReg_1(rRegL dst, immI1 shift, rFlagsReg cr)
9050 %{
9051 match(Set dst (LShiftL dst shift));
9052 effect(KILL cr);
9054 format %{ "salq $dst, $shift" %}
9055 opcode(0xD1, 0x4); /* D1 /4 */
9056 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
9057 ins_pipe(ialu_reg);
9058 %}
9060 // Shift Left by one
9061 instruct salL_mem_1(memory dst, immI1 shift, rFlagsReg cr)
9062 %{
9063 match(Set dst (StoreL dst (LShiftL (LoadL dst) shift)));
9064 effect(KILL cr);
9066 format %{ "salq $dst, $shift" %}
9067 opcode(0xD1, 0x4); /* D1 /4 */
9068 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
9069 ins_pipe(ialu_mem_imm);
9070 %}
9072 // Shift Left by 8-bit immediate
9073 instruct salL_rReg_imm(rRegL dst, immI8 shift, rFlagsReg cr)
9074 %{
9075 match(Set dst (LShiftL dst shift));
9076 effect(KILL cr);
9078 format %{ "salq $dst, $shift" %}
9079 opcode(0xC1, 0x4); /* C1 /4 ib */
9080 ins_encode(reg_opc_imm_wide(dst, shift));
9081 ins_pipe(ialu_reg);
9082 %}
9084 // Shift Left by 8-bit immediate
9085 instruct salL_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
9086 %{
9087 match(Set dst (StoreL dst (LShiftL (LoadL dst) shift)));
9088 effect(KILL cr);
9090 format %{ "salq $dst, $shift" %}
9091 opcode(0xC1, 0x4); /* C1 /4 ib */
9092 ins_encode(REX_mem_wide(dst), OpcP,
9093 RM_opc_mem(secondary, dst), Con8or32(shift));
9094 ins_pipe(ialu_mem_imm);
9095 %}
9097 // Shift Left by variable
// Variable count is constrained to RCX (CL-count encoding).
9098 instruct salL_rReg_CL(rRegL dst, rcx_RegI shift, rFlagsReg cr)
9099 %{
9100 match(Set dst (LShiftL dst shift));
9101 effect(KILL cr);
9103 format %{ "salq $dst, $shift" %}
9104 opcode(0xD3, 0x4); /* D3 /4 */
9105 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
9106 ins_pipe(ialu_reg_reg);
9107 %}
9109 // Shift Left by variable
9110 instruct salL_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
9111 %{
9112 match(Set dst (StoreL dst (LShiftL (LoadL dst) shift)));
9113 effect(KILL cr);
9115 format %{ "salq $dst, $shift" %}
9116 opcode(0xD3, 0x4); /* D3 /4 */
9117 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
9118 ins_pipe(ialu_mem_reg);
9119 %}
// ---- SARQ: arithmetic right shift of a 64-bit long (/7, REX.W) ----
9121 // Arithmetic shift right by one
9122 instruct sarL_rReg_1(rRegL dst, immI1 shift, rFlagsReg cr)
9123 %{
9124 match(Set dst (RShiftL dst shift));
9125 effect(KILL cr);
9127 format %{ "sarq $dst, $shift" %}
9128 opcode(0xD1, 0x7); /* D1 /7 */
9129 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
9130 ins_pipe(ialu_reg);
9131 %}
9133 // Arithmetic shift right by one
9134 instruct sarL_mem_1(memory dst, immI1 shift, rFlagsReg cr)
9135 %{
9136 match(Set dst (StoreL dst (RShiftL (LoadL dst) shift)));
9137 effect(KILL cr);
9139 format %{ "sarq $dst, $shift" %}
9140 opcode(0xD1, 0x7); /* D1 /7 */
9141 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
9142 ins_pipe(ialu_mem_imm);
9143 %}
9145 // Arithmetic Shift Right by 8-bit immediate
9146 instruct sarL_rReg_imm(rRegL dst, immI8 shift, rFlagsReg cr)
9147 %{
9148 match(Set dst (RShiftL dst shift));
9149 effect(KILL cr);
9151 format %{ "sarq $dst, $shift" %}
9152 opcode(0xC1, 0x7); /* C1 /7 ib */
9153 ins_encode(reg_opc_imm_wide(dst, shift));
// NOTE(review): register-form instruction on the ialu_mem_imm pipe class,
// mirroring the quirk in sarI_rReg_imm (siblings use ialu_reg) — confirm
// whether this is intentional before changing.
9154 ins_pipe(ialu_mem_imm);
9155 %}
9157 // Arithmetic Shift Right by 8-bit immediate
9158 instruct sarL_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
9159 %{
9160 match(Set dst (StoreL dst (RShiftL (LoadL dst) shift)));
9161 effect(KILL cr);
9163 format %{ "sarq $dst, $shift" %}
9164 opcode(0xC1, 0x7); /* C1 /7 ib */
9165 ins_encode(REX_mem_wide(dst), OpcP,
9166 RM_opc_mem(secondary, dst), Con8or32(shift));
9167 ins_pipe(ialu_mem_imm);
9168 %}
9170 // Arithmetic Shift Right by variable
// Variable count is constrained to RCX (CL-count encoding).
9171 instruct sarL_rReg_CL(rRegL dst, rcx_RegI shift, rFlagsReg cr)
9172 %{
9173 match(Set dst (RShiftL dst shift));
9174 effect(KILL cr);
9176 format %{ "sarq $dst, $shift" %}
9177 opcode(0xD3, 0x7); /* D3 /7 */
9178 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
9179 ins_pipe(ialu_reg_reg);
9180 %}
9182 // Arithmetic Shift Right by variable
9183 instruct sarL_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
9184 %{
9185 match(Set dst (StoreL dst (RShiftL (LoadL dst) shift)));
9186 effect(KILL cr);
9188 format %{ "sarq $dst, $shift" %}
9189 opcode(0xD3, 0x7); /* D3 /7 */
9190 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
9191 ins_pipe(ialu_mem_reg);
9192 %}
// ---- SHRQ: logical right shift of a 64-bit long (/5, REX.W) ----
9194 // Logical shift right by one
9195 instruct shrL_rReg_1(rRegL dst, immI1 shift, rFlagsReg cr)
9196 %{
9197 match(Set dst (URShiftL dst shift));
9198 effect(KILL cr);
9200 format %{ "shrq $dst, $shift" %}
9201 opcode(0xD1, 0x5); /* D1 /5 */
9202 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst ));
9203 ins_pipe(ialu_reg);
9204 %}
9206 // Logical shift right by one
9207 instruct shrL_mem_1(memory dst, immI1 shift, rFlagsReg cr)
9208 %{
9209 match(Set dst (StoreL dst (URShiftL (LoadL dst) shift)));
9210 effect(KILL cr);
9212 format %{ "shrq $dst, $shift" %}
9213 opcode(0xD1, 0x5); /* D1 /5 */
9214 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
9215 ins_pipe(ialu_mem_imm);
9216 %}
9218 // Logical Shift Right by 8-bit immediate
9219 instruct shrL_rReg_imm(rRegL dst, immI8 shift, rFlagsReg cr)
9220 %{
9221 match(Set dst (URShiftL dst shift));
9222 effect(KILL cr);
9224 format %{ "shrq $dst, $shift" %}
9225 opcode(0xC1, 0x5); /* C1 /5 ib */
9226 ins_encode(reg_opc_imm_wide(dst, shift));
9227 ins_pipe(ialu_reg);
9228 %}
9231 // Logical Shift Right by 8-bit immediate
9232 instruct shrL_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
9233 %{
9234 match(Set dst (StoreL dst (URShiftL (LoadL dst) shift)));
9235 effect(KILL cr);
9237 format %{ "shrq $dst, $shift" %}
9238 opcode(0xC1, 0x5); /* C1 /5 ib */
9239 ins_encode(REX_mem_wide(dst), OpcP,
9240 RM_opc_mem(secondary, dst), Con8or32(shift));
9241 ins_pipe(ialu_mem_imm);
9242 %}
9244 // Logical Shift Right by variable
// Variable count is constrained to RCX (CL-count encoding).
9245 instruct shrL_rReg_CL(rRegL dst, rcx_RegI shift, rFlagsReg cr)
9246 %{
9247 match(Set dst (URShiftL dst shift));
9248 effect(KILL cr);
9250 format %{ "shrq $dst, $shift" %}
9251 opcode(0xD3, 0x5); /* D3 /5 */
9252 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
9253 ins_pipe(ialu_reg_reg);
9254 %}
9256 // Logical Shift Right by variable
9257 instruct shrL_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
9258 %{
9259 match(Set dst (StoreL dst (URShiftL (LoadL dst) shift)));
9260 effect(KILL cr);
9262 format %{ "shrq $dst, $shift" %}
9263 opcode(0xD3, 0x5); /* D3 /5 */
9264 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
9265 ins_pipe(ialu_mem_reg);
9266 %}
// ---- Narrowing idioms: shift-left-then-shift-right pairs are matched
// directly to a single sign-extending move (movsbl/movswl). ----
9268 // Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
9269 // This idiom is used by the compiler for the i2b bytecode.
// Matches (x << 24) >> 24 and emits MOVSBL (0F BE), sign-extending the
// low byte of src into dst.  REX_reg_breg forces a REX prefix where
// needed so the low byte of any GPR is addressable.
9270 instruct i2b(rRegI dst, rRegI src, immI_24 twentyfour)
9271 %{
9272 match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));
9274 format %{ "movsbl $dst, $src\t# i2b" %}
9275 opcode(0x0F, 0xBE);
9276 ins_encode(REX_reg_breg(dst, src), OpcP, OpcS, reg_reg(dst, src));
9277 ins_pipe(ialu_reg_reg);
9278 %}
9280 // Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
9281 // This idiom is used by the compiler for the i2s bytecode.
// Matches (x << 16) >> 16 and emits MOVSWL (0F BF), sign-extending the
// low 16 bits of src into dst.
9282 instruct i2s(rRegI dst, rRegI src, immI_16 sixteen)
9283 %{
9284 match(Set dst (RShiftI (LShiftI src sixteen) sixteen));
9286 format %{ "movswl $dst, $src\t# i2s" %}
9287 opcode(0x0F, 0xBF);
9288 ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
9289 ins_pipe(ialu_reg_reg);
9290 %}
9292 // ROL/ROR instructions
// The rol/ror "expand" instructions below have no match rule of their own;
// they exist only as targets for the expand %{ %} blocks of the matchable
// rotate idioms that follow (OrI of opposing shifts).  USE_DEF dst marks
// dst as read-modify-write; all forms clobber the flags.
9294 // ROL expand
9295 instruct rolI_rReg_imm1(rRegI dst, rFlagsReg cr) %{
9296 effect(KILL cr, USE_DEF dst);
9298 format %{ "roll $dst" %}
9299 opcode(0xD1, 0x0); /* Opcode D1 /0 */
9300 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
9301 ins_pipe(ialu_reg);
9302 %}
9304 instruct rolI_rReg_imm8(rRegI dst, immI8 shift, rFlagsReg cr) %{
9305 effect(USE_DEF dst, USE shift, KILL cr);
9307 format %{ "roll $dst, $shift" %}
9308 opcode(0xC1, 0x0); /* Opcode C1 /0 ib */
9309 ins_encode( reg_opc_imm(dst, shift) );
9310 ins_pipe(ialu_reg);
9311 %}
// Variable rotate: count lives in RCX, so dst must avoid RCX (no_rcx_RegI).
9313 instruct rolI_rReg_CL(no_rcx_RegI dst, rcx_RegI shift, rFlagsReg cr)
9314 %{
9315 effect(USE_DEF dst, USE shift, KILL cr);
9317 format %{ "roll $dst, $shift" %}
9318 opcode(0xD3, 0x0); /* Opcode D3 /0 */
9319 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
9320 ins_pipe(ialu_reg_reg);
9321 %}
9322 // end of ROL expand
9324 // Rotate Left by one
// (x << 1) | (x >>> -1): immI_M1 masks to 31, so this is rotate-left-by-1.
9325 instruct rolI_rReg_i1(rRegI dst, immI1 lshift, immI_M1 rshift, rFlagsReg cr)
9326 %{
9327 match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
9329 expand %{
9330 rolI_rReg_imm1(dst, cr);
9331 %}
9332 %}
9334 // Rotate Left by 8-bit immediate
// Predicate: the two shift constants must sum to 0 mod 32, i.e. the
// OrI of opposing shifts really is a 32-bit rotate.
9335 instruct rolI_rReg_i8(rRegI dst, immI8 lshift, immI8 rshift, rFlagsReg cr)
9336 %{
9337 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
9338 match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
9340 expand %{
9341 rolI_rReg_imm8(dst, lshift, cr);
9342 %}
9343 %}
9345 // Rotate Left by variable
// (x << s) | (x >>> (0 - s)): shift counts are masked to 5 bits by the
// hardware, so the negated count yields a rotate.
9346 instruct rolI_rReg_Var_C0(no_rcx_RegI dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
9347 %{
9348 match(Set dst (OrI (LShiftI dst shift) (URShiftI dst (SubI zero shift))));
9350 expand %{
9351 rolI_rReg_CL(dst, shift, cr);
9352 %}
9353 %}
9355 // Rotate Left by variable
// Same idiom written as (x << s) | (x >>> (32 - s)).
9356 instruct rolI_rReg_Var_C32(no_rcx_RegI dst, rcx_RegI shift, immI_32 c32, rFlagsReg cr)
9357 %{
9358 match(Set dst (OrI (LShiftI dst shift) (URShiftI dst (SubI c32 shift))));
9360 expand %{
9361 rolI_rReg_CL(dst, shift, cr);
9362 %}
9363 %}
9365 // ROR expand
9366 instruct rorI_rReg_imm1(rRegI dst, rFlagsReg cr)
9367 %{
9368 effect(USE_DEF dst, KILL cr);
9370 format %{ "rorl $dst" %}
9371 opcode(0xD1, 0x1); /* D1 /1 */
9372 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
9373 ins_pipe(ialu_reg);
9374 %}
9376 instruct rorI_rReg_imm8(rRegI dst, immI8 shift, rFlagsReg cr)
9377 %{
9378 effect(USE_DEF dst, USE shift, KILL cr);
9380 format %{ "rorl $dst, $shift" %}
9381 opcode(0xC1, 0x1); /* C1 /1 ib */
9382 ins_encode(reg_opc_imm(dst, shift));
9383 ins_pipe(ialu_reg);
9384 %}
9386 instruct rorI_rReg_CL(no_rcx_RegI dst, rcx_RegI shift, rFlagsReg cr)
9387 %{
9388 effect(USE_DEF dst, USE shift, KILL cr);
9390 format %{ "rorl $dst, $shift" %}
9391 opcode(0xD3, 0x1); /* D3 /1 */
9392 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
9393 ins_pipe(ialu_reg_reg);
9394 %}
9395 // end of ROR expand
9397 // Rotate Right by one
9398 instruct rorI_rReg_i1(rRegI dst, immI1 rshift, immI_M1 lshift, rFlagsReg cr)
9399 %{
9400 match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
9402 expand %{
9403 rorI_rReg_imm1(dst, cr);
9404 %}
9405 %}
9407 // Rotate Right by 8-bit immediate
// Same sum-to-32 predicate as the rotate-left form.
9408 instruct rorI_rReg_i8(rRegI dst, immI8 rshift, immI8 lshift, rFlagsReg cr)
9409 %{
9410 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
9411 match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
9413 expand %{
9414 rorI_rReg_imm8(dst, rshift, cr);
9415 %}
9416 %}
9418 // Rotate Right by variable
9419 instruct rorI_rReg_Var_C0(no_rcx_RegI dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
9420 %{
9421 match(Set dst (OrI (URShiftI dst shift) (LShiftI dst (SubI zero shift))));
9423 expand %{
9424 rorI_rReg_CL(dst, shift, cr);
9425 %}
9426 %}
9428 // Rotate Right by variable
9429 instruct rorI_rReg_Var_C32(no_rcx_RegI dst, rcx_RegI shift, immI_32 c32, rFlagsReg cr)
9430 %{
9431 match(Set dst (OrI (URShiftI dst shift) (LShiftI dst (SubI c32 shift))));
9433 expand %{
9434 rorI_rReg_CL(dst, shift, cr);
9435 %}
9436 %}
9438 // for long rotate
// 64-bit versions of the rotate expand/match pairs above: same opcodes
// with REX.W (the *_wide encoders), and the rotate predicates mask the
// shift-constant sum with 0x3f instead of 0x1f.
9439 // ROL expand
9440 instruct rolL_rReg_imm1(rRegL dst, rFlagsReg cr) %{
9441 effect(USE_DEF dst, KILL cr);
9443 format %{ "rolq $dst" %}
9444 opcode(0xD1, 0x0); /* Opcode D1 /0 */
9445 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
9446 ins_pipe(ialu_reg);
9447 %}
9449 instruct rolL_rReg_imm8(rRegL dst, immI8 shift, rFlagsReg cr) %{
9450 effect(USE_DEF dst, USE shift, KILL cr);
9452 format %{ "rolq $dst, $shift" %}
9453 opcode(0xC1, 0x0); /* Opcode C1 /0 ib */
9454 ins_encode( reg_opc_imm_wide(dst, shift) );
9455 ins_pipe(ialu_reg);
9456 %}
// Variable rotate: count in RCX, so dst must avoid RCX (no_rcx_RegL).
9458 instruct rolL_rReg_CL(no_rcx_RegL dst, rcx_RegI shift, rFlagsReg cr)
9459 %{
9460 effect(USE_DEF dst, USE shift, KILL cr);
9462 format %{ "rolq $dst, $shift" %}
9463 opcode(0xD3, 0x0); /* Opcode D3 /0 */
9464 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
9465 ins_pipe(ialu_reg_reg);
9466 %}
9467 // end of ROL expand
9469 // Rotate Left by one
9470 instruct rolL_rReg_i1(rRegL dst, immI1 lshift, immI_M1 rshift, rFlagsReg cr)
9471 %{
9472 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
9474 expand %{
9475 rolL_rReg_imm1(dst, cr);
9476 %}
9477 %}
9479 // Rotate Left by 8-bit immediate
// Predicate: shift constants must sum to 0 mod 64 for a true 64-bit rotate.
9480 instruct rolL_rReg_i8(rRegL dst, immI8 lshift, immI8 rshift, rFlagsReg cr)
9481 %{
9482 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
9483 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
9485 expand %{
9486 rolL_rReg_imm8(dst, lshift, cr);
9487 %}
9488 %}
9490 // Rotate Left by variable
9491 instruct rolL_rReg_Var_C0(no_rcx_RegL dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
9492 %{
9493 match(Set dst (OrL (LShiftL dst shift) (URShiftL dst (SubI zero shift))));
9495 expand %{
9496 rolL_rReg_CL(dst, shift, cr);
9497 %}
9498 %}
9500 // Rotate Left by variable
9501 instruct rolL_rReg_Var_C64(no_rcx_RegL dst, rcx_RegI shift, immI_64 c64, rFlagsReg cr)
9502 %{
9503 match(Set dst (OrL (LShiftL dst shift) (URShiftL dst (SubI c64 shift))));
9505 expand %{
9506 rolL_rReg_CL(dst, shift, cr);
9507 %}
9508 %}
9510 // ROR expand
9511 instruct rorL_rReg_imm1(rRegL dst, rFlagsReg cr)
9512 %{
9513 effect(USE_DEF dst, KILL cr);
9515 format %{ "rorq $dst" %}
9516 opcode(0xD1, 0x1); /* D1 /1 */
9517 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
9518 ins_pipe(ialu_reg);
9519 %}
9521 instruct rorL_rReg_imm8(rRegL dst, immI8 shift, rFlagsReg cr)
9522 %{
9523 effect(USE_DEF dst, USE shift, KILL cr);
9525 format %{ "rorq $dst, $shift" %}
9526 opcode(0xC1, 0x1); /* C1 /1 ib */
9527 ins_encode(reg_opc_imm_wide(dst, shift));
9528 ins_pipe(ialu_reg);
9529 %}
9531 instruct rorL_rReg_CL(no_rcx_RegL dst, rcx_RegI shift, rFlagsReg cr)
9532 %{
9533 effect(USE_DEF dst, USE shift, KILL cr);
9535 format %{ "rorq $dst, $shift" %}
9536 opcode(0xD3, 0x1); /* D3 /1 */
9537 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
9538 ins_pipe(ialu_reg_reg);
9539 %}
9540 // end of ROR expand
9542 // Rotate Right by one
9543 instruct rorL_rReg_i1(rRegL dst, immI1 rshift, immI_M1 lshift, rFlagsReg cr)
9544 %{
9545 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
9547 expand %{
9548 rorL_rReg_imm1(dst, cr);
9549 %}
9550 %}
9552 // Rotate Right by 8-bit immediate
9553 instruct rorL_rReg_i8(rRegL dst, immI8 rshift, immI8 lshift, rFlagsReg cr)
9554 %{
9555 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
9556 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
9558 expand %{
9559 rorL_rReg_imm8(dst, rshift, cr);
9560 %}
9561 %}
9563 // Rotate Right by variable
9564 instruct rorL_rReg_Var_C0(no_rcx_RegL dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
9565 %{
9566 match(Set dst (OrL (URShiftL dst shift) (LShiftL dst (SubI zero shift))));
9568 expand %{
9569 rorL_rReg_CL(dst, shift, cr);
9570 %}
9571 %}
9573 // Rotate Right by variable
9574 instruct rorL_rReg_Var_C64(no_rcx_RegL dst, rcx_RegI shift, immI_64 c64, rFlagsReg cr)
9575 %{
9576 match(Set dst (OrL (URShiftL dst shift) (LShiftL dst (SubI c64 shift))));
9578 expand %{
9579 rorL_rReg_CL(dst, shift, cr);
9580 %}
9581 %}
9583 // Logical Instructions
9585 // Integer Logical Instructions
9587 // And Instructions
// AND of 32-bit ints.  Variants: reg-reg, special-cased masks 0xFF/0xFFFF
// (emitted as zero-extending moves so no flags are needed), reg-imm,
// reg-mem, mem-reg, mem-imm.  All flag-producing forms KILL cr.
9588 // And Register with Register
9589 instruct andI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
9590 %{
9591 match(Set dst (AndI dst src));
9592 effect(KILL cr);
9594 format %{ "andl $dst, $src\t# int" %}
9595 opcode(0x23);
9596 ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
9597 ins_pipe(ialu_reg_reg);
9598 %}
9600 // And Register with Immediate 255
// x & 0xFF is strength-reduced to MOVZBL dst, dst — no flag clobber.
9601 instruct andI_rReg_imm255(rRegI dst, immI_255 src)
9602 %{
9603 match(Set dst (AndI dst src));
9605 format %{ "movzbl $dst, $dst\t# int & 0xFF" %}
9606 opcode(0x0F, 0xB6);
9607 ins_encode(REX_reg_breg(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
9608 ins_pipe(ialu_reg);
9609 %}
9611 // And Register with Immediate 255 and promote to long
// (long)(x & 0xFF): MOVZBL already zeroes the upper 32 bits, so the
// ConvI2L is free.
9612 instruct andI2L_rReg_imm255(rRegL dst, rRegI src, immI_255 mask)
9613 %{
9614 match(Set dst (ConvI2L (AndI src mask)));
9616 format %{ "movzbl $dst, $src\t# int & 0xFF -> long" %}
9617 opcode(0x0F, 0xB6);
9618 ins_encode(REX_reg_breg(dst, src), OpcP, OpcS, reg_reg(dst, src));
9619 ins_pipe(ialu_reg);
9620 %}
9622 // And Register with Immediate 65535
// x & 0xFFFF is strength-reduced to MOVZWL dst, dst.
9623 instruct andI_rReg_imm65535(rRegI dst, immI_65535 src)
9624 %{
9625 match(Set dst (AndI dst src));
9627 format %{ "movzwl $dst, $dst\t# int & 0xFFFF" %}
9628 opcode(0x0F, 0xB7);
9629 ins_encode(REX_reg_reg(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
9630 ins_pipe(ialu_reg);
9631 %}
9633 // And Register with Immediate 65535 and promote to long
9634 instruct andI2L_rReg_imm65535(rRegL dst, rRegI src, immI_65535 mask)
9635 %{
9636 match(Set dst (ConvI2L (AndI src mask)));
9638 format %{ "movzwl $dst, $src\t# int & 0xFFFF -> long" %}
9639 opcode(0x0F, 0xB7);
9640 ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
9641 ins_pipe(ialu_reg);
9642 %}
9644 // And Register with Immediate
// OpcSErm/Con8or32 pick the sign-extended 8-bit form when the
// immediate fits, otherwise the 32-bit form.
9645 instruct andI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
9646 %{
9647 match(Set dst (AndI dst src));
9648 effect(KILL cr);
9650 format %{ "andl $dst, $src\t# int" %}
9651 opcode(0x81, 0x04); /* Opcode 81 /4 */
9652 ins_encode(OpcSErm(dst, src), Con8or32(src));
9653 ins_pipe(ialu_reg);
9654 %}
9656 // And Register with Memory
9657 instruct andI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
9658 %{
9659 match(Set dst (AndI dst (LoadI src)));
9660 effect(KILL cr);
9662 ins_cost(125);
9663 format %{ "andl $dst, $src\t# int" %}
9664 opcode(0x23);
9665 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
9666 ins_pipe(ialu_reg_mem);
9667 %}
9669 // And Memory with Register
9670 instruct andI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
9671 %{
9672 match(Set dst (StoreI dst (AndI (LoadI dst) src)));
9673 effect(KILL cr);
9675 ins_cost(150);
9676 format %{ "andl $dst, $src\t# int" %}
9677 opcode(0x21); /* Opcode 21 /r */
9678 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
9679 ins_pipe(ialu_mem_reg);
9680 %}
9682 // And Memory with Immediate
9683 instruct andI_mem_imm(memory dst, immI src, rFlagsReg cr)
9684 %{
9685 match(Set dst (StoreI dst (AndI (LoadI dst) src)));
9686 effect(KILL cr);
9688 ins_cost(125);
9689 format %{ "andl $dst, $src\t# int" %}
9690 opcode(0x81, 0x4); /* Opcode 81 /4 id */
9691 ins_encode(REX_mem(dst), OpcSE(src),
9692 RM_opc_mem(secondary, dst), Con8or32(src));
9693 ins_pipe(ialu_mem_imm);
9694 %}
9696 // Or Instructions
// OR of 32-bit ints: reg-reg (0B /r), reg-imm (81 /1), reg-mem,
// mem-reg (09 /r), mem-imm.  All forms clobber the flags.
9697 // Or Register with Register
9698 instruct orI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
9699 %{
9700 match(Set dst (OrI dst src));
9701 effect(KILL cr);
9703 format %{ "orl $dst, $src\t# int" %}
9704 opcode(0x0B);
9705 ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
9706 ins_pipe(ialu_reg_reg);
9707 %}
9709 // Or Register with Immediate
9710 instruct orI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
9711 %{
9712 match(Set dst (OrI dst src));
9713 effect(KILL cr);
9715 format %{ "orl $dst, $src\t# int" %}
9716 opcode(0x81, 0x01); /* Opcode 81 /1 id */
9717 ins_encode(OpcSErm(dst, src), Con8or32(src));
9718 ins_pipe(ialu_reg);
9719 %}
9721 // Or Register with Memory
9722 instruct orI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
9723 %{
9724 match(Set dst (OrI dst (LoadI src)));
9725 effect(KILL cr);
9727 ins_cost(125);
9728 format %{ "orl $dst, $src\t# int" %}
9729 opcode(0x0B);
9730 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
9731 ins_pipe(ialu_reg_mem);
9732 %}
9734 // Or Memory with Register
9735 instruct orI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
9736 %{
9737 match(Set dst (StoreI dst (OrI (LoadI dst) src)));
9738 effect(KILL cr);
9740 ins_cost(150);
9741 format %{ "orl $dst, $src\t# int" %}
9742 opcode(0x09); /* Opcode 09 /r */
9743 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
9744 ins_pipe(ialu_mem_reg);
9745 %}
9747 // Or Memory with Immediate
9748 instruct orI_mem_imm(memory dst, immI src, rFlagsReg cr)
9749 %{
9750 match(Set dst (StoreI dst (OrI (LoadI dst) src)));
9751 effect(KILL cr);
9753 ins_cost(125);
9754 format %{ "orl $dst, $src\t# int" %}
9755 opcode(0x81, 0x1); /* Opcode 81 /1 id */
9756 ins_encode(REX_mem(dst), OpcSE(src),
9757 RM_opc_mem(secondary, dst), Con8or32(src));
9758 ins_pipe(ialu_mem_imm);
9759 %}
9761 // Xor Instructions
// XOR of 32-bit ints: reg-reg (33 /r), xor-with-(-1) as NOT (flag-free),
// reg-imm (81 /6), reg-mem, mem-reg (31 /r), mem-imm.
9762 // Xor Register with Register
9763 instruct xorI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
9764 %{
9765 match(Set dst (XorI dst src));
9766 effect(KILL cr);
9768 format %{ "xorl $dst, $src\t# int" %}
9769 opcode(0x33);
9770 ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
9771 ins_pipe(ialu_reg_reg);
9772 %}
9774 // Xor Register with Immediate -1
// x ^ -1 == ~x: emitted as NOTL, which leaves the flags untouched,
// hence no KILL cr here.  Uses the newer MacroAssembler-style encoder.
9775 instruct xorI_rReg_im1(rRegI dst, immI_M1 imm) %{
9776 match(Set dst (XorI dst imm));
9778 format %{ "not $dst" %}
9779 ins_encode %{
9780 __ notl($dst$$Register);
9781 %}
9782 ins_pipe(ialu_reg);
9783 %}
9785 // Xor Register with Immediate
9786 instruct xorI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
9787 %{
9788 match(Set dst (XorI dst src));
9789 effect(KILL cr);
9791 format %{ "xorl $dst, $src\t# int" %}
9792 opcode(0x81, 0x06); /* Opcode 81 /6 id */
9793 ins_encode(OpcSErm(dst, src), Con8or32(src));
9794 ins_pipe(ialu_reg);
9795 %}
9797 // Xor Register with Memory
9798 instruct xorI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
9799 %{
9800 match(Set dst (XorI dst (LoadI src)));
9801 effect(KILL cr);
9803 ins_cost(125);
9804 format %{ "xorl $dst, $src\t# int" %}
9805 opcode(0x33);
9806 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
9807 ins_pipe(ialu_reg_mem);
9808 %}
9810 // Xor Memory with Register
9811 instruct xorI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
9812 %{
9813 match(Set dst (StoreI dst (XorI (LoadI dst) src)));
9814 effect(KILL cr);
9816 ins_cost(150);
9817 format %{ "xorl $dst, $src\t# int" %}
9818 opcode(0x31); /* Opcode 31 /r */
9819 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
9820 ins_pipe(ialu_mem_reg);
9821 %}
9823 // Xor Memory with Immediate
9824 instruct xorI_mem_imm(memory dst, immI src, rFlagsReg cr)
9825 %{
9826 match(Set dst (StoreI dst (XorI (LoadI dst) src)));
9827 effect(KILL cr);
9829 ins_cost(125);
9830 format %{ "xorl $dst, $src\t# int" %}
9831 opcode(0x81, 0x6); /* Opcode 81 /6 id */
9832 ins_encode(REX_mem(dst), OpcSE(src),
9833 RM_opc_mem(secondary, dst), Con8or32(src));
9834 ins_pipe(ialu_mem_imm);
9835 %}
9838 // Long Logical Instructions
// 64-bit versions of the logical ops: same opcodes with REX.W via the
// *_wide encoders.  Immediates are immL32 (must fit in a sign-extended
// 32-bit field — x86-64 has no 64-bit immediate for these opcodes).
9840 // And Instructions
9841 // And Register with Register
9842 instruct andL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
9843 %{
9844 match(Set dst (AndL dst src));
9845 effect(KILL cr);
9847 format %{ "andq $dst, $src\t# long" %}
9848 opcode(0x23);
9849 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
9850 ins_pipe(ialu_reg_reg);
9851 %}
9853 // And Register with Immediate 255
// x & 0xFF strength-reduced to a zero-extending byte move; no flags used.
9854 instruct andL_rReg_imm255(rRegL dst, immL_255 src)
9855 %{
9856 match(Set dst (AndL dst src));
9858 format %{ "movzbq $dst, $dst\t# long & 0xFF" %}
9859 opcode(0x0F, 0xB6);
9860 ins_encode(REX_reg_reg_wide(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
9861 ins_pipe(ialu_reg);
9862 %}
9864 // And Register with Immediate 65535
9865 instruct andL_rReg_imm65535(rRegL dst, immL_65535 src)
9866 %{
9867 match(Set dst (AndL dst src));
9869 format %{ "movzwq $dst, $dst\t# long & 0xFFFF" %}
9870 opcode(0x0F, 0xB7);
9871 ins_encode(REX_reg_reg_wide(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
9872 ins_pipe(ialu_reg);
9873 %}
9875 // And Register with Immediate
9876 instruct andL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
9877 %{
9878 match(Set dst (AndL dst src));
9879 effect(KILL cr);
9881 format %{ "andq $dst, $src\t# long" %}
9882 opcode(0x81, 0x04); /* Opcode 81 /4 */
9883 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
9884 ins_pipe(ialu_reg);
9885 %}
9887 // And Register with Memory
9888 instruct andL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
9889 %{
9890 match(Set dst (AndL dst (LoadL src)));
9891 effect(KILL cr);
9893 ins_cost(125);
9894 format %{ "andq $dst, $src\t# long" %}
9895 opcode(0x23);
9896 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
9897 ins_pipe(ialu_reg_mem);
9898 %}
9900 // And Memory with Register
9901 instruct andL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
9902 %{
9903 match(Set dst (StoreL dst (AndL (LoadL dst) src)));
9904 effect(KILL cr);
9906 ins_cost(150);
9907 format %{ "andq $dst, $src\t# long" %}
9908 opcode(0x21); /* Opcode 21 /r */
9909 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
9910 ins_pipe(ialu_mem_reg);
9911 %}
9913 // And Memory with Immediate
9914 instruct andL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
9915 %{
9916 match(Set dst (StoreL dst (AndL (LoadL dst) src)));
9917 effect(KILL cr);
9919 ins_cost(125);
9920 format %{ "andq $dst, $src\t# long" %}
9921 opcode(0x81, 0x4); /* Opcode 81 /4 id */
9922 ins_encode(REX_mem_wide(dst), OpcSE(src),
9923 RM_opc_mem(secondary, dst), Con8or32(src));
9924 ins_pipe(ialu_mem_imm);
9925 %}
9927 // Or Instructions
// 64-bit OR: same shapes as the int forms, REX.W-wide encoders.
9928 // Or Register with Register
9929 instruct orL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
9930 %{
9931 match(Set dst (OrL dst src));
9932 effect(KILL cr);
9934 format %{ "orq $dst, $src\t# long" %}
9935 opcode(0x0B);
9936 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
9937 ins_pipe(ialu_reg_reg);
9938 %}
9940 // Use any_RegP to match R15 (TLS register) without spilling.
// CastP2X reinterprets a pointer as a long; any_RegP admits R15 so the
// thread register can feed this OR directly.
9941 instruct orL_rReg_castP2X(rRegL dst, any_RegP src, rFlagsReg cr) %{
9942 match(Set dst (OrL dst (CastP2X src)));
9943 effect(KILL cr);
9945 format %{ "orq $dst, $src\t# long" %}
9946 opcode(0x0B);
9947 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
9948 ins_pipe(ialu_reg_reg);
9949 %}
9952 // Or Register with Immediate
9953 instruct orL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
9954 %{
9955 match(Set dst (OrL dst src));
9956 effect(KILL cr);
9958 format %{ "orq $dst, $src\t# long" %}
9959 opcode(0x81, 0x01); /* Opcode 81 /1 id */
9960 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
9961 ins_pipe(ialu_reg);
9962 %}
9964 // Or Register with Memory
9965 instruct orL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
9966 %{
9967 match(Set dst (OrL dst (LoadL src)));
9968 effect(KILL cr);
9970 ins_cost(125);
9971 format %{ "orq $dst, $src\t# long" %}
9972 opcode(0x0B);
9973 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
9974 ins_pipe(ialu_reg_mem);
9975 %}
9977 // Or Memory with Register
9978 instruct orL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
9979 %{
9980 match(Set dst (StoreL dst (OrL (LoadL dst) src)));
9981 effect(KILL cr);
9983 ins_cost(150);
9984 format %{ "orq $dst, $src\t# long" %}
9985 opcode(0x09); /* Opcode 09 /r */
9986 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
9987 ins_pipe(ialu_mem_reg);
9988 %}
9990 // Or Memory with Immediate
9991 instruct orL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
9992 %{
9993 match(Set dst (StoreL dst (OrL (LoadL dst) src)));
9994 effect(KILL cr);
9996 ins_cost(125);
9997 format %{ "orq $dst, $src\t# long" %}
9998 opcode(0x81, 0x1); /* Opcode 81 /1 id */
9999 ins_encode(REX_mem_wide(dst), OpcSE(src),
10000 RM_opc_mem(secondary, dst), Con8or32(src));
10001 ins_pipe(ialu_mem_imm);
10002 %}
10004 // Xor Instructions
// 64-bit XOR: same shapes as the int forms, REX.W-wide encoders.
10005 // Xor Register with Register
10006 instruct xorL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
10007 %{
10008 match(Set dst (XorL dst src));
10009 effect(KILL cr);
10011 format %{ "xorq $dst, $src\t# long" %}
10012 opcode(0x33);
10013 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
10014 ins_pipe(ialu_reg_reg);
10015 %}
10017 // Xor Register with Immediate -1
// x ^ -1 == ~x: emitted as NOTQ, which does not touch the flags,
// hence no KILL cr.  Parallels xorI_rReg_im1.
10018 instruct xorL_rReg_im1(rRegL dst, immL_M1 imm) %{
10019 match(Set dst (XorL dst imm));
10021 format %{ "notq $dst" %}
10022 ins_encode %{
10023 __ notq($dst$$Register);
10024 %}
10025 ins_pipe(ialu_reg);
10026 %}
10028 // Xor Register with Immediate
10029 instruct xorL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
10030 %{
10031 match(Set dst (XorL dst src));
10032 effect(KILL cr);
10034 format %{ "xorq $dst, $src\t# long" %}
10035 opcode(0x81, 0x06); /* Opcode 81 /6 id */
10036 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
10037 ins_pipe(ialu_reg);
10038 %}
10040 // Xor Register with Memory
10041 instruct xorL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
10042 %{
10043 match(Set dst (XorL dst (LoadL src)));
10044 effect(KILL cr);
10046 ins_cost(125);
10047 format %{ "xorq $dst, $src\t# long" %}
10048 opcode(0x33);
10049 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
10050 ins_pipe(ialu_reg_mem);
10051 %}
10053 // Xor Memory with Register
10054 instruct xorL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
10055 %{
10056 match(Set dst (StoreL dst (XorL (LoadL dst) src)));
10057 effect(KILL cr);
10059 ins_cost(150);
10060 format %{ "xorq $dst, $src\t# long" %}
10061 opcode(0x31); /* Opcode 31 /r */
10062 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
10063 ins_pipe(ialu_mem_reg);
10064 %}
10066 // Xor Memory with Immediate
10067 instruct xorL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
10068 %{
10069 match(Set dst (StoreL dst (XorL (LoadL dst) src)));
10070 effect(KILL cr);
10072 ins_cost(125);
10073 format %{ "xorq $dst, $src\t# long" %}
10074 opcode(0x81, 0x6); /* Opcode 81 /6 id */
10075 ins_encode(REX_mem_wide(dst), OpcSE(src),
10076 RM_opc_mem(secondary, dst), Con8or32(src));
10077 ins_pipe(ialu_mem_imm);
10078 %}
10080 // Convert Int to Boolean
// Conv2B: dst = (src != 0) ? 1 : 0.  Implemented as test/setnz/movzbl;
// the movzbl clears the upper bits left undefined by setnz.
10081 instruct convI2B(rRegI dst, rRegI src, rFlagsReg cr)
10082 %{
10083 match(Set dst (Conv2B src));
10084 effect(KILL cr);
10086 format %{ "testl $src, $src\t# ci2b\n\t"
10087 "setnz $dst\n\t"
10088 "movzbl $dst, $dst" %}
10089 ins_encode(REX_reg_reg(src, src), opc_reg_reg(0x85, src, src), // testl
10090 setNZ_reg(dst),
10091 REX_reg_breg(dst, dst), // movzbl
10092 Opcode(0x0F), Opcode(0xB6), reg_reg(dst, dst));
10093 ins_pipe(pipe_slow); // XXX
10094 %}
10096 // Convert Pointer to Boolean
// Same sequence as convI2B but with a 64-bit (wide) test for pointers.
10097 instruct convP2B(rRegI dst, rRegP src, rFlagsReg cr)
10098 %{
10099 match(Set dst (Conv2B src));
10100 effect(KILL cr);
10102 format %{ "testq $src, $src\t# cp2b\n\t"
10103 "setnz $dst\n\t"
10104 "movzbl $dst, $dst" %}
10105 ins_encode(REX_reg_reg_wide(src, src), opc_reg_reg(0x85, src, src), // testq
10106 setNZ_reg(dst),
10107 REX_reg_breg(dst, dst), // movzbl
10108 Opcode(0x0F), Opcode(0xB6), reg_reg(dst, dst));
10109 ins_pipe(pipe_slow); // XXX
10110 %}
// CmpLTMask: dst = (p < q) ? -1 : 0.  Built from setlt/movzbl/negl.
10112 instruct cmpLTMask(rRegI dst, rRegI p, rRegI q, rFlagsReg cr)
10113 %{
10114 match(Set dst (CmpLTMask p q));
10115 effect(KILL cr);
10117 ins_cost(400); // XXX
10118 format %{ "cmpl $p, $q\t# cmpLTMask\n\t"
10119 "setlt $dst\n\t"
10120 "movzbl $dst, $dst\n\t"
10121 "negl $dst" %}
10122 ins_encode(REX_reg_reg(p, q), opc_reg_reg(0x3B, p, q), // cmpl
10123 setLT_reg(dst),
10124 REX_reg_breg(dst, dst), // movzbl
10125 Opcode(0x0F), Opcode(0xB6), reg_reg(dst, dst),
10126 neg_reg(dst));
10127 ins_pipe(pipe_slow);
10128 %}
// Special case: mask of (dst < 0) is just an arithmetic shift right by 31,
// smearing the sign bit across the whole register.
10130 instruct cmpLTMask0(rRegI dst, immI0 zero, rFlagsReg cr)
10131 %{
10132 match(Set dst (CmpLTMask dst zero));
10133 effect(KILL cr);
10135 ins_cost(100); // XXX
10136 format %{ "sarl $dst, #31\t# cmpLTMask0" %}
10137 opcode(0xC1, 0x7); /* C1 /7 ib */
10138 ins_encode(reg_opc_imm(dst, 0x1F));
10139 ins_pipe(ialu_reg);
10140 %}
// Fused p = (p - q) + ((p < q) ? y : 0): the subl sets CF when p < q, sbbl
// turns the borrow into an all-ones/zero mask in tmp without a branch.
10143 instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y, rRegI tmp, rFlagsReg cr)
10144 %{
10145 match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)));
10146 effect(TEMP tmp, KILL cr);
10148 ins_cost(400); // XXX
10149 format %{ "subl $p, $q\t# cadd_cmpLTMask1\n\t"
10150 "sbbl $tmp, $tmp\n\t"
10151 "andl $tmp, $y\n\t"
10152 "addl $p, $tmp" %}
10153 ins_encode %{
10154 Register Rp = $p$$Register;
10155 Register Rq = $q$$Register;
10156 Register Ry = $y$$Register;
10157 Register Rt = $tmp$$Register;
10158 __ subl(Rp, Rq);
10159 __ sbbl(Rt, Rt);
10160 __ andl(Rt, Ry);
10161 __ addl(Rp, Rt);
10162 %}
10163 ins_pipe(pipe_cmplt);
10164 %}
10166 //---------- FP Instructions------------------------------------------------
// Float compare setting condition codes.  ucomiss leaves an "unordered"
// result (PF set) for NaN; cmpfp_fixup rewrites the flags so NaN compares
// as "less than" (CF set), matching Java semantics.
10168 instruct cmpF_cc_reg(rFlagsRegU cr, regF src1, regF src2)
10169 %{
10170 match(Set cr (CmpF src1 src2));
10172 ins_cost(145);
10173 format %{ "ucomiss $src1, $src2\n\t"
10174 "jnp,s exit\n\t"
10175 "pushfq\t# saw NaN, set CF\n\t"
10176 "andq [rsp], #0xffffff2b\n\t"
10177 "popfq\n"
10178 "exit: nop\t# avoid branch to branch" %}
10179 opcode(0x0F, 0x2E);
10180 ins_encode(REX_reg_reg(src1, src2), OpcP, OpcS, reg_reg(src1, src2),
10181 cmpfp_fixup);
10182 ins_pipe(pipe_slow);
10183 %}
// CF variant: consumer only reads flags a plain ucomiss already produces
// correctly (rFlagsRegUCF), so the NaN fixup is skipped -- hence cheaper.
10185 instruct cmpF_cc_reg_CF(rFlagsRegUCF cr, regF src1, regF src2) %{
10186 match(Set cr (CmpF src1 src2));
10188 ins_cost(145);
10189 format %{ "ucomiss $src1, $src2" %}
10190 ins_encode %{
10191 __ ucomiss($src1$$XMMRegister, $src2$$XMMRegister);
10192 %}
10193 ins_pipe(pipe_slow);
10194 %}
10196 instruct cmpF_cc_mem(rFlagsRegU cr, regF src1, memory src2)
10197 %{
10198 match(Set cr (CmpF src1 (LoadF src2)));
10200 ins_cost(145);
10201 format %{ "ucomiss $src1, $src2\n\t"
10202 "jnp,s exit\n\t"
10203 "pushfq\t# saw NaN, set CF\n\t"
10204 "andq [rsp], #0xffffff2b\n\t"
10205 "popfq\n"
10206 "exit: nop\t# avoid branch to branch" %}
10207 opcode(0x0F, 0x2E);
10208 ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, reg_mem(src1, src2),
10209 cmpfp_fixup);
10210 ins_pipe(pipe_slow);
10211 %}
10213 instruct cmpF_cc_memCF(rFlagsRegUCF cr, regF src1, memory src2) %{
10214 match(Set cr (CmpF src1 (LoadF src2)));
10216 ins_cost(100);
10217 format %{ "ucomiss $src1, $src2" %}
10218 opcode(0x0F, 0x2E);
10219 ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, reg_mem(src1, src2));
10220 ins_pipe(pipe_slow);
10221 %}
// Constant operand is materialized from the constant table.
10223 instruct cmpF_cc_imm(rFlagsRegU cr, regF src, immF con) %{
10224 match(Set cr (CmpF src con));
10226 ins_cost(145);
10227 format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con\n\t"
10228 "jnp,s exit\n\t"
10229 "pushfq\t# saw NaN, set CF\n\t"
10230 "andq [rsp], #0xffffff2b\n\t"
10231 "popfq\n"
10232 "exit: nop\t# avoid branch to branch" %}
10233 ins_encode %{
10234 __ ucomiss($src$$XMMRegister, $constantaddress($con));
10235 emit_cmpfp_fixup(_masm);
10236 %}
10237 ins_pipe(pipe_slow);
10238 %}
10240 instruct cmpF_cc_immCF(rFlagsRegUCF cr, regF src, immF con) %{
10241 match(Set cr (CmpF src con));
10242 ins_cost(100);
10243 format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con" %}
10244 ins_encode %{
10245 __ ucomiss($src$$XMMRegister, $constantaddress($con));
10246 %}
10247 ins_pipe(pipe_slow);
10248 %}
// Double compare setting condition codes; 0x66 prefix selects ucomisd.
// cmpfp_fixup canonicalizes the flags when the compare is unordered (NaN).
10250 instruct cmpD_cc_reg(rFlagsRegU cr, regD src1, regD src2)
10251 %{
10252 match(Set cr (CmpD src1 src2));
10254 ins_cost(145);
10255 format %{ "ucomisd $src1, $src2\n\t"
10256 "jnp,s exit\n\t"
10257 "pushfq\t# saw NaN, set CF\n\t"
10258 "andq [rsp], #0xffffff2b\n\t"
10259 "popfq\n"
10260 "exit: nop\t# avoid branch to branch" %}
10261 opcode(0x66, 0x0F, 0x2E);
10262 ins_encode(OpcP, REX_reg_reg(src1, src2), OpcS, OpcT, reg_reg(src1, src2),
10263 cmpfp_fixup);
10264 ins_pipe(pipe_slow);
10265 %}
// CF-only variant of the double compare: the consumer (rFlagsRegUCF) reads
// only flags that a bare ucomisd produces correctly, so no NaN fixup is
// emitted.  Format string fixed: it previously read
// "ucomisd $src1, $src2 test" -- the stray "test" was a leftover debug word,
// inconsistent with cmpF_cc_reg_CF and cmpD_cc_memCF, and appeared verbatim
// in PrintAssembly/disassembly output.
10267 instruct cmpD_cc_reg_CF(rFlagsRegUCF cr, regD src1, regD src2) %{
10268 match(Set cr (CmpD src1 src2));
10270 ins_cost(100);
10271 format %{ "ucomisd $src1, $src2" %}
10272 ins_encode %{
10273 __ ucomisd($src1$$XMMRegister, $src2$$XMMRegister);
10274 %}
10275 ins_pipe(pipe_slow);
10276 %}
// Double compare against a memory operand, with NaN fixup.
10278 instruct cmpD_cc_mem(rFlagsRegU cr, regD src1, memory src2)
10279 %{
10280 match(Set cr (CmpD src1 (LoadD src2)));
10282 ins_cost(145);
10283 format %{ "ucomisd $src1, $src2\n\t"
10284 "jnp,s exit\n\t"
10285 "pushfq\t# saw NaN, set CF\n\t"
10286 "andq [rsp], #0xffffff2b\n\t"
10287 "popfq\n"
10288 "exit: nop\t# avoid branch to branch" %}
10289 opcode(0x66, 0x0F, 0x2E);
10290 ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, reg_mem(src1, src2),
10291 cmpfp_fixup);
10292 ins_pipe(pipe_slow);
10293 %}
// CF-only consumer: fixup not needed, cheaper cost.
10295 instruct cmpD_cc_memCF(rFlagsRegUCF cr, regD src1, memory src2) %{
10296 match(Set cr (CmpD src1 (LoadD src2)));
10298 ins_cost(100);
10299 format %{ "ucomisd $src1, $src2" %}
10300 opcode(0x66, 0x0F, 0x2E);
10301 ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, reg_mem(src1, src2));
10302 ins_pipe(pipe_slow);
10303 %}
// Constant operand loaded from the constant table.
10305 instruct cmpD_cc_imm(rFlagsRegU cr, regD src, immD con) %{
10306 match(Set cr (CmpD src con));
10308 ins_cost(145);
10309 format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con\n\t"
10310 "jnp,s exit\n\t"
10311 "pushfq\t# saw NaN, set CF\n\t"
10312 "andq [rsp], #0xffffff2b\n\t"
10313 "popfq\n"
10314 "exit: nop\t# avoid branch to branch" %}
10315 ins_encode %{
10316 __ ucomisd($src$$XMMRegister, $constantaddress($con));
10317 emit_cmpfp_fixup(_masm);
10318 %}
10319 ins_pipe(pipe_slow);
10320 %}
10322 instruct cmpD_cc_immCF(rFlagsRegUCF cr, regD src, immD con) %{
10323 match(Set cr (CmpD src con));
10324 ins_cost(100);
10325 format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con" %}
10326 ins_encode %{
10327 __ ucomisd($src$$XMMRegister, $constantaddress($con));
10328 %}
10329 ins_pipe(pipe_slow);
10330 %}
10332 // Compare into -1,0,1
// Three-way FP compare: dst = -1 if src1 < src2 (or unordered/NaN),
// 0 if equal, 1 if greater.  The jp branch routes the NaN case to the
// preloaded -1.
10333 instruct cmpF_reg(rRegI dst, regF src1, regF src2, rFlagsReg cr)
10334 %{
10335 match(Set dst (CmpF3 src1 src2));
10336 effect(KILL cr);
10338 ins_cost(275);
10339 format %{ "ucomiss $src1, $src2\n\t"
10340 "movl $dst, #-1\n\t"
10341 "jp,s done\n\t"
10342 "jb,s done\n\t"
10343 "setne $dst\n\t"
10344 "movzbl $dst, $dst\n"
10345 "done:" %}
10347 opcode(0x0F, 0x2E);
10348 ins_encode(REX_reg_reg(src1, src2), OpcP, OpcS, reg_reg(src1, src2),
10349 cmpfp3(dst));
10350 ins_pipe(pipe_slow);
10351 %}
10353 // Compare into -1,0,1
10354 instruct cmpF_mem(rRegI dst, regF src1, memory src2, rFlagsReg cr)
10355 %{
10356 match(Set dst (CmpF3 src1 (LoadF src2)));
10357 effect(KILL cr);
10359 ins_cost(275);
10360 format %{ "ucomiss $src1, $src2\n\t"
10361 "movl $dst, #-1\n\t"
10362 "jp,s done\n\t"
10363 "jb,s done\n\t"
10364 "setne $dst\n\t"
10365 "movzbl $dst, $dst\n"
10366 "done:" %}
10368 opcode(0x0F, 0x2E);
10369 ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, reg_mem(src1, src2),
10370 cmpfp3(dst));
10371 ins_pipe(pipe_slow);
10372 %}
10374 // Compare into -1,0,1
// Constant variant uses a MacroAssembler sequence instead of the cmpfp3
// encoding helper; the emitted code is the same shape as the format above.
10375 instruct cmpF_imm(rRegI dst, regF src, immF con, rFlagsReg cr) %{
10376 match(Set dst (CmpF3 src con));
10377 effect(KILL cr);
10379 ins_cost(275);
10380 format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con\n\t"
10381 "movl $dst, #-1\n\t"
10382 "jp,s done\n\t"
10383 "jb,s done\n\t"
10384 "setne $dst\n\t"
10385 "movzbl $dst, $dst\n"
10386 "done:" %}
10387 ins_encode %{
10388 Label L_done;
10389 Register Rdst = $dst$$Register;
10390 __ ucomiss($src$$XMMRegister, $constantaddress($con));
10391 __ movl(Rdst, -1);
10392 __ jcc(Assembler::parity, L_done);
10393 __ jcc(Assembler::below, L_done);
10394 __ setb(Assembler::notEqual, Rdst);
10395 __ movzbl(Rdst, Rdst);
10396 __ bind(L_done);
10397 %}
10398 ins_pipe(pipe_slow);
10399 %}
10401 // Compare into -1,0,1
10402 instruct cmpD_reg(rRegI dst, regD src1, regD src2, rFlagsReg cr)
10403 %{
10404 match(Set dst (CmpD3 src1 src2));
10405 effect(KILL cr);
10407 ins_cost(275);
10408 format %{ "ucomisd $src1, $src2\n\t"
10409 "movl $dst, #-1\n\t"
10410 "jp,s done\n\t"
10411 "jb,s done\n\t"
10412 "setne $dst\n\t"
10413 "movzbl $dst, $dst\n"
10414 "done:" %}
10416 opcode(0x66, 0x0F, 0x2E);
10417 ins_encode(OpcP, REX_reg_reg(src1, src2), OpcS, OpcT, reg_reg(src1, src2),
10418 cmpfp3(dst));
10419 ins_pipe(pipe_slow);
10420 %}
10422 // Compare into -1,0,1
10423 instruct cmpD_mem(rRegI dst, regD src1, memory src2, rFlagsReg cr)
10424 %{
10425 match(Set dst (CmpD3 src1 (LoadD src2)));
10426 effect(KILL cr);
10428 ins_cost(275);
10429 format %{ "ucomisd $src1, $src2\n\t"
10430 "movl $dst, #-1\n\t"
10431 "jp,s done\n\t"
10432 "jb,s done\n\t"
10433 "setne $dst\n\t"
10434 "movzbl $dst, $dst\n"
10435 "done:" %}
10437 opcode(0x66, 0x0F, 0x2E);
10438 ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, reg_mem(src1, src2),
10439 cmpfp3(dst));
10440 ins_pipe(pipe_slow);
10441 %}
10443 // Compare into -1,0,1
10444 instruct cmpD_imm(rRegI dst, regD src, immD con, rFlagsReg cr) %{
10445 match(Set dst (CmpD3 src con));
10446 effect(KILL cr);
10448 ins_cost(275);
10449 format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con\n\t"
10450 "movl $dst, #-1\n\t"
10451 "jp,s done\n\t"
10452 "jb,s done\n\t"
10453 "setne $dst\n\t"
10454 "movzbl $dst, $dst\n"
10455 "done:" %}
10456 ins_encode %{
10457 Register Rdst = $dst$$Register;
10458 Label L_done;
10459 __ ucomisd($src$$XMMRegister, $constantaddress($con));
10460 __ movl(Rdst, -1);
10461 __ jcc(Assembler::parity, L_done);
10462 __ jcc(Assembler::below, L_done);
10463 __ setb(Assembler::notEqual, Rdst);
10464 __ movzbl(Rdst, Rdst);
10465 __ bind(L_done);
10466 %}
10467 ins_pipe(pipe_slow);
10468 %}
// Scalar FP add: addss (float, 0xF3 prefix) / addsd (double, 0xF2 prefix),
// each in register, memory and constant-table flavors.
10470 instruct addF_reg(regF dst, regF src)
10471 %{
10472 match(Set dst (AddF dst src));
10474 format %{ "addss $dst, $src" %}
10475 ins_cost(150); // XXX
10476 opcode(0xF3, 0x0F, 0x58);
10477 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10478 ins_pipe(pipe_slow);
10479 %}
10481 instruct addF_mem(regF dst, memory src)
10482 %{
10483 match(Set dst (AddF dst (LoadF src)));
10485 format %{ "addss $dst, $src" %}
10486 ins_cost(150); // XXX
10487 opcode(0xF3, 0x0F, 0x58);
10488 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10489 ins_pipe(pipe_slow);
10490 %}
10492 instruct addF_imm(regF dst, immF con) %{
10493 match(Set dst (AddF dst con));
10494 format %{ "addss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
10495 ins_cost(150); // XXX
10496 ins_encode %{
10497 __ addss($dst$$XMMRegister, $constantaddress($con));
10498 %}
10499 ins_pipe(pipe_slow);
10500 %}
10502 instruct addD_reg(regD dst, regD src)
10503 %{
10504 match(Set dst (AddD dst src));
10506 format %{ "addsd $dst, $src" %}
10507 ins_cost(150); // XXX
10508 opcode(0xF2, 0x0F, 0x58);
10509 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10510 ins_pipe(pipe_slow);
10511 %}
10513 instruct addD_mem(regD dst, memory src)
10514 %{
10515 match(Set dst (AddD dst (LoadD src)));
10517 format %{ "addsd $dst, $src" %}
10518 ins_cost(150); // XXX
10519 opcode(0xF2, 0x0F, 0x58);
10520 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10521 ins_pipe(pipe_slow);
10522 %}
10524 instruct addD_imm(regD dst, immD con) %{
10525 match(Set dst (AddD dst con));
10526 format %{ "addsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
10527 ins_cost(150); // XXX
10528 ins_encode %{
10529 __ addsd($dst$$XMMRegister, $constantaddress($con));
10530 %}
10531 ins_pipe(pipe_slow);
10532 %}
// Scalar FP subtract: subss / subsd (opcode 0x5C), same three flavors
// as the add family above.
10534 instruct subF_reg(regF dst, regF src)
10535 %{
10536 match(Set dst (SubF dst src));
10538 format %{ "subss $dst, $src" %}
10539 ins_cost(150); // XXX
10540 opcode(0xF3, 0x0F, 0x5C);
10541 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10542 ins_pipe(pipe_slow);
10543 %}
10545 instruct subF_mem(regF dst, memory src)
10546 %{
10547 match(Set dst (SubF dst (LoadF src)));
10549 format %{ "subss $dst, $src" %}
10550 ins_cost(150); // XXX
10551 opcode(0xF3, 0x0F, 0x5C);
10552 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10553 ins_pipe(pipe_slow);
10554 %}
10556 instruct subF_imm(regF dst, immF con) %{
10557 match(Set dst (SubF dst con));
10558 format %{ "subss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
10559 ins_cost(150); // XXX
10560 ins_encode %{
10561 __ subss($dst$$XMMRegister, $constantaddress($con));
10562 %}
10563 ins_pipe(pipe_slow);
10564 %}
10566 instruct subD_reg(regD dst, regD src)
10567 %{
10568 match(Set dst (SubD dst src));
10570 format %{ "subsd $dst, $src" %}
10571 ins_cost(150); // XXX
10572 opcode(0xF2, 0x0F, 0x5C);
10573 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10574 ins_pipe(pipe_slow);
10575 %}
10577 instruct subD_mem(regD dst, memory src)
10578 %{
10579 match(Set dst (SubD dst (LoadD src)));
10581 format %{ "subsd $dst, $src" %}
10582 ins_cost(150); // XXX
10583 opcode(0xF2, 0x0F, 0x5C);
10584 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10585 ins_pipe(pipe_slow);
10586 %}
10588 instruct subD_imm(regD dst, immD con) %{
10589 match(Set dst (SubD dst con));
10590 format %{ "subsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
10591 ins_cost(150); // XXX
10592 ins_encode %{
10593 __ subsd($dst$$XMMRegister, $constantaddress($con));
10594 %}
10595 ins_pipe(pipe_slow);
10596 %}
// Scalar FP multiply: mulss / mulsd (opcode 0x59), reg/mem/constant flavors.
10598 instruct mulF_reg(regF dst, regF src)
10599 %{
10600 match(Set dst (MulF dst src));
10602 format %{ "mulss $dst, $src" %}
10603 ins_cost(150); // XXX
10604 opcode(0xF3, 0x0F, 0x59);
10605 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10606 ins_pipe(pipe_slow);
10607 %}
10609 instruct mulF_mem(regF dst, memory src)
10610 %{
10611 match(Set dst (MulF dst (LoadF src)));
10613 format %{ "mulss $dst, $src" %}
10614 ins_cost(150); // XXX
10615 opcode(0xF3, 0x0F, 0x59);
10616 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10617 ins_pipe(pipe_slow);
10618 %}
10620 instruct mulF_imm(regF dst, immF con) %{
10621 match(Set dst (MulF dst con));
10622 format %{ "mulss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
10623 ins_cost(150); // XXX
10624 ins_encode %{
10625 __ mulss($dst$$XMMRegister, $constantaddress($con));
10626 %}
10627 ins_pipe(pipe_slow);
10628 %}
10630 instruct mulD_reg(regD dst, regD src)
10631 %{
10632 match(Set dst (MulD dst src));
10634 format %{ "mulsd $dst, $src" %}
10635 ins_cost(150); // XXX
10636 opcode(0xF2, 0x0F, 0x59);
10637 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10638 ins_pipe(pipe_slow);
10639 %}
10641 instruct mulD_mem(regD dst, memory src)
10642 %{
10643 match(Set dst (MulD dst (LoadD src)));
10645 format %{ "mulsd $dst, $src" %}
10646 ins_cost(150); // XXX
10647 opcode(0xF2, 0x0F, 0x59);
10648 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10649 ins_pipe(pipe_slow);
10650 %}
10652 instruct mulD_imm(regD dst, immD con) %{
10653 match(Set dst (MulD dst con));
10654 format %{ "mulsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
10655 ins_cost(150); // XXX
10656 ins_encode %{
10657 __ mulsd($dst$$XMMRegister, $constantaddress($con));
10658 %}
10659 ins_pipe(pipe_slow);
10660 %}
// Scalar FP divide: divss / divsd (opcode 0x5E), reg/mem/constant flavors.
10662 instruct divF_reg(regF dst, regF src)
10663 %{
10664 match(Set dst (DivF dst src));
10666 format %{ "divss $dst, $src" %}
10667 ins_cost(150); // XXX
10668 opcode(0xF3, 0x0F, 0x5E);
10669 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10670 ins_pipe(pipe_slow);
10671 %}
10673 instruct divF_mem(regF dst, memory src)
10674 %{
10675 match(Set dst (DivF dst (LoadF src)));
10677 format %{ "divss $dst, $src" %}
10678 ins_cost(150); // XXX
10679 opcode(0xF3, 0x0F, 0x5E);
10680 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10681 ins_pipe(pipe_slow);
10682 %}
10684 instruct divF_imm(regF dst, immF con) %{
10685 match(Set dst (DivF dst con));
10686 format %{ "divss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
10687 ins_cost(150); // XXX
10688 ins_encode %{
10689 __ divss($dst$$XMMRegister, $constantaddress($con));
10690 %}
10691 ins_pipe(pipe_slow);
10692 %}
10694 instruct divD_reg(regD dst, regD src)
10695 %{
10696 match(Set dst (DivD dst src));
10698 format %{ "divsd $dst, $src" %}
10699 ins_cost(150); // XXX
10700 opcode(0xF2, 0x0F, 0x5E);
10701 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10702 ins_pipe(pipe_slow);
10703 %}
10705 instruct divD_mem(regD dst, memory src)
10706 %{
10707 match(Set dst (DivD dst (LoadD src)));
10709 format %{ "divsd $dst, $src" %}
10710 ins_cost(150); // XXX
10711 opcode(0xF2, 0x0F, 0x5E);
10712 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10713 ins_pipe(pipe_slow);
10714 %}
10716 instruct divD_imm(regD dst, immD con) %{
10717 match(Set dst (DivD dst con));
10718 format %{ "divsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
10719 ins_cost(150); // XXX
10720 ins_encode %{
10721 __ divsd($dst$$XMMRegister, $constantaddress($con));
10722 %}
10723 ins_pipe(pipe_slow);
10724 %}
// Float sqrt: matched as ConvD2F(SqrtD(ConvF2D(x))) because the ideal graph
// only has a double SqrtD node; the round-trip through double folds into a
// single sqrtss.
10726 instruct sqrtF_reg(regF dst, regF src)
10727 %{
10728 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
10730 format %{ "sqrtss $dst, $src" %}
10731 ins_cost(150); // XXX
10732 opcode(0xF3, 0x0F, 0x51);
10733 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10734 ins_pipe(pipe_slow);
10735 %}
10737 instruct sqrtF_mem(regF dst, memory src)
10738 %{
10739 match(Set dst (ConvD2F (SqrtD (ConvF2D (LoadF src)))));
10741 format %{ "sqrtss $dst, $src" %}
10742 ins_cost(150); // XXX
10743 opcode(0xF3, 0x0F, 0x51);
10744 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10745 ins_pipe(pipe_slow);
10746 %}
10748 instruct sqrtF_imm(regF dst, immF con) %{
10749 match(Set dst (ConvD2F (SqrtD (ConvF2D con))));
10750 format %{ "sqrtss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
10751 ins_cost(150); // XXX
10752 ins_encode %{
10753 __ sqrtss($dst$$XMMRegister, $constantaddress($con));
10754 %}
10755 ins_pipe(pipe_slow);
10756 %}
// Double sqrt: direct SqrtD match, sqrtsd.
10758 instruct sqrtD_reg(regD dst, regD src)
10759 %{
10760 match(Set dst (SqrtD src));
10762 format %{ "sqrtsd $dst, $src" %}
10763 ins_cost(150); // XXX
10764 opcode(0xF2, 0x0F, 0x51);
10765 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10766 ins_pipe(pipe_slow);
10767 %}
10769 instruct sqrtD_mem(regD dst, memory src)
10770 %{
10771 match(Set dst (SqrtD (LoadD src)));
10773 format %{ "sqrtsd $dst, $src" %}
10774 ins_cost(150); // XXX
10775 opcode(0xF2, 0x0F, 0x51);
10776 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10777 ins_pipe(pipe_slow);
10778 %}
10780 instruct sqrtD_imm(regD dst, immD con) %{
10781 match(Set dst (SqrtD con));
10782 format %{ "sqrtsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
10783 ins_cost(150); // XXX
10784 ins_encode %{
10785 __ sqrtsd($dst$$XMMRegister, $constantaddress($con));
10786 %}
10787 ins_pipe(pipe_slow);
10788 %}
// FP abs/neg implemented by masking or flipping the IEEE-754 sign bit with
// andps/andpd/xorps/xorpd against a memory constant (see format strings).
10790 instruct absF_reg(regF dst)
10791 %{
10792 match(Set dst (AbsF dst));
10794 format %{ "andps $dst, [0x7fffffff]\t# abs float by sign masking" %}
10795 ins_encode(absF_encoding(dst));
10796 ins_pipe(pipe_slow);
10797 %}
10799 instruct absD_reg(regD dst)
10800 %{
10801 match(Set dst (AbsD dst));
10803 format %{ "andpd $dst, [0x7fffffffffffffff]\t"
10804 "# abs double by sign masking" %}
10805 ins_encode(absD_encoding(dst));
10806 ins_pipe(pipe_slow);
10807 %}
10809 instruct negF_reg(regF dst)
10810 %{
10811 match(Set dst (NegF dst));
10813 format %{ "xorps $dst, [0x80000000]\t# neg float by sign flipping" %}
10814 ins_encode(negF_encoding(dst));
10815 ins_pipe(pipe_slow);
10816 %}
10818 instruct negD_reg(regD dst)
10819 %{
10820 match(Set dst (NegD dst));
10822 format %{ "xorpd $dst, [0x8000000000000000]\t"
10823 "# neg double by sign flipping" %}
10824 ins_encode(negD_encoding(dst));
10825 ins_pipe(pipe_slow);
10826 %}
10828 // -----------Trig and Trancendental Instructions------------------------------
// These route the XMM operand through the x87 FPU stack (Push_SrcXD /
// Push_ResultXD) because the transcendental opcodes exist only in x87.
10829 instruct cosD_reg(regD dst) %{
10830 match(Set dst (CosD dst));
10832 format %{ "dcos $dst\n\t" %}
10833 opcode(0xD9, 0xFF);
10834 ins_encode( Push_SrcXD(dst), OpcP, OpcS, Push_ResultXD(dst) );
10835 ins_pipe( pipe_slow );
10836 %}
10838 instruct sinD_reg(regD dst) %{
10839 match(Set dst (SinD dst));
10841 format %{ "dsin $dst\n\t" %}
10842 opcode(0xD9, 0xFE);
10843 ins_encode( Push_SrcXD(dst), OpcP, OpcS, Push_ResultXD(dst) );
10844 ins_pipe( pipe_slow );
10845 %}
// fptan pushes both tan(x) and 1.0; the fstp st discards the extra 1.0.
10847 instruct tanD_reg(regD dst) %{
10848 match(Set dst (TanD dst));
10850 format %{ "dtan $dst\n\t" %}
10851 ins_encode( Push_SrcXD(dst),
10852 Opcode(0xD9), Opcode(0xF2), //fptan
10853 Opcode(0xDD), Opcode(0xD8), //fstp st
10854 Push_ResultXD(dst) );
10855 ins_pipe( pipe_slow );
10856 %}
10858 instruct log10D_reg(regD dst) %{
10859 // The source and result Double operands in XMM registers
10860 match(Set dst (Log10D dst));
10861 // fldlg2 ; push log_10(2) on the FPU stack; full 80-bit number
10862 // fyl2x ; compute log_10(2) * log_2(x)
10863 format %{ "fldlg2\t\t\t#Log10\n\t"
10864 "fyl2x\t\t\t# Q=Log10*Log_2(x)\n\t"
10865 %}
10866 ins_encode(Opcode(0xD9), Opcode(0xEC), // fldlg2
10867 Push_SrcXD(dst),
10868 Opcode(0xD9), Opcode(0xF1), // fyl2x
10869 Push_ResultXD(dst));
10871 ins_pipe( pipe_slow );
10872 %}
10874 instruct logD_reg(regD dst) %{
10875 // The source and result Double operands in XMM registers
10876 match(Set dst (LogD dst));
10877 // fldln2 ; push log_e(2) on the FPU stack; full 80-bit number
10878 // fyl2x ; compute log_e(2) * log_2(x)
10879 format %{ "fldln2\t\t\t#Log_e\n\t"
10880 "fyl2x\t\t\t# Q=Log_e*Log_2(x)\n\t"
10881 %}
10882 ins_encode( Opcode(0xD9), Opcode(0xED), // fldln2
10883 Push_SrcXD(dst),
10884 Opcode(0xD9), Opcode(0xF1), // fyl2x
10885 Push_ResultXD(dst));
10886 ins_pipe( pipe_slow );
10887 %}
10891 //----------Arithmetic Conversion Instructions---------------------------------
// SSE arithmetic already rounds to the declared precision, so RoundFloat /
// RoundDouble are no-ops on this platform: zero cost, empty encoding.
10893 instruct roundFloat_nop(regF dst)
10894 %{
10895 match(Set dst (RoundFloat dst));
10897 ins_cost(0);
10898 ins_encode();
10899 ins_pipe(empty);
10900 %}
10902 instruct roundDouble_nop(regD dst)
10903 %{
10904 match(Set dst (RoundDouble dst));
10906 ins_cost(0);
10907 ins_encode();
10908 ins_pipe(empty);
10909 %}
// Float <-> double conversion: cvtss2sd / cvtsd2ss (opcode 0x5A).
10911 instruct convF2D_reg_reg(regD dst, regF src)
10912 %{
10913 match(Set dst (ConvF2D src));
10915 format %{ "cvtss2sd $dst, $src" %}
10916 opcode(0xF3, 0x0F, 0x5A);
10917 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10918 ins_pipe(pipe_slow); // XXX
10919 %}
10921 instruct convF2D_reg_mem(regD dst, memory src)
10922 %{
10923 match(Set dst (ConvF2D (LoadF src)));
10925 format %{ "cvtss2sd $dst, $src" %}
10926 opcode(0xF3, 0x0F, 0x5A);
10927 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10928 ins_pipe(pipe_slow); // XXX
10929 %}
10931 instruct convD2F_reg_reg(regF dst, regD src)
10932 %{
10933 match(Set dst (ConvD2F src));
10935 format %{ "cvtsd2ss $dst, $src" %}
10936 opcode(0xF2, 0x0F, 0x5A);
10937 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10938 ins_pipe(pipe_slow); // XXX
10939 %}
10941 instruct convD2F_reg_mem(regF dst, memory src)
10942 %{
10943 match(Set dst (ConvD2F (LoadD src)));
10945 format %{ "cvtsd2ss $dst, $src" %}
10946 opcode(0xF2, 0x0F, 0x5A);
10947 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10948 ins_pipe(pipe_slow); // XXX
10949 %}
10951 // XXX do mem variants
// FP-to-integer truncating conversions.  cvttss2si/cvttsd2si produce the
// "integer indefinite" value (0x80000000 / 0x8000...0) on overflow or NaN;
// the compare + fixup stub call repairs those cases to Java semantics.
10952 instruct convF2I_reg_reg(rRegI dst, regF src, rFlagsReg cr)
10953 %{
10954 match(Set dst (ConvF2I src));
10955 effect(KILL cr);
10957 format %{ "cvttss2sil $dst, $src\t# f2i\n\t"
10958 "cmpl $dst, #0x80000000\n\t"
10959 "jne,s done\n\t"
10960 "subq rsp, #8\n\t"
10961 "movss [rsp], $src\n\t"
10962 "call f2i_fixup\n\t"
10963 "popq $dst\n"
10964 "done: "%}
10965 opcode(0xF3, 0x0F, 0x2C);
10966 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src),
10967 f2i_fixup(dst, src));
10968 ins_pipe(pipe_slow);
10969 %}
10971 instruct convF2L_reg_reg(rRegL dst, regF src, rFlagsReg cr)
10972 %{
10973 match(Set dst (ConvF2L src));
10974 effect(KILL cr);
10976 format %{ "cvttss2siq $dst, $src\t# f2l\n\t"
10977 "cmpq $dst, [0x8000000000000000]\n\t"
10978 "jne,s done\n\t"
10979 "subq rsp, #8\n\t"
10980 "movss [rsp], $src\n\t"
10981 "call f2l_fixup\n\t"
10982 "popq $dst\n"
10983 "done: "%}
10984 opcode(0xF3, 0x0F, 0x2C);
10985 ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src),
10986 f2l_fixup(dst, src));
10987 ins_pipe(pipe_slow);
10988 %}
10990 instruct convD2I_reg_reg(rRegI dst, regD src, rFlagsReg cr)
10991 %{
10992 match(Set dst (ConvD2I src));
10993 effect(KILL cr);
10995 format %{ "cvttsd2sil $dst, $src\t# d2i\n\t"
10996 "cmpl $dst, #0x80000000\n\t"
10997 "jne,s done\n\t"
10998 "subq rsp, #8\n\t"
10999 "movsd [rsp], $src\n\t"
11000 "call d2i_fixup\n\t"
11001 "popq $dst\n"
11002 "done: "%}
11003 opcode(0xF2, 0x0F, 0x2C);
11004 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src),
11005 d2i_fixup(dst, src));
11006 ins_pipe(pipe_slow);
11007 %}
11009 instruct convD2L_reg_reg(rRegL dst, regD src, rFlagsReg cr)
11010 %{
11011 match(Set dst (ConvD2L src));
11012 effect(KILL cr);
11014 format %{ "cvttsd2siq $dst, $src\t# d2l\n\t"
11015 "cmpq $dst, [0x8000000000000000]\n\t"
11016 "jne,s done\n\t"
11017 "subq rsp, #8\n\t"
11018 "movsd [rsp], $src\n\t"
11019 "call d2l_fixup\n\t"
11020 "popq $dst\n"
11021 "done: "%}
11022 opcode(0xF2, 0x0F, 0x2C);
11023 ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src),
11024 d2l_fixup(dst, src));
11025 ins_pipe(pipe_slow);
11026 %}
// Int-to-FP conversions.  Plain cvtsi2ss/cvtsi2sd forms are guarded by
// !UseXmmI2F / !UseXmmI2D; the convX* variants below use movd + packed
// cvtdq2ps/cvtdq2pd when the corresponding UseXmm* flag is set.
11028 instruct convI2F_reg_reg(regF dst, rRegI src)
11029 %{
11030 predicate(!UseXmmI2F);
11031 match(Set dst (ConvI2F src));
11033 format %{ "cvtsi2ssl $dst, $src\t# i2f" %}
11034 opcode(0xF3, 0x0F, 0x2A);
11035 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
11036 ins_pipe(pipe_slow); // XXX
11037 %}
11039 instruct convI2F_reg_mem(regF dst, memory src)
11040 %{
11041 match(Set dst (ConvI2F (LoadI src)));
11043 format %{ "cvtsi2ssl $dst, $src\t# i2f" %}
11044 opcode(0xF3, 0x0F, 0x2A);
11045 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
11046 ins_pipe(pipe_slow); // XXX
11047 %}
11049 instruct convI2D_reg_reg(regD dst, rRegI src)
11050 %{
11051 predicate(!UseXmmI2D);
11052 match(Set dst (ConvI2D src));
11054 format %{ "cvtsi2sdl $dst, $src\t# i2d" %}
11055 opcode(0xF2, 0x0F, 0x2A);
11056 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
11057 ins_pipe(pipe_slow); // XXX
11058 %}
11060 instruct convI2D_reg_mem(regD dst, memory src)
11061 %{
11062 match(Set dst (ConvI2D (LoadI src)));
11064 format %{ "cvtsi2sdl $dst, $src\t# i2d" %}
11065 opcode(0xF2, 0x0F, 0x2A);
11066 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
11067 ins_pipe(pipe_slow); // XXX
11068 %}
11070 instruct convXI2F_reg(regF dst, rRegI src)
11071 %{
11072 predicate(UseXmmI2F);
11073 match(Set dst (ConvI2F src));
11075 format %{ "movdl $dst, $src\n\t"
11076 "cvtdq2psl $dst, $dst\t# i2f" %}
11077 ins_encode %{
11078 __ movdl($dst$$XMMRegister, $src$$Register);
11079 __ cvtdq2ps($dst$$XMMRegister, $dst$$XMMRegister);
11080 %}
11081 ins_pipe(pipe_slow); // XXX
11082 %}
11084 instruct convXI2D_reg(regD dst, rRegI src)
11085 %{
11086 predicate(UseXmmI2D);
11087 match(Set dst (ConvI2D src));
11089 format %{ "movdl $dst, $src\n\t"
11090 "cvtdq2pdl $dst, $dst\t# i2d" %}
11091 ins_encode %{
11092 __ movdl($dst$$XMMRegister, $src$$Register);
11093 __ cvtdq2pd($dst$$XMMRegister, $dst$$XMMRegister);
11094 %}
11095 ins_pipe(pipe_slow); // XXX
11096 %}
// Long-to-FP conversions: cvtsi2ssq / cvtsi2sdq (REX.W wide forms).
11098 instruct convL2F_reg_reg(regF dst, rRegL src)
11099 %{
11100 match(Set dst (ConvL2F src));
11102 format %{ "cvtsi2ssq $dst, $src\t# l2f" %}
11103 opcode(0xF3, 0x0F, 0x2A);
11104 ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src));
11105 ins_pipe(pipe_slow); // XXX
11106 %}
11108 instruct convL2F_reg_mem(regF dst, memory src)
11109 %{
11110 match(Set dst (ConvL2F (LoadL src)));
11112 format %{ "cvtsi2ssq $dst, $src\t# l2f" %}
11113 opcode(0xF3, 0x0F, 0x2A);
11114 ins_encode(OpcP, REX_reg_mem_wide(dst, src), OpcS, OpcT, reg_mem(dst, src));
11115 ins_pipe(pipe_slow); // XXX
11116 %}
11118 instruct convL2D_reg_reg(regD dst, rRegL src)
11119 %{
11120 match(Set dst (ConvL2D src));
11122 format %{ "cvtsi2sdq $dst, $src\t# l2d" %}
11123 opcode(0xF2, 0x0F, 0x2A);
11124 ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src));
11125 ins_pipe(pipe_slow); // XXX
11126 %}
11128 instruct convL2D_reg_mem(regD dst, memory src)
11129 %{
11130 match(Set dst (ConvL2D (LoadL src)));
11132 format %{ "cvtsi2sdq $dst, $src\t# l2d" %}
11133 opcode(0xF2, 0x0F, 0x2A);
11134 ins_encode(OpcP, REX_reg_mem_wide(dst, src), OpcS, OpcT, reg_mem(dst, src));
11135 ins_pipe(pipe_slow); // XXX
11136 %}
11138 instruct convI2L_reg_reg(rRegL dst, rRegI src)
11139 %{
11140 match(Set dst (ConvI2L src));
11142 ins_cost(125);
11143 format %{ "movslq $dst, $src\t# i2l" %}
11144 ins_encode %{
11145 __ movslq($dst$$Register, $src$$Register);
11146 %}
11147 ins_pipe(ialu_reg_reg);
11148 %}
11150 // instruct convI2L_reg_reg_foo(rRegL dst, rRegI src)
11151 // %{
11152 // match(Set dst (ConvI2L src));
11153 // // predicate(_kids[0]->_leaf->as_Type()->type()->is_int()->_lo >= 0 &&
11154 // // _kids[0]->_leaf->as_Type()->type()->is_int()->_hi >= 0);
11155 // predicate(((const TypeNode*) n)->type()->is_long()->_hi ==
11156 // (unsigned int) ((const TypeNode*) n)->type()->is_long()->_hi &&
11157 // ((const TypeNode*) n)->type()->is_long()->_lo ==
11158 // (unsigned int) ((const TypeNode*) n)->type()->is_long()->_lo);
11160 // format %{ "movl $dst, $src\t# unsigned i2l" %}
11161 // ins_encode(enc_copy(dst, src));
11162 // // opcode(0x63); // needs REX.W
11163 // // ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst,src));
11164 // ins_pipe(ialu_reg_reg);
11165 // %}
11167 // Zero-extend convert int to long
// Matches (long)x & 0xFFFFFFFF, i.e. an unsigned int-to-long conversion.
// On x86-64 a 32-bit movl implicitly zeroes the upper 32 bits of the
// destination, so a plain register copy implements the whole AndL+ConvI2L.
11168 instruct convI2L_reg_reg_zex(rRegL dst, rRegI src, immL_32bits mask)
11169 %{
11170 match(Set dst (AndL (ConvI2L src) mask));
11172 format %{ "movl $dst, $src\t# i2l zero-extend\n\t" %}
11173 ins_encode(enc_copy(dst, src));
11174 ins_pipe(ialu_reg_reg);
11175 %}
11177 // Zero-extend convert int to long
11178 instruct convI2L_reg_mem_zex(rRegL dst, memory src, immL_32bits mask)
11179 %{
11180 match(Set dst (AndL (ConvI2L (LoadI src)) mask));
11182 format %{ "movl $dst, $src\t# i2l zero-extend\n\t" %}
11183 opcode(0x8B);
11184 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
11185 ins_pipe(ialu_reg_mem);
11186 %}
// Zero the upper 32 bits of a long: long & 0xFFFFFFFF.
// Relies on the x86-64 rule that a 32-bit movl clears bits 63:32.
// enc_copy_always emits the move even when dst == src, since the
// truncating side effect is the whole point of the instruction.
11188 instruct zerox_long_reg_reg(rRegL dst, rRegL src, immL_32bits mask)
11189 %{
11190 match(Set dst (AndL src mask));
11192 format %{ "movl $dst, $src\t# zero-extend long" %}
11193 ins_encode(enc_copy_always(dst, src));
11194 ins_pipe(ialu_reg_reg);
11195 %}
// Convert long to int by truncation: a 32-bit register copy keeps only
// the low 32 bits. NOTE(review): enc_copy_always (rather than enc_copy)
// appears intended to emit the move even when dst and src are the same
// physical register — confirm against the encoding definitions.
11197 instruct convL2I_reg_reg(rRegI dst, rRegL src)
11198 %{
11199 match(Set dst (ConvL2I src));
11201 format %{ "movl $dst, $src\t# l2i" %}
11202 ins_encode(enc_copy_always(dst, src));
11203 ins_pipe(ialu_reg_reg);
11204 %}
11207 instruct MoveF2I_stack_reg(rRegI dst, stackSlotF src) %{
11208 match(Set dst (MoveF2I src));
11209 effect(DEF dst, USE src);
11211 ins_cost(125);
11212 format %{ "movl $dst, $src\t# MoveF2I_stack_reg" %}
11213 opcode(0x8B);
11214 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
11215 ins_pipe(ialu_reg_mem);
11216 %}
11218 instruct MoveI2F_stack_reg(regF dst, stackSlotI src) %{
11219 match(Set dst (MoveI2F src));
11220 effect(DEF dst, USE src);
11222 ins_cost(125);
11223 format %{ "movss $dst, $src\t# MoveI2F_stack_reg" %}
11224 opcode(0xF3, 0x0F, 0x10);
11225 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
11226 ins_pipe(pipe_slow);
11227 %}
11229 instruct MoveD2L_stack_reg(rRegL dst, stackSlotD src) %{
11230 match(Set dst (MoveD2L src));
11231 effect(DEF dst, USE src);
11233 ins_cost(125);
11234 format %{ "movq $dst, $src\t# MoveD2L_stack_reg" %}
11235 opcode(0x8B);
11236 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
11237 ins_pipe(ialu_reg_mem);
11238 %}
11240 instruct MoveL2D_stack_reg_partial(regD dst, stackSlotL src) %{
11241 predicate(!UseXmmLoadAndClearUpper);
11242 match(Set dst (MoveL2D src));
11243 effect(DEF dst, USE src);
11245 ins_cost(125);
11246 format %{ "movlpd $dst, $src\t# MoveL2D_stack_reg" %}
11247 opcode(0x66, 0x0F, 0x12);
11248 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
11249 ins_pipe(pipe_slow);
11250 %}
11252 instruct MoveL2D_stack_reg(regD dst, stackSlotL src) %{
11253 predicate(UseXmmLoadAndClearUpper);
11254 match(Set dst (MoveL2D src));
11255 effect(DEF dst, USE src);
11257 ins_cost(125);
11258 format %{ "movsd $dst, $src\t# MoveL2D_stack_reg" %}
11259 opcode(0xF2, 0x0F, 0x10);
11260 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
11261 ins_pipe(pipe_slow);
11262 %}
11265 instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{
11266 match(Set dst (MoveF2I src));
11267 effect(DEF dst, USE src);
11269 ins_cost(95); // XXX
11270 format %{ "movss $dst, $src\t# MoveF2I_reg_stack" %}
11271 opcode(0xF3, 0x0F, 0x11);
11272 ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
11273 ins_pipe(pipe_slow);
11274 %}
11276 instruct MoveI2F_reg_stack(stackSlotF dst, rRegI src) %{
11277 match(Set dst (MoveI2F src));
11278 effect(DEF dst, USE src);
11280 ins_cost(100);
11281 format %{ "movl $dst, $src\t# MoveI2F_reg_stack" %}
11282 opcode(0x89);
11283 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
11284 ins_pipe( ialu_mem_reg );
11285 %}
// Raw bit-move of a double from an XMM register to a long stack slot
// (movsd store, 0xF2 0x0F 0x11). No value conversion is performed.
11287 instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{
11288 match(Set dst (MoveD2L src));
11289 effect(DEF dst, USE src);
11291 ins_cost(95); // XXX
// Fixed disassembly annotation: this is MoveD2L_reg_stack, not MoveL2D_reg_stack.
11292 format %{ "movsd $dst, $src\t# MoveD2L_reg_stack" %}
11293 opcode(0xF2, 0x0F, 0x11);
11294 ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
11295 ins_pipe(pipe_slow);
11296 %}
11298 instruct MoveL2D_reg_stack(stackSlotD dst, rRegL src) %{
11299 match(Set dst (MoveL2D src));
11300 effect(DEF dst, USE src);
11302 ins_cost(100);
11303 format %{ "movq $dst, $src\t# MoveL2D_reg_stack" %}
11304 opcode(0x89);
11305 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
11306 ins_pipe(ialu_mem_reg);
11307 %}
11309 instruct MoveF2I_reg_reg(rRegI dst, regF src) %{
11310 match(Set dst (MoveF2I src));
11311 effect(DEF dst, USE src);
11312 ins_cost(85);
11313 format %{ "movd $dst,$src\t# MoveF2I" %}
11314 ins_encode %{ __ movdl($dst$$Register, $src$$XMMRegister); %}
11315 ins_pipe( pipe_slow );
11316 %}
11318 instruct MoveD2L_reg_reg(rRegL dst, regD src) %{
11319 match(Set dst (MoveD2L src));
11320 effect(DEF dst, USE src);
11321 ins_cost(85);
11322 format %{ "movd $dst,$src\t# MoveD2L" %}
11323 ins_encode %{ __ movdq($dst$$Register, $src$$XMMRegister); %}
11324 ins_pipe( pipe_slow );
11325 %}
11327 // The next instructions have long latency and use Int unit. Set high cost.
11328 instruct MoveI2F_reg_reg(regF dst, rRegI src) %{
11329 match(Set dst (MoveI2F src));
11330 effect(DEF dst, USE src);
11331 ins_cost(300);
11332 format %{ "movd $dst,$src\t# MoveI2F" %}
11333 ins_encode %{ __ movdl($dst$$XMMRegister, $src$$Register); %}
11334 ins_pipe( pipe_slow );
11335 %}
11337 instruct MoveL2D_reg_reg(regD dst, rRegL src) %{
11338 match(Set dst (MoveL2D src));
11339 effect(DEF dst, USE src);
11340 ins_cost(300);
11341 format %{ "movd $dst,$src\t# MoveL2D" %}
11342 ins_encode %{ __ movdq($dst$$XMMRegister, $src$$Register); %}
11343 ins_pipe( pipe_slow );
11344 %}
11346 // Replicate scalar to packed byte (1 byte) values in xmm
11347 instruct Repl8B_reg(regD dst, regD src) %{
11348 match(Set dst (Replicate8B src));
11349 format %{ "MOVDQA $dst,$src\n\t"
11350 "PUNPCKLBW $dst,$dst\n\t"
11351 "PSHUFLW $dst,$dst,0x00\t! replicate8B" %}
11352 ins_encode( pshufd_8x8(dst, src));
11353 ins_pipe( pipe_slow );
11354 %}
11356 // Replicate scalar to packed byte (1 byte) values in xmm
11357 instruct Repl8B_rRegI(regD dst, rRegI src) %{
11358 match(Set dst (Replicate8B src));
11359 format %{ "MOVD $dst,$src\n\t"
11360 "PUNPCKLBW $dst,$dst\n\t"
11361 "PSHUFLW $dst,$dst,0x00\t! replicate8B" %}
11362 ins_encode( mov_i2x(dst, src), pshufd_8x8(dst, dst));
11363 ins_pipe( pipe_slow );
11364 %}
11366 // Replicate scalar zero to packed byte (1 byte) values in xmm
11367 instruct Repl8B_immI0(regD dst, immI0 zero) %{
11368 match(Set dst (Replicate8B zero));
11369 format %{ "PXOR $dst,$dst\t! replicate8B" %}
11370 ins_encode( pxor(dst, dst));
11371 ins_pipe( fpu_reg_reg );
11372 %}
11374 // Replicate scalar to packed short (2 byte) values in xmm
11375 instruct Repl4S_reg(regD dst, regD src) %{
11376 match(Set dst (Replicate4S src));
11377 format %{ "PSHUFLW $dst,$src,0x00\t! replicate4S" %}
11378 ins_encode( pshufd_4x16(dst, src));
11379 ins_pipe( fpu_reg_reg );
11380 %}
11382 // Replicate scalar to packed short (2 byte) values in xmm
11383 instruct Repl4S_rRegI(regD dst, rRegI src) %{
11384 match(Set dst (Replicate4S src));
11385 format %{ "MOVD $dst,$src\n\t"
11386 "PSHUFLW $dst,$dst,0x00\t! replicate4S" %}
11387 ins_encode( mov_i2x(dst, src), pshufd_4x16(dst, dst));
11388 ins_pipe( fpu_reg_reg );
11389 %}
11391 // Replicate scalar zero to packed short (2 byte) values in xmm
11392 instruct Repl4S_immI0(regD dst, immI0 zero) %{
11393 match(Set dst (Replicate4S zero));
11394 format %{ "PXOR $dst,$dst\t! replicate4S" %}
11395 ins_encode( pxor(dst, dst));
11396 ins_pipe( fpu_reg_reg );
11397 %}
11399 // Replicate scalar to packed char (2 byte) values in xmm
11400 instruct Repl4C_reg(regD dst, regD src) %{
11401 match(Set dst (Replicate4C src));
11402 format %{ "PSHUFLW $dst,$src,0x00\t! replicate4C" %}
11403 ins_encode( pshufd_4x16(dst, src));
11404 ins_pipe( fpu_reg_reg );
11405 %}
11407 // Replicate scalar to packed char (2 byte) values in xmm
11408 instruct Repl4C_rRegI(regD dst, rRegI src) %{
11409 match(Set dst (Replicate4C src));
11410 format %{ "MOVD $dst,$src\n\t"
11411 "PSHUFLW $dst,$dst,0x00\t! replicate4C" %}
11412 ins_encode( mov_i2x(dst, src), pshufd_4x16(dst, dst));
11413 ins_pipe( fpu_reg_reg );
11414 %}
11416 // Replicate scalar zero to packed char (2 byte) values in xmm
11417 instruct Repl4C_immI0(regD dst, immI0 zero) %{
11418 match(Set dst (Replicate4C zero));
11419 format %{ "PXOR $dst,$dst\t! replicate4C" %}
11420 ins_encode( pxor(dst, dst));
11421 ins_pipe( fpu_reg_reg );
11422 %}
11424 // Replicate scalar to packed integer (4 byte) values in xmm
11425 instruct Repl2I_reg(regD dst, regD src) %{
11426 match(Set dst (Replicate2I src));
11427 format %{ "PSHUFD $dst,$src,0x00\t! replicate2I" %}
11428 ins_encode( pshufd(dst, src, 0x00));
11429 ins_pipe( fpu_reg_reg );
11430 %}
11432 // Replicate scalar to packed integer (4 byte) values in xmm
11433 instruct Repl2I_rRegI(regD dst, rRegI src) %{
11434 match(Set dst (Replicate2I src));
11435 format %{ "MOVD $dst,$src\n\t"
11436 "PSHUFD $dst,$dst,0x00\t! replicate2I" %}
11437 ins_encode( mov_i2x(dst, src), pshufd(dst, dst, 0x00));
11438 ins_pipe( fpu_reg_reg );
11439 %}
11441 // Replicate scalar zero to packed integer (4 byte) values in xmm
11442 instruct Repl2I_immI0(regD dst, immI0 zero) %{
11443 match(Set dst (Replicate2I zero));
11444 format %{ "PXOR $dst,$dst\t! replicate2I" %}
11445 ins_encode( pxor(dst, dst));
11446 ins_pipe( fpu_reg_reg );
11447 %}
11449 // Replicate scalar to packed single precision floating point values in xmm
11450 instruct Repl2F_reg(regD dst, regD src) %{
11451 match(Set dst (Replicate2F src));
11452 format %{ "PSHUFD $dst,$src,0xe0\t! replicate2F" %}
11453 ins_encode( pshufd(dst, src, 0xe0));
11454 ins_pipe( fpu_reg_reg );
11455 %}
11457 // Replicate scalar to packed single precision floating point values in xmm
11458 instruct Repl2F_regF(regD dst, regF src) %{
11459 match(Set dst (Replicate2F src));
11460 format %{ "PSHUFD $dst,$src,0xe0\t! replicate2F" %}
11461 ins_encode( pshufd(dst, src, 0xe0));
11462 ins_pipe( fpu_reg_reg );
11463 %}
11465 // Replicate scalar zero to packed single precision floating point values in xmm
11466 instruct Repl2F_immF0(regD dst, immF0 zero) %{
11467 match(Set dst (Replicate2F zero));
11468 format %{ "PXOR $dst,$dst\t! replicate2F" %}
11469 ins_encode( pxor(dst, dst));
11470 ins_pipe( fpu_reg_reg );
11471 %}
11474 // =======================================================================
11475 // fast clearing of an array
// Fast array clearing: zero rax, then "rep stosq" stores rax to
// successive quadwords at rdi while rcx counts down. cnt/base are
// fixed to rcx/rdi by the string-op calling convention and are
// destroyed, as are rax and the flags.
11476 instruct rep_stos(rcx_RegL cnt, rdi_RegP base, rax_RegI zero, Universe dummy,
11477 rFlagsReg cr)
11478 %{
11479 match(Set dummy (ClearArray cnt base))
11480 effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);
11482 format %{ "xorl rax, rax\t# ClearArray:\n\t"
11483 "rep stosq\t# Store rax to *rdi++ while rcx--" %}
11484 ins_encode(opc_reg_reg(0x33, RAX, RAX), // xorl %eax, %eax
11485 Opcode(0xF3), Opcode(0x48), Opcode(0xAB)); // rep REX_W stos
11486 ins_pipe(pipe_slow);
11487 %}
11489 instruct string_compare(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2,
11490 rax_RegI result, regD tmp1, rFlagsReg cr)
11491 %{
11492 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
11493 effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
11495 format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %}
11496 ins_encode %{
11497 __ string_compare($str1$$Register, $str2$$Register,
11498 $cnt1$$Register, $cnt2$$Register, $result$$Register,
11499 $tmp1$$XMMRegister);
11500 %}
11501 ins_pipe( pipe_slow );
11502 %}
11504 // fast search of substring with known size.
11505 instruct string_indexof_con(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI int_cnt2,
11506 rbx_RegI result, regD vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr)
11507 %{
11508 predicate(UseSSE42Intrinsics);
11509 match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
11510 effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);
11512 format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %}
11513 ins_encode %{
11514 int icnt2 = (int)$int_cnt2$$constant;
11515 if (icnt2 >= 8) {
11516 // IndexOf for constant substrings with size >= 8 elements
11517 // which don't need to be loaded through stack.
11518 __ string_indexofC8($str1$$Register, $str2$$Register,
11519 $cnt1$$Register, $cnt2$$Register,
11520 icnt2, $result$$Register,
11521 $vec$$XMMRegister, $tmp$$Register);
11522 } else {
11523 // Small strings are loaded through stack if they cross page boundary.
11524 __ string_indexof($str1$$Register, $str2$$Register,
11525 $cnt1$$Register, $cnt2$$Register,
11526 icnt2, $result$$Register,
11527 $vec$$XMMRegister, $tmp$$Register);
11528 }
11529 %}
11530 ins_pipe( pipe_slow );
11531 %}
11533 instruct string_indexof(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI cnt2,
11534 rbx_RegI result, regD vec, rcx_RegI tmp, rFlagsReg cr)
11535 %{
11536 predicate(UseSSE42Intrinsics);
11537 match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
11538 effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);
11540 format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %}
11541 ins_encode %{
11542 __ string_indexof($str1$$Register, $str2$$Register,
11543 $cnt1$$Register, $cnt2$$Register,
11544 (-1), $result$$Register,
11545 $vec$$XMMRegister, $tmp$$Register);
11546 %}
11547 ins_pipe( pipe_slow );
11548 %}
11550 // fast string equals
11551 instruct string_equals(rdi_RegP str1, rsi_RegP str2, rcx_RegI cnt, rax_RegI result,
11552 regD tmp1, regD tmp2, rbx_RegI tmp3, rFlagsReg cr)
11553 %{
11554 match(Set result (StrEquals (Binary str1 str2) cnt));
11555 effect(TEMP tmp1, TEMP tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp3, KILL cr);
11557 format %{ "String Equals $str1,$str2,$cnt -> $result // KILL $tmp1, $tmp2, $tmp3" %}
11558 ins_encode %{
11559 __ char_arrays_equals(false, $str1$$Register, $str2$$Register,
11560 $cnt$$Register, $result$$Register, $tmp3$$Register,
11561 $tmp1$$XMMRegister, $tmp2$$XMMRegister);
11562 %}
11563 ins_pipe( pipe_slow );
11564 %}
11566 // fast array equals
11567 instruct array_equals(rdi_RegP ary1, rsi_RegP ary2, rax_RegI result,
11568 regD tmp1, regD tmp2, rcx_RegI tmp3, rbx_RegI tmp4, rFlagsReg cr)
11569 %{
11570 match(Set result (AryEq ary1 ary2));
11571 effect(TEMP tmp1, TEMP tmp2, USE_KILL ary1, USE_KILL ary2, KILL tmp3, KILL tmp4, KILL cr);
11572 //ins_cost(300);
11574 format %{ "Array Equals $ary1,$ary2 -> $result // KILL $tmp1, $tmp2, $tmp3, $tmp4" %}
11575 ins_encode %{
11576 __ char_arrays_equals(true, $ary1$$Register, $ary2$$Register,
11577 $tmp3$$Register, $result$$Register, $tmp4$$Register,
11578 $tmp1$$XMMRegister, $tmp2$$XMMRegister);
11579 %}
11580 ins_pipe( pipe_slow );
11581 %}
11583 //----------Control Flow Instructions------------------------------------------
11584 // Signed compare Instructions
11586 // XXX more variants!!
11587 instruct compI_rReg(rFlagsReg cr, rRegI op1, rRegI op2)
11588 %{
11589 match(Set cr (CmpI op1 op2));
11590 effect(DEF cr, USE op1, USE op2);
11592 format %{ "cmpl $op1, $op2" %}
11593 opcode(0x3B); /* Opcode 3B /r */
11594 ins_encode(REX_reg_reg(op1, op2), OpcP, reg_reg(op1, op2));
11595 ins_pipe(ialu_cr_reg_reg);
11596 %}
11598 instruct compI_rReg_imm(rFlagsReg cr, rRegI op1, immI op2)
11599 %{
11600 match(Set cr (CmpI op1 op2));
11602 format %{ "cmpl $op1, $op2" %}
11603 opcode(0x81, 0x07); /* Opcode 81 /7 */
11604 ins_encode(OpcSErm(op1, op2), Con8or32(op2));
11605 ins_pipe(ialu_cr_reg_imm);
11606 %}
11608 instruct compI_rReg_mem(rFlagsReg cr, rRegI op1, memory op2)
11609 %{
11610 match(Set cr (CmpI op1 (LoadI op2)));
11612 ins_cost(500); // XXX
11613 format %{ "cmpl $op1, $op2" %}
11614 opcode(0x3B); /* Opcode 3B /r */
11615 ins_encode(REX_reg_mem(op1, op2), OpcP, reg_mem(op1, op2));
11616 ins_pipe(ialu_cr_reg_mem);
11617 %}
11619 instruct testI_reg(rFlagsReg cr, rRegI src, immI0 zero)
11620 %{
11621 match(Set cr (CmpI src zero));
11623 format %{ "testl $src, $src" %}
11624 opcode(0x85);
11625 ins_encode(REX_reg_reg(src, src), OpcP, reg_reg(src, src));
11626 ins_pipe(ialu_cr_reg_imm);
11627 %}
11629 instruct testI_reg_imm(rFlagsReg cr, rRegI src, immI con, immI0 zero)
11630 %{
11631 match(Set cr (CmpI (AndI src con) zero));
11633 format %{ "testl $src, $con" %}
11634 opcode(0xF7, 0x00);
11635 ins_encode(REX_reg(src), OpcP, reg_opc(src), Con32(con));
11636 ins_pipe(ialu_cr_reg_imm);
11637 %}
11639 instruct testI_reg_mem(rFlagsReg cr, rRegI src, memory mem, immI0 zero)
11640 %{
11641 match(Set cr (CmpI (AndI src (LoadI mem)) zero));
11643 format %{ "testl $src, $mem" %}
11644 opcode(0x85);
11645 ins_encode(REX_reg_mem(src, mem), OpcP, reg_mem(src, mem));
11646 ins_pipe(ialu_cr_reg_mem);
11647 %}
11649 // Unsigned compare Instructions; really, same as signed except they
11650 // produce an rFlagsRegU instead of rFlagsReg.
11651 instruct compU_rReg(rFlagsRegU cr, rRegI op1, rRegI op2)
11652 %{
11653 match(Set cr (CmpU op1 op2));
11655 format %{ "cmpl $op1, $op2\t# unsigned" %}
11656 opcode(0x3B); /* Opcode 3B /r */
11657 ins_encode(REX_reg_reg(op1, op2), OpcP, reg_reg(op1, op2));
11658 ins_pipe(ialu_cr_reg_reg);
11659 %}
11661 instruct compU_rReg_imm(rFlagsRegU cr, rRegI op1, immI op2)
11662 %{
11663 match(Set cr (CmpU op1 op2));
11665 format %{ "cmpl $op1, $op2\t# unsigned" %}
11666 opcode(0x81,0x07); /* Opcode 81 /7 */
11667 ins_encode(OpcSErm(op1, op2), Con8or32(op2));
11668 ins_pipe(ialu_cr_reg_imm);
11669 %}
11671 instruct compU_rReg_mem(rFlagsRegU cr, rRegI op1, memory op2)
11672 %{
11673 match(Set cr (CmpU op1 (LoadI op2)));
11675 ins_cost(500); // XXX
11676 format %{ "cmpl $op1, $op2\t# unsigned" %}
11677 opcode(0x3B); /* Opcode 3B /r */
11678 ins_encode(REX_reg_mem(op1, op2), OpcP, reg_mem(op1, op2));
11679 ins_pipe(ialu_cr_reg_mem);
11680 %}
11682 // // // Cisc-spilled version of cmpU_rReg
11683 // //instruct compU_mem_rReg(rFlagsRegU cr, memory op1, rRegI op2)
11684 // //%{
11685 // // match(Set cr (CmpU (LoadI op1) op2));
11686 // //
11687 // // format %{ "CMPu $op1,$op2" %}
11688 // // ins_cost(500);
11689 // // opcode(0x39); /* Opcode 39 /r */
11690 // // ins_encode( OpcP, reg_mem( op1, op2) );
11691 // //%}
11693 instruct testU_reg(rFlagsRegU cr, rRegI src, immI0 zero)
11694 %{
11695 match(Set cr (CmpU src zero));
11697 format %{ "testl $src, $src\t# unsigned" %}
11698 opcode(0x85);
11699 ins_encode(REX_reg_reg(src, src), OpcP, reg_reg(src, src));
11700 ins_pipe(ialu_cr_reg_imm);
11701 %}
11703 instruct compP_rReg(rFlagsRegU cr, rRegP op1, rRegP op2)
11704 %{
11705 match(Set cr (CmpP op1 op2));
11707 format %{ "cmpq $op1, $op2\t# ptr" %}
11708 opcode(0x3B); /* Opcode 3B /r */
11709 ins_encode(REX_reg_reg_wide(op1, op2), OpcP, reg_reg(op1, op2));
11710 ins_pipe(ialu_cr_reg_reg);
11711 %}
11713 instruct compP_rReg_mem(rFlagsRegU cr, rRegP op1, memory op2)
11714 %{
11715 match(Set cr (CmpP op1 (LoadP op2)));
11717 ins_cost(500); // XXX
11718 format %{ "cmpq $op1, $op2\t# ptr" %}
11719 opcode(0x3B); /* Opcode 3B /r */
11720 ins_encode(REX_reg_mem_wide(op1, op2), OpcP, reg_mem(op1, op2));
11721 ins_pipe(ialu_cr_reg_mem);
11722 %}
11724 // // // Cisc-spilled version of cmpP_rReg
11725 // //instruct compP_mem_rReg(rFlagsRegU cr, memory op1, rRegP op2)
11726 // //%{
11727 // // match(Set cr (CmpP (LoadP op1) op2));
11728 // //
11729 // // format %{ "CMPu $op1,$op2" %}
11730 // // ins_cost(500);
11731 // // opcode(0x39); /* Opcode 39 /r */
11732 // // ins_encode( OpcP, reg_mem( op1, op2) );
11733 // //%}
11735 // XXX this is generalized by compP_rReg_mem???
11736 // Compare raw pointer (used in out-of-heap check).
11737 // Only works because non-oop pointers must be raw pointers
11738 // and raw pointers have no anti-dependencies.
11739 instruct compP_mem_rReg(rFlagsRegU cr, rRegP op1, memory op2)
11740 %{
11741 predicate(!n->in(2)->in(2)->bottom_type()->isa_oop_ptr());
11742 match(Set cr (CmpP op1 (LoadP op2)));
11744 format %{ "cmpq $op1, $op2\t# raw ptr" %}
11745 opcode(0x3B); /* Opcode 3B /r */
11746 ins_encode(REX_reg_mem_wide(op1, op2), OpcP, reg_mem(op1, op2));
11747 ins_pipe(ialu_cr_reg_mem);
11748 %}
11750 // This will generate a signed flags result. This should be OK since
11751 // any compare to a zero should be eq/neq.
11752 instruct testP_reg(rFlagsReg cr, rRegP src, immP0 zero)
11753 %{
11754 match(Set cr (CmpP src zero));
11756 format %{ "testq $src, $src\t# ptr" %}
11757 opcode(0x85);
11758 ins_encode(REX_reg_reg_wide(src, src), OpcP, reg_reg(src, src));
11759 ins_pipe(ialu_cr_reg_imm);
11760 %}
11762 // This will generate a signed flags result. This should be OK since
11763 // any compare to a zero should be eq/neq.
11764 instruct testP_mem(rFlagsReg cr, memory op, immP0 zero)
11765 %{
11766 predicate(!UseCompressedOops || (Universe::narrow_oop_base() != NULL));
11767 match(Set cr (CmpP (LoadP op) zero));
11769 ins_cost(500); // XXX
11770 format %{ "testq $op, 0xffffffffffffffff\t# ptr" %}
11771 opcode(0xF7); /* Opcode F7 /0 */
11772 ins_encode(REX_mem_wide(op),
11773 OpcP, RM_opc_mem(0x00, op), Con_d32(0xFFFFFFFF));
11774 ins_pipe(ialu_cr_reg_imm);
11775 %}
11777 instruct testP_mem_reg0(rFlagsReg cr, memory mem, immP0 zero)
11778 %{
11779 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
11780 match(Set cr (CmpP (LoadP mem) zero));
11782 format %{ "cmpq R12, $mem\t# ptr (R12_heapbase==0)" %}
11783 ins_encode %{
11784 __ cmpq(r12, $mem$$Address);
11785 %}
11786 ins_pipe(ialu_cr_reg_mem);
11787 %}
11789 instruct compN_rReg(rFlagsRegU cr, rRegN op1, rRegN op2)
11790 %{
11791 match(Set cr (CmpN op1 op2));
11793 format %{ "cmpl $op1, $op2\t# compressed ptr" %}
11794 ins_encode %{ __ cmpl($op1$$Register, $op2$$Register); %}
11795 ins_pipe(ialu_cr_reg_reg);
11796 %}
11798 instruct compN_rReg_mem(rFlagsRegU cr, rRegN src, memory mem)
11799 %{
11800 match(Set cr (CmpN src (LoadN mem)));
11802 format %{ "cmpl $src, $mem\t# compressed ptr" %}
11803 ins_encode %{
11804 __ cmpl($src$$Register, $mem$$Address);
11805 %}
11806 ins_pipe(ialu_cr_reg_mem);
11807 %}
11809 instruct compN_rReg_imm(rFlagsRegU cr, rRegN op1, immN op2) %{
11810 match(Set cr (CmpN op1 op2));
11812 format %{ "cmpl $op1, $op2\t# compressed ptr" %}
11813 ins_encode %{
11814 __ cmp_narrow_oop($op1$$Register, (jobject)$op2$$constant);
11815 %}
11816 ins_pipe(ialu_cr_reg_imm);
11817 %}
11819 instruct compN_mem_imm(rFlagsRegU cr, memory mem, immN src)
11820 %{
11821 match(Set cr (CmpN src (LoadN mem)));
11823 format %{ "cmpl $mem, $src\t# compressed ptr" %}
11824 ins_encode %{
11825 __ cmp_narrow_oop($mem$$Address, (jobject)$src$$constant);
11826 %}
11827 ins_pipe(ialu_cr_reg_mem);
11828 %}
11830 instruct testN_reg(rFlagsReg cr, rRegN src, immN0 zero) %{
11831 match(Set cr (CmpN src zero));
11833 format %{ "testl $src, $src\t# compressed ptr" %}
11834 ins_encode %{ __ testl($src$$Register, $src$$Register); %}
11835 ins_pipe(ialu_cr_reg_imm);
11836 %}
11838 instruct testN_mem(rFlagsReg cr, memory mem, immN0 zero)
11839 %{
11840 predicate(Universe::narrow_oop_base() != NULL);
11841 match(Set cr (CmpN (LoadN mem) zero));
11843 ins_cost(500); // XXX
11844 format %{ "testl $mem, 0xffffffff\t# compressed ptr" %}
11845 ins_encode %{
11846 __ cmpl($mem$$Address, (int)0xFFFFFFFF);
11847 %}
11848 ins_pipe(ialu_cr_reg_mem);
11849 %}
11851 instruct testN_mem_reg0(rFlagsReg cr, memory mem, immN0 zero)
11852 %{
11853 predicate(Universe::narrow_oop_base() == NULL);
11854 match(Set cr (CmpN (LoadN mem) zero));
11856 format %{ "cmpl R12, $mem\t# compressed ptr (R12_heapbase==0)" %}
11857 ins_encode %{
11858 __ cmpl(r12, $mem$$Address);
11859 %}
11860 ins_pipe(ialu_cr_reg_mem);
11861 %}
11863 // Yanked all unsigned pointer compare operations.
11864 // Pointer compares are done with CmpP which is already unsigned.
11866 instruct compL_rReg(rFlagsReg cr, rRegL op1, rRegL op2)
11867 %{
11868 match(Set cr (CmpL op1 op2));
11870 format %{ "cmpq $op1, $op2" %}
11871 opcode(0x3B); /* Opcode 3B /r */
11872 ins_encode(REX_reg_reg_wide(op1, op2), OpcP, reg_reg(op1, op2));
11873 ins_pipe(ialu_cr_reg_reg);
11874 %}
11876 instruct compL_rReg_imm(rFlagsReg cr, rRegL op1, immL32 op2)
11877 %{
11878 match(Set cr (CmpL op1 op2));
11880 format %{ "cmpq $op1, $op2" %}
11881 opcode(0x81, 0x07); /* Opcode 81 /7 */
11882 ins_encode(OpcSErm_wide(op1, op2), Con8or32(op2));
11883 ins_pipe(ialu_cr_reg_imm);
11884 %}
11886 instruct compL_rReg_mem(rFlagsReg cr, rRegL op1, memory op2)
11887 %{
11888 match(Set cr (CmpL op1 (LoadL op2)));
11890 format %{ "cmpq $op1, $op2" %}
11891 opcode(0x3B); /* Opcode 3B /r */
11892 ins_encode(REX_reg_mem_wide(op1, op2), OpcP, reg_mem(op1, op2));
11893 ins_pipe(ialu_cr_reg_mem);
11894 %}
11896 instruct testL_reg(rFlagsReg cr, rRegL src, immL0 zero)
11897 %{
11898 match(Set cr (CmpL src zero));
11900 format %{ "testq $src, $src" %}
11901 opcode(0x85);
11902 ins_encode(REX_reg_reg_wide(src, src), OpcP, reg_reg(src, src));
11903 ins_pipe(ialu_cr_reg_imm);
11904 %}
11906 instruct testL_reg_imm(rFlagsReg cr, rRegL src, immL32 con, immL0 zero)
11907 %{
11908 match(Set cr (CmpL (AndL src con) zero));
11910 format %{ "testq $src, $con\t# long" %}
11911 opcode(0xF7, 0x00);
11912 ins_encode(REX_reg_wide(src), OpcP, reg_opc(src), Con32(con));
11913 ins_pipe(ialu_cr_reg_imm);
11914 %}
11916 instruct testL_reg_mem(rFlagsReg cr, rRegL src, memory mem, immL0 zero)
11917 %{
11918 match(Set cr (CmpL (AndL src (LoadL mem)) zero));
11920 format %{ "testq $src, $mem" %}
11921 opcode(0x85);
11922 ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
11923 ins_pipe(ialu_cr_reg_mem);
11924 %}
11926 // Manifest a CmpL result in an integer register. Very painful.
11927 // This is the test to avoid.
11928 instruct cmpL3_reg_reg(rRegI dst, rRegL src1, rRegL src2, rFlagsReg flags)
11929 %{
11930 match(Set dst (CmpL3 src1 src2));
11931 effect(KILL flags);
11933 ins_cost(275); // XXX
11934 format %{ "cmpq $src1, $src2\t# CmpL3\n\t"
11935 "movl $dst, -1\n\t"
11936 "jl,s done\n\t"
11937 "setne $dst\n\t"
11938 "movzbl $dst, $dst\n\t"
11939 "done:" %}
11940 ins_encode(cmpl3_flag(src1, src2, dst));
11941 ins_pipe(pipe_slow);
11942 %}
11944 //----------Max and Min--------------------------------------------------------
11945 // Min Instructions
// Helper with no match rule: only instantiated by the minI_rReg expand
// below. 0x0F 0x4F is CMOVG — copies src into dst when the preceding
// compare found dst > src, leaving the minimum in dst.
11947 instruct cmovI_reg_g(rRegI dst, rRegI src, rFlagsReg cr)
11948 %{
11949 effect(USE_DEF dst, USE src, USE cr);
11951 format %{ "cmovlgt $dst, $src\t# min" %}
11952 opcode(0x0F, 0x4F);
11953 ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
11954 ins_pipe(pipe_cmov_reg);
11955 %}
// MinI via compare + conditional move: cmp dst,src then cmovg dst,src,
// so dst ends up holding min(dst, src) without a branch.
11958 instruct minI_rReg(rRegI dst, rRegI src)
11959 %{
11960 match(Set dst (MinI dst src));
11962 ins_cost(200);
11963 expand %{
11964 rFlagsReg cr;
11965 compI_rReg(cr, dst, src);
11966 cmovI_reg_g(dst, src, cr);
11967 %}
11968 %}
// Helper with no match rule: only instantiated by the maxI_rReg expand
// below. 0x0F 0x4C is CMOVL — copies src into dst when the preceding
// compare found dst < src, leaving the maximum in dst.
11970 instruct cmovI_reg_l(rRegI dst, rRegI src, rFlagsReg cr)
11971 %{
11972 effect(USE_DEF dst, USE src, USE cr);
11974 format %{ "cmovllt $dst, $src\t# max" %}
11975 opcode(0x0F, 0x4C);
11976 ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
11977 ins_pipe(pipe_cmov_reg);
11978 %}
// MaxI via compare + conditional move: cmp dst,src then cmovl dst,src,
// so dst ends up holding max(dst, src) without a branch.
11981 instruct maxI_rReg(rRegI dst, rRegI src)
11982 %{
11983 match(Set dst (MaxI dst src));
11985 ins_cost(200);
11986 expand %{
11987 rFlagsReg cr;
11988 compI_rReg(cr, dst, src);
11989 cmovI_reg_l(dst, src, cr);
11990 %}
11991 %}
11993 // ============================================================================
11994 // Branch Instructions
11996 // Jump Direct - Label defines a relative address from JMP+1
11997 instruct jmpDir(label labl)
11998 %{
11999 match(Goto);
12000 effect(USE labl);
12002 ins_cost(300);
12003 format %{ "jmp $labl" %}
12004 size(5);
12005 ins_encode %{
12006 Label* L = $labl$$label;
12007 __ jmp(*L, false); // Always long jump
12008 %}
12009 ins_pipe(pipe_jmp);
12010 %}
12012 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12013 instruct jmpCon(cmpOp cop, rFlagsReg cr, label labl)
12014 %{
12015 match(If cop cr);
12016 effect(USE labl);
12018 ins_cost(300);
12019 format %{ "j$cop $labl" %}
12020 size(6);
12021 ins_encode %{
12022 Label* L = $labl$$label;
12023 __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
12024 %}
12025 ins_pipe(pipe_jcc);
12026 %}
12028 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12029 instruct jmpLoopEnd(cmpOp cop, rFlagsReg cr, label labl)
12030 %{
12031 match(CountedLoopEnd cop cr);
12032 effect(USE labl);
12034 ins_cost(300);
12035 format %{ "j$cop $labl\t# loop end" %}
12036 size(6);
12037 ins_encode %{
12038 Label* L = $labl$$label;
12039 __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
12040 %}
12041 ins_pipe(pipe_jcc);
12042 %}
12044 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12045 instruct jmpLoopEndU(cmpOpU cop, rFlagsRegU cmp, label labl) %{
12046 match(CountedLoopEnd cop cmp);
12047 effect(USE labl);
12049 ins_cost(300);
12050 format %{ "j$cop,u $labl\t# loop end" %}
12051 size(6);
12052 ins_encode %{
12053 Label* L = $labl$$label;
12054 __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
12055 %}
12056 ins_pipe(pipe_jcc);
12057 %}
12059 instruct jmpLoopEndUCF(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
12060 match(CountedLoopEnd cop cmp);
12061 effect(USE labl);
12063 ins_cost(200);
12064 format %{ "j$cop,u $labl\t# loop end" %}
12065 size(6);
12066 ins_encode %{
12067 Label* L = $labl$$label;
12068 __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
12069 %}
12070 ins_pipe(pipe_jcc);
12071 %}
12073 // Jump Direct Conditional - using unsigned comparison
12074 instruct jmpConU(cmpOpU cop, rFlagsRegU cmp, label labl) %{
12075 match(If cop cmp);
12076 effect(USE labl);
12078 ins_cost(300);
12079 format %{ "j$cop,u $labl" %}
12080 size(6);
12081 ins_encode %{
12082 Label* L = $labl$$label;
12083 __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
12084 %}
12085 ins_pipe(pipe_jcc);
12086 %}
12088 instruct jmpConUCF(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
12089 match(If cop cmp);
12090 effect(USE labl);
12092 ins_cost(200);
12093 format %{ "j$cop,u $labl" %}
12094 size(6);
12095 ins_encode %{
12096 Label* L = $labl$$label;
12097 __ jcc((Assembler::Condition)($cop$$cmpcode), *L, false); // Always long jump
12098 %}
12099 ins_pipe(pipe_jcc);
12100 %}
// Conditional branch after an unordered (CF-style) float compare where
// the condition is eq/ne. An unordered compare (NaN operand) sets the
// parity flag, so:
//   ne: NaN means "not equal" — branch to the target on PF as well.
//   eq: NaN must NOT take the branch — skip over the je on PF.
12102 instruct jmpConUCF2(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{
12103 match(If cop cmp);
12104 effect(USE labl);
12106 ins_cost(200);
12107 format %{ $$template
12108 if ($cop$$cmpcode == Assembler::notEqual) {
12109 $$emit$$"jp,u $labl\n\t"
12110 $$emit$$"j$cop,u $labl"
12111 } else {
12112 $$emit$$"jp,u done\n\t"
12113 $$emit$$"j$cop,u $labl\n\t"
12114 $$emit$$"done:"
12115 }
12116 %}
12117 ins_encode %{
12118 Label* l = $labl$$label;
12119 if ($cop$$cmpcode == Assembler::notEqual) {
// Unordered or not-equal both branch to the target.
12120 __ jcc(Assembler::parity, *l, false);
12121 __ jcc(Assembler::notEqual, *l, false);
12122 } else if ($cop$$cmpcode == Assembler::equal) {
// Unordered falls through past the equality branch.
12123 Label done;
12124 __ jccb(Assembler::parity, done);
12125 __ jcc(Assembler::equal, *l, false);
12126 __ bind(done);
12127 } else {
12128 ShouldNotReachHere();
12129 }
12130 %}
12131 ins_pipe(pipe_jcc);
12132 %}
12134 // ============================================================================
12135 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary
12136 // superklass array for an instance of the superklass. Set a hidden
12137 // internal cache on a hit (cache is checked with exposed code in
12138 // gen_subtype_check()). Return NZ for a miss or zero for a hit. The
12139 // encoding ALSO sets flags.
// Slow-path subtype check: scan the subklass's secondary-supers array
// for the superklass with repne scasq. On a hit the secondary-super
// cache is updated and rdi is zeroed (opcode flag 0x1 forces the XOR);
// on a miss rdi is non-zero. Flags are set as a side effect.
12141 instruct partialSubtypeCheck(rdi_RegP result,
12142 rsi_RegP sub, rax_RegP super, rcx_RegI rcx,
12143 rFlagsReg cr)
12144 %{
12145 match(Set result (PartialSubtypeCheck sub super));
12146 effect(KILL rcx, KILL cr);
12148 ins_cost(1100); // slightly larger than the next version
12149 format %{ "movq rdi, [$sub + (sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())]\n\t"
12150 "movl rcx, [rdi + arrayOopDesc::length_offset_in_bytes()]\t# length to scan\n\t"
12151 "addq rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)\t# Skip to start of data; set NZ in case count is zero\n\t"
12152 "repne scasq\t# Scan *rdi++ for a match with rax while rcx--\n\t"
12153 "jne,s miss\t\t# Missed: rdi not-zero\n\t"
12154 "movq [$sub + (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())], $super\t# Hit: update cache\n\t"
12155 "xorq $result, $result\t\t Hit: rdi zero\n\t"
12156 "miss:\t" %}
12158 opcode(0x1); // Force a XOR of RDI
12159 ins_encode(enc_PartialSubtypeCheck());
12160 ins_pipe(pipe_slow);
12161 %}
// Flags-only variant of partialSubtypeCheck: matches the comparison of the
// check's result against zero directly, so only condition flags are
// produced (NZ = miss, Z = hit).  RDI is clobbered rather than used as a
// result, hence the lower cost and no trailing XOR.
12163 instruct partialSubtypeCheck_vs_Zero(rFlagsReg cr,
12164 rsi_RegP sub, rax_RegP super, rcx_RegI rcx,
12165 immP0 zero,
12166 rdi_RegP result)
12167 %{
12168 match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
12169 effect(KILL rcx, KILL result);
12171 ins_cost(1000);
12172 format %{ "movq rdi, [$sub + (sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())]\n\t"
12173 "movl rcx, [rdi + arrayOopDesc::length_offset_in_bytes()]\t# length to scan\n\t"
12174 "addq rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)\t# Skip to start of data; set NZ in case count is zero\n\t"
12175 "repne scasq\t# Scan *rdi++ for a match with rax while cx-- != 0\n\t"
12176 "jne,s miss\t\t# Missed: flags nz\n\t"
12177 "movq [$sub + (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())], $super\t# Hit: update cache\n\t"
12178 "miss:\t" %}
12180 opcode(0x0); // No need to XOR RDI
12181 ins_encode(enc_PartialSubtypeCheck());
12182 ins_pipe(pipe_slow);
12183 %}
12185 // ============================================================================
12186 // Branch Instructions -- short offset versions
12187 //
12188 // These instructions are used to replace jumps of a long offset (the default
12189 // match) with jumps of a shorter offset. These instructions are all tagged
12190 // with the ins_short_branch attribute, which causes the ADLC to suppress the
12191 // match rules in general matching. Instead, the ADLC generates a conversion
12192 // method in the MachNode which can be used to do in-place replacement of the
12193 // long variant with the shorter variant. The compiler will determine if a
12194 // branch can be taken by the is_short_branch_offset() predicate in the machine
12195 // specific code section of the file.
12197 // Jump Direct - Label defines a relative address from JMP+1
// Short (2-byte jmpb) form of the unconditional jump; the ADLC substitutes
// it for the long form when the branch offset fits (see section comment).
12198 instruct jmpDir_short(label labl) %{
12199 match(Goto);
12200 effect(USE labl);
12202 ins_cost(300);
12203 format %{ "jmp,s $labl" %}
12204 size(2);
12205 ins_encode %{
12206 Label* L = $labl$$label;
12207 __ jmpb(*L);
12208 %}
12209 ins_pipe(pipe_jmp);
12210 ins_short_branch(1);
12211 %}
12213 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Short (2-byte jccb) form of the signed conditional jump.
12214 instruct jmpCon_short(cmpOp cop, rFlagsReg cr, label labl) %{
12215 match(If cop cr);
12216 effect(USE labl);
12218 ins_cost(300);
12219 format %{ "j$cop,s $labl" %}
12220 size(2);
12221 ins_encode %{
12222 Label* L = $labl$$label;
12223 __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
12224 %}
12225 ins_pipe(pipe_jcc);
12226 ins_short_branch(1);
12227 %}
12229 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Short conditional jump at the bottom of a counted loop.
12230 instruct jmpLoopEnd_short(cmpOp cop, rFlagsReg cr, label labl) %{
12231 match(CountedLoopEnd cop cr);
12232 effect(USE labl);
12234 ins_cost(300);
12235 format %{ "j$cop,s $labl\t# loop end" %}
12236 size(2);
12237 ins_encode %{
12238 Label* L = $labl$$label;
12239 __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
12240 %}
12241 ins_pipe(pipe_jcc);
12242 ins_short_branch(1);
12243 %}
12245 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Short conditional jump at loop end, unsigned comparison flags.
12246 instruct jmpLoopEndU_short(cmpOpU cop, rFlagsRegU cmp, label labl) %{
12247 match(CountedLoopEnd cop cmp);
12248 effect(USE labl);
12250 ins_cost(300);
12251 format %{ "j$cop,us $labl\t# loop end" %}
12252 size(2);
12253 ins_encode %{
12254 Label* L = $labl$$label;
12255 __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
12256 %}
12257 ins_pipe(pipe_jcc);
12258 ins_short_branch(1);
12259 %}
// Short conditional jump at loop end, carry-flag-free unsigned flags.
12261 instruct jmpLoopEndUCF_short(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
12262 match(CountedLoopEnd cop cmp);
12263 effect(USE labl);
12265 ins_cost(300);
12266 format %{ "j$cop,us $labl\t# loop end" %}
12267 size(2);
12268 ins_encode %{
12269 Label* L = $labl$$label;
12270 __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
12271 %}
12272 ins_pipe(pipe_jcc);
12273 ins_short_branch(1);
12274 %}
12276 // Jump Direct Conditional - using unsigned comparison
// Short (2-byte jccb) form of the unsigned conditional jump.
12278 instruct jmpConU_short(cmpOpU cop, rFlagsRegU cmp, label labl) %{
12279 match(If cop cmp);
12280 effect(USE labl);
12282 ins_cost(300);
12283 format %{ "j$cop,us $labl" %}
12284 size(2);
12285 ins_encode %{
12286 Label* L = $labl$$label;
12287 __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
12288 %}
12289 ins_pipe(pipe_jcc);
12290 ins_short_branch(1);
12291 %}
// Short form of jmpConUCF (carry-flag-free unsigned conditional jump).
12292 instruct jmpConUCF_short(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
12293 match(If cop cmp);
12294 effect(USE labl);
12296 ins_cost(300);
12297 format %{ "j$cop,us $labl" %}
12298 size(2);
12299 ins_encode %{
12300 Label* L = $labl$$label;
12301 __ jccb((Assembler::Condition)($cop$$cmpcode), *L);
12302 %}
12303 ins_pipe(pipe_jcc);
12304 ins_short_branch(1);
12305 %}
// Short form of jmpConUCF2: two 2-byte jccb instructions (size 4) instead
// of the long-jump pair.  Same parity-flag handling as the long version.
12307 instruct jmpConUCF2_short(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{
12308 match(If cop cmp);
12309 effect(USE labl);
12311 ins_cost(300);
12312 format %{ $$template
12313 if ($cop$$cmpcode == Assembler::notEqual) {
12314 $$emit$$"jp,u,s $labl\n\t"
12315 $$emit$$"j$cop,u,s $labl"
12316 } else {
12317 $$emit$$"jp,u,s done\n\t"
12318 $$emit$$"j$cop,u,s $labl\n\t"
12319 $$emit$$"done:"
12320 }
12321 %}
12322 size(4);
12323 ins_encode %{
12324 Label* l = $labl$$label;
12325 if ($cop$$cmpcode == Assembler::notEqual) {
// Parity set (unordered) also counts as not-equal.
12326 __ jccb(Assembler::parity, *l);
12327 __ jccb(Assembler::notEqual, *l);
12328 } else if ($cop$$cmpcode == Assembler::equal) {
// Unordered is never equal: skip over the eq branch when PF is set.
12329 Label done;
12330 __ jccb(Assembler::parity, done);
12331 __ jccb(Assembler::equal, *l);
12332 __ bind(done);
12333 } else {
12334 ShouldNotReachHere();
12335 }
12336 %}
12337 ins_pipe(pipe_jcc);
12338 ins_short_branch(1);
12339 %}
12341 // ============================================================================
12342 // inlined locking and unlocking
// Inlined fast-path monitor enter.  Sets flags for the caller's branch;
// tmp (RAX) and scr are scratch registers used by the Fast_Lock encoding.
12344 instruct cmpFastLock(rFlagsReg cr,
12345 rRegP object, rRegP box, rax_RegI tmp, rRegP scr)
12346 %{
12347 match(Set cr (FastLock object box));
12348 effect(TEMP tmp, TEMP scr);
12350 ins_cost(300);
12351 format %{ "fastlock $object,$box,$tmp,$scr" %}
12352 ins_encode(Fast_Lock(object, box, tmp, scr));
12353 ins_pipe(pipe_slow);
12354 %}
// Inlined fast-path monitor exit; box is pinned in RAX, tmp is scratch.
12356 instruct cmpFastUnlock(rFlagsReg cr,
12357 rRegP object, rax_RegP box, rRegP tmp)
12358 %{
12359 match(Set cr (FastUnlock object box));
12360 effect(TEMP tmp);
12362 ins_cost(300);
12363 format %{ "fastunlock $object, $box, $tmp" %}
12364 ins_encode(Fast_Unlock(object, box, tmp));
12365 ins_pipe(pipe_slow);
12366 %}
12369 // ============================================================================
12370 // Safepoint Instructions
// Safepoint poll via a RIP-relative test of the polling page; only legal
// when the page is within 32-bit displacement range (predicate).
12371 instruct safePoint_poll(rFlagsReg cr)
12372 %{
12373 predicate(!Assembler::is_polling_page_far());
12374 match(SafePoint);
12375 effect(KILL cr);
12377 format %{ "testl rax, [rip + #offset_to_poll_page]\t"
12378 "# Safepoint: poll for GC" %}
12379 ins_cost(125);
12380 ins_encode %{
12381 AddressLiteral addr(os::get_polling_page(), relocInfo::poll_type);
12382 __ testl(rax, addr);
12383 %}
12384 ins_pipe(ialu_reg_mem);
12385 %}
// Safepoint poll when the polling page is out of RIP-relative range: the
// page address is materialized in a register ($poll) and tested indirectly.
12387 instruct safePoint_poll_far(rFlagsReg cr, rRegP poll)
12388 %{
12389 predicate(Assembler::is_polling_page_far());
12390 match(SafePoint poll);
12391 effect(KILL cr, USE poll);
12393 format %{ "testl rax, [$poll]\t"
12394 "# Safepoint: poll for GC" %}
12395 ins_cost(125);
12396 ins_encode %{
12397 __ relocate(relocInfo::poll_type);
12398 __ testl(rax, Address($poll$$Register, 0));
12399 %}
12400 ins_pipe(ialu_reg_mem);
12401 %}
12403 // ============================================================================
12404 // Procedure Call/Return Instructions
12405 // Call Java Static Instruction
12406 // Note: If this code changes, the corresponding ret_addr_offset() and
12407 // compute_padding() functions will have to be adjusted.
// Direct static Java call (non-method-handle invocations only; the MH case
// is handled by CallStaticJavaHandle below).
12408 instruct CallStaticJavaDirect(method meth) %{
12409 match(CallStaticJava);
12410 predicate(!((CallStaticJavaNode*) n)->is_method_handle_invoke());
12411 effect(USE meth);
12413 ins_cost(300);
12414 format %{ "call,static " %}
12415 opcode(0xE8); /* E8 cd */
12416 ins_encode(Java_Static_Call(meth), call_epilog);
12417 ins_pipe(pipe_slow);
12418 ins_alignment(4);
12419 %}
12421 // Call Java Static Instruction (method handle version)
12422 // Note: If this code changes, the corresponding ret_addr_offset() and
12423 // compute_padding() functions will have to be adjusted.
// Static Java call for method-handle invokes: brackets the call with
// preserve_SP/restore_SP, using RBP to hold the saved SP.
12424 instruct CallStaticJavaHandle(method meth, rbp_RegP rbp_mh_SP_save) %{
12425 match(CallStaticJava);
12426 predicate(((CallStaticJavaNode*) n)->is_method_handle_invoke());
12427 effect(USE meth);
12428 // RBP is saved by all callees (for interpreter stack correction).
12429 // We use it here for a similar purpose, in {preserve,restore}_SP.
12431 ins_cost(300);
12432 format %{ "call,static/MethodHandle " %}
12433 opcode(0xE8); /* E8 cd */
12434 ins_encode(preserve_SP,
12435 Java_Static_Call(meth),
12436 restore_SP,
12437 call_epilog);
12438 ins_pipe(pipe_slow);
12439 ins_alignment(4);
12440 %}
12442 // Call Java Dynamic Instruction
12443 // Note: If this code changes, the corresponding ret_addr_offset() and
12444 // compute_padding() functions will have to be adjusted.
// Dynamic (virtual/interface-dispatched) Java call; RAX is preloaded with
// the non-oop sentinel per the inline-cache calling convention.
12445 instruct CallDynamicJavaDirect(method meth)
12446 %{
12447 match(CallDynamicJava);
12448 effect(USE meth);
12450 ins_cost(300);
12451 format %{ "movq rax, #Universe::non_oop_word()\n\t"
12452 "call,dynamic " %}
12453 opcode(0xE8); /* E8 cd */
12454 ins_encode(Java_Dynamic_Call(meth), call_epilog);
12455 ins_pipe(pipe_slow);
12456 ins_alignment(4);
12457 %}
12459 // Call Runtime Instruction
// Direct call into the VM runtime (may reach a safepoint).
12460 instruct CallRuntimeDirect(method meth)
12461 %{
12462 match(CallRuntime);
12463 effect(USE meth);
12465 ins_cost(300);
12466 format %{ "call,runtime " %}
12467 opcode(0xE8); /* E8 cd */
12468 ins_encode(Java_To_Runtime(meth));
12469 ins_pipe(pipe_slow);
12470 %}
12472 // Call runtime without safepoint
// Leaf runtime call: no safepoint check around the call.
12473 instruct CallLeafDirect(method meth)
12474 %{
12475 match(CallLeaf);
12476 effect(USE meth);
12478 ins_cost(300);
12479 format %{ "call_leaf,runtime " %}
12480 opcode(0xE8); /* E8 cd */
12481 ins_encode(Java_To_Runtime(meth));
12482 ins_pipe(pipe_slow);
12483 %}
12485 // Call runtime without safepoint
// Leaf runtime call that does not touch floating-point state; no safepoint.
12486 instruct CallLeafNoFPDirect(method meth)
12487 %{
12488 match(CallLeafNoFP);
12489 effect(USE meth);
12491 ins_cost(300);
12492 format %{ "call_leaf_nofp,runtime " %}
12493 opcode(0xE8); /* E8 cd */
12494 ins_encode(Java_To_Runtime(meth));
12495 ins_pipe(pipe_slow);
12496 %}
12498 // Return Instruction
12499 // Remove the return address & jump to it.
12500 // Notice: We always emit a nop after a ret to make sure there is room
12501 // for safepoint patching
// Method return: single-byte RET (0xC3); pops the return address and jumps
// to it.  See the note above about the trailing nop for safepoint patching.
12502 instruct Ret()
12503 %{
12504 match(Return);
12506 format %{ "ret" %}
12507 opcode(0xC3);
12508 ins_encode(OpcP);
12509 ins_pipe(pipe_jmp);
12510 %}
12512 // Tail Call; Jump from runtime stub to Java code.
12513 // Also known as an 'interprocedural jump'.
12514 // Target of jump will eventually return to caller.
12515 // TailJump below removes the return address.
// Indirect tail call (JMP r/m64, opcode FF /4); the caller's return address
// is left on the stack, and the method oop rides in RBX.
12516 instruct TailCalljmpInd(no_rbp_RegP jump_target, rbx_RegP method_oop)
12517 %{
12518 match(TailCall jump_target method_oop);
12520 ins_cost(300);
12521 format %{ "jmp $jump_target\t# rbx holds method oop" %}
12522 opcode(0xFF, 0x4); /* Opcode FF /4 */
12523 ins_encode(REX_reg(jump_target), OpcP, reg_opc(jump_target));
12524 ins_pipe(pipe_jmp);
12525 %}
12527 // Tail Jump; remove the return address; jump to target.
12528 // TailCall above leaves the return address around.
// Tail jump: pops (discards) the return address into RDX, then does an
// indirect jump to the target; exception oop travels in RAX.
12529 instruct tailjmpInd(no_rbp_RegP jump_target, rax_RegP ex_oop)
12530 %{
12531 match(TailJump jump_target ex_oop);
12533 ins_cost(300);
12534 format %{ "popq rdx\t# pop return address\n\t"
12535 "jmp $jump_target" %}
12536 opcode(0xFF, 0x4); /* Opcode FF /4 */
12537 ins_encode(Opcode(0x5a), // popq rdx
12538 REX_reg(jump_target), OpcP, reg_opc(jump_target));
12539 ins_pipe(pipe_jmp);
12540 %}
12542 // Create exception oop: created by stack-crawling runtime code.
12543 // Created exception is now available to this handler, and is setup
12544 // just prior to jumping to this handler. No code emitted.
// Bind the incoming exception oop (already placed in RAX by the runtime's
// stack-crawling code) to a register operand; emits no machine code.
12545 instruct CreateException(rax_RegP ex_oop)
12546 %{
12547 match(Set ex_oop (CreateEx));
12549 size(0);
12550 // use the following format syntax
12551 format %{ "# exception oop is in rax; no code emitted" %}
12552 ins_encode();
12553 ins_pipe(empty);
12554 %}
12556 // Rethrow exception:
12557 // The exception oop will come in the first argument position.
12558 // Then JUMP (not call) to the rethrow stub code.
// Rethrow: jump (not call) to the shared rethrow stub; the exception oop
// arrives in the first argument position (see comment above).
12559 instruct RethrowException()
12560 %{
12561 match(Rethrow);
12563 // use the following format syntax
12564 format %{ "jmp rethrow_stub" %}
12565 ins_encode(enc_rethrow);
12566 ins_pipe(pipe_jmp);
12567 %}
12570 //----------PEEPHOLE RULES-----------------------------------------------------
12571 // These must follow all instruction definitions as they use the names
12572 // defined in the instructions definitions.
12573 //
12574 // peepmatch ( root_instr_name [preceding_instruction]* );
12575 //
12576 // peepconstraint %{
12577 // (instruction_number.operand_name relational_op instruction_number.operand_name
12578 // [, ...] );
12579 // // instruction numbers are zero-based using left to right order in peepmatch
12580 //
12581 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
12582 // // provide an instruction_number.operand_name for each operand that appears
12583 // // in the replacement instruction's match rule
12584 //
12585 // ---------VM FLAGS---------------------------------------------------------
12586 //
12587 // All peephole optimizations can be turned off using -XX:-OptoPeephole
12588 //
12589 // Each peephole rule is given an identifying number starting with zero and
12590 // increasing by one in the order seen by the parser. An individual peephole
12591 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
12592 // on the command-line.
12593 //
12594 // ---------CURRENT LIMITATIONS----------------------------------------------
12595 //
12596 // Only match adjacent instructions in same basic block
12597 // Only equality constraints
12598 // Only constraints between operands, not (0.dest_reg == RAX_enc)
12599 // Only one replacement instruction
12600 //
12601 // ---------EXAMPLE----------------------------------------------------------
12602 //
12603 // // pertinent parts of existing instructions in architecture description
12604 // instruct movI(rRegI dst, rRegI src)
12605 // %{
12606 // match(Set dst (CopyI src));
12607 // %}
12608 //
12609 // instruct incI_rReg(rRegI dst, immI1 src, rFlagsReg cr)
12610 // %{
12611 // match(Set dst (AddI dst src));
12612 // effect(KILL cr);
12613 // %}
12614 //
12615 // // Change (inc mov) to lea
12616 // peephole %{
12617 // // increment preceded by register-register move
12618 // peepmatch ( incI_rReg movI );
12619 // // require that the destination register of the increment
12620 // // match the destination register of the move
12621 // peepconstraint ( 0.dst == 1.dst );
12622 // // construct a replacement instruction that sets
12623 // // the destination to ( move's source register + one )
12624 // peepreplace ( leaI_rReg_immI( 0.dst 1.src 0.src ) );
12625 // %}
12626 //
12628 // Implementation no longer uses movX instructions since
12629 // machine-independent system no longer uses CopyX nodes.
12630 //
12631 // peephole
12632 // %{
12633 // peepmatch (incI_rReg movI);
12634 // peepconstraint (0.dst == 1.dst);
12635 // peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
12636 // %}
12638 // peephole
12639 // %{
12640 // peepmatch (decI_rReg movI);
12641 // peepconstraint (0.dst == 1.dst);
12642 // peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
12643 // %}
12645 // peephole
12646 // %{
12647 // peepmatch (addI_rReg_imm movI);
12648 // peepconstraint (0.dst == 1.dst);
12649 // peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
12650 // %}
12652 // peephole
12653 // %{
12654 // peepmatch (incL_rReg movL);
12655 // peepconstraint (0.dst == 1.dst);
12656 // peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
12657 // %}
12659 // peephole
12660 // %{
12661 // peepmatch (decL_rReg movL);
12662 // peepconstraint (0.dst == 1.dst);
12663 // peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
12664 // %}
12666 // peephole
12667 // %{
12668 // peepmatch (addL_rReg_imm movL);
12669 // peepconstraint (0.dst == 1.dst);
12670 // peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
12671 // %}
12673 // peephole
12674 // %{
12675 // peepmatch (addP_rReg_imm movP);
12676 // peepconstraint (0.dst == 1.dst);
12677 // peepreplace (leaP_rReg_imm(0.dst 1.src 0.src));
12678 // %}
12680 // // Change load of spilled value to only a spill
12681 // instruct storeI(memory mem, rRegI src)
12682 // %{
12683 // match(Set mem (StoreI mem src));
12684 // %}
12685 //
12686 // instruct loadI(rRegI dst, memory mem)
12687 // %{
12688 // match(Set dst (LoadI mem));
12689 // %}
12690 //
// Peephole 0: a load of an int value that was just stored to the same
// memory from the same register is redundant - keep only the store.
12692 peephole
12693 %{
12694 peepmatch (loadI storeI);
12695 peepconstraint (1.src == 0.dst, 1.mem == 0.mem);
12696 peepreplace (storeI(1.mem 1.mem 1.src));
12697 %}
// Peephole 1: same store/load elimination as above, for long values.
12699 peephole
12700 %{
12701 peepmatch (loadL storeL);
12702 peepconstraint (1.src == 0.dst, 1.mem == 0.mem);
12703 peepreplace (storeL(1.mem 1.mem 1.src));
12704 %}
12706 //----------SMARTSPILL RULES---------------------------------------------------
12707 // These must follow all instruction definitions as they use the names
12708 // defined in the instructions definitions.