Thu, 29 May 2008 12:04:14 -0700
6705887: Compressed Oops: generate x64 addressing and implicit null checks with narrow oops
Summary: Generate addresses and implicit null checks with narrow oops to avoid decoding.
Reviewed-by: jrose, never
1 //
2 // Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 //
5 // This code is free software; you can redistribute it and/or modify it
6 // under the terms of the GNU General Public License version 2 only, as
7 // published by the Free Software Foundation.
8 //
9 // This code is distributed in the hope that it will be useful, but WITHOUT
10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 // version 2 for more details (a copy is included in the LICENSE file that
13 // accompanied this code).
14 //
15 // You should have received a copy of the GNU General Public License version
16 // 2 along with this work; if not, write to the Free Software Foundation,
17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 //
19 // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 // CA 95054 USA or visit www.sun.com if you need additional information or
21 // have any questions.
22 //
23 //
25 // AMD64 Architecture Description File
27 //----------REGISTER DEFINITION BLOCK------------------------------------------
28 // This information is used by the matcher and the register allocator to
29 // describe individual registers and classes of registers within the target
30 // architecture.
32 register %{
33 //----------Architecture Description Register Definitions----------------------
34 // General Registers
35 // "reg_def" name ( register save type, C convention save type,
36 // ideal register type, encoding );
37 // Register Save Types:
38 //
39 // NS = No-Save: The register allocator assumes that these registers
40 // can be used without saving upon entry to the method, &
41 // that they do not need to be saved at call sites.
42 //
43 // SOC = Save-On-Call: The register allocator assumes that these registers
44 // can be used without saving upon entry to the method,
45 // but that they must be saved at call sites.
46 //
47 // SOE = Save-On-Entry: The register allocator assumes that these registers
48 // must be saved before using them upon entry to the
49 // method, but they do not need to be saved at call
50 // sites.
51 //
52 // AS = Always-Save: The register allocator assumes that these registers
53 // must be saved before using them upon entry to the
54 // method, & that they must be saved at call sites.
55 //
56 // Ideal Register Type is used to determine how to save & restore a
57 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
58 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
59 //
60 // The encoding number is the actual bit-pattern placed into the opcodes.
62 // General Registers
63 // R8-R15 must be encoded with REX. (RSP, RBP, RSI, RDI need REX when
64 // used as byte registers)
66 // Previously set RBX, RSI, and RDI as save-on-entry for java code
67 // Turn off SOE in java-code due to frequent use of uncommon-traps.
68 // Now that allocator is better, turn on RSI and RDI as SOE registers.
70 reg_def RAX (SOC, SOC, Op_RegI, 0, rax->as_VMReg());
71 reg_def RAX_H(SOC, SOC, Op_RegI, 0, rax->as_VMReg()->next());
73 reg_def RCX (SOC, SOC, Op_RegI, 1, rcx->as_VMReg());
74 reg_def RCX_H(SOC, SOC, Op_RegI, 1, rcx->as_VMReg()->next());
76 reg_def RDX (SOC, SOC, Op_RegI, 2, rdx->as_VMReg());
77 reg_def RDX_H(SOC, SOC, Op_RegI, 2, rdx->as_VMReg()->next());
79 reg_def RBX (SOC, SOE, Op_RegI, 3, rbx->as_VMReg());
80 reg_def RBX_H(SOC, SOE, Op_RegI, 3, rbx->as_VMReg()->next());
82 reg_def RSP (NS, NS, Op_RegI, 4, rsp->as_VMReg());
83 reg_def RSP_H(NS, NS, Op_RegI, 4, rsp->as_VMReg()->next());
85 // now that adapter frames are gone RBP is always saved and restored by the prolog/epilog code
86 reg_def RBP (NS, SOE, Op_RegI, 5, rbp->as_VMReg());
87 reg_def RBP_H(NS, SOE, Op_RegI, 5, rbp->as_VMReg()->next());
89 #ifdef _WIN64
91 reg_def RSI (SOC, SOE, Op_RegI, 6, rsi->as_VMReg());
92 reg_def RSI_H(SOC, SOE, Op_RegI, 6, rsi->as_VMReg()->next());
94 reg_def RDI (SOC, SOE, Op_RegI, 7, rdi->as_VMReg());
95 reg_def RDI_H(SOC, SOE, Op_RegI, 7, rdi->as_VMReg()->next());
97 #else
99 reg_def RSI (SOC, SOC, Op_RegI, 6, rsi->as_VMReg());
100 reg_def RSI_H(SOC, SOC, Op_RegI, 6, rsi->as_VMReg()->next());
102 reg_def RDI (SOC, SOC, Op_RegI, 7, rdi->as_VMReg());
103 reg_def RDI_H(SOC, SOC, Op_RegI, 7, rdi->as_VMReg()->next());
105 #endif
107 reg_def R8 (SOC, SOC, Op_RegI, 8, r8->as_VMReg());
108 reg_def R8_H (SOC, SOC, Op_RegI, 8, r8->as_VMReg()->next());
110 reg_def R9 (SOC, SOC, Op_RegI, 9, r9->as_VMReg());
111 reg_def R9_H (SOC, SOC, Op_RegI, 9, r9->as_VMReg()->next());
113 reg_def R10 (SOC, SOC, Op_RegI, 10, r10->as_VMReg());
114 reg_def R10_H(SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
116 reg_def R11 (SOC, SOC, Op_RegI, 11, r11->as_VMReg());
117 reg_def R11_H(SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
119 reg_def R12 (SOC, SOE, Op_RegI, 12, r12->as_VMReg());
120 reg_def R12_H(SOC, SOE, Op_RegI, 12, r12->as_VMReg()->next());
122 reg_def R13 (SOC, SOE, Op_RegI, 13, r13->as_VMReg());
123 reg_def R13_H(SOC, SOE, Op_RegI, 13, r13->as_VMReg()->next());
125 reg_def R14 (SOC, SOE, Op_RegI, 14, r14->as_VMReg());
126 reg_def R14_H(SOC, SOE, Op_RegI, 14, r14->as_VMReg()->next());
128 reg_def R15 (SOC, SOE, Op_RegI, 15, r15->as_VMReg());
129 reg_def R15_H(SOC, SOE, Op_RegI, 15, r15->as_VMReg()->next());
132 // Floating Point Registers
134 // XMM registers. 128-bit registers or 4 words each, labeled (a)-d.
135 // Word a in each register holds a Float, words ab hold a Double. We
136 // currently do not use the SIMD capabilities, so registers cd are
137 // unused at the moment.
138 // XMM8-XMM15 must be encoded with REX.
139 // Linux ABI: No register preserved across function calls
140 // XMM0-XMM7 might hold parameters
141 // Windows ABI: XMM6-XMM15 preserved across function calls
142 // XMM0-XMM3 might hold parameters
144 reg_def XMM0 (SOC, SOC, Op_RegF, 0, xmm0->as_VMReg());
145 reg_def XMM0_H (SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next());
147 reg_def XMM1 (SOC, SOC, Op_RegF, 1, xmm1->as_VMReg());
148 reg_def XMM1_H (SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next());
150 reg_def XMM2 (SOC, SOC, Op_RegF, 2, xmm2->as_VMReg());
151 reg_def XMM2_H (SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next());
153 reg_def XMM3 (SOC, SOC, Op_RegF, 3, xmm3->as_VMReg());
154 reg_def XMM3_H (SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next());
156 reg_def XMM4 (SOC, SOC, Op_RegF, 4, xmm4->as_VMReg());
157 reg_def XMM4_H (SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next());
159 reg_def XMM5 (SOC, SOC, Op_RegF, 5, xmm5->as_VMReg());
160 reg_def XMM5_H (SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next());
162 #ifdef _WIN64
164 reg_def XMM6 (SOC, SOE, Op_RegF, 6, xmm6->as_VMReg());
165 reg_def XMM6_H (SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next());
167 reg_def XMM7 (SOC, SOE, Op_RegF, 7, xmm7->as_VMReg());
168 reg_def XMM7_H (SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next());
170 reg_def XMM8 (SOC, SOE, Op_RegF, 8, xmm8->as_VMReg());
171 reg_def XMM8_H (SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next());
173 reg_def XMM9 (SOC, SOE, Op_RegF, 9, xmm9->as_VMReg());
174 reg_def XMM9_H (SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next());
176 reg_def XMM10 (SOC, SOE, Op_RegF, 10, xmm10->as_VMReg());
177 reg_def XMM10_H(SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next());
179 reg_def XMM11 (SOC, SOE, Op_RegF, 11, xmm11->as_VMReg());
180 reg_def XMM11_H(SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next());
182 reg_def XMM12 (SOC, SOE, Op_RegF, 12, xmm12->as_VMReg());
183 reg_def XMM12_H(SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next());
185 reg_def XMM13 (SOC, SOE, Op_RegF, 13, xmm13->as_VMReg());
186 reg_def XMM13_H(SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next());
188 reg_def XMM14 (SOC, SOE, Op_RegF, 14, xmm14->as_VMReg());
189 reg_def XMM14_H(SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next());
191 reg_def XMM15 (SOC, SOE, Op_RegF, 15, xmm15->as_VMReg());
192 reg_def XMM15_H(SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next());
194 #else
196 reg_def XMM6 (SOC, SOC, Op_RegF, 6, xmm6->as_VMReg());
197 reg_def XMM6_H (SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next());
199 reg_def XMM7 (SOC, SOC, Op_RegF, 7, xmm7->as_VMReg());
200 reg_def XMM7_H (SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next());
202 reg_def XMM8 (SOC, SOC, Op_RegF, 8, xmm8->as_VMReg());
203 reg_def XMM8_H (SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next());
205 reg_def XMM9 (SOC, SOC, Op_RegF, 9, xmm9->as_VMReg());
206 reg_def XMM9_H (SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next());
208 reg_def XMM10 (SOC, SOC, Op_RegF, 10, xmm10->as_VMReg());
209 reg_def XMM10_H(SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next());
211 reg_def XMM11 (SOC, SOC, Op_RegF, 11, xmm11->as_VMReg());
212 reg_def XMM11_H(SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next());
214 reg_def XMM12 (SOC, SOC, Op_RegF, 12, xmm12->as_VMReg());
215 reg_def XMM12_H(SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next());
217 reg_def XMM13 (SOC, SOC, Op_RegF, 13, xmm13->as_VMReg());
218 reg_def XMM13_H(SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next());
220 reg_def XMM14 (SOC, SOC, Op_RegF, 14, xmm14->as_VMReg());
221 reg_def XMM14_H(SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next());
223 reg_def XMM15 (SOC, SOC, Op_RegF, 15, xmm15->as_VMReg());
224 reg_def XMM15_H(SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next());
226 #endif // _WIN64
228 reg_def RFLAGS(SOC, SOC, 0, 16, VMRegImpl::Bad());
230 // Specify priority of register selection within phases of register
231 // allocation. Highest priority is first. A useful heuristic is to
232 // give registers a low priority when they are required by machine
233 // instructions, like EAX and EDX on I486, and choose no-save registers
234 // before save-on-call, & save-on-call before save-on-entry. Registers
235 // which participate in fixed calling sequences should come last.
236 // Registers which are used as pairs must fall on an even boundary.
238 alloc_class chunk0(R10, R10_H,
239 R11, R11_H,
240 R8, R8_H,
241 R9, R9_H,
242 R12, R12_H,
243 RCX, RCX_H,
244 RBX, RBX_H,
245 RDI, RDI_H,
246 RDX, RDX_H,
247 RSI, RSI_H,
248 RAX, RAX_H,
249 RBP, RBP_H,
250 R13, R13_H,
251 R14, R14_H,
252 R15, R15_H,
253 RSP, RSP_H);
255 // XXX probably use 8-15 first on Linux
256 alloc_class chunk1(XMM0, XMM0_H,
257 XMM1, XMM1_H,
258 XMM2, XMM2_H,
259 XMM3, XMM3_H,
260 XMM4, XMM4_H,
261 XMM5, XMM5_H,
262 XMM6, XMM6_H,
263 XMM7, XMM7_H,
264 XMM8, XMM8_H,
265 XMM9, XMM9_H,
266 XMM10, XMM10_H,
267 XMM11, XMM11_H,
268 XMM12, XMM12_H,
269 XMM13, XMM13_H,
270 XMM14, XMM14_H,
271 XMM15, XMM15_H);
273 alloc_class chunk2(RFLAGS);
276 //----------Architecture Description Register Classes--------------------------
277 // Several register classes are automatically defined based upon information in
278 // this architecture description.
279 // 1) reg_class inline_cache_reg ( /* as def'd in frame section */ )
280 // 2) reg_class compiler_method_oop_reg ( /* as def'd in frame section */ )
281 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
282 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
283 //
285 // Class for all pointer registers (including RSP)
286 reg_class any_reg(RAX, RAX_H,
287 RDX, RDX_H,
288 RBP, RBP_H,
289 RDI, RDI_H,
290 RSI, RSI_H,
291 RCX, RCX_H,
292 RBX, RBX_H,
293 RSP, RSP_H,
294 R8, R8_H,
295 R9, R9_H,
296 R10, R10_H,
297 R11, R11_H,
298 R12, R12_H,
299 R13, R13_H,
300 R14, R14_H,
301 R15, R15_H);
303 // Class for all pointer registers except RSP
304 reg_class ptr_reg(RAX, RAX_H,
305 RDX, RDX_H,
306 RBP, RBP_H,
307 RDI, RDI_H,
308 RSI, RSI_H,
309 RCX, RCX_H,
310 RBX, RBX_H,
311 R8, R8_H,
312 R9, R9_H,
313 R10, R10_H,
314 R11, R11_H,
315 R13, R13_H,
316 R14, R14_H);
318 // Class for all pointer registers except RAX and RSP
319 reg_class ptr_no_rax_reg(RDX, RDX_H,
320 RBP, RBP_H,
321 RDI, RDI_H,
322 RSI, RSI_H,
323 RCX, RCX_H,
324 RBX, RBX_H,
325 R8, R8_H,
326 R9, R9_H,
327 R10, R10_H,
328 R11, R11_H,
329 R12, R12_H,
330 R13, R13_H,
331 R14, R14_H);
333 reg_class ptr_no_rbp_reg(RDX, RDX_H,
334 RAX, RAX_H,
335 RDI, RDI_H,
336 RSI, RSI_H,
337 RCX, RCX_H,
338 RBX, RBX_H,
339 R8, R8_H,
340 R9, R9_H,
341 R10, R10_H,
342 R11, R11_H,
343 R12, R12_H,
344 R13, R13_H,
345 R14, R14_H);
347 // Class for all pointer registers except RAX, RBX and RSP
348 reg_class ptr_no_rax_rbx_reg(RDX, RDX_H,
349 RBP, RBP_H,
350 RDI, RDI_H,
351 RSI, RSI_H,
352 RCX, RCX_H,
353 R8, R8_H,
354 R9, R9_H,
355 R10, R10_H,
356 R11, R11_H,
357 R12, R12_H,
358 R13, R13_H,
359 R14, R14_H);
361 // Singleton class for RAX pointer register
362 reg_class ptr_rax_reg(RAX, RAX_H);
364 // Singleton class for RBX pointer register
365 reg_class ptr_rbx_reg(RBX, RBX_H);
367 // Singleton class for RSI pointer register
368 reg_class ptr_rsi_reg(RSI, RSI_H);
370 // Singleton class for RDI pointer register
371 reg_class ptr_rdi_reg(RDI, RDI_H);
373 // Singleton class for RBP pointer register
374 reg_class ptr_rbp_reg(RBP, RBP_H);
376 // Singleton class for stack pointer
377 reg_class ptr_rsp_reg(RSP, RSP_H);
379 // Singleton class for TLS pointer
380 reg_class ptr_r15_reg(R15, R15_H);
382 // Class for all long registers (except RSP)
383 reg_class long_reg(RAX, RAX_H,
384 RDX, RDX_H,
385 RBP, RBP_H,
386 RDI, RDI_H,
387 RSI, RSI_H,
388 RCX, RCX_H,
389 RBX, RBX_H,
390 R8, R8_H,
391 R9, R9_H,
392 R10, R10_H,
393 R11, R11_H,
394 R13, R13_H,
395 R14, R14_H);
397 // Class for all long registers except RAX, RDX (and RSP)
398 reg_class long_no_rax_rdx_reg(RBP, RBP_H,
399 RDI, RDI_H,
400 RSI, RSI_H,
401 RCX, RCX_H,
402 RBX, RBX_H,
403 R8, R8_H,
404 R9, R9_H,
405 R10, R10_H,
406 R11, R11_H,
407 R13, R13_H,
408 R14, R14_H);
410 // Class for all long registers except RCX (and RSP)
411 reg_class long_no_rcx_reg(RBP, RBP_H,
412 RDI, RDI_H,
413 RSI, RSI_H,
414 RAX, RAX_H,
415 RDX, RDX_H,
416 RBX, RBX_H,
417 R8, R8_H,
418 R9, R9_H,
419 R10, R10_H,
420 R11, R11_H,
421 R13, R13_H,
422 R14, R14_H);
424 // Class for all long registers except RAX (and RSP)
425 reg_class long_no_rax_reg(RBP, RBP_H,
426 RDX, RDX_H,
427 RDI, RDI_H,
428 RSI, RSI_H,
429 RCX, RCX_H,
430 RBX, RBX_H,
431 R8, R8_H,
432 R9, R9_H,
433 R10, R10_H,
434 R11, R11_H,
435 R13, R13_H,
436 R14, R14_H);
438 // Singleton class for RAX long register
439 reg_class long_rax_reg(RAX, RAX_H);
441 // Singleton class for RCX long register
442 reg_class long_rcx_reg(RCX, RCX_H);
444 // Singleton class for RDX long register
445 reg_class long_rdx_reg(RDX, RDX_H);
447 // Singleton class for R12 long register
448 reg_class long_r12_reg(R12, R12_H);
450 // Class for all int registers (except RSP)
451 reg_class int_reg(RAX,
452 RDX,
453 RBP,
454 RDI,
455 RSI,
456 RCX,
457 RBX,
458 R8,
459 R9,
460 R10,
461 R11,
462 R13,
463 R14);
465 // Class for all int registers except RCX (and RSP)
466 reg_class int_no_rcx_reg(RAX,
467 RDX,
468 RBP,
469 RDI,
470 RSI,
471 RBX,
472 R8,
473 R9,
474 R10,
475 R11,
476 R13,
477 R14);
479 // Class for all int registers except RAX, RDX (and RSP)
480 reg_class int_no_rax_rdx_reg(RBP,
481 RDI,
482 RSI,
483 RCX,
484 RBX,
485 R8,
486 R9,
487 R10,
488 R11,
489 R13,
490 R14);
492 // Singleton class for RAX int register
493 reg_class int_rax_reg(RAX);
495 // Singleton class for RBX int register
496 reg_class int_rbx_reg(RBX);
498 // Singleton class for RCX int register
499 reg_class int_rcx_reg(RCX);
501 // Singleton class for RDX int register
502 reg_class int_rdx_reg(RDX);
504 // Singleton class for RDI int register
505 reg_class int_rdi_reg(RDI);
507 // Singleton class for instruction pointer
508 // reg_class ip_reg(RIP);
510 // Singleton class for condition codes
511 reg_class int_flags(RFLAGS);
513 // Class for all float registers
514 reg_class float_reg(XMM0,
515 XMM1,
516 XMM2,
517 XMM3,
518 XMM4,
519 XMM5,
520 XMM6,
521 XMM7,
522 XMM8,
523 XMM9,
524 XMM10,
525 XMM11,
526 XMM12,
527 XMM13,
528 XMM14,
529 XMM15);
531 // Class for all double registers
532 reg_class double_reg(XMM0, XMM0_H,
533 XMM1, XMM1_H,
534 XMM2, XMM2_H,
535 XMM3, XMM3_H,
536 XMM4, XMM4_H,
537 XMM5, XMM5_H,
538 XMM6, XMM6_H,
539 XMM7, XMM7_H,
540 XMM8, XMM8_H,
541 XMM9, XMM9_H,
542 XMM10, XMM10_H,
543 XMM11, XMM11_H,
544 XMM12, XMM12_H,
545 XMM13, XMM13_H,
546 XMM14, XMM14_H,
547 XMM15, XMM15_H);
548 %}
551 //----------SOURCE BLOCK-------------------------------------------------------
552 // This is a block of C++ code which provides values, functions, and
553 // definitions necessary in the rest of the architecture description
554 source %{
555 #define RELOC_IMM64 Assembler::imm64_operand
556 #define RELOC_DISP32 Assembler::disp32_operand
558 #define __ _masm.
560 // !!!!! Special hack to get all types of calls to specify the byte offset
561 // from the start of the call to the point where the return address
562 // will point.
563 int MachCallStaticJavaNode::ret_addr_offset()
564 {
565 return 5; // 5 bytes from start of call to where return address points
566 }
568 int MachCallDynamicJavaNode::ret_addr_offset()
569 {
570 return 15; // 15 bytes from start of call to where return address points
571 }
573 // In os_cpu .ad file
574 // int MachCallRuntimeNode::ret_addr_offset()
576 // Indicate if the safepoint node needs the polling page as an input.
577 // Since amd64 does not have absolute addressing but RIP-relative
578 // addressing and the polling page is within 2G, it doesn't.
579 bool SafePointNode::needs_polling_address_input()
580 {
581 return false;
582 }
584 //
585 // Compute padding required for nodes which need alignment
586 //
588 // The address of the call instruction needs to be 4-byte aligned to
589 // ensure that it does not span a cache line so that it can be patched.
590 int CallStaticJavaDirectNode::compute_padding(int current_offset) const
591 {
592 current_offset += 1; // skip call opcode byte
593 return round_to(current_offset, alignment_required()) - current_offset;
594 }
596 // The address of the call instruction needs to be 4-byte aligned to
597 // ensure that it does not span a cache line so that it can be patched.
598 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
599 {
600 current_offset += 11; // skip movq instruction + call opcode byte
601 return round_to(current_offset, alignment_required()) - current_offset;
602 }
604 #ifndef PRODUCT
605 void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const
606 {
607 st->print("INT3");
608 }
609 #endif
611 // EMIT_RM()
612 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3)
613 {
614 unsigned char c = (unsigned char) ((f1 << 6) | (f2 << 3) | f3);
615 *(cbuf.code_end()) = c;
616 cbuf.set_code_end(cbuf.code_end() + 1);
617 }
619 // EMIT_CC()
620 void emit_cc(CodeBuffer &cbuf, int f1, int f2)
621 {
622 unsigned char c = (unsigned char) (f1 | f2);
623 *(cbuf.code_end()) = c;
624 cbuf.set_code_end(cbuf.code_end() + 1);
625 }
627 // EMIT_OPCODE()
628 void emit_opcode(CodeBuffer &cbuf, int code)
629 {
630 *(cbuf.code_end()) = (unsigned char) code;
631 cbuf.set_code_end(cbuf.code_end() + 1);
632 }
634 // EMIT_OPCODE() w/ relocation information
635 void emit_opcode(CodeBuffer &cbuf,
636 int code, relocInfo::relocType reloc, int offset, int format)
637 {
638 cbuf.relocate(cbuf.inst_mark() + offset, reloc, format);
639 emit_opcode(cbuf, code);
640 }
642 // EMIT_D8()
643 void emit_d8(CodeBuffer &cbuf, int d8)
644 {
645 *(cbuf.code_end()) = (unsigned char) d8;
646 cbuf.set_code_end(cbuf.code_end() + 1);
647 }
649 // EMIT_D16()
650 void emit_d16(CodeBuffer &cbuf, int d16)
651 {
652 *((short *)(cbuf.code_end())) = d16;
653 cbuf.set_code_end(cbuf.code_end() + 2);
654 }
656 // EMIT_D32()
657 void emit_d32(CodeBuffer &cbuf, int d32)
658 {
659 *((int *)(cbuf.code_end())) = d32;
660 cbuf.set_code_end(cbuf.code_end() + 4);
661 }
663 // EMIT_D64()
664 void emit_d64(CodeBuffer &cbuf, int64_t d64)
665 {
666 *((int64_t*) (cbuf.code_end())) = d64;
667 cbuf.set_code_end(cbuf.code_end() + 8);
668 }
670 // emit 32 bit value and construct relocation entry from relocInfo::relocType
671 void emit_d32_reloc(CodeBuffer& cbuf,
672 int d32,
673 relocInfo::relocType reloc,
674 int format)
675 {
676 assert(reloc != relocInfo::external_word_type, "use 2-arg emit_d32_reloc");
677 cbuf.relocate(cbuf.inst_mark(), reloc, format);
679 *((int*) (cbuf.code_end())) = d32;
680 cbuf.set_code_end(cbuf.code_end() + 4);
681 }
683 // emit 32 bit value and construct relocation entry from RelocationHolder
684 void emit_d32_reloc(CodeBuffer& cbuf,
685 int d32,
686 RelocationHolder const& rspec,
687 int format)
688 {
689 #ifdef ASSERT
690 if (rspec.reloc()->type() == relocInfo::oop_type &&
691 d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
692 assert(oop((intptr_t)d32)->is_oop() && oop((intptr_t)d32)->is_perm(), "cannot embed non-perm oops in code");
693 }
694 #endif
695 cbuf.relocate(cbuf.inst_mark(), rspec, format);
697 *((int* )(cbuf.code_end())) = d32;
698 cbuf.set_code_end(cbuf.code_end() + 4);
699 }
701 void emit_d32_reloc(CodeBuffer& cbuf, address addr) {
702 address next_ip = cbuf.code_end() + 4;
703 emit_d32_reloc(cbuf, (int) (addr - next_ip),
704 external_word_Relocation::spec(addr),
705 RELOC_DISP32);
706 }
709 // emit 64 bit value and construct relocation entry from relocInfo::relocType
710 void emit_d64_reloc(CodeBuffer& cbuf,
711 int64_t d64,
712 relocInfo::relocType reloc,
713 int format)
714 {
715 cbuf.relocate(cbuf.inst_mark(), reloc, format);
717 *((int64_t*) (cbuf.code_end())) = d64;
718 cbuf.set_code_end(cbuf.code_end() + 8);
719 }
721 // emit 64 bit value and construct relocation entry from RelocationHolder
722 void emit_d64_reloc(CodeBuffer& cbuf,
723 int64_t d64,
724 RelocationHolder const& rspec,
725 int format)
726 {
727 #ifdef ASSERT
728 if (rspec.reloc()->type() == relocInfo::oop_type &&
729 d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
730 assert(oop(d64)->is_oop() && oop(d64)->is_perm(),
731 "cannot embed non-perm oops in code");
732 }
733 #endif
734 cbuf.relocate(cbuf.inst_mark(), rspec, format);
736 *((int64_t*) (cbuf.code_end())) = d64;
737 cbuf.set_code_end(cbuf.code_end() + 8);
738 }
740 // Access stack slot for load or store
741 void store_to_stackslot(CodeBuffer &cbuf, int opcode, int rm_field, int disp)
742 {
743 emit_opcode(cbuf, opcode); // (e.g., FILD [RSP+src])
744 if (-0x80 <= disp && disp < 0x80) {
745 emit_rm(cbuf, 0x01, rm_field, RSP_enc); // R/M byte
746 emit_rm(cbuf, 0x00, RSP_enc, RSP_enc); // SIB byte
747 emit_d8(cbuf, disp); // Displacement // R/M byte
748 } else {
749 emit_rm(cbuf, 0x02, rm_field, RSP_enc); // R/M byte
750 emit_rm(cbuf, 0x00, RSP_enc, RSP_enc); // SIB byte
751 emit_d32(cbuf, disp); // Displacement // R/M byte
752 }
753 }
755 // rRegI ereg, memory mem) %{ // emit_reg_mem
756 void encode_RegMem(CodeBuffer &cbuf,
757 int reg,
758 int base, int index, int scale, int disp, bool disp_is_oop)
759 {
760 assert(!disp_is_oop, "cannot have disp");
761 int regenc = reg & 7;
762 int baseenc = base & 7;
763 int indexenc = index & 7;
765 // There is no index & no scale, use form without SIB byte
766 if (index == 0x4 && scale == 0 && base != RSP_enc && base != R12_enc) {
767 // If no displacement, mode is 0x0; unless base is [RBP] or [R13]
768 if (disp == 0 && base != RBP_enc && base != R13_enc) {
769 emit_rm(cbuf, 0x0, regenc, baseenc); // *
770 } else if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
771 // If 8-bit displacement, mode 0x1
772 emit_rm(cbuf, 0x1, regenc, baseenc); // *
773 emit_d8(cbuf, disp);
774 } else {
775 // If 32-bit displacement
776 if (base == -1) { // Special flag for absolute address
777 emit_rm(cbuf, 0x0, regenc, 0x5); // *
778 if (disp_is_oop) {
779 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
780 } else {
781 emit_d32(cbuf, disp);
782 }
783 } else {
784 // Normal base + offset
785 emit_rm(cbuf, 0x2, regenc, baseenc); // *
786 if (disp_is_oop) {
787 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
788 } else {
789 emit_d32(cbuf, disp);
790 }
791 }
792 }
793 } else {
794 // Else, encode with the SIB byte
795 // If no displacement, mode is 0x0; unless base is [RBP] or [R13]
796 if (disp == 0 && base != RBP_enc && base != R13_enc) {
797 // If no displacement
798 emit_rm(cbuf, 0x0, regenc, 0x4); // *
799 emit_rm(cbuf, scale, indexenc, baseenc);
800 } else {
801 if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
802 // If 8-bit displacement, mode 0x1
803 emit_rm(cbuf, 0x1, regenc, 0x4); // *
804 emit_rm(cbuf, scale, indexenc, baseenc);
805 emit_d8(cbuf, disp);
806 } else {
807 // If 32-bit displacement
808 if (base == 0x04 ) {
809 emit_rm(cbuf, 0x2, regenc, 0x4);
810 emit_rm(cbuf, scale, indexenc, 0x04); // XXX is this valid???
811 } else {
812 emit_rm(cbuf, 0x2, regenc, 0x4);
813 emit_rm(cbuf, scale, indexenc, baseenc); // *
814 }
815 if (disp_is_oop) {
816 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
817 } else {
818 emit_d32(cbuf, disp);
819 }
820 }
821 }
822 }
823 }
825 void encode_copy(CodeBuffer &cbuf, int dstenc, int srcenc)
826 {
827 if (dstenc != srcenc) {
828 if (dstenc < 8) {
829 if (srcenc >= 8) {
830 emit_opcode(cbuf, Assembler::REX_B);
831 srcenc -= 8;
832 }
833 } else {
834 if (srcenc < 8) {
835 emit_opcode(cbuf, Assembler::REX_R);
836 } else {
837 emit_opcode(cbuf, Assembler::REX_RB);
838 srcenc -= 8;
839 }
840 dstenc -= 8;
841 }
843 emit_opcode(cbuf, 0x8B);
844 emit_rm(cbuf, 0x3, dstenc, srcenc);
845 }
846 }
848 void encode_CopyXD( CodeBuffer &cbuf, int dst_encoding, int src_encoding ) {
849 if( dst_encoding == src_encoding ) {
850 // reg-reg copy, use an empty encoding
851 } else {
852 MacroAssembler _masm(&cbuf);
854 __ movdqa(as_XMMRegister(dst_encoding), as_XMMRegister(src_encoding));
855 }
856 }
859 //=============================================================================
860 #ifndef PRODUCT
861 void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const
862 {
863 Compile* C = ra_->C;
865 int framesize = C->frame_slots() << LogBytesPerInt;
866 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
867 // Remove wordSize for return adr already pushed
868 // and another for the RBP we are going to save
869 framesize -= 2*wordSize;
870 bool need_nop = true;
872 // Calls to C2R adapters often do not accept exceptional returns.
873 // We require that their callers must bang for them. But be
874 // careful, because some VM calls (such as call site linkage) can
875 // use several kilobytes of stack. But the stack safety zone should
876 // account for that. See bugs 4446381, 4468289, 4497237.
877 if (C->need_stack_bang(framesize)) {
878 st->print_cr("# stack bang"); st->print("\t");
879 need_nop = false;
880 }
881 st->print_cr("pushq rbp"); st->print("\t");
883 if (VerifyStackAtCalls) {
884 // Majik cookie to verify stack depth
885 st->print_cr("pushq 0xffffffffbadb100d"
886 "\t# Majik cookie for stack depth check");
887 st->print("\t");
888 framesize -= wordSize; // Remove 2 for cookie
889 need_nop = false;
890 }
892 if (framesize) {
893 st->print("subq rsp, #%d\t# Create frame", framesize);
894 if (framesize < 0x80 && need_nop) {
895 st->print("\n\tnop\t# nop for patch_verified_entry");
896 }
897 }
898 }
899 #endif
901 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
902 {
903 Compile* C = ra_->C;
905 // WARNING: Initial instruction MUST be 5 bytes or longer so that
906 // NativeJump::patch_verified_entry will be able to patch out the entry
907 // code safely. The fldcw is ok at 6 bytes, the push to verify stack
908 // depth is ok at 5 bytes, the frame allocation can be either 3 or
909 // 6 bytes. So if we don't do the fldcw or the push then we must
910 // use the 6 byte frame allocation even if we have no frame. :-(
911 // If method sets FPU control word do it now
913 int framesize = C->frame_slots() << LogBytesPerInt;
914 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
915 // Remove wordSize for return adr already pushed
916 // and another for the RBP we are going to save
917 framesize -= 2*wordSize;
918 bool need_nop = true;
920 // Calls to C2R adapters often do not accept exceptional returns.
921 // We require that their callers must bang for them. But be
922 // careful, because some VM calls (such as call site linkage) can
923 // use several kilobytes of stack. But the stack safety zone should
924 // account for that. See bugs 4446381, 4468289, 4497237.
925 if (C->need_stack_bang(framesize)) {
926 MacroAssembler masm(&cbuf);
927 masm.generate_stack_overflow_check(framesize);
928 need_nop = false;
929 }
931 // We always push rbp so that on return to interpreter rbp will be
932 // restored correctly and we can correct the stack.
933 emit_opcode(cbuf, 0x50 | RBP_enc);
935 if (VerifyStackAtCalls) {
936 // Majik cookie to verify stack depth
937 emit_opcode(cbuf, 0x68); // pushq (sign-extended) 0xbadb100d
938 emit_d32(cbuf, 0xbadb100d);
939 framesize -= wordSize; // Remove 2 for cookie
940 need_nop = false;
941 }
943 if (framesize) {
944 emit_opcode(cbuf, Assembler::REX_W);
945 if (framesize < 0x80) {
946 emit_opcode(cbuf, 0x83); // sub SP,#framesize
947 emit_rm(cbuf, 0x3, 0x05, RSP_enc);
948 emit_d8(cbuf, framesize);
949 if (need_nop) {
950 emit_opcode(cbuf, 0x90); // nop
951 }
952 } else {
953 emit_opcode(cbuf, 0x81); // sub SP,#framesize
954 emit_rm(cbuf, 0x3, 0x05, RSP_enc);
955 emit_d32(cbuf, framesize);
956 }
957 }
959 C->set_frame_complete(cbuf.code_end() - cbuf.code_begin());
961 #ifdef ASSERT
962 if (VerifyStackAtCalls) {
963 Label L;
964 MacroAssembler masm(&cbuf);
965 masm.pushq(rax);
966 masm.movq(rax, rsp);
967 masm.andq(rax, StackAlignmentInBytes-1);
968 masm.cmpq(rax, StackAlignmentInBytes-wordSize);
969 masm.popq(rax);
970 masm.jcc(Assembler::equal, L);
971 masm.stop("Stack is not properly aligned!");
972 masm.bind(L);
973 }
974 #endif
975 }
977 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
978 {
979 return MachNode::size(ra_); // too many variables; just compute it
980 // the hard way
981 }
983 int MachPrologNode::reloc() const
984 {
985 return 0; // a large enough number
986 }
988 //=============================================================================
989 #ifndef PRODUCT
990 void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
991 {
992 Compile* C = ra_->C;
993 int framesize = C->frame_slots() << LogBytesPerInt;
994 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
995 // Remove word for return adr already pushed
996 // and RBP
997 framesize -= 2*wordSize;
999 if (framesize) {
1000 st->print_cr("addq\trsp, %d\t# Destroy frame", framesize);
1001 st->print("\t");
1002 }
1004 st->print_cr("popq\trbp");
1005 if (do_polling() && C->is_method_compilation()) {
1006 st->print_cr("\ttestl\trax, [rip + #offset_to_poll_page]\t"
1007 "# Safepoint: poll for GC");
1008 st->print("\t");
1009 }
1010 }
1011 #endif
// Emit the epilog: addq rsp,#framesize (imm8 or imm32 form), popq rbp,
// and optionally a RIP-relative safepoint poll read.  Byte counts here
// must stay in sync with MachEpilogNode::size().
void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
  // Remove word for return adr already pushed
  // and RBP
  framesize -= 2*wordSize;

  // Note that VerifyStackAtCalls' Majik cookie does not change the frame size popped here

  if (framesize) {
    emit_opcode(cbuf, Assembler::REX_W);
    if (framesize < 0x80) {
      // short form: sign-extended 8-bit immediate
      emit_opcode(cbuf, 0x83); // addq rsp, #framesize
      emit_rm(cbuf, 0x3, 0x00, RSP_enc);
      emit_d8(cbuf, framesize);
    } else {
      // long form: 32-bit immediate
      emit_opcode(cbuf, 0x81); // addq rsp, #framesize
      emit_rm(cbuf, 0x3, 0x00, RSP_enc);
      emit_d32(cbuf, framesize);
    }
  }

  // popq rbp
  emit_opcode(cbuf, 0x58 | RBP_enc);

  if (do_polling() && C->is_method_compilation()) {
    // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes
    // XXX reg_mem doesn't support RIP-relative addressing yet
    cbuf.set_inst_mark();
    cbuf.relocate(cbuf.inst_mark(), relocInfo::poll_return_type, 0); // XXX
    emit_opcode(cbuf, 0x85); // testl
    emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5
    // cbuf.inst_mark() is beginning of instruction
    emit_d32_reloc(cbuf, os::get_polling_page());
    //                    relocInfo::poll_return_type,
  }
}
1053 uint MachEpilogNode::size(PhaseRegAlloc* ra_) const
1054 {
1055 Compile* C = ra_->C;
1056 int framesize = C->frame_slots() << LogBytesPerInt;
1057 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1058 // Remove word for return adr already pushed
1059 // and RBP
1060 framesize -= 2*wordSize;
1062 uint size = 0;
1064 if (do_polling() && C->is_method_compilation()) {
1065 size += 6;
1066 }
1068 // count popq rbp
1069 size++;
1071 if (framesize) {
1072 if (framesize < 0x80) {
1073 size += 4;
1074 } else if (framesize) {
1075 size += 7;
1076 }
1077 }
1079 return size;
1080 }
// Number of relocation entries the epilog may emit (the safepoint poll).
int MachEpilogNode::reloc() const
{
  return 2; // a large enough number
}

// Epilogs use the default pipeline class.
const Pipeline* MachEpilogNode::pipeline() const
{
  return MachNode::pipeline_class();
}

// Offset of the safepoint poll within the epilog node; none needed here.
int MachEpilogNode::safepoint_offset() const
{
  return 0;
}
//=============================================================================

// Register class of an OptoReg; selects the spill-copy strategy in
// MachSpillCopyNode::implementation() below.
enum RC {
  rc_bad,   // not a valid register
  rc_int,   // general-purpose register
  rc_float, // XMM register
  rc_stack  // stack slot
};
1106 static enum RC rc_class(OptoReg::Name reg)
1107 {
1108 if( !OptoReg::is_valid(reg) ) return rc_bad;
1110 if (OptoReg::is_stack(reg)) return rc_stack;
1112 VMReg r = OptoReg::as_VMReg(reg);
1114 if (r->is_Register()) return rc_int;
1116 assert(r->is_XMMRegister(), "must be");
1117 return rc_float;
1118 }
// Shared worker for MachSpillCopyNode::format/emit/size.  Exactly one mode
// is active per call:
//   - cbuf != NULL:             emit the machine code into *cbuf
//   - cbuf == NULL && !do_size: print assembly text to st (non-PRODUCT only)
//   - cbuf == NULL && do_size:  compute size only
// In every mode the return value is the byte size of the copy sequence, so
// the size expressions below must match the bytes emitted exactly.
uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
                                       PhaseRegAlloc* ra_,
                                       bool do_size,
                                       outputStream* st) const
{
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this);
  OptoReg::Name dst_first = ra_->get_reg_first(this);

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first),
         "must move at least 1 register" );

  if (src_first == dst_first && src_second == dst_second) {
    // Self copy, no move
    return 0;
  } else if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem
      assert(src_second != dst_first, "overlap");
      // "even first half with adjacent second half" means one 64-bit value
      // (same test is used for every 64-bit case below).
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          // pushq [rsp + src] (FF /6, RSI_enc supplies the /6 extension)
          emit_opcode(*cbuf, 0xFF);
          encode_RegMem(*cbuf, RSI_enc, RSP_enc, 0x4, 0, src_offset, false);

          // popq [rsp + dst] (8F /0)
          emit_opcode(*cbuf, 0x8F);
          encode_RegMem(*cbuf, RAX_enc, RSP_enc, 0x4, 0, dst_offset, false);

#ifndef PRODUCT
        } else if (!do_size) {
          st->print("pushq   [rsp + #%d]\t# 64-bit mem-mem spill\n\t"
                    "popq    [rsp + #%d]",
                     src_offset,
                     dst_offset);
#endif
        }
        // Disp size is 0 (no disp), 1 (disp8) or 4 (disp32) -- must agree
        // with the encoding choice made in encode_RegMem().
        return
          3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) +
          3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 1 : 4));
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          // Save rax below rsp as scratch: movq [rsp - #8], rax
          emit_opcode(*cbuf, Assembler::REX_W);
          emit_opcode(*cbuf, 0x89);
          emit_opcode(*cbuf, 0x44);
          emit_opcode(*cbuf, 0x24);
          emit_opcode(*cbuf, 0xF8);

          // movl rax, [rsp + src]
          emit_opcode(*cbuf, 0x8B);
          encode_RegMem(*cbuf,
                        RAX_enc,
                        RSP_enc, 0x4, 0, src_offset,
                        false);

          // movl [rsp + dst], rax
          emit_opcode(*cbuf, 0x89);
          encode_RegMem(*cbuf,
                        RAX_enc,
                        RSP_enc, 0x4, 0, dst_offset,
                        false);

          // Restore scratch: movq rax, [rsp - #8]
          emit_opcode(*cbuf, Assembler::REX_W);
          emit_opcode(*cbuf, 0x8B);
          emit_opcode(*cbuf, 0x44);
          emit_opcode(*cbuf, 0x24);
          emit_opcode(*cbuf, 0xF8);

#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq    [rsp - #8], rax\t# 32-bit mem-mem spill\n\t"
                    "movl    rax, [rsp + #%d]\n\t"
                    "movl    [rsp + #%d], rax\n\t"
                    "movq    rax, [rsp - #8]",
                     src_offset,
                     dst_offset);
#endif
        }
        return
          5 + // movq
          3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) + // movl
          3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 1 : 4)) + // movl
          5; // movq
      }
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          // REX.W, plus REX.R if the destination is r8..r15
          if (Matcher::_regEncode[dst_first] < 8) {
            emit_opcode(*cbuf, Assembler::REX_W);
          } else {
            emit_opcode(*cbuf, Assembler::REX_WR);
          }
          emit_opcode(*cbuf, 0x8B);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq    %s, [rsp + #%d]\t# spill",
                     Matcher::regName[dst_first],
                     offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + 4; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          if (Matcher::_regEncode[dst_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x8B);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movl    %s, [rsp + #%d]\t# spill",
                     Matcher::regName[dst_first],
                     offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[dst_first] < 8)
           ? 3
           : 4); // REX
      }
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit: movsd (F2 0F 10) or movlpd (66 0F 12)
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          emit_opcode(*cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
          if (Matcher::_regEncode[dst_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("%s  %s, [rsp + #%d]\t# spill",
                     UseXmmLoadAndClearUpper ? "movsd " : "movlpd",
                     Matcher::regName[dst_first],
                     offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[dst_first] < 8)
           ? 5
           : 6); // REX
      } else {
        // 32-bit: movss (F3 0F 10)
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          emit_opcode(*cbuf, 0xF3);
          if (Matcher::_regEncode[dst_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x10);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movss   %s, [rsp + #%d]\t# spill",
                     Matcher::regName[dst_first],
                     offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[dst_first] < 8)
           ? 5
           : 6); // REX
      }
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          if (Matcher::_regEncode[src_first] < 8) {
            emit_opcode(*cbuf, Assembler::REX_W);
          } else {
            emit_opcode(*cbuf, Assembler::REX_WR);
          }
          emit_opcode(*cbuf, 0x89);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq    [rsp + #%d], %s\t# spill",
                     offset,
                     Matcher::regName[src_first]);
#endif
        }
        return ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + 4; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          if (Matcher::_regEncode[src_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x89);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movl    [rsp + #%d], %s\t# spill",
                     offset,
                     Matcher::regName[src_first]);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[src_first] < 8)
           ? 3
           : 4); // REX
      }
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          // Pick the REX prefix by which of dst/src are extended registers.
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_W);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WB);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_WR);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WRB);
            }
          }
          emit_opcode(*cbuf, 0x8B);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq    %s, %s\t# spill",
                     Matcher::regName[dst_first],
                     Matcher::regName[src_first]);
#endif
        }
        return 3; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x8B);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movl    %s, %s\t# spill",
                     Matcher::regName[dst_first],
                     Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 2
          : 3; // REX
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit: movdq (66 REX.W 0F 6E)
        if (cbuf) {
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_W);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WB);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_WR);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WRB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x6E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdq   %s, %s\t# spill",
                     Matcher::regName[dst_first],
                     Matcher::regName[src_first]);
#endif
        }
        return 5; // REX
      } else {
        // 32-bit: movdl (66 0F 6E)
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x6E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdl   %s, %s\t# spill",
                     Matcher::regName[dst_first],
                     Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 4
          : 5; // REX
      }
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit: movsd store (F2 0F 11)
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          emit_opcode(*cbuf, 0xF2);
          if (Matcher::_regEncode[src_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x11);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movsd   [rsp + #%d], %s\t# spill",
                     offset,
                     Matcher::regName[src_first]);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[src_first] < 8)
           ? 5
           : 6); // REX
      } else {
        // 32-bit: movss store (F3 0F 11)
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          emit_opcode(*cbuf, 0xF3);
          if (Matcher::_regEncode[src_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x11);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movss   [rsp + #%d], %s\t# spill",
                     offset,
                     Matcher::regName[src_first]);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[src_first] < 8)
           ? 5
           : 6); // REX
      }
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit: movdq xmm->gpr (66 REX.W 0F 7E).  Note: for 0F 7E the
        // xmm source sits in the ModRM reg field, hence the swapped
        // REX.R/REX.B choices marked "attention!" below.
        if (cbuf) {
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_W);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WR); // attention!
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_WB); // attention!
            } else {
              emit_opcode(*cbuf, Assembler::REX_WRB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x7E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdq   %s, %s\t# spill",
                     Matcher::regName[dst_first],
                     Matcher::regName[src_first]);
#endif
        }
        return 5; // REX
      } else {
        // 32-bit: movdl xmm->gpr (66 0F 7E); same operand-swap caveat.
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_R); // attention!
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_B); // attention!
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x7E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdl   %s, %s\t# spill",
                     Matcher::regName[dst_first],
                     Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 4
          : 5; // REX
      }
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit: movapd (66 0F 28) or movsd (F2 0F 10)
        if (cbuf) {
          emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x66 : 0xF2);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("%s  %s, %s\t# spill",
                     UseXmmRegToRegMoveAll ? "movapd" : "movsd ",
                     Matcher::regName[dst_first],
                     Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 4
          : 5; // REX
      } else {
        // 32-bit: movaps (0F 28, no prefix) or movss (F3 0F 10)
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          if (!UseXmmRegToRegMoveAll)
            emit_opcode(*cbuf, 0xF3);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("%s  %s, %s\t# spill",
                     UseXmmRegToRegMoveAll ? "movaps" : "movss ",
                     Matcher::regName[dst_first],
                     Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? (UseXmmRegToRegMoveAll ? 3 : 4)
          : (UseXmmRegToRegMoveAll ? 4 : 5); // REX
      }
    }
  }

  // Every legal src/dst class combination is handled above.
  assert(0," foo ");
  Unimplemented();

  return 0;
}
#ifndef PRODUCT
// Print the spill copy (implementation() in print mode).
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const
{
  implementation(NULL, ra_, false, st);
}
#endif

// Emit the spill copy into the code buffer (implementation() in emit mode).
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
{
  implementation(&cbuf, ra_, false, NULL);
}

// Byte size of the spill copy (implementation() in size-only mode).
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const
{
  return implementation(NULL, ra_, true, NULL);
}
//=============================================================================
#ifndef PRODUCT
// Print nop padding used for loop alignment and call-site spacing.
void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const
{
  st->print("nop \t# %d bytes pad for loops and calls", _count);
}
#endif

// Emit _count bytes of nop padding via the macro assembler.
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const
{
  MacroAssembler _masm(&cbuf);
  __ nop(_count);
}

// Padding is exactly _count bytes.
uint MachNopNode::size(PhaseRegAlloc*) const
{
  return _count;
}
//=============================================================================
#ifndef PRODUCT
// Print the box-lock: lea of the lock slot's stack address into a register.
void BoxLockNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("leaq    %s, [rsp + #%d]\t# box lock",
            Matcher::regName[reg], offset);
}
#endif
1772 void BoxLockNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
1773 {
1774 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1775 int reg = ra_->get_encode(this);
1776 if (offset >= 0x80) {
1777 emit_opcode(cbuf, reg < 8 ? Assembler::REX_W : Assembler::REX_WR);
1778 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1779 emit_rm(cbuf, 0x2, reg & 7, 0x04);
1780 emit_rm(cbuf, 0x0, 0x04, RSP_enc);
1781 emit_d32(cbuf, offset);
1782 } else {
1783 emit_opcode(cbuf, reg < 8 ? Assembler::REX_W : Assembler::REX_WR);
1784 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1785 emit_rm(cbuf, 0x1, reg & 7, 0x04);
1786 emit_rm(cbuf, 0x0, 0x04, RSP_enc);
1787 emit_d8(cbuf, offset);
1788 }
1789 }
// Byte size of the box-lock lea; 5 for the disp8 form, 8 for disp32.
// Must agree with the encoding chosen in BoxLockNode::emit().
uint BoxLockNode::size(PhaseRegAlloc *ra_) const
{
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  return (offset < 0x80) ? 5 : 8; // REX
}
//=============================================================================

// emit call stub, compiled java to interpreter
void emit_java_to_interp(CodeBuffer& cbuf)
{
  // Stub is fixed up when the corresponding call is converted from
  // calling compiled code to calling interpreted code.
  // movq rbx, 0
  // jmp -5 # to self

  address mark = cbuf.inst_mark(); // get mark within main instrs section

  // Note that the code buffer's inst_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL) return; // CodeBuffer::expand failed
  // static stub relocation stores the instruction address of the call
  __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM64);
  // static stub relocation also tags the methodOop in the code-stream.
  __ movoop(rbx, (jobject) NULL); // method is zapped till fixup time
  // jump to self: the jump target is patched when the call is bound
  __ jump(RuntimeAddress(__ pc()));

  // Update current stubs pointer and restore code_end.
  __ end_a_stub();
}

// size of call stub, compiled java to interpretor
uint size_java_to_interp()
{
  return 15; // movq (1+1+8); jmp (1+4)
}

// relocation entries for call stub, compiled java to interpretor
uint reloc_java_to_interp()
{
  return 4; // 3 in emit_java_to_interp + 1 in Java_Static_Call
}
//=============================================================================
#ifndef PRODUCT
// Print the unverified entry point: the inline cache check compares the
// receiver's klass against the cached klass in rax.  With compressed oops
// the narrow klass field is loaded and decoded first; must mirror
// MachUEPNode::emit() below.
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  if (UseCompressedOops) {
    st->print_cr("movl    rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t", oopDesc::klass_offset_in_bytes());
    st->print_cr("leaq    rscratch1, [r12_heapbase, r, Address::times_8, 0]");
    st->print_cr("cmpq    rax, rscratch1\t # Inline cache check");
  } else {
    st->print_cr("cmpq    rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t"
                 "# Inline cache check", oopDesc::klass_offset_in_bytes());
  }
  st->print_cr("\tjne     SharedRuntime::_ic_miss_stub");
  st->print_cr("\tnop");
  if (!OptoBreakpoint) {
    st->print_cr("\tnop");
  }
}
#endif
// Emit the unverified entry point: load the receiver klass (decoding the
// narrow klass when compressed oops are on), compare against the inline
// cache klass in rax, and jump to the IC miss stub on mismatch.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  MacroAssembler masm(&cbuf);
#ifdef ASSERT
  uint code_size = cbuf.code_size();
#endif
  if (UseCompressedOops) {
    masm.load_klass(rscratch1, j_rarg0);
    masm.cmpq(rax, rscratch1);
  } else {
    masm.cmpq(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
  }

  masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  /* WARNING these NOPs are critical so that verified entry point is properly
     aligned for patching by NativeJump::patch_verified_entry() */
  int nops_cnt = 1;
  if (!OptoBreakpoint) {
    // Leave space for int3
    nops_cnt += 1;
  }
  if (UseCompressedOops) {
    // ??? divisible by 4 is aligned?
    nops_cnt += 1;
  }
  masm.nop(nops_cnt);

  // Emitted length must match MachUEPNode::size() exactly.
  assert(cbuf.code_size() - code_size == size(ra_),
         "checking code size of inline cache node");
}
// Byte size of the unverified entry point.  The constants must match the
// bytes emitted by MachUEPNode::emit() (asserted there); the compressed
// oops path is longer because of the klass load/decode.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  if (UseCompressedOops) {
    return OptoBreakpoint ? 19 : 20;
  } else {
    return OptoBreakpoint ? 11 : 12;
  }
}
//=============================================================================
uint size_exception_handler()
{
  // NativeCall instruction size is the same as NativeJump.
  // Note that this value is also credited (in output.cpp) to
  // the size of the code section.
  return NativeJump::instruction_size;
}

// Emit exception handler code.  A jump to the exception blob, emitted as
// a stub; returns the handler's offset within the code buffer.
int emit_exception_handler(CodeBuffer& cbuf)
{

  // Note that the code buffer's inst_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL) return 0; // CodeBuffer::expand failed
  int offset = __ offset();
  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->instructions_begin()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
uint size_deopt_handler()
{
  // three 5 byte instructions
  return 15;
}

// Emit deopt handler code: push the current pc on the stack (via a call to
// the next instruction, then adjusting the pushed return address), and jump
// to the deopt blob's unpack entry.  Returns the handler's offset.
int emit_deopt_handler(CodeBuffer& cbuf)
{

  // Note that the code buffer's inst_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());
  if (base == NULL) return 0; // CodeBuffer::expand failed
  int offset = __ offset();
  address the_pc = (address) __ pc();
  Label next;
  // push a "the_pc" on the stack without destroying any registers
  // as they all may be live.

  // push address of "next"
  __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
  __ bind(next);
  // adjust it so it matches "the_pc"
  __ subq(Address(rsp, 0), __ offset() - offset);
  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
// Emit a RIP-relative disp32 referencing a double constant placed in the
// constant area.  The instruction mark is saved and restored because the
// macro assembler call may shift the buffer's mark.
static void emit_double_constant(CodeBuffer& cbuf, double x) {
  int mark = cbuf.insts()->mark_off();
  MacroAssembler _masm(&cbuf);
  address double_address = __ double_constant(x);
  cbuf.insts()->set_mark_off(mark);  // preserve mark across masm shift
  emit_d32_reloc(cbuf,
                 (int) (double_address - cbuf.code_end() - 4),
                 internal_word_Relocation::spec(double_address),
                 RELOC_DISP32);
}

// Same as above, for a float constant.
static void emit_float_constant(CodeBuffer& cbuf, float x) {
  int mark = cbuf.insts()->mark_off();
  MacroAssembler _masm(&cbuf);
  address float_address = __ float_constant(x);
  cbuf.insts()->set_mark_off(mark);  // preserve mark across masm shift
  emit_d32_reloc(cbuf,
                 (int) (float_address - cbuf.code_end() - 4),
                 internal_word_Relocation::spec(float_address),
                 RELOC_DISP32);
}
// Map a register number to its offset within the FPU register chunk.
int Matcher::regnum_to_fpu_offset(int regnum)
{
  return regnum - 32; // The FP registers are in the second chunk
}

// True just means we have fast l2f conversion on this platform.
// (The query originates from the SPARC port, where it matters.)
const bool Matcher::convL2FSupported(void) {
  return true;
}

// Vector width in bytes
const uint Matcher::vector_width_in_bytes(void) {
  return 8;
}

// Vector ideal reg
const uint Matcher::vector_ideal_reg(void) {
  return Op_RegD;
}
2002 // Is this branch offset short enough that a short branch can be used?
2003 //
2004 // NOTE: If the platform does not provide any short branch variants, then
2005 // this method should return false for offset 0.
2006 bool Matcher::is_short_branch_offset(int offset)
2007 {
2008 return -0x80 <= offset && offset < 0x80;
2009 }
// Can a 64-bit constant be materialized cheaply for a store?
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  //return value == (int) value;  // Cf. storeImmL and immL32.

  // Probably always true, even if a temp register is required.
  return true;
}

// The ecx parameter to rep stosq for the ClearArray node is in words.
const bool Matcher::init_array_count_is_in_bytes = false;

// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = true;

// Is it better to copy float constants, or load them directly from
// memory?  Intel can load a float constant from a direct address,
// requiring no extra registers.  Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = true; // XXX

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed.  Else we split the double into 2 integer pieces
// and move it piece-by-piece.  Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// No-op on amd64: implicit null checks need no instruction fixup here.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {}

// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = true;

// Do floats take an entire double register or just half?
const bool Matcher::float_in_double = true;
// Do ints take an entire long register or just half?
const bool Matcher::int_in_long = true;
2055 // Return whether or not this register is ever used as an argument.
2056 // This function is used on startup to build the trampoline stubs in
2057 // generateOptoStub. Registers not mentioned will be killed by the VM
2058 // call in the trampoline, and arguments in those registers not be
2059 // available to the callee.
2060 bool Matcher::can_be_java_arg(int reg)
2061 {
2062 return
2063 reg == RDI_num || reg == RDI_H_num ||
2064 reg == RSI_num || reg == RSI_H_num ||
2065 reg == RDX_num || reg == RDX_H_num ||
2066 reg == RCX_num || reg == RCX_H_num ||
2067 reg == R8_num || reg == R8_H_num ||
2068 reg == R9_num || reg == R9_H_num ||
2069 reg == R12_num || reg == R12_H_num ||
2070 reg == XMM0_num || reg == XMM0_H_num ||
2071 reg == XMM1_num || reg == XMM1_H_num ||
2072 reg == XMM2_num || reg == XMM2_H_num ||
2073 reg == XMM3_num || reg == XMM3_H_num ||
2074 reg == XMM4_num || reg == XMM4_H_num ||
2075 reg == XMM5_num || reg == XMM5_H_num ||
2076 reg == XMM6_num || reg == XMM6_H_num ||
2077 reg == XMM7_num || reg == XMM7_H_num;
2078 }
// Any register that may carry a Java argument is also a legal spill
// location for an argument (same set as can_be_java_arg above).
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
// x86 idiv/div leave the quotient in rax and the remainder in rdx,
// so the DivMod projections are pinned to those registers.

// Register for DIVI projection of divmodI
RegMask Matcher::divI_proj_mask() {
  return INT_RAX_REG_mask;
}

// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
  return INT_RDX_REG_mask;
}

// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
  return LONG_RAX_REG_mask;
}

// Register for MODL projection of divmodL
RegMask Matcher::modL_proj_mask() {
  return LONG_RDX_REG_mask;
}
2105 static Address build_address(int b, int i, int s, int d) {
2106 Register index = as_Register(i);
2107 Address::ScaleFactor scale = (Address::ScaleFactor)s;
2108 if (index == rsp) {
2109 index = noreg;
2110 scale = Address::no_scale;
2111 }
2112 Address addr(as_Register(b), index, scale, d);
2113 return addr;
2114 }
2116 %}
2118 //----------ENCODING BLOCK-----------------------------------------------------
2119 // This block specifies the encoding classes used by the compiler to
2120 // output byte streams. Encoding classes are parameterized macros
2121 // used by Machine Instruction Nodes in order to generate the bit
2122 // encoding of the instruction. Operands specify their base encoding
// interface with the interface keyword.  There are currently four
// supported interfaces: REG_INTER, CONST_INTER, MEMORY_INTER, &
2125 // COND_INTER. REG_INTER causes an operand to generate a function
2126 // which returns its register number when queried. CONST_INTER causes
2127 // an operand to generate a function which returns the value of the
2128 // constant when queried. MEMORY_INTER causes an operand to generate
2129 // four functions which return the Base Register, the Index Register,
2130 // the Scale Value, and the Offset Value of the operand when queried.
2131 // COND_INTER causes an operand to generate six functions which return
2132 // the encoding code (ie - encoding bits for the instruction)
2133 // associated with each basic boolean condition for a conditional
2134 // instruction.
2135 //
2136 // Instructions specify two basic values for encoding. Again, a
2137 // function is available to check if the constant displacement is an
2138 // oop. They use the ins_encode keyword to specify their encoding
2139 // classes (which must be a sequence of enc_class names, and their
2140 // parameters, specified in the encoding block), and they use the
2141 // opcode keyword to specify, in order, their primary, secondary, and
2142 // tertiary opcode. Only the opcode sections which a particular
2143 // instruction needs for encoding need to be specified.
2144 encode %{
2145 // Build emit functions for each basic byte or larger field in the
2146 // intel encoding scheme (opcode, rm, sib, immediate), and call them
2147 // from C++ code in the enc_class source block. Emit functions will
2148 // live in the main source block for now. In future, we can
2149 // generalize this by adding a syntax that specifies the sizes of
2150 // fields in an order, so that the adlc can build the emit functions
2151 // automagically
  // Emit primary opcode
  enc_class OpcP
  %{
    emit_opcode(cbuf, $primary);
  %}

  // Emit secondary opcode
  enc_class OpcS
  %{
    emit_opcode(cbuf, $secondary);
  %}

  // Emit tertiary opcode
  enc_class OpcT
  %{
    emit_opcode(cbuf, $tertiary);
  %}

  // Emit opcode directly (opcode byte supplied as an immediate operand)
  enc_class Opcode(immI d8)
  %{
    emit_opcode(cbuf, $d8$$constant);
  %}

  // Emit size prefix (0x66 operand-size override, selects 16-bit operands)
  enc_class SizePrefix
  %{
    emit_opcode(cbuf, 0x66);
  %}

  // Mod/RM byte for a register-direct operand (mod = 0x3); only the low
  // 3 bits of the encoding are used here -- any needed REX prefix is
  // emitted by a separate REX_* enc_class.
  enc_class reg(rRegI reg)
  %{
    emit_rm(cbuf, 0x3, 0, $reg$$reg & 7);
  %}

  // Mod/RM byte for a register-register operation (mod = 0x3).
  enc_class reg_reg(rRegI dst, rRegI src)
  %{
    emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
  %}

  // Immediate opcode byte followed by a register-register Mod/RM byte.
  enc_class opc_reg_reg(immI opcode, rRegI dst, rRegI src)
  %{
    emit_opcode(cbuf, $opcode$$constant);
    emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
  %}
  // After an FP compare: if PF is set (unordered result), rewrite the
  // saved flags so subsequent branches see a consistent outcome.  The
  // and-mask 0xffffff2b clears SF, ZF, AF and PF in the saved RFLAGS.
  enc_class cmpfp_fixup()
  %{
    // jnp,s exit  (ordered compare: skip the fixup entirely)
    emit_opcode(cbuf, 0x7B);
    emit_d8(cbuf, 0x0A);

    // pushfq
    emit_opcode(cbuf, 0x9C);

    // andq $0xffffff2b, (%rsp)
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0x81);
    emit_opcode(cbuf, 0x24);
    emit_opcode(cbuf, 0x24);
    emit_d32(cbuf, 0xffffff2b);

    // popfq
    emit_opcode(cbuf, 0x9D);

    // nop (target for branch to avoid branch to branch)
    emit_opcode(cbuf, 0x90);
  %}
  // Materialize the three-way result of an FP compare into $dst:
  // -1 when below or unordered (PF set), 0 when equal, 1 when greater.
  // Short-branch displacements vary because setne/movzbl need a REX
  // prefix for encodings >= 4.
  enc_class cmpfp3(rRegI dst)
  %{
    int dstenc = $dst$$reg;

    // movl $dst, -1
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
    }
    emit_opcode(cbuf, 0xB8 | (dstenc & 7));
    emit_d32(cbuf, -1);

    // jp,s done  (unordered -> keep -1)
    emit_opcode(cbuf, 0x7A);
    emit_d8(cbuf, dstenc < 4 ? 0x08 : 0x0A);

    // jb,s done  (below -> keep -1)
    emit_opcode(cbuf, 0x72);
    emit_d8(cbuf, dstenc < 4 ? 0x06 : 0x08);

    // setne $dst  (0 if equal, 1 otherwise)
    if (dstenc >= 4) {
      emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_B);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0x95);
    emit_opcode(cbuf, 0xC0 | (dstenc & 7));

    // movzbl $dst, $dst  (zero-extend the byte result)
    if (dstenc >= 4) {
      emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_RB);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0xB6);
    emit_rm(cbuf, 0x3, dstenc & 7, dstenc & 7);
  %}
  enc_class cdql_enc(no_rax_rdx_RegI div)
  %{
    // Full implementation of Java idiv and irem; checks for
    // special case as described in JVM spec., p.243 & p.271.
    //
    //         normal case                           special case
    //
    // input : rax: dividend                         min_int
    //         reg: divisor                          -1
    //
    // output: rax: quotient  (= rax idiv reg)       min_int
    //         rdx: remainder (= rax irem reg)       0
    //
    // Code sequence:
    //
    //    0:   3d 00 00 00 80          cmp    $0x80000000,%eax
    //    5:   75 07/08                jne    e <normal>
    //    7:   33 d2                   xor    %edx,%edx
    //  [div >= 8 -> offset + 1]
    //  [REX_B]
    //    9:   83 f9 ff                cmp    $0xffffffffffffffff,$div
    //    c:   74 03/04                je     11 <done>
    //  000000000000000e <normal>:
    //    e:   99                      cltd
    //  [div >= 8 -> offset + 1]
    //  [REX_B]
    //    f:   f7 f9                   idiv   $div
    //  0000000000000011 <done>:

    // cmp    $0x80000000,%eax
    emit_opcode(cbuf, 0x3d);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x80);

    // jne    e <normal>  (one byte longer when $div needs a REX_B prefix)
    emit_opcode(cbuf, 0x75);
    emit_d8(cbuf, $div$$reg < 8 ? 0x07 : 0x08);

    // xor    %edx,%edx   (special case: remainder = 0)
    emit_opcode(cbuf, 0x33);
    emit_d8(cbuf, 0xD2);

    // cmp    $0xffffffffffffffff,$div
    if ($div$$reg >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
    }
    emit_opcode(cbuf, 0x83);
    emit_rm(cbuf, 0x3, 0x7, $div$$reg & 7);
    emit_d8(cbuf, 0xFF);

    // je     11 <done>
    emit_opcode(cbuf, 0x74);
    emit_d8(cbuf, $div$$reg < 8 ? 0x03 : 0x04);

    // <normal>
    // cltd
    emit_opcode(cbuf, 0x99);

    // idivl (note: must be emitted by the user of this rule)
    // <done>
  %}
  enc_class cdqq_enc(no_rax_rdx_RegL div)
  %{
    // Full implementation of Java ldiv and lrem; checks for
    // special case as described in JVM spec., p.243 & p.271.
    //
    //         normal case                           special case
    //
    // input : rax: dividend                         min_long
    //         reg: divisor                          -1
    //
    // output: rax: quotient  (= rax idiv reg)       min_long
    //         rdx: remainder (= rax irem reg)       0
    //
    // Code sequence:
    //
    //    0:   48 ba 00 00 00 00 00    mov    $0x8000000000000000,%rdx
    //    7:   00 00 80
    //    a:   48 39 d0                cmp    %rdx,%rax
    //    d:   75 08                   jne    17 <normal>
    //    f:   33 d2                   xor    %edx,%edx
    //   11:   48 83 f9 ff             cmp    $0xffffffffffffffff,$div
    //   15:   74 05                   je     1c <done>
    //  0000000000000017 <normal>:
    //   17:   48 99                   cqto
    //   19:   48 f7 f9                idiv   $div
    //  000000000000001c <done>:

    // mov    $0x8000000000000000,%rdx
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0xBA);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x80);

    // cmp    %rdx,%rax
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0x39);
    emit_d8(cbuf, 0xD0);

    // jne    17 <normal>
    emit_opcode(cbuf, 0x75);
    emit_d8(cbuf, 0x08);

    // xor    %edx,%edx   (special case: remainder = 0)
    emit_opcode(cbuf, 0x33);
    emit_d8(cbuf, 0xD2);

    // cmp    $0xffffffffffffffff,$div
    emit_opcode(cbuf, $div$$reg < 8 ? Assembler::REX_W : Assembler::REX_WB);
    emit_opcode(cbuf, 0x83);
    emit_rm(cbuf, 0x3, 0x7, $div$$reg & 7);
    emit_d8(cbuf, 0xFF);

    // je     1c <done>
    emit_opcode(cbuf, 0x74);
    emit_d8(cbuf, 0x05);

    // <normal>
    // cqto
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0x99);

    // idivq (note: must be emitted by the user of this rule)
    // <done>
  %}
  // Opcode enc_class for 8/32 bit immediate instructions with sign-extension
  enc_class OpcSE(immI imm)
  %{
    // Emit primary opcode and set sign-extend bit
    // Check for 8-bit immediate, and set sign extend bit (0x02) in opcode
    if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
      emit_opcode(cbuf, $primary | 0x02);
    } else {
      // 32-bit immediate
      emit_opcode(cbuf, $primary);
    }
  %}
  // Opcode with sign-extension bit plus a Mod/RM byte addressing $dst;
  // emits REX_B first when $dst needs register extension.
  enc_class OpcSErm(rRegI dst, immI imm)
  %{
    // OpcSEr/m
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    // Emit primary opcode and set sign-extend bit
    // Check for 8-bit immediate, and set sign extend bit (0x02) in opcode
    if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
      emit_opcode(cbuf, $primary | 0x02);
    } else {
      // 32-bit immediate
      emit_opcode(cbuf, $primary);
    }
    // Emit r/m byte with secondary opcode, after primary opcode.
    emit_rm(cbuf, 0x3, $secondary, dstenc);
  %}
  // 64-bit variant of OpcSErm: REX.W is always required, REX.B only
  // when $dst needs register extension.
  enc_class OpcSErm_wide(rRegL dst, immI imm)
  %{
    // OpcSEr/m
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    // Emit primary opcode and set sign-extend bit
    // Check for 8-bit immediate, and set sign extend bit (0x02) in opcode
    if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
      emit_opcode(cbuf, $primary | 0x02);
    } else {
      // 32-bit immediate
      emit_opcode(cbuf, $primary);
    }
    // Emit r/m byte with secondary opcode, after primary opcode.
    emit_rm(cbuf, 0x3, $secondary, dstenc);
  %}
  // Emit the immediate as 8 bits when it fits in a signed byte,
  // else as 32 bits; must match the opcode emitted by OpcSE/OpcSErm.
  enc_class Con8or32(immI imm)
  %{
    // Check for 8-bit immediate, and set sign extend bit in opcode
    if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
      $$$emit8$imm$$constant;
    } else {
      // 32-bit immediate
      $$$emit32$imm$$constant;
    }
  %}
  // 32-bit PC-relative displacement to a label; relative to the end of
  // the 4-byte displacement field.  A null label emits 0 (backpatched).
  enc_class Lbl(label labl)
  %{
    // JMP, CALL
    Label* l = $labl$$label;
    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0);
  %}

  // 8-bit PC-relative displacement to a label; relative to the end of
  // the 1-byte displacement field.
  enc_class LblShort(label labl)
  %{
    // JMP, CALL
    Label* l = $labl$$label;
    int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
    emit_d8(cbuf, disp);
  %}
  // Fold the register number into the secondary opcode byte (emit_cc
  // ors the low bits of the register encoding into the opcode).
  enc_class opc2_reg(rRegI dst)
  %{
    // BSWAP
    emit_cc(cbuf, $secondary, $dst$$reg);
  %}

  // Same, using the tertiary opcode byte.
  enc_class opc3_reg(rRegI dst)
  %{
    // BSWAP
    emit_cc(cbuf, $tertiary, $dst$$reg);
  %}

  // Mod/RM byte whose reg field is the secondary opcode extension.
  enc_class reg_opc(rRegI div)
  %{
    // INC, DEC, IDIV, IMOD, JMP indirect, ...
    emit_rm(cbuf, 0x3, $secondary, $div$$reg & 7);
  %}
  // Conditional jump with 32-bit displacement: primary opcode (0x0F
  // escape), secondary opcode combined with the condition code, then
  // the label displacement.
  enc_class Jcc(cmpOp cop, label labl)
  %{
    // JCC
    Label* l = $labl$$label;
    $$$emit8$primary;
    emit_cc(cbuf, $secondary, $cop$$cmpcode);
    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0);
  %}

  // Conditional jump with 8-bit displacement.
  enc_class JccShort (cmpOp cop, label labl)
  %{
    // JCC
    Label *l = $labl$$label;
    emit_cc(cbuf, $primary, $cop$$cmpcode);
    int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
    emit_d8(cbuf, disp);
  %}
  // CMOVcc opcode bytes: 0x0F escape ($primary), then the secondary
  // opcode combined with the condition code.
  enc_class enc_cmov(cmpOp cop)
  %{
    // CMOV
    $$$emit8$primary;
    emit_cc(cbuf, $secondary, $cop$$cmpcode);
  %}
  // Emulate an XMM float cmov with a short branch around a register
  // move.  The branch displacement is the byte length of the move,
  // which depends on whether a REX prefix and/or an 0xF3 prefix is
  // needed.
  enc_class enc_cmovf_branch(cmpOp cop, regF dst, regF src)
  %{
    // Invert sense of branch from sense of cmov
    emit_cc(cbuf, 0x70, $cop$$cmpcode ^ 1);
    emit_d8(cbuf, ($dst$$reg < 8 && $src$$reg < 8)
                  ? (UseXmmRegToRegMoveAll ? 3 : 4)
                  : (UseXmmRegToRegMoveAll ? 4 : 5) ); // REX
    // UseXmmRegToRegMoveAll ? movaps(dst, src) : movss(dst, src)
    if (!UseXmmRegToRegMoveAll) emit_opcode(cbuf, 0xF3);
    if ($dst$$reg < 8) {
      if ($src$$reg >= 8) {
        emit_opcode(cbuf, Assembler::REX_B);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
      }
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
    emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
  %}
  // Double-precision variant of enc_cmovf_branch; the move always has
  // a mandatory prefix (0x66 or 0xF2), so only the optional REX byte
  // varies the branch displacement (4 or 5 bytes).
  enc_class enc_cmovd_branch(cmpOp cop, regD dst, regD src)
  %{
    // Invert sense of branch from sense of cmov
    emit_cc(cbuf, 0x70, $cop$$cmpcode ^ 1);
    emit_d8(cbuf, $dst$$reg < 8 && $src$$reg < 8 ? 4 : 5); // REX

    // UseXmmRegToRegMoveAll ? movapd(dst, src) : movsd(dst, src)
    emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x66 : 0xF2);
    if ($dst$$reg < 8) {
      if ($src$$reg >= 8) {
        emit_opcode(cbuf, Assembler::REX_B);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
      }
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
    emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
  %}
  // Slow-path subtype check: scan the secondary-supers array of the sub
  // class for the super class, caching a hit in secondary_super_cache.
  // With compressed oops the array holds narrow oops, so the super is
  // encoded before a 32-bit repne scan and decoded again afterwards on
  // both the hit and miss paths.
  enc_class enc_PartialSubtypeCheck()
  %{
    Register Rrdi = as_Register(RDI_enc); // result register
    Register Rrax = as_Register(RAX_enc); // super class
    Register Rrcx = as_Register(RCX_enc); // killed
    Register Rrsi = as_Register(RSI_enc); // sub class
    Label hit, miss, cmiss;

    MacroAssembler _masm(&cbuf);
    // Compare super with sub directly, since super is not in its own SSA.
    // The compiler used to emit this test, but we fold it in here,
    // to allow platform-specific tweaking on sparc.
    __ cmpq(Rrax, Rrsi);
    __ jcc(Assembler::equal, hit);
#ifndef PRODUCT
    __ lea(Rrcx, ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr));
    __ incrementl(Address(Rrcx, 0));
#endif //PRODUCT
    __ movq(Rrdi, Address(Rrsi,
                          sizeof(oopDesc) +
                          Klass::secondary_supers_offset_in_bytes()));
    __ movl(Rrcx, Address(Rrdi, arrayOopDesc::length_offset_in_bytes()));
    __ addq(Rrdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
    if (UseCompressedOops) {
      __ encode_heap_oop(Rrax);
      __ repne_scanl();
      __ jcc(Assembler::notEqual, cmiss);
      __ decode_heap_oop(Rrax);
      __ movq(Address(Rrsi,
                      sizeof(oopDesc) +
                      Klass::secondary_super_cache_offset_in_bytes()),
              Rrax);
      __ jmp(hit);
      __ bind(cmiss);
      __ decode_heap_oop(Rrax);
      __ jmp(miss);
    } else {
      __ repne_scanq();
      __ jcc(Assembler::notEqual, miss);
      __ movq(Address(Rrsi,
                      sizeof(oopDesc) +
                      Klass::secondary_super_cache_offset_in_bytes()),
              Rrax);
    }
    __ bind(hit);
    // $primary selects between the "equality result in rdi" flavor
    // (zero rdi on hit) and the plain flags-setting flavor.
    if ($primary) {
      __ xorq(Rrdi, Rrdi);
    }
    __ bind(miss);
  %}
  enc_class Java_To_Interpreter(method meth)
  %{
    // CALL Java_To_Interpreter
    // This is the instruction starting address for relocation info.
    cbuf.set_inst_mark();
    $$$emit8$primary;
    // CALL directly to the runtime; the 32-bit operand is the
    // PC-relative displacement to the method entry.
    emit_d32_reloc(cbuf,
                   (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
                   runtime_call_Relocation::spec(),
                   RELOC_DISP32);
  %}
  enc_class Java_Static_Call(method meth)
  %{
    // JAVA STATIC CALL
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to
    // determine who we intended to call.
    cbuf.set_inst_mark();
    $$$emit8$primary;

    if (!_method) {
      // Runtime stub call: no Java method attached.
      emit_d32_reloc(cbuf,
                     (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
                     runtime_call_Relocation::spec(),
                     RELOC_DISP32);
    } else if (_optimized_virtual) {
      // Statically-bound virtual call (receiver known non-null).
      emit_d32_reloc(cbuf,
                     (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
                     opt_virtual_call_Relocation::spec(),
                     RELOC_DISP32);
    } else {
      emit_d32_reloc(cbuf,
                     (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
                     static_call_Relocation::spec(),
                     RELOC_DISP32);
    }
    if (_method) {
      // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
  enc_class Java_Dynamic_Call(method meth)
  %{
    // JAVA DYNAMIC CALL
    // !!!!!
    // Generate "movq rax, -1", placeholder instruction to load oop-info
    // emit_call_dynamic_prologue( cbuf );
    cbuf.set_inst_mark();

    // movq rax, -1  (non_oop_word placeholder, patched with the real
    // inline-cache oop at call-site resolution)
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0xB8 | RAX_enc);
    emit_d64_reloc(cbuf,
                   (int64_t) Universe::non_oop_word(),
                   oop_Relocation::spec_for_immediate(), RELOC_IMM64);
    address virtual_call_oop_addr = cbuf.inst_mark();
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    cbuf.set_inst_mark();
    $$$emit8$primary;
    emit_d32_reloc(cbuf,
                   (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
                   virtual_call_Relocation::spec(virtual_call_oop_addr),
                   RELOC_DISP32);
  %}
  enc_class Java_Compiled_Call(method meth)
  %{
    // JAVA COMPILED CALL
    int disp = in_bytes(methodOopDesc:: from_compiled_offset());

    // XXX XXX offset is 128 is 1.5 NON-PRODUCT !!!
    // assert(-0x80 <= disp && disp < 0x80, "compiled_code_offset isn't small");

    // callq *disp(%rax)
    cbuf.set_inst_mark();
    $$$emit8$primary;
    if (disp < 0x80) {
      // disp fits in a signed byte: short [rax+disp8] form
      emit_rm(cbuf, 0x01, $secondary, RAX_enc); // R/M byte
      emit_d8(cbuf, disp); // Displacement
    } else {
      emit_rm(cbuf, 0x02, $secondary, RAX_enc); // R/M byte
      emit_d32(cbuf, disp); // Displacement
    }
  %}
  // Shift-by-immediate: optional REX_B, primary opcode, Mod/RM with the
  // secondary opcode extension, then the 8-bit shift count.
  enc_class reg_opc_imm(rRegI dst, immI8 shift)
  %{
    // SAL, SAR, SHR
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    $$$emit8$primary;
    emit_rm(cbuf, 0x3, $secondary, dstenc);
    $$$emit8$shift$$constant;
  %}

  // 64-bit shift-by-immediate: REX.W always required.
  enc_class reg_opc_imm_wide(rRegL dst, immI8 shift)
  %{
    // SAL, SAR, SHR
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    $$$emit8$primary;
    emit_rm(cbuf, 0x3, $secondary, dstenc);
    $$$emit8$shift$$constant;
  %}
  // mov reg, imm32 (opcode 0xB8+reg with a 32-bit immediate).
  enc_class load_immI(rRegI dst, immI src)
  %{
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    $$$emit32$src$$constant;
  %}

  // mov reg, imm64 (REX.W + 0xB8+reg with a full 64-bit immediate).
  enc_class load_immL(rRegL dst, immL src)
  %{
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    emit_d64(cbuf, $src$$constant);
  %}

  enc_class load_immUL32(rRegL dst, immUL32 src)
  %{
    // same as load_immI, but this time we care about zeroes in the high word
    // (a 32-bit mov zero-extends into the upper half of the 64-bit register)
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    $$$emit32$src$$constant;
  %}
  // movq reg, imm32 (0xC7 /0): sign-extends a 32-bit immediate to 64 bits.
  enc_class load_immL32(rRegL dst, immL32 src)
  %{
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xC7);
    emit_rm(cbuf, 0x03, 0x00, dstenc);
    $$$emit32$src$$constant;
  %}

  enc_class load_immP31(rRegP dst, immP32 src)
  %{
    // same as load_immI, but this time we care about zeroes in the high word
    // (32-bit mov zero-extends, so the pointer's upper 32 bits are cleared)
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    $$$emit32$src$$constant;
  %}

  // Load a full 64-bit pointer constant; oop constants need a relocation
  // entry so the GC can find and update the embedded oop.
  enc_class load_immP(rRegP dst, immP src)
  %{
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    // This next line should be generated from ADLC
    if ($src->constant_is_oop()) {
      emit_d64_reloc(cbuf, $src$$constant, relocInfo::oop_type, RELOC_IMM64);
    } else {
      emit_d64(cbuf, $src$$constant);
    }
  %}
  // RIP-relative load of a float constant: Mod/RM "00 reg 101" selects
  // RIP-relative addressing; the constant is placed in the code stream.
  enc_class load_immF(regF dst, immF con)
  %{
    // XXX reg_mem doesn't support RIP-relative addressing yet
    emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
    emit_float_constant(cbuf, $con$$constant);
  %}

  enc_class load_immD(regD dst, immD con)
  %{
    // XXX reg_mem doesn't support RIP-relative addressing yet
    emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
    emit_double_constant(cbuf, $con$$constant);
  %}

  enc_class load_conF (regF dst, immF con) %{ // Load float constant
    emit_opcode(cbuf, 0xF3);                  // movss prefix
    if ($dst$$reg >= 8) {
      emit_opcode(cbuf, Assembler::REX_R);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0x10);
    emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
    emit_float_constant(cbuf, $con$$constant);
  %}

  enc_class load_conD (regD dst, immD con) %{ // Load double constant
    // UseXmmLoadAndClearUpper ? movsd(dst, con) : movlpd(dst, con)
    emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
    if ($dst$$reg >= 8) {
      emit_opcode(cbuf, Assembler::REX_R);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12);
    emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
    emit_double_constant(cbuf, $con$$constant);
  %}
  // Encode a reg-reg copy.  If it is useless, then empty encoding
  // (encode_copy elides the move in that case).
  enc_class enc_copy(rRegI dst, rRegI src)
  %{
    encode_copy(cbuf, $dst$$reg, $src$$reg);
  %}

  // Encode xmm reg-reg copy.  If it is useless, then empty encoding.
  enc_class enc_CopyXD( RegD dst, RegD src ) %{
    encode_CopyXD( cbuf, $dst$$reg, $src$$reg );
  %}
  // Unconditional 32-bit register move (0x8B), emitted even when
  // dst == src; REX prefix chosen from the dst/src encodings.
  enc_class enc_copy_always(rRegI dst, rRegI src)
  %{
    int srcenc = $src$$reg;
    int dstenc = $dst$$reg;

    if (dstenc < 8) {
      if (srcenc >= 8) {
        emit_opcode(cbuf, Assembler::REX_B);
        srcenc -= 8;
      }
    } else {
      if (srcenc < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
        srcenc -= 8;
      }
      dstenc -= 8;
    }

    emit_opcode(cbuf, 0x8B);
    emit_rm(cbuf, 0x3, dstenc, srcenc);
  %}
  // 64-bit register move (REX.W + 0x8B); elided when dst == src.
  enc_class enc_copy_wide(rRegL dst, rRegL src)
  %{
    int srcenc = $src$$reg;
    int dstenc = $dst$$reg;

    if (dstenc != srcenc) {
      if (dstenc < 8) {
        if (srcenc < 8) {
          emit_opcode(cbuf, Assembler::REX_W);
        } else {
          emit_opcode(cbuf, Assembler::REX_WB);
          srcenc -= 8;
        }
      } else {
        if (srcenc < 8) {
          emit_opcode(cbuf, Assembler::REX_WR);
        } else {
          emit_opcode(cbuf, Assembler::REX_WRB);
          srcenc -= 8;
        }
        dstenc -= 8;
      }
      emit_opcode(cbuf, 0x8B);
      emit_rm(cbuf, 0x3, dstenc, srcenc);
    }
  %}
  // Emit a 32-bit immediate operand into the instruction stream.
  enc_class Con32(immI src)
  %{
    // Output immediate
    $$$emit32$src$$constant;
  %}
2920 enc_class Con64(immL src)
2921 %{
2922 // Output immediate
2923 emit_d64($src$$constant);
2924 %}
  // Emit a float constant as its raw 32-bit IEEE-754 bit pattern.
  enc_class Con32F_as_bits(immF src)
  %{
    // Output Float immediate bits
    jfloat jf = $src$$constant;
    jint jf_as_bits = jint_cast(jf);
    emit_d32(cbuf, jf_as_bits);
  %}

  // Emit a 16-bit immediate operand.
  enc_class Con16(immI src)
  %{
    // Output immediate
    $$$emit16$src$$constant;
  %}

  // How is this different from Con32???  XXX
  enc_class Con_d32(immI src)
  %{
    emit_d32(cbuf,$src$$constant);
  %}

  enc_class conmemref (rRegP t1) %{    // Con32(storeImmI)
    // Output immediate memory reference: Mod/RM "00 reg 101" plus a
    // zero 32-bit displacement.
    emit_rm(cbuf, 0x00, $t1$$reg, 0x05 );
    emit_d32(cbuf, 0x00);
  %}
  // Computed jump through a jump table, indexed by switch_val in bytes.
  enc_class jump_enc(rRegL switch_val, rRegI dest) %{
    MacroAssembler masm(&cbuf);

    Register switch_reg = as_Register($switch_val$$reg);
    Register dest_reg   = as_Register($dest$$reg);
    address table_base  = masm.address_table_constant(_index2label);

    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    // to do that and the compiler is using that register as one it can allocate.
    // So we build it all by hand.
    // Address index(noreg, switch_reg, Address::times_1);
    // ArrayAddress dispatch(table, index);

    Address dispatch(dest_reg, switch_reg, Address::times_1);

    masm.lea(dest_reg, InternalAddress(table_base));
    masm.jmp(dispatch);
  %}
  // Computed jump with a scaled index plus a constant offset into the table.
  enc_class jump_enc_addr(rRegL switch_val, immI2 shift, immL32 offset, rRegI dest) %{
    MacroAssembler masm(&cbuf);

    Register switch_reg = as_Register($switch_val$$reg);
    Register dest_reg   = as_Register($dest$$reg);
    address table_base  = masm.address_table_constant(_index2label);

    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    // to do that and the compiler is using that register as one it can allocate.
    // So we build it all by hand.
    // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant);
    // ArrayAddress dispatch(table, index);

    Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant);

    masm.lea(dest_reg, InternalAddress(table_base));
    masm.jmp(dispatch);
  %}
  // Computed jump with a scaled index and no constant offset.
  enc_class jump_enc_offset(rRegL switch_val, immI2 shift, rRegI dest) %{
    MacroAssembler masm(&cbuf);

    Register switch_reg = as_Register($switch_val$$reg);
    Register dest_reg   = as_Register($dest$$reg);
    address table_base  = masm.address_table_constant(_index2label);

    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    // to do that and the compiler is using that register as one it can allocate.
    // So we build it all by hand.
    // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant);
    // ArrayAddress dispatch(table, index);

    Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant);
    masm.lea(dest_reg, InternalAddress(table_base));
    masm.jmp(dispatch);

  %}
  // LOCK prefix (0xF0) for atomic read-modify-write instructions;
  // unnecessary on uniprocessor systems, so skipped there.
  enc_class lock_prefix()
  %{
    if (os::is_MP()) {
      emit_opcode(cbuf, 0xF0); // lock
    }
  %}
  // REX prefix for a memory operand: REX.B for an extended base (8-15),
  // REX.X for an extended index.  Emits no byte when neither applies.
  enc_class REX_mem(memory mem)
  %{
    if ($mem$$base >= 8) {
      if ($mem$$index < 8) {
        emit_opcode(cbuf, Assembler::REX_B);
      } else {
        emit_opcode(cbuf, Assembler::REX_XB);
      }
    } else {
      if ($mem$$index >= 8) {
        emit_opcode(cbuf, Assembler::REX_X);
      }
    }
  %}
  // REX prefix for a 64-bit memory operation: REX.W is mandatory, so a
  // byte is always emitted; B and X bits added as the operand requires.
  enc_class REX_mem_wide(memory mem)
  %{
    if ($mem$$base >= 8) {
      if ($mem$$index < 8) {
        emit_opcode(cbuf, Assembler::REX_WB);
      } else {
        emit_opcode(cbuf, Assembler::REX_WXB);
      }
    } else {
      if ($mem$$index < 8) {
        emit_opcode(cbuf, Assembler::REX_W);
      } else {
        emit_opcode(cbuf, Assembler::REX_WX);
      }
    }
  %}
  // for byte regs: encodings 4-7 need a plain REX prefix so they address
  // SPL/BPL/SIL/DIL rather than the legacy AH/CH/DH/BH; 8-15 need REX.B.
  enc_class REX_breg(rRegI reg)
  %{
    if ($reg$$reg >= 4) {
      emit_opcode(cbuf, $reg$$reg < 8 ? Assembler::REX : Assembler::REX_B);
    }
  %}
  // for byte regs: REX selection when the *source* is a byte register
  // (src encodings 4-7 need a bare REX; see REX_breg).
  enc_class REX_reg_breg(rRegI dst, rRegI src)
  %{
    if ($dst$$reg < 8) {
      if ($src$$reg >= 4) {
        emit_opcode(cbuf, $src$$reg < 8 ? Assembler::REX : Assembler::REX_B);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
      }
    }
  %}
  // for byte regs: REX prefix for a byte-register + memory operand.
  // Any REX byte (even one emitted for an extended base/index) is
  // sufficient to give encodings 4-7 their SPL/BPL/SIL/DIL meaning.
  enc_class REX_breg_mem(rRegI reg, memory mem)
  %{
    if ($reg$$reg < 8) {
      if ($mem$$base < 8) {
        if ($mem$$index >= 8) {
          emit_opcode(cbuf, Assembler::REX_X);
        } else if ($reg$$reg >= 4) {
          emit_opcode(cbuf, Assembler::REX);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_B);
        } else {
          emit_opcode(cbuf, Assembler::REX_XB);
        }
      }
    } else {
      if ($mem$$base < 8) {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_R);
        } else {
          emit_opcode(cbuf, Assembler::REX_RX);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_RB);
        } else {
          emit_opcode(cbuf, Assembler::REX_RXB);
        }
      }
    }
  %}
  // REX.B for a single register operand needing extension.
  enc_class REX_reg(rRegI reg)
  %{
    if ($reg$$reg >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
    }
  %}

  // 64-bit single-register operand: REX.W always, plus B if extended.
  enc_class REX_reg_wide(rRegI reg)
  %{
    if ($reg$$reg < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
    }
  %}
  // REX prefix for two register operands: R extends the Mod/RM reg field
  // (dst), B extends the r/m field (src).  No byte when neither needed.
  enc_class REX_reg_reg(rRegI dst, rRegI src)
  %{
    if ($dst$$reg < 8) {
      if ($src$$reg >= 8) {
        emit_opcode(cbuf, Assembler::REX_B);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
      }
    }
  %}

  // 64-bit two-register form: REX.W always emitted.
  enc_class REX_reg_reg_wide(rRegI dst, rRegI src)
  %{
    if ($dst$$reg < 8) {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_W);
      } else {
        emit_opcode(cbuf, Assembler::REX_WB);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_WR);
      } else {
        emit_opcode(cbuf, Assembler::REX_WRB);
      }
    }
  %}
  // REX prefix for a register + memory operand: R for the register,
  // B for the base, X for the index; nothing emitted if all are low.
  enc_class REX_reg_mem(rRegI reg, memory mem)
  %{
    if ($reg$$reg < 8) {
      if ($mem$$base < 8) {
        if ($mem$$index >= 8) {
          emit_opcode(cbuf, Assembler::REX_X);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_B);
        } else {
          emit_opcode(cbuf, Assembler::REX_XB);
        }
      }
    } else {
      if ($mem$$base < 8) {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_R);
        } else {
          emit_opcode(cbuf, Assembler::REX_RX);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_RB);
        } else {
          emit_opcode(cbuf, Assembler::REX_RXB);
        }
      }
    }
  %}
  // 64-bit register + memory operand: REX.W always emitted, with R/X/B
  // bits added as the register, index and base encodings require.
  enc_class REX_reg_mem_wide(rRegL reg, memory mem)
  %{
    if ($reg$$reg < 8) {
      if ($mem$$base < 8) {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_W);
        } else {
          emit_opcode(cbuf, Assembler::REX_WX);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_WB);
        } else {
          emit_opcode(cbuf, Assembler::REX_WXB);
        }
      }
    } else {
      if ($mem$$base < 8) {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_WR);
        } else {
          emit_opcode(cbuf, Assembler::REX_WRX);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_WRB);
        } else {
          emit_opcode(cbuf, Assembler::REX_WRXB);
        }
      }
    }
  %}
// Emit the ModRM/SIB/displacement bytes for a reg-mem instruction by
// unpacking the memory operand and delegating to encode_RegMem.
// The REX prefix must already have been emitted by one of the REX_*
// enc_classes above.
3218 enc_class reg_mem(rRegI ereg, memory mem)
3219 %{
3220 // High registers are handled (masked to 3 bits) in encode_RegMem
3221 int reg = $ereg$$reg;
3222 int base = $mem$$base;
3223 int index = $mem$$index;
3224 int scale = $mem$$scale;
3225 int disp = $mem$$disp;
3226 bool disp_is_oop = $mem->disp_is_oop();
3228 encode_RegMem(cbuf, reg, base, index, scale, disp, disp_is_oop);
3229 %}
// Emit ModRM/SIB/displacement for an instruction that uses an opcode
// extension ("/digit") in the ModRM reg field instead of a register.
3231 enc_class RM_opc_mem(immI rm_opcode, memory mem)
3232 %{
3233 int rm_byte_opcode = $rm_opcode$$constant;
3235 // High registers are handled (masked to 3 bits) in encode_RegMem
3236 int base = $mem$$base;
3237 int index = $mem$$index;
3238 int scale = $mem$$scale;
3239 int displace = $mem$$disp;
3241 bool disp_is_oop = $mem->disp_is_oop(); // disp-as-oop when
3242 // working with static
3243 // globals
3244 encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace,
3245 disp_is_oop);
3246 %}
// Emit the memory-operand bytes for LEA dst, [src0 + src1]: base register
// plus constant displacement, no index and no scale.
3248 enc_class reg_lea(rRegI dst, rRegI src0, immI src1)
3249 %{
3250 int reg_encoding = $dst$$reg;
3251 int base = $src0$$reg; // 0xFFFFFFFF indicates no base
3252 int index = 0x04; // 0x04 indicates no index
3253 int scale = 0x00; // 0x00 indicates no scale
3254 int displace = $src1$$constant; // 0x00 indicates no displacement
3255 bool disp_is_oop = false;
3256 encode_RegMem(cbuf, reg_encoding, base, index, scale, displace,
3257 disp_is_oop);
3258 %}
// 32-bit two's-complement negate: [REX.B] F7 /3 (NEG r/m32).
3260 enc_class neg_reg(rRegI dst)
3261 %{
3262 int dstenc = $dst$$reg;
3263 if (dstenc >= 8) {
3264 emit_opcode(cbuf, Assembler::REX_B);
3265 dstenc -= 8; // low 3 bits go into the ModRM r/m field
3266 }
3267 // NEG $dst
3268 emit_opcode(cbuf, 0xF7);
3269 emit_rm(cbuf, 0x3, 0x03, dstenc);
3270 %}
// 64-bit two's-complement negate: REX.W[B] F7 /3 (NEG r/m64).
3272 enc_class neg_reg_wide(rRegI dst)
3273 %{
3274 int dstenc = $dst$$reg;
3275 if (dstenc < 8) {
3276 emit_opcode(cbuf, Assembler::REX_W);
3277 } else {
3278 emit_opcode(cbuf, Assembler::REX_WB);
3279 dstenc -= 8; // low 3 bits go into the ModRM r/m field
3280 }
3281 // NEG $dst
3282 emit_opcode(cbuf, 0xF7);
3283 emit_rm(cbuf, 0x3, 0x03, dstenc);
3284 %}
// SETL $dst (0F 9C): set the byte register to 1 if SF != OF, else 0.
// Encodings 4-7 need a bare REX prefix so the byte register is
// spl/bpl/sil/dil rather than legacy ah/ch/dh/bh.
3286 enc_class setLT_reg(rRegI dst)
3287 %{
3288 int dstenc = $dst$$reg;
3289 if (dstenc >= 8) {
3290 emit_opcode(cbuf, Assembler::REX_B);
3291 dstenc -= 8;
3292 } else if (dstenc >= 4) {
3293 emit_opcode(cbuf, Assembler::REX);
3294 }
3295 // SETLT $dst
3296 emit_opcode(cbuf, 0x0F);
3297 emit_opcode(cbuf, 0x9C);
3298 emit_rm(cbuf, 0x3, 0x0, dstenc);
3299 %}
// SETNZ $dst (0F 95): set the byte register to 1 if ZF == 0, else 0.
// Same REX handling as setLT_reg for encodings 4-7.
3301 enc_class setNZ_reg(rRegI dst)
3302 %{
3303 int dstenc = $dst$$reg;
3304 if (dstenc >= 8) {
3305 emit_opcode(cbuf, Assembler::REX_B);
3306 dstenc -= 8;
3307 } else if (dstenc >= 4) {
3308 emit_opcode(cbuf, Assembler::REX);
3309 }
3310 // SETNZ $dst
3311 emit_opcode(cbuf, 0x0F);
3312 emit_opcode(cbuf, 0x95);
3313 emit_rm(cbuf, 0x3, 0x0, dstenc);
3314 %}
// Branch-free conditional add (cadd_cmpLT idiom):
// p -= q; tmp = borrow ? -1 : 0 (via SBB tmp,tmp); p += tmp & y.
// Net effect: p = (p - q) + ((p < q) ? y : 0), without a branch.
// tmp is constrained to rcx (encoding < 8), so the SBB/AND/ADD using it
// in the ModRM reg field need no REX.R for tmp itself.
3316 enc_class enc_cmpLTP(no_rcx_RegI p, no_rcx_RegI q, no_rcx_RegI y,
3317 rcx_RegI tmp)
3318 %{
3319 // cadd_cmpLT
3321 int tmpReg = $tmp$$reg;
3323 int penc = $p$$reg;
3324 int qenc = $q$$reg;
3325 int yenc = $y$$reg;
3327 // subl $p,$q
3328 if (penc < 8) {
3329 if (qenc >= 8) {
3330 emit_opcode(cbuf, Assembler::REX_B);
3331 }
3332 } else {
3333 if (qenc < 8) {
3334 emit_opcode(cbuf, Assembler::REX_R);
3335 } else {
3336 emit_opcode(cbuf, Assembler::REX_RB);
3337 }
3338 }
3339 emit_opcode(cbuf, 0x2B);
3340 emit_rm(cbuf, 0x3, penc & 7, qenc & 7);
3342 // sbbl $tmp, $tmp
3343 emit_opcode(cbuf, 0x1B);
3344 emit_rm(cbuf, 0x3, tmpReg, tmpReg);
3346 // andl $tmp, $y
3347 if (yenc >= 8) {
3348 emit_opcode(cbuf, Assembler::REX_B);
3349 }
3350 emit_opcode(cbuf, 0x23);
3351 emit_rm(cbuf, 0x3, tmpReg, yenc & 7);
3353 // addl $p,$tmp
3354 if (penc >= 8) {
3355 emit_opcode(cbuf, Assembler::REX_R);
3356 }
3357 emit_opcode(cbuf, 0x03);
3358 emit_rm(cbuf, 0x3, penc & 7, tmpReg);
3359 %}
3361 // Compare the longs and set -1, 0, or 1 into dst:
3361 // cmpq src1,src2; dst = -1; jl done; setne dst; movzbl dst,dst
3361 // so dst ends up -1 (less), 0 (equal) or 1 (greater).
3362 enc_class cmpl3_flag(rRegL src1, rRegL src2, rRegI dst)
3363 %{
3364 int src1enc = $src1$$reg;
3365 int src2enc = $src2$$reg;
3366 int dstenc = $dst$$reg;
3368 // cmpq $src1, $src2
3369 if (src1enc < 8) {
3370 if (src2enc < 8) {
3371 emit_opcode(cbuf, Assembler::REX_W);
3372 } else {
3373 emit_opcode(cbuf, Assembler::REX_WB);
3374 }
3375 } else {
3376 if (src2enc < 8) {
3377 emit_opcode(cbuf, Assembler::REX_WR);
3378 } else {
3379 emit_opcode(cbuf, Assembler::REX_WRB);
3380 }
3381 }
3382 emit_opcode(cbuf, 0x3B);
3383 emit_rm(cbuf, 0x3, src1enc & 7, src2enc & 7);
3385 // movl $dst, -1
3386 if (dstenc >= 8) {
3387 emit_opcode(cbuf, Assembler::REX_B);
3388 }
3389 emit_opcode(cbuf, 0xB8 | (dstenc & 7));
3390 emit_d32(cbuf, -1);
3392 // jl,s done
3393 // Skip distance depends on how many REX prefix bytes the
3394 // setne/movzbl pair below will emit.
3393 emit_opcode(cbuf, 0x7C);
3394 emit_d8(cbuf, dstenc < 4 ? 0x06 : 0x08);
3396 // setne $dst
3397 if (dstenc >= 4) {
3398 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_B);
3399 }
3400 emit_opcode(cbuf, 0x0F);
3401 emit_opcode(cbuf, 0x95);
3402 emit_opcode(cbuf, 0xC0 | (dstenc & 7));
3404 // movzbl $dst, $dst
3405 if (dstenc >= 4) {
3406 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_RB);
3407 }
3408 emit_opcode(cbuf, 0x0F);
3409 emit_opcode(cbuf, 0xB6);
3410 emit_rm(cbuf, 0x3, dstenc & 7, dstenc & 7);
3411 %}
// Move an x87 FPU-stack result into an XMM register via the stack:
// fstp [rsp]; movsd/movlpd dst,[rsp]; addq rsp,8.
3413 enc_class Push_ResultXD(regD dst) %{
3414 int dstenc = $dst$$reg;
3416 store_to_stackslot( cbuf, 0xDD, 0x03, 0 ); //FSTP [RSP]
3418 // UseXmmLoadAndClearUpper ? movsd dst,[rsp] : movlpd dst,[rsp]
3419 emit_opcode (cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
3420 if (dstenc >= 8) {
3421 emit_opcode(cbuf, Assembler::REX_R);
3422 }
3423 emit_opcode (cbuf, 0x0F );
3424 emit_opcode (cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12 );
3425 encode_RegMem(cbuf, dstenc, RSP_enc, 0x4, 0, 0, false);
3427 // add rsp,8
3428 emit_opcode(cbuf, Assembler::REX_W);
3429 emit_opcode(cbuf,0x83);
3430 emit_rm(cbuf,0x3, 0x0, RSP_enc);
3431 emit_d8(cbuf,0x08);
3432 %}
// Move an XMM double onto the x87 FPU stack via memory:
// subq rsp,8; movsd [rsp],src; fldl [rsp].
// NOTE(review): the stack slot is not popped here — presumably the
// matching Push_ResultXD (or equivalent) restores rsp; confirm at call sites.
3434 enc_class Push_SrcXD(regD src) %{
3435 int srcenc = $src$$reg;
3437 // subq rsp,#8
3438 emit_opcode(cbuf, Assembler::REX_W);
3439 emit_opcode(cbuf, 0x83);
3440 emit_rm(cbuf, 0x3, 0x5, RSP_enc);
3441 emit_d8(cbuf, 0x8);
3443 // movsd [rsp],src
3444 emit_opcode(cbuf, 0xF2);
3445 if (srcenc >= 8) {
3446 emit_opcode(cbuf, Assembler::REX_R);
3447 }
3448 emit_opcode(cbuf, 0x0F);
3449 emit_opcode(cbuf, 0x11);
3450 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false);
3452 // fldd [rsp]
3453 emit_opcode(cbuf, 0x66);
3454 emit_opcode(cbuf, 0xDD);
3455 encode_RegMem(cbuf, 0x0, RSP_enc, 0x4, 0, 0, false);
3456 %}
// Load 64 bits from memory into an XMM register (MOVQ xmm, m64).
3459 enc_class movq_ld(regD dst, memory mem) %{
3460 MacroAssembler _masm(&cbuf);
3461 Address madr = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
3462 __ movq(as_XMMRegister($dst$$reg), madr);
3463 %}
// Store 64 bits from an XMM register to memory (MOVQ m64, xmm).
3465 enc_class movq_st(memory mem, regD src) %{
3466 MacroAssembler _masm(&cbuf);
3467 Address madr = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
3468 __ movq(madr, as_XMMRegister($src$$reg));
3469 %}
// Replicate the low byte of src across the low 8 byte lanes of dst:
// copy src->dst, widen bytes with punpcklbw, then splat with pshuflw.
3471 enc_class pshufd_8x8(regF dst, regF src) %{
3472 MacroAssembler _masm(&cbuf);
3474 encode_CopyXD(cbuf, $dst$$reg, $src$$reg);
3475 __ punpcklbw(as_XMMRegister($dst$$reg), as_XMMRegister($dst$$reg));
3476 __ pshuflw(as_XMMRegister($dst$$reg), as_XMMRegister($dst$$reg), 0x00);
3477 %}
// Replicate the low 16-bit word of src across the low 4 word lanes of dst.
3479 enc_class pshufd_4x16(regF dst, regF src) %{
3480 MacroAssembler _masm(&cbuf);
3482 __ pshuflw(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg), 0x00);
3483 %}
// Shuffle the four dwords of src into dst according to the immediate mode.
3485 enc_class pshufd(regD dst, regD src, int mode) %{
3486 MacroAssembler _masm(&cbuf);
3488 __ pshufd(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg), $mode);
3489 %}
// Bitwise XOR of two XMM registers (dst ^= src).
3491 enc_class pxor(regD dst, regD src) %{
3492 MacroAssembler _masm(&cbuf);
3494 __ pxor(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg));
3495 %}
// Move a 32-bit general register into the low dword of an XMM register.
3497 enc_class mov_i2x(regD dst, rRegI src) %{
3498 MacroAssembler _masm(&cbuf);
3500 __ movdl(as_XMMRegister($dst$$reg), as_Register($src$$reg));
3501 %}
3503 // obj: object to lock
3504 // box: box address (header location) -- killed
3505 // tmp: rax -- killed
3506 // scr: rbx -- killed
3507 //
3508 // What follows is a direct transliteration of fast_lock() and fast_unlock()
3509 // from i486.ad. See that file for comments.
3510 // TODO: where possible switch from movq (r, 0) to movl(r,0) and
3511 // use the shorter encoding. (Movl clears the high-order 32-bits).
// On success ZF is set (cmpxchg equality / recursive-lock test); callers
// branch to the slow path on ZF clear. EmitSync bits 1 and 2 select
// debugging/diagnostic variants; the final else-branch is the production
// fast path (stack-lock attempt, recursive check, then inflated-monitor CAS).
3514 enc_class Fast_Lock(rRegP obj, rRegP box, rax_RegI tmp, rRegP scr)
3515 %{
3516 Register objReg = as_Register((int)$obj$$reg);
3517 Register boxReg = as_Register((int)$box$$reg);
3518 Register tmpReg = as_Register($tmp$$reg);
3519 Register scrReg = as_Register($scr$$reg);
3520 MacroAssembler masm(&cbuf);
3522 // Verify uniqueness of register assignments -- necessary but not sufficient
3523 assert (objReg != boxReg && objReg != tmpReg &&
3524 objReg != scrReg && tmpReg != scrReg, "invariant") ;
3526 if (_counters != NULL) {
3527 masm.atomic_incl(ExternalAddress((address) _counters->total_entry_count_addr()));
3528 }
3529 if (EmitSync & 1) {
3530 // EmitSync&1: trivially force the slow path (always-inflated behavior)
3530 masm.movptr (Address(boxReg, 0), intptr_t(markOopDesc::unused_mark())) ;
3531 masm.cmpq (rsp, 0) ;
3532 } else
3533 if (EmitSync & 2) {
3534 Label DONE_LABEL;
3535 if (UseBiasedLocking) {
3536 // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
3537 masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
3538 }
3539 masm.movl(tmpReg, 0x1);
3540 masm.orq(tmpReg, Address(objReg, 0));
3541 masm.movq(Address(boxReg, 0), tmpReg);
3542 if (os::is_MP()) {
3543 masm.lock();
3544 }
3545 masm.cmpxchgq(boxReg, Address(objReg, 0)); // Updates tmpReg
3546 masm.jcc(Assembler::equal, DONE_LABEL);
3548 // Recursive locking
3549 masm.subq(tmpReg, rsp);
3550 masm.andq(tmpReg, 7 - os::vm_page_size());
3551 masm.movq(Address(boxReg, 0), tmpReg);
3553 masm.bind(DONE_LABEL);
3554 masm.nop(); // avoid branch to branch
3555 } else {
3556 Label DONE_LABEL, IsInflated, Egress;
3558 masm.movq (tmpReg, Address(objReg, 0)) ;
3559 masm.testq (tmpReg, 0x02) ; // inflated vs stack-locked|neutral|biased
3560 masm.jcc (Assembler::notZero, IsInflated) ;
3562 // it's stack-locked, biased or neutral
3563 // TODO: optimize markword triage order to reduce the number of
3564 // conditional branches in the most common cases.
3565 // Beware -- there's a subtle invariant that fetch of the markword
3566 // at [FETCH], below, will never observe a biased encoding (*101b).
3567 // If this invariant is not held we'll suffer exclusion (safety) failure.
3569 if (UseBiasedLocking) {
3570 masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, _counters);
3571 masm.movq (tmpReg, Address(objReg, 0)) ; // [FETCH]
3572 }
3574 masm.orq (tmpReg, 1) ;
3575 masm.movq (Address(boxReg, 0), tmpReg) ;
3576 if (os::is_MP()) { masm.lock(); }
3577 masm.cmpxchgq(boxReg, Address(objReg, 0)); // Updates tmpReg
3578 if (_counters != NULL) {
3579 masm.cond_inc32(Assembler::equal,
3580 ExternalAddress((address) _counters->fast_path_entry_count_addr()));
3581 }
3582 masm.jcc (Assembler::equal, DONE_LABEL);
3584 // Recursive locking
3585 masm.subq (tmpReg, rsp);
3586 masm.andq (tmpReg, 7 - os::vm_page_size());
3587 masm.movq (Address(boxReg, 0), tmpReg);
3588 if (_counters != NULL) {
3589 masm.cond_inc32(Assembler::equal,
3590 ExternalAddress((address) _counters->fast_path_entry_count_addr()));
3591 }
3592 masm.jmp (DONE_LABEL) ;
3594 masm.bind (IsInflated) ;
3595 // It's inflated
3597 // TODO: someday avoid the ST-before-CAS penalty by
3598 // relocating (deferring) the following ST.
3599 // We should also think about trying a CAS without having
3600 // fetched _owner. If the CAS is successful we may
3601 // avoid an RTO->RTS upgrade on the $line.
3602 masm.movptr(Address(boxReg, 0), intptr_t(markOopDesc::unused_mark())) ;
3604 masm.movq (boxReg, tmpReg) ;
3605 masm.movq (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
3606 masm.testq (tmpReg, tmpReg) ;
3607 masm.jcc (Assembler::notZero, DONE_LABEL) ;
3609 // It's inflated and appears unlocked
3610 if (os::is_MP()) { masm.lock(); }
3611 masm.cmpxchgq(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
3612 // Intentional fall-through into DONE_LABEL ...
3614 masm.bind (DONE_LABEL) ;
3615 masm.nop () ; // avoid jmp to jmp
3616 }
3617 %}
3619 // obj: object to unlock
3620 // box: box address (displaced header location), killed
3621 // RBX: killed tmp; cannot be obj nor box
// Mirrors Fast_Lock: ZF set on successful fast-path unlock, ZF clear sends
// the caller to the slow path. EmitSync bits 4/8 select diagnostic
// variants; the final else-branch handles stacked, recursive and inflated
// cases, including the optional successor check (EmitSync & 65536).
3622 enc_class Fast_Unlock(rRegP obj, rax_RegP box, rRegP tmp)
3623 %{
3625 Register objReg = as_Register($obj$$reg);
3626 Register boxReg = as_Register($box$$reg);
3627 Register tmpReg = as_Register($tmp$$reg);
3628 MacroAssembler masm(&cbuf);
3630 if (EmitSync & 4) {
3631 masm.cmpq (rsp, 0) ;
3632 } else
3633 if (EmitSync & 8) {
3634 Label DONE_LABEL;
3635 if (UseBiasedLocking) {
3636 masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
3637 }
3639 // Check whether the displaced header is 0
3640 //(=> recursive unlock)
3641 masm.movq(tmpReg, Address(boxReg, 0));
3642 masm.testq(tmpReg, tmpReg);
3643 masm.jcc(Assembler::zero, DONE_LABEL);
3645 // If not recursive lock, reset the header to displaced header
3646 if (os::is_MP()) {
3647 masm.lock();
3648 }
3649 masm.cmpxchgq(tmpReg, Address(objReg, 0)); // Uses RAX which is box
3650 masm.bind(DONE_LABEL);
3651 masm.nop(); // avoid branch to branch
3652 } else {
3653 Label DONE_LABEL, Stacked, CheckSucc ;
3655 if (UseBiasedLocking) {
3656 masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
3657 }
3659 masm.movq (tmpReg, Address(objReg, 0)) ;
3660 masm.cmpq (Address(boxReg, 0), (int)NULL_WORD) ;
3661 masm.jcc (Assembler::zero, DONE_LABEL) ;
3662 masm.testq (tmpReg, 0x02) ;
3663 masm.jcc (Assembler::zero, Stacked) ;
3665 // It's inflated
3666 masm.movq (boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
3667 masm.xorq (boxReg, r15_thread) ;
3668 masm.orq (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
3669 masm.jcc (Assembler::notZero, DONE_LABEL) ;
3670 masm.movq (boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
3671 masm.orq (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
3672 masm.jcc (Assembler::notZero, CheckSucc) ;
3673 masm.mov64 (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int)NULL_WORD) ;
3674 masm.jmp (DONE_LABEL) ;
3676 if ((EmitSync & 65536) == 0) {
3677 Label LSuccess, LGoSlowPath ;
3678 masm.bind (CheckSucc) ;
3679 masm.cmpq (Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int)NULL_WORD) ;
3680 masm.jcc (Assembler::zero, LGoSlowPath) ;
3682 // I'd much rather use lock:andl m->_owner, 0 as it's faster than the
3683 // the explicit ST;MEMBAR combination, but masm doesn't currently support
3684 // "ANDQ M,IMM". Don't use MFENCE here. lock:add to TOS, xchg, etc
3685 // are all faster when the write buffer is populated.
3686 masm.movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int)NULL_WORD) ;
3687 if (os::is_MP()) {
3688 masm.lock () ; masm.addq (Address(rsp, 0), 0) ;
3689 }
3690 masm.cmpq (Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int)NULL_WORD) ;
3691 masm.jcc (Assembler::notZero, LSuccess) ;
3693 masm.movptr (boxReg, (int)NULL_WORD) ; // box is really EAX
3694 if (os::is_MP()) { masm.lock(); }
3695 masm.cmpxchgq (r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
3696 masm.jcc (Assembler::notEqual, LSuccess) ;
3697 // Intentional fall-through into slow-path
3699 masm.bind (LGoSlowPath) ;
3700 masm.orl (boxReg, 1) ; // set ICC.ZF=0 to indicate failure
3701 masm.jmp (DONE_LABEL) ;
3703 masm.bind (LSuccess) ;
3704 masm.testl (boxReg, 0) ; // set ICC.ZF=1 to indicate success
3705 masm.jmp (DONE_LABEL) ;
3706 }
3708 masm.bind (Stacked) ;
3709 masm.movq (tmpReg, Address (boxReg, 0)) ; // re-fetch
3710 if (os::is_MP()) { masm.lock(); }
3711 masm.cmpxchgq(tmpReg, Address(objReg, 0)); // Uses RAX which is box
3713 if (EmitSync & 65536) {
3714 masm.bind (CheckSucc) ;
3715 }
3716 masm.bind(DONE_LABEL);
3717 if (EmitSync & 32768) {
3718 masm.nop(); // avoid branch to branch
3719 }
3720 }
3721 %}
// String.compareTo intrinsic. Fixed register contract (from the matching
// instruct): the two String oops arrive in rsi and rdi; the signed
// comparison result is produced in rcx. Compares char-by-char after
// handling the identical-array and zero-min-length fast paths; the
// length difference is kept on the stack until POP/LENGTH_DIFF.
3723 enc_class enc_String_Compare()
3724 %{
3725 Label RCX_GOOD_LABEL, LENGTH_DIFF_LABEL,
3726 POP_LABEL, DONE_LABEL, CONT_LABEL,
3727 WHILE_HEAD_LABEL;
3728 MacroAssembler masm(&cbuf);
3730 // Get the first character position in both strings
3731 // [8] char array, [12] offset, [16] count
3732 int value_offset = java_lang_String::value_offset_in_bytes();
3733 int offset_offset = java_lang_String::offset_offset_in_bytes();
3734 int count_offset = java_lang_String::count_offset_in_bytes();
3735 int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR);
3737 masm.load_heap_oop(rax, Address(rsi, value_offset));
3738 masm.movl(rcx, Address(rsi, offset_offset));
3739 masm.leaq(rax, Address(rax, rcx, Address::times_2, base_offset));
3740 masm.load_heap_oop(rbx, Address(rdi, value_offset));
3741 masm.movl(rcx, Address(rdi, offset_offset));
3742 masm.leaq(rbx, Address(rbx, rcx, Address::times_2, base_offset));
3744 // Compute the minimum of the string lengths(rsi) and the
3745 // difference of the string lengths (stack)
3747 masm.movl(rdi, Address(rdi, count_offset));
3748 masm.movl(rsi, Address(rsi, count_offset));
3749 masm.movl(rcx, rdi);
3750 masm.subl(rdi, rsi);
3751 masm.pushq(rdi);
3752 masm.cmovl(Assembler::lessEqual, rsi, rcx);
3754 // Is the minimum length zero?
3755 masm.bind(RCX_GOOD_LABEL);
3756 masm.testl(rsi, rsi);
3757 masm.jcc(Assembler::zero, LENGTH_DIFF_LABEL);
3759 // Load first characters
3760 masm.load_unsigned_word(rcx, Address(rbx, 0));
3761 masm.load_unsigned_word(rdi, Address(rax, 0));
3763 // Compare first characters
3764 masm.subl(rcx, rdi);
3765 masm.jcc(Assembler::notZero, POP_LABEL);
3766 masm.decrementl(rsi);
3767 masm.jcc(Assembler::zero, LENGTH_DIFF_LABEL);
3769 {
3770 // Check after comparing first character to see if strings are equivalent
3771 Label LSkip2;
3772 // Check if the strings start at same location
3773 masm.cmpq(rbx, rax);
3774 masm.jcc(Assembler::notEqual, LSkip2);
3776 // Check if the length difference is zero (from stack)
3777 masm.cmpl(Address(rsp, 0), 0x0);
3778 masm.jcc(Assembler::equal, LENGTH_DIFF_LABEL);
3780 // Strings might not be equivalent
3781 masm.bind(LSkip2);
3782 }
3784 // Shift RAX and RBX to the end of the arrays, negate min
3785 masm.leaq(rax, Address(rax, rsi, Address::times_2, 2));
3786 masm.leaq(rbx, Address(rbx, rsi, Address::times_2, 2));
3787 masm.negq(rsi);
3789 // Compare the rest of the characters
3790 masm.bind(WHILE_HEAD_LABEL);
3791 masm.load_unsigned_word(rcx, Address(rbx, rsi, Address::times_2, 0));
3792 masm.load_unsigned_word(rdi, Address(rax, rsi, Address::times_2, 0));
3793 masm.subl(rcx, rdi);
3794 masm.jcc(Assembler::notZero, POP_LABEL);
3795 masm.incrementq(rsi);
3796 masm.jcc(Assembler::notZero, WHILE_HEAD_LABEL);
3798 // Strings are equal up to min length. Return the length difference.
3799 masm.bind(LENGTH_DIFF_LABEL);
3800 masm.popq(rcx);
3801 masm.jmp(DONE_LABEL);
3803 // Discard the stored length difference
3804 masm.bind(POP_LABEL);
3805 masm.addq(rsp, 8);
3807 // That's it
3808 masm.bind(DONE_LABEL);
3809 %}
// Tail-jump to the shared rethrow stub (E9 rel32), with a runtime-call
// relocation so the displacement survives code movement.
3811 enc_class enc_rethrow()
3812 %{
3813 cbuf.set_inst_mark();
3814 emit_opcode(cbuf, 0xE9); // jmp entry
3815 emit_d32_reloc(cbuf,
3816 (int) (OptoRuntime::rethrow_stub() - cbuf.code_end() - 4),
3817 runtime_call_Relocation::spec(),
3818 RELOC_DISP32);
3819 %}
// Float absolute value: ANDPS dst, [float_sign_mask] (0F 54) with a
// RIP-relative operand clearing the sign bit.
3821 enc_class absF_encoding(regF dst)
3822 %{
3823 int dstenc = $dst$$reg;
3824 address signmask_address = (address) StubRoutines::amd64::float_sign_mask();
3826 cbuf.set_inst_mark();
3827 if (dstenc >= 8) {
3828 emit_opcode(cbuf, Assembler::REX_R);
3829 dstenc -= 8;
3830 }
3831 // XXX reg_mem doesn't support RIP-relative addressing yet
3832 emit_opcode(cbuf, 0x0F);
3833 emit_opcode(cbuf, 0x54);
3834 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101
3835 emit_d32_reloc(cbuf, signmask_address);
3836 %}
// Double absolute value: ANDPD dst, [double_sign_mask] (66 0F 54) with a
// RIP-relative operand clearing the sign bit.
3838 enc_class absD_encoding(regD dst)
3839 %{
3840 int dstenc = $dst$$reg;
3841 address signmask_address = (address) StubRoutines::amd64::double_sign_mask();
3843 cbuf.set_inst_mark();
3844 emit_opcode(cbuf, 0x66);
3845 if (dstenc >= 8) {
3846 emit_opcode(cbuf, Assembler::REX_R);
3847 dstenc -= 8;
3848 }
3849 // XXX reg_mem doesn't support RIP-relative addressing yet
3850 emit_opcode(cbuf, 0x0F);
3851 emit_opcode(cbuf, 0x54);
3852 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101
3853 emit_d32_reloc(cbuf, signmask_address);
3854 %}
// Float negation: XORPS dst, [float_sign_flip] (0F 57) with a
// RIP-relative operand flipping the sign bit.
3856 enc_class negF_encoding(regF dst)
3857 %{
3858 int dstenc = $dst$$reg;
3859 address signflip_address = (address) StubRoutines::amd64::float_sign_flip();
3861 cbuf.set_inst_mark();
3862 if (dstenc >= 8) {
3863 emit_opcode(cbuf, Assembler::REX_R);
3864 dstenc -= 8;
3865 }
3866 // XXX reg_mem doesn't support RIP-relative addressing yet
3867 emit_opcode(cbuf, 0x0F);
3868 emit_opcode(cbuf, 0x57);
3869 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101
3870 emit_d32_reloc(cbuf, signflip_address);
3871 %}
// Double negation: XORPD dst, [double_sign_flip] (66 0F 57) with a
// RIP-relative operand flipping the sign bit.
3873 enc_class negD_encoding(regD dst)
3874 %{
3875 int dstenc = $dst$$reg;
3876 address signflip_address = (address) StubRoutines::amd64::double_sign_flip();
3878 cbuf.set_inst_mark();
3879 emit_opcode(cbuf, 0x66);
3880 if (dstenc >= 8) {
3881 emit_opcode(cbuf, Assembler::REX_R);
3882 dstenc -= 8;
3883 }
3884 // XXX reg_mem doesn't support RIP-relative addressing yet
3885 emit_opcode(cbuf, 0x0F);
3886 emit_opcode(cbuf, 0x57);
3887 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101
3888 emit_d32_reloc(cbuf, signflip_address);
3889 %}
// Slow-path fixup after a float->int convert: cvttss2si yields the
// sentinel 0x80000000 for NaN and out-of-range inputs, so when dst holds
// that value we spill src and call the f2i_fixup stub to compute the
// Java-specified result. The hard-coded short-jump distances below
// account for the varying number of REX prefixes in the skipped code.
3891 enc_class f2i_fixup(rRegI dst, regF src)
3892 %{
3893 int dstenc = $dst$$reg;
3894 int srcenc = $src$$reg;
3896 // cmpl $dst, #0x80000000
3897 if (dstenc >= 8) {
3898 emit_opcode(cbuf, Assembler::REX_B);
3899 }
3900 emit_opcode(cbuf, 0x81);
3901 emit_rm(cbuf, 0x3, 0x7, dstenc & 7);
3902 emit_d32(cbuf, 0x80000000);
3904 // jne,s done
3905 emit_opcode(cbuf, 0x75);
3906 if (srcenc < 8 && dstenc < 8) {
3907 emit_d8(cbuf, 0xF);
3908 } else if (srcenc >= 8 && dstenc >= 8) {
3909 emit_d8(cbuf, 0x11);
3910 } else {
3911 emit_d8(cbuf, 0x10);
3912 }
3914 // subq rsp, #8
3915 emit_opcode(cbuf, Assembler::REX_W);
3916 emit_opcode(cbuf, 0x83);
3917 emit_rm(cbuf, 0x3, 0x5, RSP_enc);
3918 emit_d8(cbuf, 8);
3920 // movss [rsp], $src
3921 emit_opcode(cbuf, 0xF3);
3922 if (srcenc >= 8) {
3923 emit_opcode(cbuf, Assembler::REX_R);
3924 }
3925 emit_opcode(cbuf, 0x0F);
3926 emit_opcode(cbuf, 0x11);
3927 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes
3929 // call f2i_fixup
3930 cbuf.set_inst_mark();
3931 emit_opcode(cbuf, 0xE8);
3932 emit_d32_reloc(cbuf,
3933 (int)
3934 (StubRoutines::amd64::f2i_fixup() - cbuf.code_end() - 4),
3935 runtime_call_Relocation::spec(),
3936 RELOC_DISP32);
3938 // popq $dst
3939 if (dstenc >= 8) {
3940 emit_opcode(cbuf, Assembler::REX_B);
3941 }
3942 emit_opcode(cbuf, 0x58 | (dstenc & 7));
3944 // done:
3945 %}
// Slow-path fixup after a float->long convert: cvttss2si yields the
// sentinel 0x8000000000000000 for NaN and out-of-range inputs (compared
// via a RIP-relative load of double_sign_flip, which holds that bit
// pattern); on a match, spill src and call the f2l_fixup stub.
3947 enc_class f2l_fixup(rRegL dst, regF src)
3948 %{
3949 int dstenc = $dst$$reg;
3950 int srcenc = $src$$reg;
3951 address const_address = (address) StubRoutines::amd64::double_sign_flip();
3953 // cmpq $dst, [0x8000000000000000]
3954 cbuf.set_inst_mark();
3955 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX_W : Assembler::REX_WR);
3956 emit_opcode(cbuf, 0x39);
3957 // XXX reg_mem doesn't support RIP-relative addressing yet
3958 emit_rm(cbuf, 0x0, dstenc & 7, 0x5); // 00 reg 101
3959 emit_d32_reloc(cbuf, const_address);
3962 // jne,s done
3963 // Skip distance accounts for the REX prefixes emitted below.
3963 emit_opcode(cbuf, 0x75);
3964 if (srcenc < 8 && dstenc < 8) {
3965 emit_d8(cbuf, 0xF);
3966 } else if (srcenc >= 8 && dstenc >= 8) {
3967 emit_d8(cbuf, 0x11);
3968 } else {
3969 emit_d8(cbuf, 0x10);
3970 }
3972 // subq rsp, #8
3973 emit_opcode(cbuf, Assembler::REX_W);
3974 emit_opcode(cbuf, 0x83);
3975 emit_rm(cbuf, 0x3, 0x5, RSP_enc);
3976 emit_d8(cbuf, 8);
3978 // movss [rsp], $src
3979 emit_opcode(cbuf, 0xF3);
3980 if (srcenc >= 8) {
3981 emit_opcode(cbuf, Assembler::REX_R);
3982 }
3983 emit_opcode(cbuf, 0x0F);
3984 emit_opcode(cbuf, 0x11);
3985 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes
3987 // call f2l_fixup
3988 cbuf.set_inst_mark();
3989 emit_opcode(cbuf, 0xE8);
3990 emit_d32_reloc(cbuf,
3991 (int)
3992 (StubRoutines::amd64::f2l_fixup() - cbuf.code_end() - 4),
3993 runtime_call_Relocation::spec(),
3994 RELOC_DISP32);
3996 // popq $dst
3997 if (dstenc >= 8) {
3998 emit_opcode(cbuf, Assembler::REX_B);
3999 }
4000 emit_opcode(cbuf, 0x58 | (dstenc & 7));
4002 // done:
4003 %}
// Slow-path fixup after a double->int convert: cvttsd2si yields the
// sentinel 0x80000000 for NaN and out-of-range inputs; on a match, spill
// src (movsd) and call the d2i_fixup stub. Structure mirrors f2i_fixup.
4005 enc_class d2i_fixup(rRegI dst, regD src)
4006 %{
4007 int dstenc = $dst$$reg;
4008 int srcenc = $src$$reg;
4010 // cmpl $dst, #0x80000000
4011 if (dstenc >= 8) {
4012 emit_opcode(cbuf, Assembler::REX_B);
4013 }
4014 emit_opcode(cbuf, 0x81);
4015 emit_rm(cbuf, 0x3, 0x7, dstenc & 7);
4016 emit_d32(cbuf, 0x80000000);
4018 // jne,s done
4019 emit_opcode(cbuf, 0x75);
4020 if (srcenc < 8 && dstenc < 8) {
4021 emit_d8(cbuf, 0xF);
4022 } else if (srcenc >= 8 && dstenc >= 8) {
4023 emit_d8(cbuf, 0x11);
4024 } else {
4025 emit_d8(cbuf, 0x10);
4026 }
4028 // subq rsp, #8
4029 emit_opcode(cbuf, Assembler::REX_W);
4030 emit_opcode(cbuf, 0x83);
4031 emit_rm(cbuf, 0x3, 0x5, RSP_enc);
4032 emit_d8(cbuf, 8);
4034 // movsd [rsp], $src
4035 emit_opcode(cbuf, 0xF2);
4036 if (srcenc >= 8) {
4037 emit_opcode(cbuf, Assembler::REX_R);
4038 }
4039 emit_opcode(cbuf, 0x0F);
4040 emit_opcode(cbuf, 0x11);
4041 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes
4043 // call d2i_fixup
4044 cbuf.set_inst_mark();
4045 emit_opcode(cbuf, 0xE8);
4046 emit_d32_reloc(cbuf,
4047 (int)
4048 (StubRoutines::amd64::d2i_fixup() - cbuf.code_end() - 4),
4049 runtime_call_Relocation::spec(),
4050 RELOC_DISP32);
4052 // popq $dst
4053 if (dstenc >= 8) {
4054 emit_opcode(cbuf, Assembler::REX_B);
4055 }
4056 emit_opcode(cbuf, 0x58 | (dstenc & 7));
4058 // done:
4059 %}
// Slow-path fixup after a double->long convert: cvttsd2si yields the
// sentinel 0x8000000000000000 for NaN and out-of-range inputs (compared
// via a RIP-relative load of double_sign_flip); on a match, spill src
// (movsd) and call the d2l_fixup stub. Structure mirrors f2l_fixup.
4061 enc_class d2l_fixup(rRegL dst, regD src)
4062 %{
4063 int dstenc = $dst$$reg;
4064 int srcenc = $src$$reg;
4065 address const_address = (address) StubRoutines::amd64::double_sign_flip();
4067 // cmpq $dst, [0x8000000000000000]
4068 cbuf.set_inst_mark();
4069 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX_W : Assembler::REX_WR);
4070 emit_opcode(cbuf, 0x39);
4071 // XXX reg_mem doesn't support RIP-relative addressing yet
4072 emit_rm(cbuf, 0x0, dstenc & 7, 0x5); // 00 reg 101
4073 emit_d32_reloc(cbuf, const_address);
4076 // jne,s done
4077 emit_opcode(cbuf, 0x75);
4078 if (srcenc < 8 && dstenc < 8) {
4079 emit_d8(cbuf, 0xF);
4080 } else if (srcenc >= 8 && dstenc >= 8) {
4081 emit_d8(cbuf, 0x11);
4082 } else {
4083 emit_d8(cbuf, 0x10);
4084 }
4086 // subq rsp, #8
4087 emit_opcode(cbuf, Assembler::REX_W);
4088 emit_opcode(cbuf, 0x83);
4089 emit_rm(cbuf, 0x3, 0x5, RSP_enc);
4090 emit_d8(cbuf, 8);
4092 // movsd [rsp], $src
4093 emit_opcode(cbuf, 0xF2);
4094 if (srcenc >= 8) {
4095 emit_opcode(cbuf, Assembler::REX_R);
4096 }
4097 emit_opcode(cbuf, 0x0F);
4098 emit_opcode(cbuf, 0x11);
4099 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes
4101 // call d2l_fixup
4102 cbuf.set_inst_mark();
4103 emit_opcode(cbuf, 0xE8);
4104 emit_d32_reloc(cbuf,
4105 (int)
4106 (StubRoutines::amd64::d2l_fixup() - cbuf.code_end() - 4),
4107 runtime_call_Relocation::spec(),
4108 RELOC_DISP32);
4110 // popq $dst
4111 if (dstenc >= 8) {
4112 emit_opcode(cbuf, Assembler::REX_B);
4113 }
4114 emit_opcode(cbuf, 0x58 | (dstenc & 7));
4116 // done:
4117 %}
// Acquire barrier: intentionally empty — x86 loads already have acquire
// semantics (TSO), so no instruction is emitted.
4119 enc_class enc_membar_acquire
4120 %{
4121 // [jk] not needed currently, if you enable this and it really
4122 // emits code don't forget to the remove the "size(0)" line in
4123 // membar_acquire()
4124 // MacroAssembler masm(&cbuf);
4125 // masm.membar(Assembler::Membar_mask_bits(Assembler::LoadStore |
4126 // Assembler::LoadLoad));
4127 %}
// Release barrier: intentionally empty — x86 stores already have release
// semantics (TSO), so no instruction is emitted.
4129 enc_class enc_membar_release
4130 %{
4131 // [jk] not needed currently, if you enable this and it really
4132 // emits code don't forget to the remove the "size(0)" line in
4133 // membar_release()
4134 // MacroAssembler masm(&cbuf);
4135 // masm.membar(Assembler::Membar_mask_bits(Assembler::LoadStore |
4136 // Assembler::StoreStore));
4137 %}
// Full fence for volatile stores: only StoreLoad needs a real instruction
// on x86; MacroAssembler::membar chooses the concrete encoding.
4139 enc_class enc_membar_volatile
4140 %{
4141 MacroAssembler masm(&cbuf);
4142 masm.membar(Assembler::Membar_mask_bits(Assembler::StoreLoad |
4143 Assembler::StoreStore));
4144 %}
4146 // Safepoint Poll. This polls the safepoint page, and causes an
4147 // exception if it is not readable. Unfortunately, it kills
4148 // RFLAGS in the process.
// Emits "testl %eax, [rip+disp32]" with a poll_type relocation so the VM
// can recognize the faulting PC when the polling page is protected.
4149 enc_class enc_safepoint_poll
4150 %{
4151 // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes
4152 // XXX reg_mem doesn't support RIP-relative addressing yet
4153 cbuf.set_inst_mark();
4154 cbuf.relocate(cbuf.inst_mark(), relocInfo::poll_type, 0); // XXX
4155 emit_opcode(cbuf, 0x85); // testl
4156 emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5
4157 // cbuf.inst_mark() is beginning of instruction
4158 emit_d32_reloc(cbuf, os::get_polling_page());
4159 // relocInfo::poll_type,
4160 %}
4161 %}
4165 //----------FRAME--------------------------------------------------------------
4166 // Definition of frame structure and management information.
4167 //
4168 // S T A C K L A Y O U T Allocators stack-slot number
4169 // | (to get allocators register number
4170 // G Owned by | | v add OptoReg::stack0())
4171 // r CALLER | |
4172 // o | +--------+ pad to even-align allocators stack-slot
4173 // w V | pad0 | numbers; owned by CALLER
4174 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
4175 // h ^ | in | 5
4176 // | | args | 4 Holes in incoming args owned by SELF
4177 // | | | | 3
4178 // | | +--------+
4179 // V | | old out| Empty on Intel, window on Sparc
4180 // | old |preserve| Must be even aligned.
4181 // | SP-+--------+----> Matcher::_old_SP, even aligned
4182 // | | in | 3 area for Intel ret address
4183 // Owned by |preserve| Empty on Sparc.
4184 // SELF +--------+
4185 // | | pad2 | 2 pad to align old SP
4186 // | +--------+ 1
4187 // | | locks | 0
4188 // | +--------+----> OptoReg::stack0(), even aligned
4189 // | | pad1 | 11 pad to align new SP
4190 // | +--------+
4191 // | | | 10
4192 // | | spills | 9 spills
4193 // V | | 8 (pad0 slot for callee)
4194 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
4195 // ^ | out | 7
4196 // | | args | 6 Holes in outgoing args owned by CALLEE
4197 // Owned by +--------+
4198 // CALLEE | new out| 6 Empty on Intel, window on Sparc
4199 // | new |preserve| Must be even-aligned.
4200 // | SP-+--------+----> Matcher::_new_SP, even aligned
4201 // | | |
4202 //
4203 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
4204 // known from SELF's arguments and the Java calling convention.
4205 // Region 6-7 is determined per call site.
4206 // Note 2: If the calling convention leaves holes in the incoming argument
4207 // area, those holes are owned by SELF. Holes in the outgoing area
4208 // are owned by the CALLEE. Holes should not be necessary in the
4209 // incoming area, as the Java calling convention is completely under
4210 // the control of the AD file. Doubles can be sorted and packed to
4211 // avoid holes. Holes in the outgoing arguments may be necessary for
4212 // varargs C calling conventions.
4213 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
4214 // even aligned with pad0 as needed.
4215 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
4216 // region 6-11 is even aligned; it may be padded out more so that
4217 // the region from SP to FP meets the minimum stack alignment.
4218 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
4219 // alignment. Region 11, pad1, may be dynamically extended so that
4220 // SP meets the minimum alignment.
4222 frame
4223 %{
4224 // What direction does stack grow in (assumed to be same for C & Java)
4225 stack_direction(TOWARDS_LOW);
4227 // These three registers define part of the calling convention
4228 // between compiled code and the interpreter.
4229 inline_cache_reg(RAX); // Inline Cache Register
4230 interpreter_method_oop_reg(RBX); // Method Oop Register when
4231 // calling interpreter
4233 // Optional: name the operand used by cisc-spilling to access
4234 // [stack_pointer + offset]
4235 cisc_spilling_operand_name(indOffset32);
4237 // Number of stack slots consumed by locking an object
4238 sync_stack_slots(2);
4240 // Compiled code's Frame Pointer
4241 frame_pointer(RSP);
4243 // Interpreter stores its frame pointer in a register which is
4244 // stored to the stack by I2CAdaptors.
4245 // I2CAdaptors convert from interpreted java to compiled java.
4246 interpreter_frame_pointer(RBP);
4248 // Stack alignment requirement
4249 stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)
4251 // Number of stack slots between incoming argument block and the start of
4252 // a new frame. The PROLOG must add this many slots to the stack. The
4253 // EPILOG must remove this many slots. amd64 needs two slots for
4254 // return address.
4255 in_preserve_stack_slots(4 + 2 * VerifyStackAtCalls);
4257 // Number of outgoing stack slots killed above the out_preserve_stack_slots
4258 // for calls to C. Supports the var-args backing area for register parms.
4259 varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);
4261 // The after-PROLOG location of the return address. Location of
4262 // return address specifies a type (REG or STACK) and a number
4263 // representing the register number (i.e. - use a register name) or
4264 // stack slot.
4265 // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
4266 // Otherwise, it is above the locks and verification slot and alignment word
4267 return_addr(STACK - 2 +
4268 round_to(2 + 2 * VerifyStackAtCalls +
4269 Compile::current()->fixed_slots(),
4270 WordsPerLong * 2));
4272 // Body of function which returns an integer array locating
4273 // arguments either in registers or in stack slots. Passed an array
4274 // of ideal registers called "sig" and a "length" count. Stack-slot
4275 // offsets are based on outgoing arguments, i.e. a CALLER setting up
4276 // arguments for a CALLEE. Incoming stack arguments are
4277 // automatically biased by the preserve_stack_slots field above.
4279 calling_convention
4280 %{
4281 // No difference between ingoing/outgoing just pass false
4282 SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
4283 %}
4285 c_calling_convention
4286 %{
4287 // This is obviously always outgoing
4288 (void) SharedRuntime::c_calling_convention(sig_bt, regs, length);
4289 %}
4291 // Location of compiled Java return values. Same as C for now.
4292 return_value
4293 %{
4294 assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
4295 "only return normal values");
4297 static const int lo[Op_RegL + 1] = {
4298 0,
4299 0,
4300 RAX_num, // Op_RegN
4301 RAX_num, // Op_RegI
4302 RAX_num, // Op_RegP
4303 XMM0_num, // Op_RegF
4304 XMM0_num, // Op_RegD
4305 RAX_num // Op_RegL
4306 };
4307 static const int hi[Op_RegL + 1] = {
4308 0,
4309 0,
4310 OptoReg::Bad, // Op_RegN
4311 OptoReg::Bad, // Op_RegI
4312 RAX_H_num, // Op_RegP
4313 OptoReg::Bad, // Op_RegF
4314 XMM0_H_num, // Op_RegD
4315 RAX_H_num // Op_RegL
4316 };
4317 assert(ARRAY_SIZE(hi) == _last_machine_leaf - 1, "missing type");
4318 return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
4319 %}
4320 %}
4322 //----------ATTRIBUTES---------------------------------------------------------
4323 //----------Operand Attributes-------------------------------------------------
4324 op_attrib op_cost(0); // Required cost attribute
4326 //----------Instruction Attributes---------------------------------------------
4327 ins_attrib ins_cost(100); // Required cost attribute
4328 ins_attrib ins_size(8); // Required size attribute (in bits)
4329 ins_attrib ins_pc_relative(0); // Required PC Relative flag
4330 ins_attrib ins_short_branch(0); // Required flag: is this instruction
4331 // a non-matching short branch variant
4332 // of some long branch?
4333 ins_attrib ins_alignment(1); // Required alignment attribute (must
4334 // be a power of 2) specifies the
4335 // alignment that some part of the
4336 // instruction (not necessarily the
4337 // start) requires. If > 1, a
4338 // compute_padding() function must be
4339 // provided for the instruction
4341 //----------OPERANDS-----------------------------------------------------------
4342 // Operand definitions must precede instruction definitions for correct parsing
4343 // in the ADLC because operands constitute user defined types which are used in
4344 // instruction definitions.
4346 //----------Simple Operands----------------------------------------------------
4347 // Immediate Operands
4348 // Integer Immediate
4349 operand immI()
4350 %{
4351 match(ConI);
4353 op_cost(10);
4354 format %{ %}
4355 interface(CONST_INTER);
4356 %}
4358 // Constant for test vs zero
4359 operand immI0()
4360 %{
4361 predicate(n->get_int() == 0);
4362 match(ConI);
4364 op_cost(0);
4365 format %{ %}
4366 interface(CONST_INTER);
4367 %}
4369 // Constant for increment
4370 operand immI1()
4371 %{
4372 predicate(n->get_int() == 1);
4373 match(ConI);
4375 op_cost(0);
4376 format %{ %}
4377 interface(CONST_INTER);
4378 %}
4380 // Constant for decrement
4381 operand immI_M1()
4382 %{
4383 predicate(n->get_int() == -1);
4384 match(ConI);
4386 op_cost(0);
4387 format %{ %}
4388 interface(CONST_INTER);
4389 %}
4391 // Valid scale values for addressing modes
4392 operand immI2()
4393 %{
4394 predicate(0 <= n->get_int() && (n->get_int() <= 3));
4395 match(ConI);
4397 format %{ %}
4398 interface(CONST_INTER);
4399 %}
4401 operand immI8()
4402 %{
4403 predicate((-0x80 <= n->get_int()) && (n->get_int() < 0x80));
4404 match(ConI);
4406 op_cost(5);
4407 format %{ %}
4408 interface(CONST_INTER);
4409 %}
4411 operand immI16()
4412 %{
4413 predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
4414 match(ConI);
4416 op_cost(10);
4417 format %{ %}
4418 interface(CONST_INTER);
4419 %}
4421 // Constant for long shifts
4422 operand immI_32()
4423 %{
4424 predicate( n->get_int() == 32 );
4425 match(ConI);
4427 op_cost(0);
4428 format %{ %}
4429 interface(CONST_INTER);
4430 %}
4432 // Constant for long shifts
4433 operand immI_64()
4434 %{
4435 predicate( n->get_int() == 64 );
4436 match(ConI);
4438 op_cost(0);
4439 format %{ %}
4440 interface(CONST_INTER);
4441 %}
4443 // Pointer Immediate
4444 operand immP()
4445 %{
4446 match(ConP);
4448 op_cost(10);
4449 format %{ %}
4450 interface(CONST_INTER);
4451 %}
4453 // NULL Pointer Immediate
4454 operand immP0()
4455 %{
4456 predicate(n->get_ptr() == 0);
4457 match(ConP);
4459 op_cost(5);
4460 format %{ %}
4461 interface(CONST_INTER);
4462 %}
4464 // Pointer Immediate
4465 operand immN() %{
4466 match(ConN);
4468 op_cost(10);
4469 format %{ %}
4470 interface(CONST_INTER);
4471 %}
4473 // NULL Pointer Immediate
4474 operand immN0() %{
4475 predicate(n->get_narrowcon() == 0);
4476 match(ConN);
4478 op_cost(5);
4479 format %{ %}
4480 interface(CONST_INTER);
4481 %}
4483 operand immP31()
4484 %{
4485 predicate(!n->as_Type()->type()->isa_oopptr()
4486 && (n->get_ptr() >> 31) == 0);
4487 match(ConP);
4489 op_cost(5);
4490 format %{ %}
4491 interface(CONST_INTER);
4492 %}
4495 // Long Immediate
4496 operand immL()
4497 %{
4498 match(ConL);
4500 op_cost(20);
4501 format %{ %}
4502 interface(CONST_INTER);
4503 %}
4505 // Long Immediate 8-bit
4506 operand immL8()
4507 %{
4508 predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
4509 match(ConL);
4511 op_cost(5);
4512 format %{ %}
4513 interface(CONST_INTER);
4514 %}
4516 // Long Immediate 32-bit unsigned
4517 operand immUL32()
4518 %{
4519 predicate(n->get_long() == (unsigned int) (n->get_long()));
4520 match(ConL);
4522 op_cost(10);
4523 format %{ %}
4524 interface(CONST_INTER);
4525 %}
4527 // Long Immediate 32-bit signed
4528 operand immL32()
4529 %{
4530 predicate(n->get_long() == (int) (n->get_long()));
4531 match(ConL);
4533 op_cost(15);
4534 format %{ %}
4535 interface(CONST_INTER);
4536 %}
4538 // Long Immediate zero
4539 operand immL0()
4540 %{
4541 predicate(n->get_long() == 0L);
4542 match(ConL);
4544 op_cost(10);
4545 format %{ %}
4546 interface(CONST_INTER);
4547 %}
4549 // Constant for increment
4550 operand immL1()
4551 %{
4552 predicate(n->get_long() == 1);
4553 match(ConL);
4555 format %{ %}
4556 interface(CONST_INTER);
4557 %}
4559 // Constant for decrement
4560 operand immL_M1()
4561 %{
4562 predicate(n->get_long() == -1);
4563 match(ConL);
4565 format %{ %}
4566 interface(CONST_INTER);
4567 %}
4569 // Long Immediate: the value 10
4570 operand immL10()
4571 %{
4572 predicate(n->get_long() == 10);
4573 match(ConL);
4575 format %{ %}
4576 interface(CONST_INTER);
4577 %}
4579 // Long immediate from 0 to 127.
4580 // Used for a shorter form of long mul by 10.
4581 operand immL_127()
4582 %{
4583 predicate(0 <= n->get_long() && n->get_long() < 0x80);
4584 match(ConL);
4586 op_cost(10);
4587 format %{ %}
4588 interface(CONST_INTER);
4589 %}
4591 // Long Immediate: low 32-bit mask
4592 operand immL_32bits()
4593 %{
4594 predicate(n->get_long() == 0xFFFFFFFFL);
4595 match(ConL);
4596 op_cost(20);
4598 format %{ %}
4599 interface(CONST_INTER);
4600 %}
4602 // Float Immediate zero
4603 operand immF0()
4604 %{
4605 predicate(jint_cast(n->getf()) == 0);
4606 match(ConF);
4608 op_cost(5);
4609 format %{ %}
4610 interface(CONST_INTER);
4611 %}
4613 // Float Immediate
4614 operand immF()
4615 %{
4616 match(ConF);
4618 op_cost(15);
4619 format %{ %}
4620 interface(CONST_INTER);
4621 %}
4623 // Double Immediate zero
4624 operand immD0()
4625 %{
4626 predicate(jlong_cast(n->getd()) == 0);
4627 match(ConD);
4629 op_cost(5);
4630 format %{ %}
4631 interface(CONST_INTER);
4632 %}
4634 // Double Immediate
4635 operand immD()
4636 %{
4637 match(ConD);
4639 op_cost(15);
4640 format %{ %}
4641 interface(CONST_INTER);
4642 %}
4644 // Immediates for special shifts (sign extend)
4646 // Constants for increment
4647 operand immI_16()
4648 %{
4649 predicate(n->get_int() == 16);
4650 match(ConI);
4652 format %{ %}
4653 interface(CONST_INTER);
4654 %}
4656 operand immI_24()
4657 %{
4658 predicate(n->get_int() == 24);
4659 match(ConI);
4661 format %{ %}
4662 interface(CONST_INTER);
4663 %}
4665 // Constant for byte-wide masking
4666 operand immI_255()
4667 %{
4668 predicate(n->get_int() == 255);
4669 match(ConI);
4671 format %{ %}
4672 interface(CONST_INTER);
4673 %}
4675 // Constant for short-wide masking
4676 operand immI_65535()
4677 %{
4678 predicate(n->get_int() == 65535);
4679 match(ConI);
4681 format %{ %}
4682 interface(CONST_INTER);
4683 %}
4685 // Constant for byte-wide masking
4686 operand immL_255()
4687 %{
4688 predicate(n->get_long() == 255);
4689 match(ConL);
4691 format %{ %}
4692 interface(CONST_INTER);
4693 %}
4695 // Constant for short-wide masking
4696 operand immL_65535()
4697 %{
4698 predicate(n->get_long() == 65535);
4699 match(ConL);
4701 format %{ %}
4702 interface(CONST_INTER);
4703 %}
4705 // Register Operands
4706 // Integer Register
4707 operand rRegI()
4708 %{
4709 constraint(ALLOC_IN_RC(int_reg));
4710 match(RegI);
4712 match(rax_RegI);
4713 match(rbx_RegI);
4714 match(rcx_RegI);
4715 match(rdx_RegI);
4716 match(rdi_RegI);
4718 format %{ %}
4719 interface(REG_INTER);
4720 %}
4722 // Special Registers
4723 operand rax_RegI()
4724 %{
4725 constraint(ALLOC_IN_RC(int_rax_reg));
4726 match(RegI);
4727 match(rRegI);
4729 format %{ "RAX" %}
4730 interface(REG_INTER);
4731 %}
4733 // Special Registers
4734 operand rbx_RegI()
4735 %{
4736 constraint(ALLOC_IN_RC(int_rbx_reg));
4737 match(RegI);
4738 match(rRegI);
4740 format %{ "RBX" %}
4741 interface(REG_INTER);
4742 %}
4744 operand rcx_RegI()
4745 %{
4746 constraint(ALLOC_IN_RC(int_rcx_reg));
4747 match(RegI);
4748 match(rRegI);
4750 format %{ "RCX" %}
4751 interface(REG_INTER);
4752 %}
4754 operand rdx_RegI()
4755 %{
4756 constraint(ALLOC_IN_RC(int_rdx_reg));
4757 match(RegI);
4758 match(rRegI);
4760 format %{ "RDX" %}
4761 interface(REG_INTER);
4762 %}
4764 operand rdi_RegI()
4765 %{
4766 constraint(ALLOC_IN_RC(int_rdi_reg));
4767 match(RegI);
4768 match(rRegI);
4770 format %{ "RDI" %}
4771 interface(REG_INTER);
4772 %}
4774 operand no_rcx_RegI()
4775 %{
4776 constraint(ALLOC_IN_RC(int_no_rcx_reg));
4777 match(RegI);
4778 match(rax_RegI);
4779 match(rbx_RegI);
4780 match(rdx_RegI);
4781 match(rdi_RegI);
4783 format %{ %}
4784 interface(REG_INTER);
4785 %}
4787 operand no_rax_rdx_RegI()
4788 %{
4789 constraint(ALLOC_IN_RC(int_no_rax_rdx_reg));
4790 match(RegI);
4791 match(rbx_RegI);
4792 match(rcx_RegI);
4793 match(rdi_RegI);
4795 format %{ %}
4796 interface(REG_INTER);
4797 %}
4799 // Pointer Register
4800 operand any_RegP()
4801 %{
4802 constraint(ALLOC_IN_RC(any_reg));
4803 match(RegP);
4804 match(rax_RegP);
4805 match(rbx_RegP);
4806 match(rdi_RegP);
4807 match(rsi_RegP);
4808 match(rbp_RegP);
4809 match(r15_RegP);
4810 match(rRegP);
4812 format %{ %}
4813 interface(REG_INTER);
4814 %}
4816 operand rRegP()
4817 %{
4818 constraint(ALLOC_IN_RC(ptr_reg));
4819 match(RegP);
4820 match(rax_RegP);
4821 match(rbx_RegP);
4822 match(rdi_RegP);
4823 match(rsi_RegP);
4824 match(rbp_RegP);
4825 match(r15_RegP); // See Q&A below about r15_RegP.
4827 format %{ %}
4828 interface(REG_INTER);
4829 %}
4832 operand r12RegL() %{
4833 constraint(ALLOC_IN_RC(long_r12_reg));
4834 match(RegL);
4836 format %{ %}
4837 interface(REG_INTER);
4838 %}
4840 operand rRegN() %{
4841 constraint(ALLOC_IN_RC(int_reg));
4842 match(RegN);
4844 format %{ %}
4845 interface(REG_INTER);
4846 %}
4848 // Question: Why is r15_RegP (the read-only TLS register) a match for rRegP?
4849 // Answer: Operand match rules govern the DFA as it processes instruction inputs.
4850 // It's fine for an instruction input which expects rRegP to match a r15_RegP.
4851 // The output of an instruction is controlled by the allocator, which respects
4852 // register class masks, not match rules. Unless an instruction mentions
4853 // r15_RegP or any_RegP explicitly as its output, r15 will not be considered
4854 // by the allocator as an input.
4856 operand no_rax_RegP()
4857 %{
4858 constraint(ALLOC_IN_RC(ptr_no_rax_reg));
4859 match(RegP);
4860 match(rbx_RegP);
4861 match(rsi_RegP);
4862 match(rdi_RegP);
4864 format %{ %}
4865 interface(REG_INTER);
4866 %}
4868 operand no_rbp_RegP()
4869 %{
4870 constraint(ALLOC_IN_RC(ptr_no_rbp_reg));
4871 match(RegP);
4872 match(rbx_RegP);
4873 match(rsi_RegP);
4874 match(rdi_RegP);
4876 format %{ %}
4877 interface(REG_INTER);
4878 %}
4880 operand no_rax_rbx_RegP()
4881 %{
4882 constraint(ALLOC_IN_RC(ptr_no_rax_rbx_reg));
4883 match(RegP);
4884 match(rsi_RegP);
4885 match(rdi_RegP);
4887 format %{ %}
4888 interface(REG_INTER);
4889 %}
4891 // Special Registers
4892 // Return a pointer value
4893 operand rax_RegP()
4894 %{
4895 constraint(ALLOC_IN_RC(ptr_rax_reg));
4896 match(RegP);
4897 match(rRegP);
4899 format %{ %}
4900 interface(REG_INTER);
4901 %}
4903 // Special Registers
4904 // Return a compressed pointer value
4905 operand rax_RegN()
4906 %{
4907 constraint(ALLOC_IN_RC(int_rax_reg));
4908 match(RegN);
4909 match(rRegN);
4911 format %{ %}
4912 interface(REG_INTER);
4913 %}
4915 // Used in AtomicAdd
4916 operand rbx_RegP()
4917 %{
4918 constraint(ALLOC_IN_RC(ptr_rbx_reg));
4919 match(RegP);
4920 match(rRegP);
4922 format %{ %}
4923 interface(REG_INTER);
4924 %}
4926 operand rsi_RegP()
4927 %{
4928 constraint(ALLOC_IN_RC(ptr_rsi_reg));
4929 match(RegP);
4930 match(rRegP);
4932 format %{ %}
4933 interface(REG_INTER);
4934 %}
4936 // Used in rep stosq
4937 operand rdi_RegP()
4938 %{
4939 constraint(ALLOC_IN_RC(ptr_rdi_reg));
4940 match(RegP);
4941 match(rRegP);
4943 format %{ %}
4944 interface(REG_INTER);
4945 %}
4947 operand rbp_RegP()
4948 %{
4949 constraint(ALLOC_IN_RC(ptr_rbp_reg));
4950 match(RegP);
4951 match(rRegP);
4953 format %{ %}
4954 interface(REG_INTER);
4955 %}
4957 operand r15_RegP()
4958 %{
4959 constraint(ALLOC_IN_RC(ptr_r15_reg));
4960 match(RegP);
4961 match(rRegP);
4963 format %{ %}
4964 interface(REG_INTER);
4965 %}
4967 operand rRegL()
4968 %{
4969 constraint(ALLOC_IN_RC(long_reg));
4970 match(RegL);
4971 match(rax_RegL);
4972 match(rdx_RegL);
4974 format %{ %}
4975 interface(REG_INTER);
4976 %}
4978 // Special Registers
4979 operand no_rax_rdx_RegL()
4980 %{
4981 constraint(ALLOC_IN_RC(long_no_rax_rdx_reg));
4982 match(RegL);
4983 match(rRegL);
4985 format %{ %}
4986 interface(REG_INTER);
4987 %}
4989 operand no_rax_RegL()
4990 %{
4991 constraint(ALLOC_IN_RC(long_no_rax_rdx_reg));
4992 match(RegL);
4993 match(rRegL);
4994 match(rdx_RegL);
4996 format %{ %}
4997 interface(REG_INTER);
4998 %}
5000 operand no_rcx_RegL()
5001 %{
5002 constraint(ALLOC_IN_RC(long_no_rcx_reg));
5003 match(RegL);
5004 match(rRegL);
5006 format %{ %}
5007 interface(REG_INTER);
5008 %}
5010 operand rax_RegL()
5011 %{
5012 constraint(ALLOC_IN_RC(long_rax_reg));
5013 match(RegL);
5014 match(rRegL);
5016 format %{ "RAX" %}
5017 interface(REG_INTER);
5018 %}
5020 operand rcx_RegL()
5021 %{
5022 constraint(ALLOC_IN_RC(long_rcx_reg));
5023 match(RegL);
5024 match(rRegL);
5026 format %{ %}
5027 interface(REG_INTER);
5028 %}
5030 operand rdx_RegL()
5031 %{
5032 constraint(ALLOC_IN_RC(long_rdx_reg));
5033 match(RegL);
5034 match(rRegL);
5036 format %{ %}
5037 interface(REG_INTER);
5038 %}
5040 // Flags register, used as output of compare instructions
5041 operand rFlagsReg()
5042 %{
5043 constraint(ALLOC_IN_RC(int_flags));
5044 match(RegFlags);
5046 format %{ "RFLAGS" %}
5047 interface(REG_INTER);
5048 %}
5050 // Flags register, used as output of FLOATING POINT compare instructions
5051 operand rFlagsRegU()
5052 %{
5053 constraint(ALLOC_IN_RC(int_flags));
5054 match(RegFlags);
5056 format %{ "RFLAGS_U" %}
5057 interface(REG_INTER);
5058 %}
5060 // Float register operands
5061 operand regF()
5062 %{
5063 constraint(ALLOC_IN_RC(float_reg));
5064 match(RegF);
5066 format %{ %}
5067 interface(REG_INTER);
5068 %}
5070 // Double register operands
5071 operand regD()
5072 %{
5073 constraint(ALLOC_IN_RC(double_reg));
5074 match(RegD);
5076 format %{ %}
5077 interface(REG_INTER);
5078 %}
5081 //----------Memory Operands----------------------------------------------------
5082 // Direct Memory Operand
5083 // operand direct(immP addr)
5084 // %{
5085 // match(addr);
5087 // format %{ "[$addr]" %}
5088 // interface(MEMORY_INTER) %{
5089 // base(0xFFFFFFFF);
5090 // index(0x4);
5091 // scale(0x0);
5092 // disp($addr);
5093 // %}
5094 // %}
5096 // Indirect Memory Operand
5097 operand indirect(any_RegP reg)
5098 %{
5099 constraint(ALLOC_IN_RC(ptr_reg));
5100 match(reg);
5102 format %{ "[$reg]" %}
5103 interface(MEMORY_INTER) %{
5104 base($reg);
5105 index(0x4);
5106 scale(0x0);
5107 disp(0x0);
5108 %}
5109 %}
5111 // Indirect Memory Plus Short Offset Operand
5112 operand indOffset8(any_RegP reg, immL8 off)
5113 %{
5114 constraint(ALLOC_IN_RC(ptr_reg));
5115 match(AddP reg off);
5117 format %{ "[$reg + $off (8-bit)]" %}
5118 interface(MEMORY_INTER) %{
5119 base($reg);
5120 index(0x4);
5121 scale(0x0);
5122 disp($off);
5123 %}
5124 %}
5126 // Indirect Memory Plus Long Offset Operand
5127 operand indOffset32(any_RegP reg, immL32 off)
5128 %{
5129 constraint(ALLOC_IN_RC(ptr_reg));
5130 match(AddP reg off);
5132 format %{ "[$reg + $off (32-bit)]" %}
5133 interface(MEMORY_INTER) %{
5134 base($reg);
5135 index(0x4);
5136 scale(0x0);
5137 disp($off);
5138 %}
5139 %}
5141 // Indirect Memory Plus Index Register Plus Offset Operand
5142 operand indIndexOffset(any_RegP reg, rRegL lreg, immL32 off)
5143 %{
5144 constraint(ALLOC_IN_RC(ptr_reg));
5145 match(AddP (AddP reg lreg) off);
5147 op_cost(10);
5148 format %{"[$reg + $off + $lreg]" %}
5149 interface(MEMORY_INTER) %{
5150 base($reg);
5151 index($lreg);
5152 scale(0x0);
5153 disp($off);
5154 %}
5155 %}
5157 // Indirect Memory Plus Index Register Plus Offset Operand
5158 operand indIndex(any_RegP reg, rRegL lreg)
5159 %{
5160 constraint(ALLOC_IN_RC(ptr_reg));
5161 match(AddP reg lreg);
5163 op_cost(10);
5164 format %{"[$reg + $lreg]" %}
5165 interface(MEMORY_INTER) %{
5166 base($reg);
5167 index($lreg);
5168 scale(0x0);
5169 disp(0x0);
5170 %}
5171 %}
5173 // Indirect Memory Times Scale Plus Index Register
5174 operand indIndexScale(any_RegP reg, rRegL lreg, immI2 scale)
5175 %{
5176 constraint(ALLOC_IN_RC(ptr_reg));
5177 match(AddP reg (LShiftL lreg scale));
5179 op_cost(10);
5180 format %{"[$reg + $lreg << $scale]" %}
5181 interface(MEMORY_INTER) %{
5182 base($reg);
5183 index($lreg);
5184 scale($scale);
5185 disp(0x0);
5186 %}
5187 %}
5189 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5190 operand indIndexScaleOffset(any_RegP reg, immL32 off, rRegL lreg, immI2 scale)
5191 %{
5192 constraint(ALLOC_IN_RC(ptr_reg));
5193 match(AddP (AddP reg (LShiftL lreg scale)) off);
5195 op_cost(10);
5196 format %{"[$reg + $off + $lreg << $scale]" %}
5197 interface(MEMORY_INTER) %{
5198 base($reg);
5199 index($lreg);
5200 scale($scale);
5201 disp($off);
5202 %}
5203 %}
5205 // Indirect Narrow Oop Plus Offset Operand
5206 operand indNarrowOopOffset(rRegN src, immL32 off) %{
5207 constraint(ALLOC_IN_RC(ptr_reg));
5208 match(AddP (DecodeN src) off);
5210 op_cost(10);
5211 format %{"[R12 + $src << 3 + $off] (compressed oop addressing)" %}
5212 interface(MEMORY_INTER) %{
5213 base(0xc); // R12
5214 index($src);
5215 scale(0x3);
5216 disp($off);
5217 %}
5218 %}
5220 // Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand
5221 operand indPosIndexScaleOffset(any_RegP reg, immL32 off, rRegI idx, immI2 scale)
5222 %{
5223 constraint(ALLOC_IN_RC(ptr_reg));
5224 predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5225 match(AddP (AddP reg (LShiftL (ConvI2L idx) scale)) off);
5227 op_cost(10);
5228 format %{"[$reg + $off + $idx << $scale]" %}
5229 interface(MEMORY_INTER) %{
5230 base($reg);
5231 index($idx);
5232 scale($scale);
5233 disp($off);
5234 %}
5235 %}
5237 //----------Special Memory Operands--------------------------------------------
5238 // Stack Slot Operand - This operand is used for loading and storing temporary
5239 // values on the stack where a match requires a value to
5240 // flow through memory.
5241 operand stackSlotP(sRegP reg)
5242 %{
5243 constraint(ALLOC_IN_RC(stack_slots));
5244 // No match rule because this operand is only generated in matching
5246 format %{ "[$reg]" %}
5247 interface(MEMORY_INTER) %{
5248 base(0x4); // RSP
5249 index(0x4); // No Index
5250 scale(0x0); // No Scale
5251 disp($reg); // Stack Offset
5252 %}
5253 %}
5255 operand stackSlotI(sRegI reg)
5256 %{
5257 constraint(ALLOC_IN_RC(stack_slots));
5258 // No match rule because this operand is only generated in matching
5260 format %{ "[$reg]" %}
5261 interface(MEMORY_INTER) %{
5262 base(0x4); // RSP
5263 index(0x4); // No Index
5264 scale(0x0); // No Scale
5265 disp($reg); // Stack Offset
5266 %}
5267 %}
5269 operand stackSlotF(sRegF reg)
5270 %{
5271 constraint(ALLOC_IN_RC(stack_slots));
5272 // No match rule because this operand is only generated in matching
5274 format %{ "[$reg]" %}
5275 interface(MEMORY_INTER) %{
5276 base(0x4); // RSP
5277 index(0x4); // No Index
5278 scale(0x0); // No Scale
5279 disp($reg); // Stack Offset
5280 %}
5281 %}
5283 operand stackSlotD(sRegD reg)
5284 %{
5285 constraint(ALLOC_IN_RC(stack_slots));
5286 // No match rule because this operand is only generated in matching
5288 format %{ "[$reg]" %}
5289 interface(MEMORY_INTER) %{
5290 base(0x4); // RSP
5291 index(0x4); // No Index
5292 scale(0x0); // No Scale
5293 disp($reg); // Stack Offset
5294 %}
5295 %}
5296 operand stackSlotL(sRegL reg)
5297 %{
5298 constraint(ALLOC_IN_RC(stack_slots));
5299 // No match rule because this operand is only generated in matching
5301 format %{ "[$reg]" %}
5302 interface(MEMORY_INTER) %{
5303 base(0x4); // RSP
5304 index(0x4); // No Index
5305 scale(0x0); // No Scale
5306 disp($reg); // Stack Offset
5307 %}
5308 %}
5310 //----------Conditional Branch Operands----------------------------------------
5311 // Comparison Op - This is the operation of the comparison, and is limited to
5312 // the following set of codes:
5313 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5314 //
5315 // Other attributes of the comparison, such as unsignedness, are specified
5316 // by the comparison instruction that sets a condition code flags register.
5317 // That result is represented by a flags operand whose subtype is appropriate
5318 // to the unsignedness (etc.) of the comparison.
5319 //
5320 // Later, the instruction which matches both the Comparison Op (a Bool) and
5321 // the flags (produced by the Cmp) specifies the coding of the comparison op
5322 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5324 // Comparision Code
5325 operand cmpOp()
5326 %{
5327 match(Bool);
5329 format %{ "" %}
5330 interface(COND_INTER) %{
5331 equal(0x4);
5332 not_equal(0x5);
5333 less(0xC);
5334 greater_equal(0xD);
5335 less_equal(0xE);
5336 greater(0xF);
5337 %}
5338 %}
5340 // Comparison Code, unsigned compare. Used by FP also, with
5341 // C2 (unordered) turned into GT or LT already. The other bits
5342 // C0 and C3 are turned into Carry & Zero flags.
5343 operand cmpOpU()
5344 %{
5345 match(Bool);
5347 format %{ "" %}
5348 interface(COND_INTER) %{
5349 equal(0x4);
5350 not_equal(0x5);
5351 less(0x2);
5352 greater_equal(0x3);
5353 less_equal(0x6);
5354 greater(0x7);
5355 %}
5356 %}
5359 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
// instruction definitions by not requiring the AD writer to specify separate
// instructions for every form of operand when the instruction accepts
// multiple operand types with the same basic encoding and format.  The classic
// case of this is memory operands.
5366 opclass memory(indirect, indOffset8, indOffset32, indIndexOffset, indIndex,
5367 indIndexScale, indIndexScaleOffset, indPosIndexScaleOffset,
5368 indNarrowOopOffset);
5370 //----------PIPELINE-----------------------------------------------------------
5371 // Rules which define the behavior of the target architectures pipeline.
5372 pipeline %{
5374 //----------ATTRIBUTES---------------------------------------------------------
5375 attributes %{
5376 variable_size_instructions; // Fixed size instructions
5377 max_instructions_per_bundle = 3; // Up to 3 instructions per bundle
5378 instruction_unit_size = 1; // An instruction is 1 bytes long
5379 instruction_fetch_unit_size = 16; // The processor fetches one line
5380 instruction_fetch_units = 1; // of 16 bytes
5382 // List of nop instructions
5383 nops( MachNop );
5384 %}
5386 //----------RESOURCES----------------------------------------------------------
5387 // Resources are the functional units available to the machine
5389 // Generic P2/P3 pipeline
5390 // 3 decoders, only D0 handles big operands; a "bundle" is the limit of
5391 // 3 instructions decoded per cycle.
5392 // 2 load/store ops per cycle, 1 branch, 1 FPU,
5393 // 3 ALU op, only ALU0 handles mul instructions.
5394 resources( D0, D1, D2, DECODE = D0 | D1 | D2,
5395 MS0, MS1, MS2, MEM = MS0 | MS1 | MS2,
5396 BR, FPU,
5397 ALU0, ALU1, ALU2, ALU = ALU0 | ALU1 | ALU2);
5399 //----------PIPELINE DESCRIPTION-----------------------------------------------
5400 // Pipeline Description specifies the stages in the machine's pipeline
5402 // Generic P2/P3 pipeline
5403 pipe_desc(S0, S1, S2, S3, S4, S5);
5405 //----------PIPELINE CLASSES---------------------------------------------------
5406 // Pipeline Classes describe the stages in which input and output are
5407 // referenced by the hardware pipeline.
5409 // Naming convention: ialu or fpu
5410 // Then: _reg
5411 // Then: _reg if there is a 2nd register
5412 // Then: _long if it's a pair of instructions implementing a long
5413 // Then: _fat if it requires the big decoder
5414 // Or: _mem if it requires the big decoder and a memory unit.
5416 // Integer ALU reg operation
5417 pipe_class ialu_reg(rRegI dst)
5418 %{
5419 single_instruction;
5420 dst : S4(write);
5421 dst : S3(read);
5422 DECODE : S0; // any decoder
5423 ALU : S3; // any alu
5424 %}
5426 // Long ALU reg operation
5427 pipe_class ialu_reg_long(rRegL dst)
5428 %{
5429 instruction_count(2);
5430 dst : S4(write);
5431 dst : S3(read);
5432 DECODE : S0(2); // any 2 decoders
5433 ALU : S3(2); // both alus
5434 %}
5436 // Integer ALU reg operation using big decoder
5437 pipe_class ialu_reg_fat(rRegI dst)
5438 %{
5439 single_instruction;
5440 dst : S4(write);
5441 dst : S3(read);
5442 D0 : S0; // big decoder only
5443 ALU : S3; // any alu
5444 %}
5446 // Long ALU reg operation using big decoder
5447 pipe_class ialu_reg_long_fat(rRegL dst)
5448 %{
5449 instruction_count(2);
5450 dst : S4(write);
5451 dst : S3(read);
5452 D0 : S0(2); // big decoder only; twice
5453 ALU : S3(2); // any 2 alus
5454 %}
5456 // Integer ALU reg-reg operation
5457 pipe_class ialu_reg_reg(rRegI dst, rRegI src)
5458 %{
5459 single_instruction;
5460 dst : S4(write);
5461 src : S3(read);
5462 DECODE : S0; // any decoder
5463 ALU : S3; // any alu
5464 %}
5466 // Long ALU reg-reg operation
5467 pipe_class ialu_reg_reg_long(rRegL dst, rRegL src)
5468 %{
5469 instruction_count(2);
5470 dst : S4(write);
5471 src : S3(read);
5472 DECODE : S0(2); // any 2 decoders
5473 ALU : S3(2); // both alus
5474 %}
5476 // Integer ALU reg-reg operation
5477 pipe_class ialu_reg_reg_fat(rRegI dst, memory src)
5478 %{
5479 single_instruction;
5480 dst : S4(write);
5481 src : S3(read);
5482 D0 : S0; // big decoder only
5483 ALU : S3; // any alu
5484 %}
// --- Integer ALU pipeline classes --------------------------------------------
// Each pipe_class stanza tells the scheduler which decode/ALU/memory resources
// an instruction shape consumes and at which pipeline stage (S0..S5) each
// operand is read or written.
5486 // Long ALU reg-reg operation
5487 pipe_class ialu_reg_reg_long_fat(rRegL dst, rRegL src)
5488 %{
5489 instruction_count(2);
5490 dst : S4(write);
5491 src : S3(read);
5492 D0 : S0(2); // big decoder only; twice
5493 ALU : S3(2); // both alus
5494 %}
5496 // Integer ALU reg-mem operation
5497 pipe_class ialu_reg_mem(rRegI dst, memory mem)
5498 %{
5499 single_instruction;
5500 dst : S5(write);
5501 mem : S3(read);
5502 D0 : S0; // big decoder only
5503 ALU : S4; // any alu
5504 MEM : S3; // any mem
5505 %}
5507 // Integer mem operation (prefetch)
5508 pipe_class ialu_mem(memory mem)
5509 %{
5510 single_instruction;
5511 mem : S3(read);
5512 D0 : S0; // big decoder only
5513 MEM : S3; // any mem
5514 %}
5516 // Integer Store to Memory
5517 pipe_class ialu_mem_reg(memory mem, rRegI src)
5518 %{
5519 single_instruction;
5520 mem : S3(read);
5521 src : S5(read);
5522 D0 : S0; // big decoder only
5523 ALU : S4; // any alu
5524 MEM : S3;
5525 %}
5527 // // Long Store to Memory
5528 // pipe_class ialu_mem_long_reg(memory mem, rRegL src)
5529 // %{
5530 // instruction_count(2);
5531 // mem : S3(read);
5532 // src : S5(read);
5533 // D0 : S0(2); // big decoder only; twice
5534 // ALU : S4(2); // any 2 alus
5535 // MEM : S3(2); // Both mems
5536 // %}
5538 // Integer Store to Memory
5539 pipe_class ialu_mem_imm(memory mem)
5540 %{
5541 single_instruction;
5542 mem : S3(read);
5543 D0 : S0; // big decoder only
5544 ALU : S4; // any alu
5545 MEM : S3;
5546 %}
5548 // Integer ALU0 reg-reg operation
5549 pipe_class ialu_reg_reg_alu0(rRegI dst, rRegI src)
5550 %{
5551 single_instruction;
5552 dst : S4(write);
5553 src : S3(read);
5554 D0 : S0; // Big decoder only
5555 ALU0 : S3; // only alu0
5556 %}
5558 // Integer ALU0 reg-mem operation
5559 pipe_class ialu_reg_mem_alu0(rRegI dst, memory mem)
5560 %{
5561 single_instruction;
5562 dst : S5(write);
5563 mem : S3(read);
5564 D0 : S0; // big decoder only
5565 ALU0 : S4; // ALU0 only
5566 MEM : S3; // any mem
5567 %}
// --- Flag-writing ALU and conditional-move pipeline classes ------------------
// The ialu_cr_* classes model compare-style operations that write rFlagsReg;
// the pipe_cmov_* classes model cmov shapes that read the flags.
5569 // Integer ALU reg-reg operation
5570 pipe_class ialu_cr_reg_reg(rFlagsReg cr, rRegI src1, rRegI src2)
5571 %{
5572 single_instruction;
5573 cr : S4(write);
5574 src1 : S3(read);
5575 src2 : S3(read);
5576 DECODE : S0; // any decoder
5577 ALU : S3; // any alu
5578 %}
5580 // Integer ALU reg-imm operation
5581 pipe_class ialu_cr_reg_imm(rFlagsReg cr, rRegI src1)
5582 %{
5583 single_instruction;
5584 cr : S4(write);
5585 src1 : S3(read);
5586 DECODE : S0; // any decoder
5587 ALU : S3; // any alu
5588 %}
5590 // Integer ALU reg-mem operation
5591 pipe_class ialu_cr_reg_mem(rFlagsReg cr, rRegI src1, memory src2)
5592 %{
5593 single_instruction;
5594 cr : S4(write);
5595 src1 : S3(read);
5596 src2 : S3(read);
5597 D0 : S0; // big decoder only
5598 ALU : S4; // any alu
5599 MEM : S3;
5600 %}
5602 // Conditional move reg-reg
5603 pipe_class pipe_cmplt( rRegI p, rRegI q, rRegI y)
5604 %{
5605 instruction_count(4);
5606 y : S4(read);
5607 q : S3(read);
5608 p : S3(read);
5609 DECODE : S0(4); // any decoder
5610 %}
5612 // Conditional move reg-reg
5613 pipe_class pipe_cmov_reg( rRegI dst, rRegI src, rFlagsReg cr)
5614 %{
5615 single_instruction;
5616 dst : S4(write);
5617 src : S3(read);
5618 cr : S3(read);
5619 DECODE : S0; // any decoder
5620 %}
5622 // Conditional move reg-mem
5623 pipe_class pipe_cmov_mem( rFlagsReg cr, rRegI dst, memory src)
5624 %{
5625 single_instruction;
5626 dst : S4(write);
5627 src : S3(read);
5628 cr : S3(read);
5629 DECODE : S0; // any decoder
5630 MEM : S3;
5631 %}
5633 // Conditional move reg-reg long
5634 pipe_class pipe_cmov_reg_long( rFlagsReg cr, rRegL dst, rRegL src)
5635 %{
5636 single_instruction;
5637 dst : S4(write);
5638 src : S3(read);
5639 cr : S3(read);
5640 DECODE : S0(2); // any 2 decoders
5641 %}
5643 // XXX
5644 // // Conditional move double reg-reg
5645 // pipe_class pipe_cmovD_reg( rFlagsReg cr, regDPR1 dst, regD src)
5646 // %{
5647 // single_instruction;
5648 // dst : S4(write);
5649 // src : S3(read);
5650 // cr : S3(read);
5651 // DECODE : S0; // any decoder
5652 // %}
// --- FPU register/memory pipeline classes ------------------------------------
// Classes for floating-point instruction shapes; DECODE counts track the
// number of micro-ops (e.g. extra decoder slots for FPU push/pop).
5654 // Float reg-reg operation
5655 pipe_class fpu_reg(regD dst)
5656 %{
5657 instruction_count(2);
5658 dst : S3(read);
5659 DECODE : S0(2); // any 2 decoders
5660 FPU : S3;
5661 %}
5663 // Float reg-reg operation
5664 pipe_class fpu_reg_reg(regD dst, regD src)
5665 %{
5666 instruction_count(2);
5667 dst : S4(write);
5668 src : S3(read);
5669 DECODE : S0(2); // any 2 decoders
5670 FPU : S3;
5671 %}
5673 // Float reg-reg operation
5674 pipe_class fpu_reg_reg_reg(regD dst, regD src1, regD src2)
5675 %{
5676 instruction_count(3);
5677 dst : S4(write);
5678 src1 : S3(read);
5679 src2 : S3(read);
5680 DECODE : S0(3); // any 3 decoders
5681 FPU : S3(2);
5682 %}
5684 // Float reg-reg operation
5685 pipe_class fpu_reg_reg_reg_reg(regD dst, regD src1, regD src2, regD src3)
5686 %{
5687 instruction_count(4);
5688 dst : S4(write);
5689 src1 : S3(read);
5690 src2 : S3(read);
5691 src3 : S3(read);
5692 DECODE : S0(4); // any 4 decoders (count matches S0(4); earlier comment said "3", a copy/paste from the 3-operand class)
5693 FPU : S3(2);
5694 %}
5696 // Float reg-reg operation
5697 pipe_class fpu_reg_mem_reg_reg(regD dst, memory src1, regD src2, regD src3)
5698 %{
5699 instruction_count(4);
5700 dst : S4(write);
5701 src1 : S3(read);
5702 src2 : S3(read);
5703 src3 : S3(read);
5704 DECODE : S1(3); // any 3 decoders
5705 D0 : S0; // Big decoder only
5706 FPU : S3(2);
5707 MEM : S3;
5708 %}
5710 // Float reg-mem operation
5711 pipe_class fpu_reg_mem(regD dst, memory mem)
5712 %{
5713 instruction_count(2);
5714 dst : S5(write);
5715 mem : S3(read);
5716 D0 : S0; // big decoder only
5717 DECODE : S1; // any decoder for FPU POP
5718 FPU : S4;
5719 MEM : S3; // any mem
5720 %}
5722 // Float reg-mem operation
5723 pipe_class fpu_reg_reg_mem(regD dst, regD src1, memory mem)
5724 %{
5725 instruction_count(3);
5726 dst : S5(write);
5727 src1 : S3(read);
5728 mem : S3(read);
5729 D0 : S0; // big decoder only
5730 DECODE : S1(2); // any decoder for FPU POP
5731 FPU : S4;
5732 MEM : S3; // any mem
5733 %}
// --- FPU memory-destination and constant-load pipeline classes ---------------
5735 // Float mem-reg operation
5736 pipe_class fpu_mem_reg(memory mem, regD src)
5737 %{
5738 instruction_count(2);
5739 src : S5(read);
5740 mem : S3(read);
5741 DECODE : S0; // any decoder for FPU PUSH
5742 D0 : S1; // big decoder only
5743 FPU : S4;
5744 MEM : S3; // any mem
5745 %}
5747 pipe_class fpu_mem_reg_reg(memory mem, regD src1, regD src2)
5748 %{
5749 instruction_count(3);
5750 src1 : S3(read);
5751 src2 : S3(read);
5752 mem : S3(read);
5753 DECODE : S0(2); // any decoder for FPU PUSH
5754 D0 : S1; // big decoder only
5755 FPU : S4;
5756 MEM : S3; // any mem
5757 %}
5759 pipe_class fpu_mem_reg_mem(memory mem, regD src1, memory src2)
5760 %{
5761 instruction_count(3);
5762 src1 : S3(read);
5763 src2 : S3(read);
5764 mem : S4(read);
5765 DECODE : S0; // any decoder for FPU PUSH
5766 D0 : S0(2); // big decoder only
5767 FPU : S4;
5768 MEM : S3(2); // any mem
5769 %}
5771 pipe_class fpu_mem_mem(memory dst, memory src1)
5772 %{
5773 instruction_count(2);
5774 src1 : S3(read);
5775 dst : S4(read);
5776 D0 : S0(2); // big decoder only
5777 MEM : S3(2); // any mem
5778 %}
5780 pipe_class fpu_mem_mem_mem(memory dst, memory src1, memory src2)
5781 %{
5782 instruction_count(3);
5783 src1 : S3(read);
5784 src2 : S3(read);
5785 dst : S4(read);
5786 D0 : S0(3); // big decoder only
5787 FPU : S4;
5788 MEM : S3(3); // any mem
5789 %}
5791 pipe_class fpu_mem_reg_con(memory mem, regD src1)
5792 %{
5793 instruction_count(3);
5794 src1 : S4(read);
5795 mem : S4(read);
5796 DECODE : S0; // any decoder for FPU PUSH
5797 D0 : S0(2); // big decoder only
5798 FPU : S4;
5799 MEM : S3(2); // any mem
5800 %}
5802 // Float load constant
5803 pipe_class fpu_reg_con(regD dst)
5804 %{
5805 instruction_count(2);
5806 dst : S5(write);
5807 D0 : S0; // big decoder only for the load
5808 DECODE : S1; // any decoder for FPU POP
5809 FPU : S4;
5810 MEM : S3; // any mem
5811 %}
5813 // Float load constant
5814 pipe_class fpu_reg_reg_con(regD dst, regD src)
5815 %{
5816 instruction_count(3);
5817 dst : S5(write);
5818 src : S3(read);
5819 D0 : S0; // big decoder only for the load
5820 DECODE : S1(2); // any decoder for FPU POP
5821 FPU : S4;
5822 MEM : S3; // any mem
5823 %}
// --- Branch, cmpxchg, slow-path, and nop pipeline classes --------------------
// pipe_slow is the catch-all for expanded/complex idioms; `empty` backs the
// MachNop node declared in the `define` stanza below.
5825 // UnConditional branch
5826 pipe_class pipe_jmp(label labl)
5827 %{
5828 single_instruction;
5829 BR : S3;
5830 %}
5832 // Conditional branch
5833 pipe_class pipe_jcc(cmpOp cmp, rFlagsReg cr, label labl)
5834 %{
5835 single_instruction;
5836 cr : S1(read);
5837 BR : S3;
5838 %}
5840 // Allocation idiom
5841 pipe_class pipe_cmpxchg(rRegP dst, rRegP heap_ptr)
5842 %{
5843 instruction_count(1); force_serialization;
5844 fixed_latency(6);
5845 heap_ptr : S3(read);
5846 DECODE : S0(3);
5847 D0 : S2;
5848 MEM : S3;
5849 ALU : S3(2);
5850 dst : S5(write);
5851 BR : S5;
5852 %}
5854 // Generic big/slow expanded idiom
5855 pipe_class pipe_slow()
5856 %{
5857 instruction_count(10); multiple_bundles; force_serialization;
5858 fixed_latency(100);
5859 D0 : S0(2);
5860 MEM : S3(2);
5861 %}
5863 // The real do-nothing guy
5864 pipe_class empty()
5865 %{
5866 instruction_count(0);
5867 %}
5869 // Define the class for the Nop node
5870 define
5871 %{
5872 MachNop = empty;
5873 %}
5875 %}
5877 //----------INSTRUCTIONS-------------------------------------------------------
5878 //
5879 // match -- States which machine-independent subtree may be replaced
5880 // by this instruction.
5881 // ins_cost -- The estimated cost of this instruction is used by instruction
5882 // selection to identify a minimum cost tree of machine
5883 // instructions that matches a tree of machine-independent
5884 // instructions.
5885 // format -- A string providing the disassembly for this instruction.
5886 // The value of an instruction's operand may be inserted
5887 // by referring to it with a '$' prefix.
5888 // opcode -- Three instruction opcodes may be provided. These are referred
5889 // to within an encode class as $primary, $secondary, and $tertiary
5890 // respectively. The primary opcode is commonly used to
5891 // indicate the type of machine instruction, while secondary
5892 // and tertiary are often used for prefix options or addressing
5893 // modes.
5894 // ins_encode -- A list of encode classes with parameters. The encode class
5895 // name must have been defined in an 'enc_class' specification
5896 // in the encode section of the architecture description.
5899 //----------Load/Store/Move Instructions---------------------------------------
5900 //----------Load Instructions--------------------------------------------------
// Sub-word and int loads use movsx/movzx (0F BE/B6/BF/B7) or plain movl (8B);
// the *2L variants (load straight into a long) are currently disabled.
5902 // Load Byte (8 bit signed)
5903 instruct loadB(rRegI dst, memory mem)
5904 %{
5905 match(Set dst (LoadB mem));
5907 ins_cost(125);
5908 format %{ "movsbl $dst, $mem\t# byte" %}
5909 opcode(0x0F, 0xBE);
5910 ins_encode(REX_reg_mem(dst, mem), OpcP, OpcS, reg_mem(dst, mem));
5911 ins_pipe(ialu_reg_mem);
5912 %}
5914 // Load Byte (8 bit signed) into long
5915 // instruct loadB2L(rRegL dst, memory mem)
5916 // %{
5917 // match(Set dst (ConvI2L (LoadB mem)));
5919 // ins_cost(125);
5920 // format %{ "movsbq $dst, $mem\t# byte -> long" %}
5921 // opcode(0x0F, 0xBE);
5922 // ins_encode(REX_reg_mem_wide(dst, mem), OpcP, OpcS, reg_mem(dst, mem));
5923 // ins_pipe(ialu_reg_mem);
5924 // %}
5926 // Load Byte (8 bit UNsigned)
5927 instruct loadUB(rRegI dst, memory mem, immI_255 bytemask)
5928 %{
5929 match(Set dst (AndI (LoadB mem) bytemask));
5931 ins_cost(125);
5932 format %{ "movzbl $dst, $mem\t# ubyte" %}
5933 opcode(0x0F, 0xB6);
5934 ins_encode(REX_reg_mem(dst, mem), OpcP, OpcS, reg_mem(dst, mem));
5935 ins_pipe(ialu_reg_mem);
5936 %}
5938 // Load Byte (8 bit UNsigned) into long
5939 // instruct loadUB2L(rRegL dst, memory mem, immI_255 bytemask)
5940 // %{
5941 // match(Set dst (ConvI2L (AndI (LoadB mem) bytemask)));
5943 // ins_cost(125);
5944 // format %{ "movzbl $dst, $mem\t# ubyte -> long" %}
5945 // opcode(0x0F, 0xB6);
5946 // ins_encode(REX_reg_mem(dst, mem), OpcP, OpcS, reg_mem(dst, mem));
5947 // ins_pipe(ialu_reg_mem);
5948 // %}
5950 // Load Short (16 bit signed)
5951 instruct loadS(rRegI dst, memory mem)
5952 %{
5953 match(Set dst (LoadS mem));
5955 ins_cost(125); // XXX
5956 format %{ "movswl $dst, $mem\t# short" %}
5957 opcode(0x0F, 0xBF);
5958 ins_encode(REX_reg_mem(dst, mem), OpcP, OpcS, reg_mem(dst, mem));
5959 ins_pipe(ialu_reg_mem);
5960 %}
5962 // Load Short (16 bit signed) into long
5963 // instruct loadS2L(rRegL dst, memory mem)
5964 // %{
5965 // match(Set dst (ConvI2L (LoadS mem)));
5967 // ins_cost(125); // XXX
5968 // format %{ "movswq $dst, $mem\t# short -> long" %}
5969 // opcode(0x0F, 0xBF);
5970 // ins_encode(REX_reg_mem_wide(dst, mem), OpcP, OpcS, reg_mem(dst, mem));
5971 // ins_pipe(ialu_reg_mem);
5972 // %}
5974 // Load Char (16 bit UNsigned)
5975 instruct loadC(rRegI dst, memory mem)
5976 %{
5977 match(Set dst (LoadC mem));
5979 ins_cost(125);
5980 format %{ "movzwl $dst, $mem\t# char" %}
5981 opcode(0x0F, 0xB7);
5982 ins_encode(REX_reg_mem(dst, mem), OpcP, OpcS, reg_mem(dst, mem));
5983 ins_pipe(ialu_reg_mem);
5984 %}
5986 // Load Char (16 bit UNsigned) into long
5987 // instruct loadC2L(rRegL dst, memory mem)
5988 // %{
5989 // match(Set dst (ConvI2L (LoadC mem)));
5991 // ins_cost(125);
5992 // format %{ "movzwl $dst, $mem\t# char -> long" %}
5993 // opcode(0x0F, 0xB7);
5994 // ins_encode(REX_reg_mem(dst, mem), OpcP, OpcS, reg_mem(dst, mem));
5995 // ins_pipe(ialu_reg_mem);
5996 // %}
5998 // Load Integer
5999 instruct loadI(rRegI dst, memory mem)
6000 %{
6001 match(Set dst (LoadI mem));
6003 ins_cost(125); // XXX
6004 format %{ "movl $dst, $mem\t# int" %}
6005 opcode(0x8B);
6006 ins_encode(REX_reg_mem(dst, mem), OpcP, reg_mem(dst, mem));
6007 ins_pipe(ialu_reg_mem);
6008 %}
// Wide (64-bit) loads use REX_reg_mem_wide; the compressed-oop variants
// (loadN, loadNKlass) emit a 32-bit movl of the narrow oop via the
// MacroAssembler and build_address, leaving decoding to explicit DecodeN nodes.
6010 // Load Long
6011 instruct loadL(rRegL dst, memory mem)
6012 %{
6013 match(Set dst (LoadL mem));
6015 ins_cost(125); // XXX
6016 format %{ "movq $dst, $mem\t# long" %}
6017 opcode(0x8B);
6018 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6019 ins_pipe(ialu_reg_mem); // XXX
6020 %}
6022 // Load Range
6023 instruct loadRange(rRegI dst, memory mem)
6024 %{
6025 match(Set dst (LoadRange mem));
6027 ins_cost(125); // XXX
6028 format %{ "movl $dst, $mem\t# range" %}
6029 opcode(0x8B);
6030 ins_encode(REX_reg_mem(dst, mem), OpcP, reg_mem(dst, mem));
6031 ins_pipe(ialu_reg_mem);
6032 %}
6034 // Load Pointer
6035 instruct loadP(rRegP dst, memory mem)
6036 %{
6037 match(Set dst (LoadP mem));
6039 ins_cost(125); // XXX
6040 format %{ "movq $dst, $mem\t# ptr" %}
6041 opcode(0x8B);
6042 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6043 ins_pipe(ialu_reg_mem); // XXX
6044 %}
6046 // Load Compressed Pointer
6047 instruct loadN(rRegN dst, memory mem)
6048 %{
6049 match(Set dst (LoadN mem));
6051 ins_cost(125); // XXX
6052 format %{ "movl $dst, $mem\t# compressed ptr" %}
6053 ins_encode %{
6054 Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
6055 Register dst = as_Register($dst$$reg);
6056 __ movl(dst, addr);
6057 %}
6058 ins_pipe(ialu_reg_mem); // XXX
6059 %}
6062 // Load Klass Pointer
6063 instruct loadKlass(rRegP dst, memory mem)
6064 %{
6065 match(Set dst (LoadKlass mem));
6067 ins_cost(125); // XXX
6068 format %{ "movq $dst, $mem\t# class" %}
6069 opcode(0x8B);
6070 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6071 ins_pipe(ialu_reg_mem); // XXX
6072 %}
6074 // Load narrow Klass Pointer
6075 instruct loadNKlass(rRegN dst, memory mem)
6076 %{
6077 match(Set dst (LoadNKlass mem));
6079 ins_cost(125); // XXX
6080 format %{ "movl $dst, $mem\t# compressed klass ptr\n\t" %}
6081 ins_encode %{
6082 Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
6083 Register dst = as_Register($dst$$reg);
6084 __ movl(dst, addr);
6085 %}
6086 ins_pipe(ialu_reg_mem); // XXX
6087 %}
// Scalar SSE loads (movss/movlpd/movsd) and 64-bit packed loads into XMM.
// loadD vs loadD_partial is selected by the UseXmmLoadAndClearUpper flag.
6089 // Load Float
6090 instruct loadF(regF dst, memory mem)
6091 %{
6092 match(Set dst (LoadF mem));
6094 ins_cost(145); // XXX
6095 format %{ "movss $dst, $mem\t# float" %}
6096 opcode(0xF3, 0x0F, 0x10);
6097 ins_encode(OpcP, REX_reg_mem(dst, mem), OpcS, OpcT, reg_mem(dst, mem));
6098 ins_pipe(pipe_slow); // XXX
6099 %}
6101 // Load Double
6102 instruct loadD_partial(regD dst, memory mem)
6103 %{
6104 predicate(!UseXmmLoadAndClearUpper);
6105 match(Set dst (LoadD mem));
6107 ins_cost(145); // XXX
6108 format %{ "movlpd $dst, $mem\t# double" %}
6109 opcode(0x66, 0x0F, 0x12);
6110 ins_encode(OpcP, REX_reg_mem(dst, mem), OpcS, OpcT, reg_mem(dst, mem));
6111 ins_pipe(pipe_slow); // XXX
6112 %}
6114 instruct loadD(regD dst, memory mem)
6115 %{
6116 predicate(UseXmmLoadAndClearUpper);
6117 match(Set dst (LoadD mem));
6119 ins_cost(145); // XXX
6120 format %{ "movsd $dst, $mem\t# double" %}
6121 opcode(0xF2, 0x0F, 0x10);
6122 ins_encode(OpcP, REX_reg_mem(dst, mem), OpcS, OpcT, reg_mem(dst, mem));
6123 ins_pipe(pipe_slow); // XXX
6124 %}
6126 // Load Aligned Packed Byte to XMM register
6127 instruct loadA8B(regD dst, memory mem) %{
6128 match(Set dst (Load8B mem));
6129 ins_cost(125);
6130 format %{ "MOVQ $dst,$mem\t! packed8B" %}
6131 ins_encode( movq_ld(dst, mem));
6132 ins_pipe( pipe_slow );
6133 %}
6135 // Load Aligned Packed Short to XMM register
6136 instruct loadA4S(regD dst, memory mem) %{
6137 match(Set dst (Load4S mem));
6138 ins_cost(125);
6139 format %{ "MOVQ $dst,$mem\t! packed4S" %}
6140 ins_encode( movq_ld(dst, mem));
6141 ins_pipe( pipe_slow );
6142 %}
6144 // Load Aligned Packed Char to XMM register
6145 instruct loadA4C(regD dst, memory mem) %{
6146 match(Set dst (Load4C mem));
6147 ins_cost(125);
6148 format %{ "MOVQ $dst,$mem\t! packed4C" %}
6149 ins_encode( movq_ld(dst, mem));
6150 ins_pipe( pipe_slow );
6151 %}
6153 // Load Aligned Packed Integer to XMM register
6154 instruct load2IU(regD dst, memory mem) %{
6155 match(Set dst (Load2I mem));
6156 ins_cost(125);
6157 format %{ "MOVQ $dst,$mem\t! packed2I" %}
6158 ins_encode( movq_ld(dst, mem));
6159 ins_pipe( pipe_slow );
6160 %}
6162 // Load Aligned Packed Single to XMM
6163 instruct loadA2F(regD dst, memory mem) %{
6164 match(Set dst (Load2F mem));
6165 ins_cost(145);
6166 format %{ "MOVQ $dst,$mem\t! packed2F" %}
6167 ins_encode( movq_ld(dst, mem));
6168 ins_pipe( pipe_slow );
6169 %}
// LEA (8D) variants, one per addressing-mode operand class. The plain
// index-only form (leaPIdx) is disabled.
6171 // Load Effective Address
6172 instruct leaP8(rRegP dst, indOffset8 mem)
6173 %{
6174 match(Set dst mem);
6176 ins_cost(110); // XXX
6177 format %{ "leaq $dst, $mem\t# ptr 8" %}
6178 opcode(0x8D);
6179 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6180 ins_pipe(ialu_reg_reg_fat);
6181 %}
6183 instruct leaP32(rRegP dst, indOffset32 mem)
6184 %{
6185 match(Set dst mem);
6187 ins_cost(110);
6188 format %{ "leaq $dst, $mem\t# ptr 32" %}
6189 opcode(0x8D);
6190 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6191 ins_pipe(ialu_reg_reg_fat);
6192 %}
6194 // instruct leaPIdx(rRegP dst, indIndex mem)
6195 // %{
6196 // match(Set dst mem);
6198 // ins_cost(110);
6199 // format %{ "leaq $dst, $mem\t# ptr idx" %}
6200 // opcode(0x8D);
6201 // ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6202 // ins_pipe(ialu_reg_reg_fat);
6203 // %}
6205 instruct leaPIdxOff(rRegP dst, indIndexOffset mem)
6206 %{
6207 match(Set dst mem);
6209 ins_cost(110);
6210 format %{ "leaq $dst, $mem\t# ptr idxoff" %}
6211 opcode(0x8D);
6212 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6213 ins_pipe(ialu_reg_reg_fat);
6214 %}
6216 instruct leaPIdxScale(rRegP dst, indIndexScale mem)
6217 %{
6218 match(Set dst mem);
6220 ins_cost(110);
6221 format %{ "leaq $dst, $mem\t# ptr idxscale" %}
6222 opcode(0x8D);
6223 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6224 ins_pipe(ialu_reg_reg_fat);
6225 %}
6227 instruct leaPIdxScaleOff(rRegP dst, indIndexScaleOffset mem)
6228 %{
6229 match(Set dst mem);
6231 ins_cost(110);
6232 format %{ "leaq $dst, $mem\t# ptr idxscaleoff" %}
6233 opcode(0x8D);
6234 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
6235 ins_pipe(ialu_reg_reg_fat);
6236 %}
// Constant loads. Zero constants are materialized with xor (which KILLs
// flags, hence the rFlagsReg effect); 32-bit-representable longs/pointers
// get shorter encodings than the full movabs form.
6238 instruct loadConI(rRegI dst, immI src)
6239 %{
6240 match(Set dst src);
6242 format %{ "movl $dst, $src\t# int" %}
6243 ins_encode(load_immI(dst, src));
6244 ins_pipe(ialu_reg_fat); // XXX
6245 %}
6247 instruct loadConI0(rRegI dst, immI0 src, rFlagsReg cr)
6248 %{
6249 match(Set dst src);
6250 effect(KILL cr);
6252 ins_cost(50);
6253 format %{ "xorl $dst, $dst\t# int" %}
6254 opcode(0x33); /* + rd */
6255 ins_encode(REX_reg_reg(dst, dst), OpcP, reg_reg(dst, dst));
6256 ins_pipe(ialu_reg);
6257 %}
6259 instruct loadConL(rRegL dst, immL src)
6260 %{
6261 match(Set dst src);
6263 ins_cost(150);
6264 format %{ "movq $dst, $src\t# long" %}
6265 ins_encode(load_immL(dst, src));
6266 ins_pipe(ialu_reg);
6267 %}
6269 instruct loadConL0(rRegL dst, immL0 src, rFlagsReg cr)
6270 %{
6271 match(Set dst src);
6272 effect(KILL cr);
6274 ins_cost(50);
6275 format %{ "xorl $dst, $dst\t# long" %}
6276 opcode(0x33); /* + rd */
6277 ins_encode(REX_reg_reg(dst, dst), OpcP, reg_reg(dst, dst));
6278 ins_pipe(ialu_reg); // XXX
6279 %}
6281 instruct loadConUL32(rRegL dst, immUL32 src)
6282 %{
6283 match(Set dst src);
6285 ins_cost(60);
6286 format %{ "movl $dst, $src\t# long (unsigned 32-bit)" %}
6287 ins_encode(load_immUL32(dst, src));
6288 ins_pipe(ialu_reg);
6289 %}
6291 instruct loadConL32(rRegL dst, immL32 src)
6292 %{
6293 match(Set dst src);
6295 ins_cost(70);
6296 format %{ "movq $dst, $src\t# long (32-bit)" %}
6297 ins_encode(load_immL32(dst, src));
6298 ins_pipe(ialu_reg);
6299 %}
6301 instruct loadConP(rRegP dst, immP src)
6302 %{
6303 match(Set dst src);
6305 format %{ "movq $dst, $src\t# ptr" %}
6306 ins_encode(load_immP(dst, src));
6307 ins_pipe(ialu_reg_fat); // XXX
6308 %}
6310 instruct loadConP0(rRegP dst, immP0 src, rFlagsReg cr)
6311 %{
6312 match(Set dst src);
6313 effect(KILL cr);
6315 ins_cost(50);
6316 format %{ "xorl $dst, $dst\t# ptr" %}
6317 opcode(0x33); /* + rd */
6318 ins_encode(REX_reg_reg(dst, dst), OpcP, reg_reg(dst, dst));
6319 ins_pipe(ialu_reg);
6320 %}
6322 instruct loadConP31(rRegP dst, immP31 src, rFlagsReg cr)
6323 %{
6324 match(Set dst src);
6325 effect(KILL cr);
6327 ins_cost(60);
6328 format %{ "movl $dst, $src\t# ptr (positive 32-bit)" %}
6329 ins_encode(load_immP31(dst, src));
6330 ins_pipe(ialu_reg);
6331 %}
6333 instruct loadConF(regF dst, immF src)
6334 %{
6335 match(Set dst src);
6336 ins_cost(125);
6338 format %{ "movss $dst, [$src]" %}
6339 ins_encode(load_conF(dst, src));
6340 ins_pipe(pipe_slow);
6341 %}
// Load the compressed (narrow-oop) NULL constant by zeroing the register.
// The xor idiom clobbers the condition codes, hence KILL cr.
// Fix: the format string previously printed "xorq $dst, $src", which does not
// match the emitted instruction (xorq dst, dst); the disassembly now shows
// the actual register-with-itself form.
6343 instruct loadConN0(rRegN dst, immN0 src, rFlagsReg cr) %{
6344 match(Set dst src);
6345 effect(KILL cr);
6346 format %{ "xorq $dst, $dst\t# compressed NULL ptr" %}
6347 ins_encode %{
6348 Register dst = $dst$$Register;
6349 __ xorq(dst, dst);
6350 %}
6351 ins_pipe(ialu_reg);
6352 %}
// Load a non-NULL narrow-oop constant. set_narrow_oop records the oop in the
// relocation info so the GC can patch it; the NULL case is handled by
// loadConN0 above, so reaching it here is a matcher bug (ShouldNotReachHere).
6354 instruct loadConN(rRegN dst, immN src) %{
6355 match(Set dst src);
6357 ins_cost(125);
6358 format %{ "movl $dst, $src\t# compressed ptr" %}
6359 ins_encode %{
6360 address con = (address)$src$$constant;
6361 Register dst = $dst$$Register;
6362 if (con == NULL) {
6363 ShouldNotReachHere();
6364 } else {
6365 __ set_narrow_oop(dst, (jobject)$src$$constant);
6366 }
6367 %}
6368 ins_pipe(ialu_reg_fat); // XXX
6369 %}
// Floating-point constant loads; 0.0 is materialized with xorps/xorpd
// instead of a memory load.
6371 instruct loadConF0(regF dst, immF0 src)
6372 %{
6373 match(Set dst src);
6374 ins_cost(100);
6376 format %{ "xorps $dst, $dst\t# float 0.0" %}
6377 opcode(0x0F, 0x57);
6378 ins_encode(REX_reg_reg(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
6379 ins_pipe(pipe_slow);
6380 %}
6382 // Use the same format since predicate() can not be used here.
6383 instruct loadConD(regD dst, immD src)
6384 %{
6385 match(Set dst src);
6386 ins_cost(125);
6388 format %{ "movsd $dst, [$src]" %}
6389 ins_encode(load_conD(dst, src));
6390 ins_pipe(pipe_slow);
6391 %}
6393 instruct loadConD0(regD dst, immD0 src)
6394 %{
6395 match(Set dst src);
6396 ins_cost(100);
6398 format %{ "xorpd $dst, $dst\t# double 0.0" %}
6399 opcode(0x66, 0x0F, 0x57);
6400 ins_encode(OpcP, REX_reg_reg(dst, dst), OpcS, OpcT, reg_reg(dst, dst));
6401 ins_pipe(pipe_slow);
6402 %}
// Loads from spill slots on the stack (stackSlot* operands address via rsp).
6404 instruct loadSSI(rRegI dst, stackSlotI src)
6405 %{
6406 match(Set dst src);
6408 ins_cost(125);
6409 format %{ "movl $dst, $src\t# int stk" %}
6410 opcode(0x8B);
6411 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
6412 ins_pipe(ialu_reg_mem);
6413 %}
6415 instruct loadSSL(rRegL dst, stackSlotL src)
6416 %{
6417 match(Set dst src);
6419 ins_cost(125);
6420 format %{ "movq $dst, $src\t# long stk" %}
6421 opcode(0x8B);
6422 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
6423 ins_pipe(ialu_reg_mem);
6424 %}
6426 instruct loadSSP(rRegP dst, stackSlotP src)
6427 %{
6428 match(Set dst src);
6430 ins_cost(125);
6431 format %{ "movq $dst, $src\t# ptr stk" %}
6432 opcode(0x8B);
6433 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
6434 ins_pipe(ialu_reg_mem);
6435 %}
6437 instruct loadSSF(regF dst, stackSlotF src)
6438 %{
6439 match(Set dst src);
6441 ins_cost(125);
6442 format %{ "movss $dst, $src\t# float stk" %}
6443 opcode(0xF3, 0x0F, 0x10);
6444 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
6445 ins_pipe(pipe_slow); // XXX
6446 %}
6448 // Use the same format since predicate() can not be used here.
6449 instruct loadSSD(regD dst, stackSlotD src)
6450 %{
6451 match(Set dst src);
6453 ins_cost(125);
6454 format %{ "movsd $dst, $src\t# double stk" %}
6455 ins_encode %{
6456 __ movdbl($dst$$XMMRegister, Address(rsp, $src$$disp));
6457 %}
6458 ins_pipe(pipe_slow); // XXX
6459 %}
6461 // Prefetch instructions.
6462 // Must be safe to execute with invalid address (cannot fault).
// One instruct per (Read|Allocate)PrefetchInstr flag value; the /digit in
// each opcode comment is the ModRM reg field selecting the prefetch hint
// (0=NTA or prefetchr, 1=T0/prefetchw, 3=T2).
6464 instruct prefetchr( memory mem ) %{
6465 predicate(ReadPrefetchInstr==3);
6466 match(PrefetchRead mem);
6467 ins_cost(125);
6469 format %{ "PREFETCHR $mem\t# Prefetch into level 1 cache" %}
6470 opcode(0x0F, 0x0D); /* Opcode 0F 0D /0 */
6471 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x00, mem));
6472 ins_pipe(ialu_mem);
6473 %}
6475 instruct prefetchrNTA( memory mem ) %{
6476 predicate(ReadPrefetchInstr==0);
6477 match(PrefetchRead mem);
6478 ins_cost(125);
6480 format %{ "PREFETCHNTA $mem\t# Prefetch into non-temporal cache for read" %}
6481 opcode(0x0F, 0x18); /* Opcode 0F 18 /0 */
6482 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x00, mem));
6483 ins_pipe(ialu_mem);
6484 %}
6486 instruct prefetchrT0( memory mem ) %{
6487 predicate(ReadPrefetchInstr==1);
6488 match(PrefetchRead mem);
6489 ins_cost(125);
6491 format %{ "PREFETCHT0 $mem\t# prefetch into L1 and L2 caches for read" %}
6492 opcode(0x0F, 0x18); /* Opcode 0F 18 /1 */
6493 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x01, mem));
6494 ins_pipe(ialu_mem);
6495 %}
6497 instruct prefetchrT2( memory mem ) %{
6498 predicate(ReadPrefetchInstr==2);
6499 match(PrefetchRead mem);
6500 ins_cost(125);
6502 format %{ "PREFETCHT2 $mem\t# prefetch into L2 caches for read" %}
6503 opcode(0x0F, 0x18); /* Opcode 0F 18 /3 */
6504 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x03, mem));
6505 ins_pipe(ialu_mem);
6506 %}
6508 instruct prefetchw( memory mem ) %{
6509 predicate(AllocatePrefetchInstr==3);
6510 match(PrefetchWrite mem);
6511 ins_cost(125);
6513 format %{ "PREFETCHW $mem\t# Prefetch into level 1 cache and mark modified" %}
6514 opcode(0x0F, 0x0D); /* Opcode 0F 0D /1 */
6515 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x01, mem));
6516 ins_pipe(ialu_mem);
6517 %}
6519 instruct prefetchwNTA( memory mem ) %{
6520 predicate(AllocatePrefetchInstr==0);
6521 match(PrefetchWrite mem);
6522 ins_cost(125);
6524 format %{ "PREFETCHNTA $mem\t# Prefetch to non-temporal cache for write" %}
6525 opcode(0x0F, 0x18); /* Opcode 0F 18 /0 */
6526 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x00, mem));
6527 ins_pipe(ialu_mem);
6528 %}
6530 instruct prefetchwT0( memory mem ) %{
6531 predicate(AllocatePrefetchInstr==1);
6532 match(PrefetchWrite mem);
6533 ins_cost(125);
6535 format %{ "PREFETCHT0 $mem\t# Prefetch to level 1 and 2 caches for write" %}
6536 opcode(0x0F, 0x18); /* Opcode 0F 18 /1 */
6537 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x01, mem));
6538 ins_pipe(ialu_mem);
6539 %}
6541 instruct prefetchwT2( memory mem ) %{
6542 predicate(AllocatePrefetchInstr==2);
6543 match(PrefetchWrite mem);
6544 ins_cost(125);
6546 format %{ "PREFETCHT2 $mem\t# Prefetch to level 2 cache for write" %}
6547 opcode(0x0F, 0x18); /* Opcode 0F 18 /3 */
6548 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x03, mem));
6549 ins_pipe(ialu_mem);
6550 %}
6552 //----------Store Instructions-------------------------------------------------
// Register stores; storeN stores a 32-bit narrow oop via the MacroAssembler
// and build_address, mirroring loadN above.
6554 // Store Byte
6555 instruct storeB(memory mem, rRegI src)
6556 %{
6557 match(Set mem (StoreB mem src));
6559 ins_cost(125); // XXX
6560 format %{ "movb $mem, $src\t# byte" %}
6561 opcode(0x88);
6562 ins_encode(REX_breg_mem(src, mem), OpcP, reg_mem(src, mem));
6563 ins_pipe(ialu_mem_reg);
6564 %}
6566 // Store Char/Short
6567 instruct storeC(memory mem, rRegI src)
6568 %{
6569 match(Set mem (StoreC mem src));
6571 ins_cost(125); // XXX
6572 format %{ "movw $mem, $src\t# char/short" %}
6573 opcode(0x89);
6574 ins_encode(SizePrefix, REX_reg_mem(src, mem), OpcP, reg_mem(src, mem));
6575 ins_pipe(ialu_mem_reg);
6576 %}
6578 // Store Integer
6579 instruct storeI(memory mem, rRegI src)
6580 %{
6581 match(Set mem (StoreI mem src));
6583 ins_cost(125); // XXX
6584 format %{ "movl $mem, $src\t# int" %}
6585 opcode(0x89);
6586 ins_encode(REX_reg_mem(src, mem), OpcP, reg_mem(src, mem));
6587 ins_pipe(ialu_mem_reg);
6588 %}
6590 // Store Long
6591 instruct storeL(memory mem, rRegL src)
6592 %{
6593 match(Set mem (StoreL mem src));
6595 ins_cost(125); // XXX
6596 format %{ "movq $mem, $src\t# long" %}
6597 opcode(0x89);
6598 ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
6599 ins_pipe(ialu_mem_reg); // XXX
6600 %}
6602 // Store Pointer
6603 instruct storeP(memory mem, any_RegP src)
6604 %{
6605 match(Set mem (StoreP mem src));
6607 ins_cost(125); // XXX
6608 format %{ "movq $mem, $src\t# ptr" %}
6609 opcode(0x89);
6610 ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
6611 ins_pipe(ialu_mem_reg);
6612 %}
6614 // Store NULL Pointer, mark word, or other simple pointer constant.
6615 instruct storeImmP(memory mem, immP31 src)
6616 %{
6617 match(Set mem (StoreP mem src));
6619 ins_cost(125); // XXX
6620 format %{ "movq $mem, $src\t# ptr" %}
6621 opcode(0xC7); /* C7 /0 */
6622 ins_encode(REX_mem_wide(mem), OpcP, RM_opc_mem(0x00, mem), Con32(src));
6623 ins_pipe(ialu_mem_imm);
6624 %}
6626 // Store Compressed Pointer
6627 instruct storeN(memory mem, rRegN src)
6628 %{
6629 match(Set mem (StoreN mem src));
6631 ins_cost(125); // XXX
6632 format %{ "movl $mem, $src\t# compressed ptr" %}
6633 ins_encode %{
6634 Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
6635 Register src = as_Register($src$$reg);
6636 __ movl(addr, src);
6637 %}
6638 ins_pipe(ialu_mem_reg);
6639 %}
// Immediate stores (C6/C7 with /0 ModRM extension) and 64-bit packed XMM
// stores via movq_st; storeImmCM0 is the CMS card-mark byte store.
6641 // Store Integer Immediate
6642 instruct storeImmI(memory mem, immI src)
6643 %{
6644 match(Set mem (StoreI mem src));
6646 ins_cost(150);
6647 format %{ "movl $mem, $src\t# int" %}
6648 opcode(0xC7); /* C7 /0 */
6649 ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con32(src));
6650 ins_pipe(ialu_mem_imm);
6651 %}
6653 // Store Long Immediate
6654 instruct storeImmL(memory mem, immL32 src)
6655 %{
6656 match(Set mem (StoreL mem src));
6658 ins_cost(150);
6659 format %{ "movq $mem, $src\t# long" %}
6660 opcode(0xC7); /* C7 /0 */
6661 ins_encode(REX_mem_wide(mem), OpcP, RM_opc_mem(0x00, mem), Con32(src));
6662 ins_pipe(ialu_mem_imm);
6663 %}
6665 // Store Short/Char Immediate
6666 instruct storeImmI16(memory mem, immI16 src)
6667 %{
6668 predicate(UseStoreImmI16);
6669 match(Set mem (StoreC mem src));
6671 ins_cost(150);
6672 format %{ "movw $mem, $src\t# short/char" %}
6673 opcode(0xC7); /* C7 /0 Same as 32 store immediate with prefix */
6674 ins_encode(SizePrefix, REX_mem(mem), OpcP, RM_opc_mem(0x00, mem),Con16(src));
6675 ins_pipe(ialu_mem_imm);
6676 %}
6678 // Store Byte Immediate
6679 instruct storeImmB(memory mem, immI8 src)
6680 %{
6681 match(Set mem (StoreB mem src));
6683 ins_cost(150); // XXX
6684 format %{ "movb $mem, $src\t# byte" %}
6685 opcode(0xC6); /* C6 /0 */
6686 ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con8or32(src));
6687 ins_pipe(ialu_mem_imm);
6688 %}
6690 // Store Aligned Packed Byte XMM register to memory
6691 instruct storeA8B(memory mem, regD src) %{
6692 match(Set mem (Store8B mem src));
6693 ins_cost(145);
6694 format %{ "MOVQ $mem,$src\t! packed8B" %}
6695 ins_encode( movq_st(mem, src));
6696 ins_pipe( pipe_slow );
6697 %}
6699 // Store Aligned Packed Char/Short XMM register to memory
6700 instruct storeA4C(memory mem, regD src) %{
6701 match(Set mem (Store4C mem src));
6702 ins_cost(145);
6703 format %{ "MOVQ $mem,$src\t! packed4C" %}
6704 ins_encode( movq_st(mem, src));
6705 ins_pipe( pipe_slow );
6706 %}
6708 // Store Aligned Packed Integer XMM register to memory
6709 instruct storeA2I(memory mem, regD src) %{
6710 match(Set mem (Store2I mem src));
6711 ins_cost(145);
6712 format %{ "MOVQ $mem,$src\t! packed2I" %}
6713 ins_encode( movq_st(mem, src));
6714 ins_pipe( pipe_slow );
6715 %}
6717 // Store CMS card-mark Immediate
6718 instruct storeImmCM0(memory mem, immI0 src)
6719 %{
6720 match(Set mem (StoreCM mem src));
6722 ins_cost(150); // XXX
6723 format %{ "movb $mem, $src\t# CMS card-mark byte 0" %}
6724 opcode(0xC6); /* C6 /0 */
6725 ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con8or32(src));
6726 ins_pipe(ialu_mem_imm);
6727 %}
6729 // Store Aligned Packed Single Float XMM register to memory
6730 instruct storeA2F(memory mem, regD src) %{
6731 match(Set mem (Store2F mem src));
6732 ins_cost(145);
6733 format %{ "MOVQ $mem,$src\t! packed2F" %}
6734 ins_encode( movq_st(mem, src));
6735 ins_pipe( pipe_slow );
6736 %}
// Scalar FP stores; immediate-bits variants store the raw bit pattern with an
// integer mov, which is cheaper than going through an XMM register.
6738 // Store Float
6739 instruct storeF(memory mem, regF src)
6740 %{
6741 match(Set mem (StoreF mem src));
6743 ins_cost(95); // XXX
6744 format %{ "movss $mem, $src\t# float" %}
6745 opcode(0xF3, 0x0F, 0x11);
6746 ins_encode(OpcP, REX_reg_mem(src, mem), OpcS, OpcT, reg_mem(src, mem));
6747 ins_pipe(pipe_slow); // XXX
6748 %}
6750 // Store immediate Float value (it is faster than store from XMM register)
6751 instruct storeF_imm(memory mem, immF src)
6752 %{
6753 match(Set mem (StoreF mem src));
6755 ins_cost(50);
6756 format %{ "movl $mem, $src\t# float" %}
6757 opcode(0xC7); /* C7 /0 */
6758 ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con32F_as_bits(src));
6759 ins_pipe(ialu_mem_imm);
6760 %}
6762 // Store Double
6763 instruct storeD(memory mem, regD src)
6764 %{
6765 match(Set mem (StoreD mem src));
6767 ins_cost(95); // XXX
6768 format %{ "movsd $mem, $src\t# double" %}
6769 opcode(0xF2, 0x0F, 0x11);
6770 ins_encode(OpcP, REX_reg_mem(src, mem), OpcS, OpcT, reg_mem(src, mem));
6771 ins_pipe(pipe_slow); // XXX
6772 %}
6774 // Store immediate double 0.0 (it is faster than store from XMM register)
6775 instruct storeD0_imm(memory mem, immD0 src)
6776 %{
6777 match(Set mem (StoreD mem src));
6779 ins_cost(50);
6780 format %{ "movq $mem, $src\t# double 0." %}
6781 opcode(0xC7); /* C7 /0 */
6782 ins_encode(REX_mem_wide(mem), OpcP, RM_opc_mem(0x00, mem), Con32F_as_bits(src));
6783 ins_pipe(ialu_mem_imm);
6784 %}
// Spill stores: register -> stack slot, one instruct per ideal type
// (int, long, ptr, float, double). Matched on a plain "Set dst src"
// where dst is a stackSlot operand.
6786 instruct storeSSI(stackSlotI dst, rRegI src)
6787 %{
6788 match(Set dst src);
6790 ins_cost(100);
6791 format %{ "movl $dst, $src\t# int stk" %}
6792 opcode(0x89);
6793 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
6794 ins_pipe( ialu_mem_reg );
6795 %}
6797 instruct storeSSL(stackSlotL dst, rRegL src)
6798 %{
6799 match(Set dst src);
6801 ins_cost(100);
6802 format %{ "movq $dst, $src\t# long stk" %}
6803 opcode(0x89);
6804 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
6805 ins_pipe(ialu_mem_reg);
6806 %}
6808 instruct storeSSP(stackSlotP dst, rRegP src)
6809 %{
6810 match(Set dst src);
6812 ins_cost(100);
6813 format %{ "movq $dst, $src\t# ptr stk" %}
6814 opcode(0x89);
6815 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
6816 ins_pipe(ialu_mem_reg);
6817 %}
6819 instruct storeSSF(stackSlotF dst, regF src)
6820 %{
6821 match(Set dst src);
6823 ins_cost(95); // XXX
6824 format %{ "movss $dst, $src\t# float stk" %}
6825 opcode(0xF3, 0x0F, 0x11);
6826 ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
6827 ins_pipe(pipe_slow); // XXX
6828 %}
6830 instruct storeSSD(stackSlotD dst, regD src)
6831 %{
6832 match(Set dst src);
6834 ins_cost(95); // XXX
6835 format %{ "movsd $dst, $src\t# double stk" %}
6836 opcode(0xF2, 0x0F, 0x11);
6837 ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
6838 ins_pipe(pipe_slow); // XXX
6839 %}
6841 //----------BSWAP Instructions-------------------------------------------------
// In-register byte reversal (BSWAP), plus fused load+bswap and
// bswap+store forms. The fused forms emit a plain mov (0x8B / 0x89)
// around the BSWAP of the register.
6842 instruct bytes_reverse_int(rRegI dst) %{
6843 match(Set dst (ReverseBytesI dst));
6845 format %{ "bswapl $dst" %}
6846 opcode(0x0F, 0xC8); /*Opcode 0F /C8 */
6847 ins_encode( REX_reg(dst), OpcP, opc2_reg(dst) );
6848 ins_pipe( ialu_reg );
6849 %}
6851 instruct bytes_reverse_long(rRegL dst) %{
6852 match(Set dst (ReverseBytesL dst));
6854 format %{ "bswapq $dst" %}
6856 opcode(0x0F, 0xC8); /* Opcode 0F /C8 */
6857 ins_encode( REX_reg_wide(dst), OpcP, opc2_reg(dst) );
6858 ins_pipe( ialu_reg);
6859 %}
6861 instruct loadI_reversed(rRegI dst, memory src) %{
6862 match(Set dst (ReverseBytesI (LoadI src)));
6864 format %{ "bswap_movl $dst, $src" %}
6865 opcode(0x8B, 0x0F, 0xC8); /* Opcode 8B 0F C8 */
6866 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src), REX_reg(dst), OpcS, opc3_reg(dst));
6867 ins_pipe( ialu_reg_mem );
6868 %}
6870 instruct loadL_reversed(rRegL dst, memory src) %{
6871 match(Set dst (ReverseBytesL (LoadL src)));
6873 format %{ "bswap_movq $dst, $src" %}
6874 opcode(0x8B, 0x0F, 0xC8); /* Opcode 8B 0F C8 */
6875 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src), REX_reg_wide(dst), OpcS, opc3_reg(dst));
6876 ins_pipe( ialu_reg_mem );
6877 %}
// Store forms: BSWAP the source register first, then mov it to memory.
// NOTE(review): this clobbers $src's byte order in the register — safe only
// because the matched ReverseBytes value has no other use; presumably
// guaranteed by the matcher's single-use constraint — TODO confirm.
6879 instruct storeI_reversed(memory dst, rRegI src) %{
6880 match(Set dst (StoreI dst (ReverseBytesI src)));
6882 format %{ "movl_bswap $dst, $src" %}
6883 opcode(0x0F, 0xC8, 0x89); /* Opcode 0F C8 89 */
6884 ins_encode( REX_reg(src), OpcP, opc2_reg(src), REX_reg_mem(src, dst), OpcT, reg_mem(src, dst) );
6885 ins_pipe( ialu_mem_reg );
6886 %}
6888 instruct storeL_reversed(memory dst, rRegL src) %{
6889 match(Set dst (StoreL dst (ReverseBytesL src)));
6891 format %{ "movq_bswap $dst, $src" %}
6892 opcode(0x0F, 0xC8, 0x89); /* Opcode 0F C8 89 */
6893 ins_encode( REX_reg_wide(src), OpcP, opc2_reg(src), REX_reg_mem_wide(src, dst), OpcT, reg_mem(src, dst) );
6894 ins_pipe( ialu_mem_reg );
6895 %}
6897 //----------MemBar Instructions-----------------------------------------------
6898 // Memory barrier flavors
// On x86's TSO memory model, acquire and release barriers need no code
// (size 0, empty encoding); only the StoreLoad ("volatile") barrier
// requires an actual fence, emitted by enc_membar_volatile.
6900 instruct membar_acquire()
6901 %{
6902 match(MemBarAcquire);
6903 ins_cost(0);
6905 size(0);
6906 format %{ "MEMBAR-acquire" %}
6907 ins_encode();
6908 ins_pipe(empty);
6909 %}
6911 instruct membar_acquire_lock()
6912 %{
6913 match(MemBarAcquire);
6914 predicate(Matcher::prior_fast_lock(n));
6915 ins_cost(0);
6917 size(0);
6918 format %{ "MEMBAR-acquire (prior CMPXCHG in FastLock so empty encoding)" %}
6919 ins_encode();
6920 ins_pipe(empty);
6921 %}
6923 instruct membar_release()
6924 %{
6925 match(MemBarRelease);
6926 ins_cost(0);
6928 size(0);
6929 format %{ "MEMBAR-release" %}
6930 ins_encode();
6931 ins_pipe(empty);
6932 %}
6934 instruct membar_release_lock()
6935 %{
6936 match(MemBarRelease);
6937 predicate(Matcher::post_fast_unlock(n));
6938 ins_cost(0);
6940 size(0);
6941 format %{ "MEMBAR-release (a FastUnlock follows so empty encoding)" %}
6942 ins_encode();
6943 ins_pipe(empty);
6944 %}
6946 instruct membar_volatile()
6947 %{
6948 match(MemBarVolatile);
6949 ins_cost(400);
6951 format %{ "MEMBAR-volatile" %}
6952 ins_encode(enc_membar_volatile);
6953 ins_pipe(pipe_slow);
6954 %}
// A volatile barrier immediately after a locked instruction (or other
// StoreLoad-providing op) is redundant; elide it when the matcher proves so.
6956 instruct unnecessary_membar_volatile()
6957 %{
6958 match(MemBarVolatile);
6959 predicate(Matcher::post_store_load_barrier(n));
6960 ins_cost(0);
6962 size(0);
6963 format %{ "MEMBAR-volatile (unnecessary so empty encoding)" %}
6964 ins_encode();
6965 ins_pipe(empty);
6966 %}
6968 //----------Move Instructions--------------------------------------------------
// Reinterpreting moves between raw longs and pointers; both are a plain
// 64-bit register copy.
6970 instruct castX2P(rRegP dst, rRegL src)
6971 %{
6972 match(Set dst (CastX2P src));
6974 format %{ "movq $dst, $src\t# long->ptr" %}
6975 ins_encode(enc_copy_wide(dst, src));
6976 ins_pipe(ialu_reg_reg); // XXX
6977 %}
6979 instruct castP2X(rRegL dst, rRegP src)
6980 %{
6981 match(Set dst (CastP2X src));
6983 format %{ "movq $dst, $src\t# ptr -> long" %}
6984 ins_encode(enc_copy_wide(dst, src));
6985 ins_pipe(ialu_reg_reg); // XXX
6986 %}
// Compressed-oops conversions (see changeset 6705887). The maybe-null
// variants must test for NULL inside the macro-assembler helper and so
// kill flags; the not_null variants can skip the test (no KILL cr for
// decode_not_null) and use a cheaper instruction sequence.
6989 // Convert oop pointer into compressed form
6990 instruct encodeHeapOop(rRegN dst, rRegP src, rFlagsReg cr) %{
6991 predicate(n->bottom_type()->is_narrowoop()->make_oopptr()->ptr() != TypePtr::NotNull);
6992 match(Set dst (EncodeP src));
6993 effect(KILL cr);
6994 format %{ "encode_heap_oop $dst,$src" %}
6995 ins_encode %{
6996 Register s = $src$$Register;
6997 Register d = $dst$$Register;
6998 if (s != d) {
6999 __ movq(d, s);
7000 }
7001 __ encode_heap_oop(d);
7002 %}
7003 ins_pipe(ialu_reg_long);
7004 %}
7006 instruct encodeHeapOop_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{
7007 predicate(n->bottom_type()->is_narrowoop()->make_oopptr()->ptr() == TypePtr::NotNull);
7008 match(Set dst (EncodeP src));
7009 effect(KILL cr);
7010 format %{ "encode_heap_oop_not_null $dst,$src" %}
7011 ins_encode %{
7012 Register s = $src$$Register;
7013 Register d = $dst$$Register;
7014 __ encode_heap_oop_not_null(d, s);
7015 %}
7016 ins_pipe(ialu_reg_long);
7017 %}
7019 instruct decodeHeapOop(rRegP dst, rRegN src, rFlagsReg cr) %{
7020 predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull);
7021 match(Set dst (DecodeN src));
7022 effect(KILL cr);
7023 format %{ "decode_heap_oop $dst,$src" %}
7024 ins_encode %{
7025 Register s = $src$$Register;
7026 Register d = $dst$$Register;
7027 if (s != d) {
7028 __ movq(d, s);
7029 }
7030 __ decode_heap_oop(d);
7031 %}
7032 ins_pipe(ialu_reg_long);
7033 %}
7035 instruct decodeHeapOop_not_null(rRegP dst, rRegN src) %{
7036 predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull);
7037 match(Set dst (DecodeN src));
7038 format %{ "decode_heap_oop_not_null $dst,$src" %}
7039 ins_encode %{
7040 Register s = $src$$Register;
7041 Register d = $dst$$Register;
7042 __ decode_heap_oop_not_null(d, s);
7043 %}
7044 ins_pipe(ialu_reg_long);
7045 %}
7048 //----------Conditional Move---------------------------------------------------
7049 // Jump
// Table-switch jumps: load the jump-table base into a TEMP register,
// then jump indirect through [base + scaled switch value (+ offset)].
7050 // dummy instruction for generating temp registers
7051 instruct jumpXtnd_offset(rRegL switch_val, immI2 shift, rRegI dest) %{
7052 match(Jump (LShiftL switch_val shift));
7053 ins_cost(350);
7054 predicate(false);
7055 effect(TEMP dest);
7057 format %{ "leaq $dest, table_base\n\t"
7058 "jmp [$dest + $switch_val << $shift]\n\t" %}
7059 ins_encode(jump_enc_offset(switch_val, shift, dest));
7060 ins_pipe(pipe_jmp);
7061 ins_pc_relative(1);
7062 %}
7064 instruct jumpXtnd_addr(rRegL switch_val, immI2 shift, immL32 offset, rRegI dest) %{
7065 match(Jump (AddL (LShiftL switch_val shift) offset));
7066 ins_cost(350);
7067 effect(TEMP dest);
7069 format %{ "leaq $dest, table_base\n\t"
7070 "jmp [$dest + $switch_val << $shift + $offset]\n\t" %}
7071 ins_encode(jump_enc_addr(switch_val, shift, offset, dest));
7072 ins_pipe(pipe_jmp);
7073 ins_pc_relative(1);
7074 %}
7076 instruct jumpXtnd(rRegL switch_val, rRegI dest) %{
7077 match(Jump switch_val);
7078 ins_cost(350);
7079 effect(TEMP dest);
7081 format %{ "leaq $dest, table_base\n\t"
7082 "jmp [$dest + $switch_val]\n\t" %}
7083 ins_encode(jump_enc(switch_val, dest));
7084 ins_pipe(pipe_jmp);
7085 ins_pc_relative(1);
7086 %}
// CMOVcc forms for int (I), compressed ptr (N) and ptr (P), in signed
// (rFlagsReg/cmpOp) and unsigned (rFlagsRegU/cmpOpU) flavors.
// 0x0F 0x40 is the CMOVcc base opcode; enc_cmov ORs in the condition.
// Narrow-oop cmov uses the 32-bit form (cmovl); full pointers need the
// REX.W 64-bit form (cmovq).
7088 // Conditional move
7089 instruct cmovI_reg(rRegI dst, rRegI src, rFlagsReg cr, cmpOp cop)
7090 %{
7091 match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
7093 ins_cost(200); // XXX
7094 format %{ "cmovl$cop $dst, $src\t# signed, int" %}
7095 opcode(0x0F, 0x40);
7096 ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
7097 ins_pipe(pipe_cmov_reg);
7098 %}
7100 instruct cmovI_regU(rRegI dst, rRegI src, rFlagsRegU cr, cmpOpU cop)
7101 %{
7102 match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
7104 ins_cost(200); // XXX
7105 format %{ "cmovl$cop $dst, $src\t# unsigned, int" %}
7106 opcode(0x0F, 0x40);
7107 ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
7108 ins_pipe(pipe_cmov_reg);
7109 %}
7111 // Conditional move
7112 instruct cmovI_mem(cmpOp cop, rFlagsReg cr, rRegI dst, memory src)
7113 %{
7114 match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
7116 ins_cost(250); // XXX
7117 format %{ "cmovl$cop $dst, $src\t# signed, int" %}
7118 opcode(0x0F, 0x40);
7119 ins_encode(REX_reg_mem(dst, src), enc_cmov(cop), reg_mem(dst, src));
7120 ins_pipe(pipe_cmov_mem);
7121 %}
7123 // Conditional move
7124 instruct cmovI_memU(cmpOpU cop, rFlagsRegU cr, rRegI dst, memory src)
7125 %{
7126 match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
7128 ins_cost(250); // XXX
7129 format %{ "cmovl$cop $dst, $src\t# unsigned, int" %}
7130 opcode(0x0F, 0x40);
7131 ins_encode(REX_reg_mem(dst, src), enc_cmov(cop), reg_mem(dst, src));
7132 ins_pipe(pipe_cmov_mem);
7133 %}
7135 // Conditional move
7136 instruct cmovN_reg(rRegN dst, rRegN src, rFlagsReg cr, cmpOp cop)
7137 %{
7138 match(Set dst (CMoveN (Binary cop cr) (Binary dst src)));
7140 ins_cost(200); // XXX
7141 format %{ "cmovl$cop $dst, $src\t# signed, compressed ptr" %}
7142 opcode(0x0F, 0x40);
7143 ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
7144 ins_pipe(pipe_cmov_reg);
7145 %}
7147 // Conditional move
7148 instruct cmovN_regU(rRegN dst, rRegN src, rFlagsRegU cr, cmpOpU cop)
7149 %{
7150 match(Set dst (CMoveN (Binary cop cr) (Binary dst src)));
7152 ins_cost(200); // XXX
7153 format %{ "cmovl$cop $dst, $src\t# unsigned, compressed ptr" %}
7154 opcode(0x0F, 0x40);
7155 ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
7156 ins_pipe(pipe_cmov_reg);
7157 %}
7159 // Conditional move
7160 instruct cmovP_reg(rRegP dst, rRegP src, rFlagsReg cr, cmpOp cop)
7161 %{
7162 match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
7164 ins_cost(200); // XXX
7165 format %{ "cmovq$cop $dst, $src\t# signed, ptr" %}
7166 opcode(0x0F, 0x40);
7167 ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
7168 ins_pipe(pipe_cmov_reg); // XXX
7169 %}
7171 // Conditional move
7172 instruct cmovP_regU(rRegP dst, rRegP src, rFlagsRegU cr, cmpOpU cop)
7173 %{
7174 match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
7176 ins_cost(200); // XXX
7177 format %{ "cmovq$cop $dst, $src\t# unsigned, ptr" %}
7178 opcode(0x0F, 0x40);
7179 ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
7180 ins_pipe(pipe_cmov_reg); // XXX
7181 %}
7183 // DISABLED: Requires the ADLC to emit a bottom_type call that
7184 // correctly meets the two pointer arguments; one is an incoming
7185 // register but the other is a memory operand. ALSO appears to
7186 // be buggy with implicit null checks.
7187 //
7188 //// Conditional move
7189 //instruct cmovP_mem(cmpOp cop, rFlagsReg cr, rRegP dst, memory src)
7190 //%{
7191 // match(Set dst (CMoveP (Binary cop cr) (Binary dst (LoadP src))));
7192 // ins_cost(250);
7193 // format %{ "CMOV$cop $dst,$src\t# ptr" %}
7194 // opcode(0x0F,0x40);
7195 // ins_encode( enc_cmov(cop), reg_mem( dst, src ) );
7196 // ins_pipe( pipe_cmov_mem );
7197 //%}
7198 //
7199 //// Conditional move
7200 //instruct cmovP_memU(cmpOpU cop, rFlagsRegU cr, rRegP dst, memory src)
7201 //%{
7202 // match(Set dst (CMoveP (Binary cop cr) (Binary dst (LoadP src))));
7203 // ins_cost(250);
7204 // format %{ "CMOV$cop $dst,$src\t# ptr" %}
7205 // opcode(0x0F,0x40);
7206 // ins_encode( enc_cmov(cop), reg_mem( dst, src ) );
7207 // ins_pipe( pipe_cmov_mem );
7208 //%}
// Long CMOVcc uses the REX.W-wide encodings. Floats/doubles have no
// CMOVcc on x86, so cmovF/cmovD are synthesized as a conditional branch
// around a movss/movsd (see enc_cmovf_branch / enc_cmovd_branch).
7210 instruct cmovL_reg(cmpOp cop, rFlagsReg cr, rRegL dst, rRegL src)
7211 %{
7212 match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
7214 ins_cost(200); // XXX
7215 format %{ "cmovq$cop $dst, $src\t# signed, long" %}
7216 opcode(0x0F, 0x40);
7217 ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
7218 ins_pipe(pipe_cmov_reg); // XXX
7219 %}
7221 instruct cmovL_mem(cmpOp cop, rFlagsReg cr, rRegL dst, memory src)
7222 %{
7223 match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src))));
7225 ins_cost(200); // XXX
7226 format %{ "cmovq$cop $dst, $src\t# signed, long" %}
7227 opcode(0x0F, 0x40);
7228 ins_encode(REX_reg_mem_wide(dst, src), enc_cmov(cop), reg_mem(dst, src));
7229 ins_pipe(pipe_cmov_mem); // XXX
7230 %}
7232 instruct cmovL_regU(cmpOpU cop, rFlagsRegU cr, rRegL dst, rRegL src)
7233 %{
7234 match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
7236 ins_cost(200); // XXX
7237 format %{ "cmovq$cop $dst, $src\t# unsigned, long" %}
7238 opcode(0x0F, 0x40);
7239 ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
7240 ins_pipe(pipe_cmov_reg); // XXX
7241 %}
7243 instruct cmovL_memU(cmpOpU cop, rFlagsRegU cr, rRegL dst, memory src)
7244 %{
7245 match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src))));
7247 ins_cost(200); // XXX
7248 format %{ "cmovq$cop $dst, $src\t# unsigned, long" %}
7249 opcode(0x0F, 0x40);
7250 ins_encode(REX_reg_mem_wide(dst, src), enc_cmov(cop), reg_mem(dst, src));
7251 ins_pipe(pipe_cmov_mem); // XXX
7252 %}
7254 instruct cmovF_reg(cmpOp cop, rFlagsReg cr, regF dst, regF src)
7255 %{
7256 match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
7258 ins_cost(200); // XXX
7259 format %{ "jn$cop skip\t# signed cmove float\n\t"
7260 "movss $dst, $src\n"
7261 "skip:" %}
7262 ins_encode(enc_cmovf_branch(cop, dst, src));
7263 ins_pipe(pipe_slow);
7264 %}
7266 // instruct cmovF_mem(cmpOp cop, rFlagsReg cr, regF dst, memory src)
7267 // %{
7268 // match(Set dst (CMoveF (Binary cop cr) (Binary dst (LoadL src))));
7270 // ins_cost(200); // XXX
7271 // format %{ "jn$cop skip\t# signed cmove float\n\t"
7272 // "movss $dst, $src\n"
7273 // "skip:" %}
7274 // ins_encode(enc_cmovf_mem_branch(cop, dst, src));
7275 // ins_pipe(pipe_slow);
7276 // %}
7278 instruct cmovF_regU(cmpOpU cop, rFlagsRegU cr, regF dst, regF src)
7279 %{
7280 match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
7282 ins_cost(200); // XXX
7283 format %{ "jn$cop skip\t# unsigned cmove float\n\t"
7284 "movss $dst, $src\n"
7285 "skip:" %}
7286 ins_encode(enc_cmovf_branch(cop, dst, src));
7287 ins_pipe(pipe_slow);
7288 %}
7290 instruct cmovD_reg(cmpOp cop, rFlagsReg cr, regD dst, regD src)
7291 %{
7292 match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
7294 ins_cost(200); // XXX
7295 format %{ "jn$cop skip\t# signed cmove double\n\t"
7296 "movsd $dst, $src\n"
7297 "skip:" %}
7298 ins_encode(enc_cmovd_branch(cop, dst, src));
7299 ins_pipe(pipe_slow);
7300 %}
7302 instruct cmovD_regU(cmpOpU cop, rFlagsRegU cr, regD dst, regD src)
7303 %{
7304 match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
7306 ins_cost(200); // XXX
7307 format %{ "jn$cop skip\t# unsigned cmove double\n\t"
7308 "movsd $dst, $src\n"
7309 "skip:" %}
7310 ins_encode(enc_cmovd_branch(cop, dst, src));
7311 ins_pipe(pipe_slow);
7312 %}
7314 //----------Arithmetic Instructions--------------------------------------------
7315 //----------Addition Instructions----------------------------------------------
// 32-bit integer add family: reg-reg, reg-imm, reg-mem, mem-reg, mem-imm,
// INC/DEC peepholes (gated on UseIncDec), and a flags-preserving LEA form.
7317 instruct addI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
7318 %{
7319 match(Set dst (AddI dst src));
7320 effect(KILL cr);
7322 format %{ "addl $dst, $src\t# int" %}
7323 opcode(0x03);
7324 ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
7325 ins_pipe(ialu_reg_reg);
7326 %}
7328 instruct addI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
7329 %{
7330 match(Set dst (AddI dst src));
7331 effect(KILL cr);
7333 format %{ "addl $dst, $src\t# int" %}
7334 opcode(0x81, 0x00); /* /0 id */
7335 ins_encode(OpcSErm(dst, src), Con8or32(src));
7336 ins_pipe( ialu_reg );
7337 %}
7339 instruct addI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
7340 %{
7341 match(Set dst (AddI dst (LoadI src)));
7342 effect(KILL cr);
7344 ins_cost(125); // XXX
7345 format %{ "addl $dst, $src\t# int" %}
7346 opcode(0x03);
7347 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
7348 ins_pipe(ialu_reg_mem);
7349 %}
7351 instruct addI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
7352 %{
7353 match(Set dst (StoreI dst (AddI (LoadI dst) src)));
7354 effect(KILL cr);
7356 ins_cost(150); // XXX
7357 format %{ "addl $dst, $src\t# int" %}
7358 opcode(0x01); /* Opcode 01 /r */
7359 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
7360 ins_pipe(ialu_mem_reg);
7361 %}
7363 instruct addI_mem_imm(memory dst, immI src, rFlagsReg cr)
7364 %{
7365 match(Set dst (StoreI dst (AddI (LoadI dst) src)));
7366 effect(KILL cr);
7368 ins_cost(125); // XXX
7369 format %{ "addl $dst, $src\t# int" %}
7370 opcode(0x81); /* Opcode 81 /0 id */
7371 ins_encode(REX_mem(dst), OpcSE(src), RM_opc_mem(0x00, dst), Con8or32(src));
7372 ins_pipe(ialu_mem_imm);
7373 %}
7375 instruct incI_rReg(rRegI dst, immI1 src, rFlagsReg cr)
7376 %{
7377 predicate(UseIncDec);
7378 match(Set dst (AddI dst src));
7379 effect(KILL cr);
7381 format %{ "incl $dst\t# int" %}
7382 opcode(0xFF, 0x00); // FF /0
7383 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
7384 ins_pipe(ialu_reg);
7385 %}
7387 instruct incI_mem(memory dst, immI1 src, rFlagsReg cr)
7388 %{
7389 predicate(UseIncDec);
7390 match(Set dst (StoreI dst (AddI (LoadI dst) src)));
7391 effect(KILL cr);
7393 ins_cost(125); // XXX
7394 format %{ "incl $dst\t# int" %}
7395 opcode(0xFF); /* Opcode FF /0 */
7396 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(0x00, dst));
7397 ins_pipe(ialu_mem_imm);
7398 %}
7400 // XXX why does that use AddI
// (Answer visible from the match rule: there is no SubI-with-constant-1
// ideal form here — decrement is matched as AddI with immI_M1.)
7401 instruct decI_rReg(rRegI dst, immI_M1 src, rFlagsReg cr)
7402 %{
7403 predicate(UseIncDec);
7404 match(Set dst (AddI dst src));
7405 effect(KILL cr);
7407 format %{ "decl $dst\t# int" %}
7408 opcode(0xFF, 0x01); // FF /1
7409 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
7410 ins_pipe(ialu_reg);
7411 %}
7413 // XXX why does that use AddI
7414 instruct decI_mem(memory dst, immI_M1 src, rFlagsReg cr)
7415 %{
7416 predicate(UseIncDec);
7417 match(Set dst (StoreI dst (AddI (LoadI dst) src)));
7418 effect(KILL cr);
7420 ins_cost(125); // XXX
7421 format %{ "decl $dst\t# int" %}
7422 opcode(0xFF); /* Opcode FF /1 */
7423 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(0x01, dst));
7424 ins_pipe(ialu_mem_imm);
7425 %}
// Three-operand add via LEA: does not clobber flags (no KILL cr).
// The 0x67 addr32 prefix forces 32-bit address arithmetic for the int case.
7427 instruct leaI_rReg_immI(rRegI dst, rRegI src0, immI src1)
7428 %{
7429 match(Set dst (AddI src0 src1));
7431 ins_cost(110);
7432 format %{ "addr32 leal $dst, [$src0 + $src1]\t# int" %}
7433 opcode(0x8D); /* 0x8D /r */
7434 ins_encode(Opcode(0x67), REX_reg_reg(dst, src0), OpcP, reg_lea(dst, src0, src1)); // XXX
7435 ins_pipe(ialu_reg_reg);
7436 %}
// 64-bit long add family: mirrors the AddI section above with REX.W
// ("_wide") encodings and immL32 (sign-extended 32-bit) immediates.
7438 instruct addL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
7439 %{
7440 match(Set dst (AddL dst src));
7441 effect(KILL cr);
7443 format %{ "addq $dst, $src\t# long" %}
7444 opcode(0x03);
7445 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
7446 ins_pipe(ialu_reg_reg);
7447 %}
7449 instruct addL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
7450 %{
7451 match(Set dst (AddL dst src));
7452 effect(KILL cr);
7454 format %{ "addq $dst, $src\t# long" %}
7455 opcode(0x81, 0x00); /* /0 id */
7456 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
7457 ins_pipe( ialu_reg );
7458 %}
7460 instruct addL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
7461 %{
7462 match(Set dst (AddL dst (LoadL src)));
7463 effect(KILL cr);
7465 ins_cost(125); // XXX
7466 format %{ "addq $dst, $src\t# long" %}
7467 opcode(0x03);
7468 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
7469 ins_pipe(ialu_reg_mem);
7470 %}
7472 instruct addL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
7473 %{
7474 match(Set dst (StoreL dst (AddL (LoadL dst) src)));
7475 effect(KILL cr);
7477 ins_cost(150); // XXX
7478 format %{ "addq $dst, $src\t# long" %}
7479 opcode(0x01); /* Opcode 01 /r */
7480 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
7481 ins_pipe(ialu_mem_reg);
7482 %}
7484 instruct addL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
7485 %{
7486 match(Set dst (StoreL dst (AddL (LoadL dst) src)));
7487 effect(KILL cr);
7489 ins_cost(125); // XXX
7490 format %{ "addq $dst, $src\t# long" %}
7491 opcode(0x81); /* Opcode 81 /0 id */
7492 ins_encode(REX_mem_wide(dst),
7493 OpcSE(src), RM_opc_mem(0x00, dst), Con8or32(src));
7494 ins_pipe(ialu_mem_imm);
7495 %}
// NOTE(review): dst is declared rRegI but the match rule is AddL and the
// encoding uses REX_reg_wide — looks like an operand-class typo for rRegL
// (sibling decL_rReg uses rRegL). Verify the rRegI/rRegL register masks
// before changing; left as-is here.
7497 instruct incL_rReg(rRegI dst, immL1 src, rFlagsReg cr)
7498 %{
7499 predicate(UseIncDec);
7500 match(Set dst (AddL dst src));
7501 effect(KILL cr);
7503 format %{ "incq $dst\t# long" %}
7504 opcode(0xFF, 0x00); // FF /0
7505 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
7506 ins_pipe(ialu_reg);
7507 %}
7509 instruct incL_mem(memory dst, immL1 src, rFlagsReg cr)
7510 %{
7511 predicate(UseIncDec);
7512 match(Set dst (StoreL dst (AddL (LoadL dst) src)));
7513 effect(KILL cr);
7515 ins_cost(125); // XXX
7516 format %{ "incq $dst\t# long" %}
7517 opcode(0xFF); /* Opcode FF /0 */
7518 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(0x00, dst));
7519 ins_pipe(ialu_mem_imm);
7520 %}
7522 // XXX why does that use AddL
7523 instruct decL_rReg(rRegL dst, immL_M1 src, rFlagsReg cr)
7524 %{
7525 predicate(UseIncDec);
7526 match(Set dst (AddL dst src));
7527 effect(KILL cr);
7529 format %{ "decq $dst\t# long" %}
7530 opcode(0xFF, 0x01); // FF /1
7531 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
7532 ins_pipe(ialu_reg);
7533 %}
7535 // XXX why does that use AddL
7536 instruct decL_mem(memory dst, immL_M1 src, rFlagsReg cr)
7537 %{
7538 predicate(UseIncDec);
7539 match(Set dst (StoreL dst (AddL (LoadL dst) src)));
7540 effect(KILL cr);
7542 ins_cost(125); // XXX
7543 format %{ "decq $dst\t# long" %}
7544 opcode(0xFF); /* Opcode FF /1 */
7545 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(0x01, dst));
7546 ins_pipe(ialu_mem_imm);
7547 %}
// Three-operand long add via LEA (flags-preserving, no KILL cr).
7549 instruct leaL_rReg_immL(rRegL dst, rRegL src0, immL32 src1)
7550 %{
7551 match(Set dst (AddL src0 src1));
7553 ins_cost(110);
7554 format %{ "leaq $dst, [$src0 + $src1]\t# long" %}
7555 opcode(0x8D); /* 0x8D /r */
7556 ins_encode(REX_reg_reg_wide(dst, src0), OpcP, reg_lea(dst, src0, src1)); // XXX
7557 ins_pipe(ialu_reg_reg);
7558 %}
// Pointer addition: addq forms that kill flags, plus a flags-preserving
// LEA form for the three-operand case.
7560 instruct addP_rReg(rRegP dst, rRegL src, rFlagsReg cr)
7561 %{
7562 match(Set dst (AddP dst src));
7563 effect(KILL cr);
7565 format %{ "addq $dst, $src\t# ptr" %}
7566 opcode(0x03);
7567 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
7568 ins_pipe(ialu_reg_reg);
7569 %}
7571 instruct addP_rReg_imm(rRegP dst, immL32 src, rFlagsReg cr)
7572 %{
7573 match(Set dst (AddP dst src));
7574 effect(KILL cr);
7576 format %{ "addq $dst, $src\t# ptr" %}
7577 opcode(0x81, 0x00); /* /0 id */
7578 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
7579 ins_pipe( ialu_reg );
7580 %}
7582 // XXX addP mem ops ????
7584 instruct leaP_rReg_imm(rRegP dst, rRegP src0, immL32 src1)
7585 %{
7586 match(Set dst (AddP src0 src1));
7588 ins_cost(110);
7589 format %{ "leaq $dst, [$src0 + $src1]\t# ptr" %}
7590 opcode(0x8D); /* 0x8D /r */
7591 ins_encode(REX_reg_reg_wide(dst, src0), OpcP, reg_lea(dst, src0, src1));// XXX
7592 ins_pipe(ialu_reg_reg);
7593 %}
// Type-system-only casts: all three are size(0) / empty encoding — they
// exist solely to carry the ideal-graph type change through matching.
7595 instruct checkCastPP(rRegP dst)
7596 %{
7597 match(Set dst (CheckCastPP dst));
7599 size(0);
7600 format %{ "# checkcastPP of $dst" %}
7601 ins_encode(/* empty encoding */);
7602 ins_pipe(empty);
7603 %}
7605 instruct castPP(rRegP dst)
7606 %{
7607 match(Set dst (CastPP dst));
7609 size(0);
7610 format %{ "# castPP of $dst" %}
7611 ins_encode(/* empty encoding */);
7612 ins_pipe(empty);
7613 %}
7615 instruct castII(rRegI dst)
7616 %{
7617 match(Set dst (CastII dst));
7619 size(0);
7620 format %{ "# castII of $dst" %}
7621 ins_encode(/* empty encoding */);
7622 ins_cost(0);
7623 ins_pipe(empty);
7624 %}
// "Locked" loads are ordinary movq loads; the locking semantics come from
// the CMPXCHG they are paired with in a load-locked / store-conditional
// idiom.
7626 // LoadP-locked same as a regular LoadP when used with compare-swap
7627 instruct loadPLocked(rRegP dst, memory mem)
7628 %{
7629 match(Set dst (LoadPLocked mem));
7631 ins_cost(125); // XXX
7632 format %{ "movq $dst, $mem\t# ptr locked" %}
7633 opcode(0x8B);
7634 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
7635 ins_pipe(ialu_reg_mem); // XXX
7636 %}
7638 // LoadL-locked - same as a regular LoadL when used with compare-swap
7639 instruct loadLLocked(rRegL dst, memory mem)
7640 %{
7641 match(Set dst (LoadLLocked mem));
7643 ins_cost(125); // XXX
7644 format %{ "movq $dst, $mem\t# long locked" %}
7645 opcode(0x8B);
7646 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
7647 ins_pipe(ialu_reg_mem); // XXX
7648 %}
7650 // Conditional-store of the updated heap-top.
7651 // Used during allocation of the shared heap.
7652 // Sets flags (EQ) on success. Implemented with a CMPXCHG on Intel.
// LOCK CMPXCHG [mem], newval with the expected old value pinned in RAX
// (rax_RegP operand). The result is consumed via the flags (Set cr).
7654 instruct storePConditional(memory heap_top_ptr,
7655 rax_RegP oldval, rRegP newval,
7656 rFlagsReg cr)
7657 %{
7658 match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));
7660 format %{ "cmpxchgq $heap_top_ptr, $newval\t# (ptr) "
7661 "If rax == $heap_top_ptr then store $newval into $heap_top_ptr" %}
7662 opcode(0x0F, 0xB1);
7663 ins_encode(lock_prefix,
7664 REX_reg_mem_wide(newval, heap_top_ptr),
7665 OpcP, OpcS,
7666 reg_mem(newval, heap_top_ptr));
7667 ins_pipe(pipe_cmpxchg);
7668 %}
7670 // Conditional-store of a long value
7671 // Returns a boolean value (0/1) on success. Implemented with a
7672 // CMPXCHG8 on Intel. mem_ptr can actually be in either RSI or RDI
// Boolean-result variant: CMPXCHG followed by SETE + MOVZBL to
// materialize 0/1 into res.
7674 instruct storeLConditional(rRegI res,
7675 memory mem_ptr,
7676 rax_RegL oldval, rRegL newval,
7677 rFlagsReg cr)
7678 %{
7679 match(Set res (StoreLConditional mem_ptr (Binary oldval newval)));
7680 effect(KILL cr);
7682 format %{ "cmpxchgq $mem_ptr, $newval\t# (long) "
7683 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
7684 "sete $res\n\t"
7685 "movzbl $res, $res" %}
7686 opcode(0x0F, 0xB1);
7687 ins_encode(lock_prefix,
7688 REX_reg_mem_wide(newval, mem_ptr),
7689 OpcP, OpcS,
7690 reg_mem(newval, mem_ptr),
7691 REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
7692 REX_reg_breg(res, res), // movzbl
7693 Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
7694 ins_pipe(pipe_cmpxchg);
7695 %}
7697 // Conditional-store of a long value
7698 // ZF flag is set on success, reset otherwise. Implemented with a
7699 // CMPXCHG8 on Intel. mem_ptr can actually be in either RSI or RDI
7700 instruct storeLConditional_flags(memory mem_ptr,
7701 rax_RegL oldval, rRegL newval,
7702 rFlagsReg cr,
7703 immI0 zero)
7704 %{
7705 match(Set cr (CmpI (StoreLConditional mem_ptr (Binary oldval newval)) zero));
7707 format %{ "cmpxchgq $mem_ptr, $newval\t# (long) "
7708 "If rax == $mem_ptr then store $newval into $mem_ptr" %}
7709 opcode(0x0F, 0xB1);
7710 ins_encode(lock_prefix,
7711 REX_reg_mem_wide(newval, mem_ptr),
7712 OpcP, OpcS,
7713 reg_mem(newval, mem_ptr));
7714 ins_pipe(pipe_cmpxchg);
7715 %}
// CompareAndSwap family: LOCK CMPXCHG with oldval pinned in RAX (KILLed —
// CMPXCHG writes the memory value back into RAX on failure), then
// SETE + MOVZBL to produce a 0/1 int result. P/L use the REX.W 64-bit
// form; I and N (compressed oop) use the 32-bit form.
// NOTE(review): the movzbl opcode bytes are written Opcode(0xF) here but
// Opcode(0x0F) elsewhere in this file — same value, inconsistent style.
7717 instruct compareAndSwapP(rRegI res,
7718 memory mem_ptr,
7719 rax_RegP oldval, rRegP newval,
7720 rFlagsReg cr)
7721 %{
7722 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
7723 effect(KILL cr, KILL oldval);
7725 format %{ "cmpxchgq $mem_ptr,$newval\t# "
7726 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
7727 "sete $res\n\t"
7728 "movzbl $res, $res" %}
7729 opcode(0x0F, 0xB1);
7730 ins_encode(lock_prefix,
7731 REX_reg_mem_wide(newval, mem_ptr),
7732 OpcP, OpcS,
7733 reg_mem(newval, mem_ptr),
7734 REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
7735 REX_reg_breg(res, res), // movzbl
7736 Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
7737 ins_pipe( pipe_cmpxchg );
7738 %}
7740 // XXX No flag versions for CompareAndSwap{P,I,L} because matcher can't match them
7741 instruct compareAndSwapL(rRegI res,
7742 memory mem_ptr,
7743 rax_RegL oldval, rRegL newval,
7744 rFlagsReg cr)
7745 %{
7746 match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
7747 effect(KILL cr, KILL oldval);
7749 format %{ "cmpxchgq $mem_ptr,$newval\t# "
7750 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
7751 "sete $res\n\t"
7752 "movzbl $res, $res" %}
7753 opcode(0x0F, 0xB1);
7754 ins_encode(lock_prefix,
7755 REX_reg_mem_wide(newval, mem_ptr),
7756 OpcP, OpcS,
7757 reg_mem(newval, mem_ptr),
7758 REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
7759 REX_reg_breg(res, res), // movzbl
7760 Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
7761 ins_pipe( pipe_cmpxchg );
7762 %}
7764 instruct compareAndSwapI(rRegI res,
7765 memory mem_ptr,
7766 rax_RegI oldval, rRegI newval,
7767 rFlagsReg cr)
7768 %{
7769 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
7770 effect(KILL cr, KILL oldval);
7772 format %{ "cmpxchgl $mem_ptr,$newval\t# "
7773 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
7774 "sete $res\n\t"
7775 "movzbl $res, $res" %}
7776 opcode(0x0F, 0xB1);
7777 ins_encode(lock_prefix,
7778 REX_reg_mem(newval, mem_ptr),
7779 OpcP, OpcS,
7780 reg_mem(newval, mem_ptr),
7781 REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
7782 REX_reg_breg(res, res), // movzbl
7783 Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
7784 ins_pipe( pipe_cmpxchg );
7785 %}
// Compressed-oop CAS: 32-bit cmpxchgl on the narrow-oop value (new with
// this changeset's compressed-oops support).
7788 instruct compareAndSwapN(rRegI res,
7789 memory mem_ptr,
7790 rax_RegN oldval, rRegN newval,
7791 rFlagsReg cr) %{
7792 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
7793 effect(KILL cr, KILL oldval);
7795 format %{ "cmpxchgl $mem_ptr,$newval\t# "
7796 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
7797 "sete $res\n\t"
7798 "movzbl $res, $res" %}
7799 opcode(0x0F, 0xB1);
7800 ins_encode(lock_prefix,
7801 REX_reg_mem(newval, mem_ptr),
7802 OpcP, OpcS,
7803 reg_mem(newval, mem_ptr),
7804 REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
7805 REX_reg_breg(res, res), // movzbl
7806 Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
7807 ins_pipe( pipe_cmpxchg );
7808 %}
7810 //----------Subtraction Instructions-------------------------------------------
7812 // Integer Subtraction Instructions
// subl dst, src : 32-bit integer subtract, register-register (2B /r).
7813 instruct subI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
7814 %{
7815 match(Set dst (SubI dst src));
7816 effect(KILL cr);
7818 format %{ "subl $dst, $src\t# int" %}
7819 opcode(0x2B);
7820 ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
7821 ins_pipe(ialu_reg_reg);
7822 %}
// subl dst, imm : 32-bit subtract of an immediate (81 /5, or sign-extended
// 8-bit form chosen by Con8or32).
7824 instruct subI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
7825 %{
7826 match(Set dst (SubI dst src));
7827 effect(KILL cr);
7829 format %{ "subl $dst, $src\t# int" %}
7830 opcode(0x81, 0x05); /* Opcode 81 /5 */
7831 ins_encode(OpcSErm(dst, src), Con8or32(src));
7832 ins_pipe(ialu_reg);
7833 %}
// subl dst, [mem] : subtract a loaded int from a register.
7835 instruct subI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
7836 %{
7837 match(Set dst (SubI dst (LoadI src)));
7838 effect(KILL cr);
7840 ins_cost(125);
7841 format %{ "subl $dst, $src\t# int" %}
7842 opcode(0x2B);
7843 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
7844 ins_pipe(ialu_reg_mem);
7845 %}
// subl [mem], src : read-modify-write subtract into memory (29 /r).
7847 instruct subI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
7848 %{
7849 match(Set dst (StoreI dst (SubI (LoadI dst) src)));
7850 effect(KILL cr);
7852 ins_cost(150);
7853 format %{ "subl $dst, $src\t# int" %}
7854 opcode(0x29); /* Opcode 29 /r */
7855 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
7856 ins_pipe(ialu_mem_reg);
7857 %}
// subl [mem], imm : read-modify-write subtract of an immediate (81 /5 id).
7859 instruct subI_mem_imm(memory dst, immI src, rFlagsReg cr)
7860 %{
7861 match(Set dst (StoreI dst (SubI (LoadI dst) src)));
7862 effect(KILL cr);
7864 ins_cost(125); // XXX
7865 format %{ "subl $dst, $src\t# int" %}
7866 opcode(0x81); /* Opcode 81 /5 id */
7867 ins_encode(REX_mem(dst), OpcSE(src), RM_opc_mem(0x05, dst), Con8or32(src));
7868 ins_pipe(ialu_mem_imm);
7869 %}
// subq dst, src : 64-bit subtract, register-register (REX.W + 2B /r).
7871 instruct subL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
7872 %{
7873 match(Set dst (SubL dst src));
7874 effect(KILL cr);
7876 format %{ "subq $dst, $src\t# long" %}
7877 opcode(0x2B);
7878 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
7879 ins_pipe(ialu_reg_reg);
7880 %}
// subq dst, imm : 64-bit subtract of a sign-extended 32-bit immediate
// (REX.W + 81 /5, or the 8-bit form chosen by Con8or32).
// FIX: dst was declared rRegI, but this rule matches the 64-bit SubL and
// already encodes with OpcSErm_wide / prints "subq"; the operand class
// must be rRegL to agree with the long ideal type.
7882 instruct subL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
7883 %{
7884 match(Set dst (SubL dst src));
7885 effect(KILL cr);
7887 format %{ "subq $dst, $src\t# long" %}
7888 opcode(0x81, 0x05); /* Opcode 81 /5 */
7889 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
7890 ins_pipe(ialu_reg);
7891 %}
// subq dst, [mem] : subtract a loaded long from a register.
7893 instruct subL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
7894 %{
7895 match(Set dst (SubL dst (LoadL src)));
7896 effect(KILL cr);
7898 ins_cost(125);
7899 format %{ "subq $dst, $src\t# long" %}
7900 opcode(0x2B);
7901 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
7902 ins_pipe(ialu_reg_mem);
7903 %}
// subq [mem], src : read-modify-write 64-bit subtract into memory (29 /r).
7905 instruct subL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
7906 %{
7907 match(Set dst (StoreL dst (SubL (LoadL dst) src)));
7908 effect(KILL cr);
7910 ins_cost(150);
7911 format %{ "subq $dst, $src\t# long" %}
7912 opcode(0x29); /* Opcode 29 /r */
7913 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
7914 ins_pipe(ialu_mem_reg);
7915 %}
// subq [mem], imm : read-modify-write subtract of a 32-bit immediate (81 /5 id).
7917 instruct subL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
7918 %{
7919 match(Set dst (StoreL dst (SubL (LoadL dst) src)));
7920 effect(KILL cr);
7922 ins_cost(125); // XXX
7923 format %{ "subq $dst, $src\t# long" %}
7924 opcode(0x81); /* Opcode 81 /5 id */
7925 ins_encode(REX_mem_wide(dst),
7926 OpcSE(src), RM_opc_mem(0x05, dst), Con8or32(src));
7927 ins_pipe(ialu_mem_imm);
7928 %}
7930 // Subtract from a pointer
7931 // XXX hmpf???
// subq dst, src : subtract an int from a pointer. Matches the ideal
// pattern AddP(dst, SubI(0, src)), i.e. ptr + (-src), and emits a plain
// 64-bit subtract.
7932 instruct subP_rReg(rRegP dst, rRegI src, immI0 zero, rFlagsReg cr)
7933 %{
7934 match(Set dst (AddP dst (SubI zero src)));
7935 effect(KILL cr);
7937 format %{ "subq $dst, $src\t# ptr - int" %}
7938 opcode(0x2B);
7939 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
7940 ins_pipe(ialu_reg_reg);
7941 %}
// negl dst : two's-complement negate of a 32-bit register; matches
// SubI(0, dst). Encoding F7 /3.
7943 instruct negI_rReg(rRegI dst, immI0 zero, rFlagsReg cr)
7944 %{
7945 match(Set dst (SubI zero dst));
7946 effect(KILL cr);
7948 format %{ "negl $dst\t# int" %}
7949 opcode(0xF7, 0x03); // Opcode F7 /3
7950 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
7951 ins_pipe(ialu_reg);
7952 %}
// negl [mem] : negate a 32-bit value in memory in place.
7954 instruct negI_mem(memory dst, immI0 zero, rFlagsReg cr)
7955 %{
7956 match(Set dst (StoreI dst (SubI zero (LoadI dst))));
7957 effect(KILL cr);
7959 format %{ "negl $dst\t# int" %}
7960 opcode(0xF7, 0x03); // Opcode F7 /3
7961 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
7962 ins_pipe(ialu_reg);
7963 %}
// negq dst : 64-bit negate of a register (REX.W + F7 /3).
7965 instruct negL_rReg(rRegL dst, immL0 zero, rFlagsReg cr)
7966 %{
7967 match(Set dst (SubL zero dst));
7968 effect(KILL cr);
7970 format %{ "negq $dst\t# long" %}
7971 opcode(0xF7, 0x03); // Opcode F7 /3
7972 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
7973 ins_pipe(ialu_reg);
7974 %}
// negq [mem] : 64-bit negate of a value in memory in place.
7976 instruct negL_mem(memory dst, immL0 zero, rFlagsReg cr)
7977 %{
7978 match(Set dst (StoreL dst (SubL zero (LoadL dst))));
7979 effect(KILL cr);
7981 format %{ "negq $dst\t# long" %}
7982 opcode(0xF7, 0x03); // Opcode F7 /3
7983 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
7984 ins_pipe(ialu_reg);
7985 %}
7988 //----------Multiplication/Division Instructions-------------------------------
7989 // Integer Multiplication Instructions
7990 // Multiply Register
// imull dst, src : 32-bit signed multiply, register-register (0F AF /r).
7992 instruct mulI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
7993 %{
7994 match(Set dst (MulI dst src));
7995 effect(KILL cr);
7997 ins_cost(300);
7998 format %{ "imull $dst, $src\t# int" %}
7999 opcode(0x0F, 0xAF);
8000 ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
8001 ins_pipe(ialu_reg_reg_alu0);
8002 %}
// imull dst, src, imm : three-operand multiply by immediate (69 /r id);
// dst need not equal src.
8004 instruct mulI_rReg_imm(rRegI dst, rRegI src, immI imm, rFlagsReg cr)
8005 %{
8006 match(Set dst (MulI src imm));
8007 effect(KILL cr);
8009 ins_cost(300);
8010 format %{ "imull $dst, $src, $imm\t# int" %}
8011 opcode(0x69); /* 69 /r id */
8012 ins_encode(REX_reg_reg(dst, src),
8013 OpcSE(imm), reg_reg(dst, src), Con8or32(imm));
8014 ins_pipe(ialu_reg_reg_alu0);
8015 %}
// imull dst, [mem] : multiply a register by a loaded int.
8017 instruct mulI_mem(rRegI dst, memory src, rFlagsReg cr)
8018 %{
8019 match(Set dst (MulI dst (LoadI src)));
8020 effect(KILL cr);
8022 ins_cost(350);
8023 format %{ "imull $dst, $src\t# int" %}
8024 opcode(0x0F, 0xAF);
8025 ins_encode(REX_reg_mem(dst, src), OpcP, OpcS, reg_mem(dst, src));
8026 ins_pipe(ialu_reg_mem_alu0);
8027 %}
// imull dst, [mem], imm : multiply a loaded int by an immediate into dst.
8029 instruct mulI_mem_imm(rRegI dst, memory src, immI imm, rFlagsReg cr)
8030 %{
8031 match(Set dst (MulI (LoadI src) imm));
8032 effect(KILL cr);
8034 ins_cost(300);
8035 format %{ "imull $dst, $src, $imm\t# int" %}
8036 opcode(0x69); /* 69 /r id */
8037 ins_encode(REX_reg_mem(dst, src),
8038 OpcSE(imm), reg_mem(dst, src), Con8or32(imm));
8039 ins_pipe(ialu_reg_mem_alu0);
8040 %}
// imulq dst, src : 64-bit signed multiply, register-register (REX.W 0F AF).
8042 instruct mulL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
8043 %{
8044 match(Set dst (MulL dst src));
8045 effect(KILL cr);
8047 ins_cost(300);
8048 format %{ "imulq $dst, $src\t# long" %}
8049 opcode(0x0F, 0xAF);
8050 ins_encode(REX_reg_reg_wide(dst, src), OpcP, OpcS, reg_reg(dst, src));
8051 ins_pipe(ialu_reg_reg_alu0);
8052 %}
// imulq dst, src, imm : three-operand 64-bit multiply by a 32-bit immediate.
8054 instruct mulL_rReg_imm(rRegL dst, rRegL src, immL32 imm, rFlagsReg cr)
8055 %{
8056 match(Set dst (MulL src imm));
8057 effect(KILL cr);
8059 ins_cost(300);
8060 format %{ "imulq $dst, $src, $imm\t# long" %}
8061 opcode(0x69); /* 69 /r id */
8062 ins_encode(REX_reg_reg_wide(dst, src),
8063 OpcSE(imm), reg_reg(dst, src), Con8or32(imm));
8064 ins_pipe(ialu_reg_reg_alu0);
8065 %}
// imulq dst, [mem] : multiply a register by a loaded long.
8067 instruct mulL_mem(rRegL dst, memory src, rFlagsReg cr)
8068 %{
8069 match(Set dst (MulL dst (LoadL src)));
8070 effect(KILL cr);
8072 ins_cost(350);
8073 format %{ "imulq $dst, $src\t# long" %}
8074 opcode(0x0F, 0xAF);
8075 ins_encode(REX_reg_mem_wide(dst, src), OpcP, OpcS, reg_mem(dst, src));
8076 ins_pipe(ialu_reg_mem_alu0);
8077 %}
// imulq dst, [mem], imm : multiply a loaded long by an immediate into dst.
8079 instruct mulL_mem_imm(rRegL dst, memory src, immL32 imm, rFlagsReg cr)
8080 %{
8081 match(Set dst (MulL (LoadL src) imm));
8082 effect(KILL cr);
8084 ins_cost(300);
8085 format %{ "imulq $dst, $src, $imm\t# long" %}
8086 opcode(0x69); /* 69 /r id */
8087 ins_encode(REX_reg_mem_wide(dst, src),
8088 OpcSE(imm), reg_mem(dst, src), Con8or32(imm));
8089 ins_pipe(ialu_reg_mem_alu0);
8090 %}
// MulHiL: high 64 bits of a 128-bit signed product. One-operand
// imulq (F7 /5) multiplies rax by src, leaving the high half in rdx
// (the dst operand class) and clobbering rax.
8092 instruct mulHiL_rReg(rdx_RegL dst, no_rax_RegL src, rax_RegL rax, rFlagsReg cr)
8093 %{
8094 match(Set dst (MulHiL src rax));
8095 effect(USE_KILL rax, KILL cr);
8097 ins_cost(300);
8098 format %{ "imulq RDX:RAX, RAX, $src\t# mulhi" %}
8099 opcode(0xF7, 0x5); /* Opcode F7 /5 */
8100 ins_encode(REX_reg_wide(src), OpcP, reg_opc(src));
8101 ins_pipe(ialu_reg_reg_alu0);
8102 %}
// Signed 32-bit divide: quotient in rax, remainder clobbers rdx.
// The cdql_enc prologue (per the format text) special-cases
// MIN_INT / -1, which would otherwise fault in idivl.
8104 instruct divI_rReg(rax_RegI rax, rdx_RegI rdx, no_rax_rdx_RegI div,
8105 rFlagsReg cr)
8106 %{
8107 match(Set rax (DivI rax div));
8108 effect(KILL rdx, KILL cr);
8110 ins_cost(30*100+10*100); // XXX
8111 format %{ "cmpl rax, 0x80000000\t# idiv\n\t"
8112 "jne,s normal\n\t"
8113 "xorl rdx, rdx\n\t"
8114 "cmpl $div, -1\n\t"
8115 "je,s done\n"
8116 "normal: cdql\n\t"
8117 "idivl $div\n"
8118 "done:" %}
8119 opcode(0xF7, 0x7); /* Opcode F7 /7 */
8120 ins_encode(cdql_enc(div), REX_reg(div), OpcP, reg_opc(div));
8121 ins_pipe(ialu_reg_reg_alu0);
8122 %}
// Signed 64-bit divide: quotient in rax, remainder clobbers rdx.
// cdqq_enc prologue special-cases MIN_LONG / -1 (see format text).
8124 instruct divL_rReg(rax_RegL rax, rdx_RegL rdx, no_rax_rdx_RegL div,
8125 rFlagsReg cr)
8126 %{
8127 match(Set rax (DivL rax div));
8128 effect(KILL rdx, KILL cr);
8130 ins_cost(30*100+10*100); // XXX
8131 format %{ "movq rdx, 0x8000000000000000\t# ldiv\n\t"
8132 "cmpq rax, rdx\n\t"
8133 "jne,s normal\n\t"
8134 "xorl rdx, rdx\n\t"
8135 "cmpq $div, -1\n\t"
8136 "je,s done\n"
8137 "normal: cdqq\n\t"
8138 "idivq $div\n"
8139 "done:" %}
8140 opcode(0xF7, 0x7); /* Opcode F7 /7 */
8141 ins_encode(cdqq_enc(div), REX_reg_wide(div), OpcP, reg_opc(div));
8142 ins_pipe(ialu_reg_reg_alu0);
8143 %}
8145 // Integer DIVMOD with Register, both quotient and mod results
// Fused 32-bit DIVMOD: one idivl producing both quotient (rax) and
// remainder (rdx). Same MIN_INT / -1 guard as divI_rReg.
8146 instruct divModI_rReg_divmod(rax_RegI rax, rdx_RegI rdx, no_rax_rdx_RegI div,
8147 rFlagsReg cr)
8148 %{
8149 match(DivModI rax div);
8150 effect(KILL cr);
8152 ins_cost(30*100+10*100); // XXX
8153 format %{ "cmpl rax, 0x80000000\t# idiv\n\t"
8154 "jne,s normal\n\t"
8155 "xorl rdx, rdx\n\t"
8156 "cmpl $div, -1\n\t"
8157 "je,s done\n"
8158 "normal: cdql\n\t"
8159 "idivl $div\n"
8160 "done:" %}
8161 opcode(0xF7, 0x7); /* Opcode F7 /7 */
8162 ins_encode(cdql_enc(div), REX_reg(div), OpcP, reg_opc(div));
8163 ins_pipe(pipe_slow);
8164 %}
8166 // Long DIVMOD with Register, both quotient and mod results
// Fused 64-bit DIVMOD: one idivq producing quotient (rax) and
// remainder (rdx). Same MIN_LONG / -1 guard as divL_rReg.
8167 instruct divModL_rReg_divmod(rax_RegL rax, rdx_RegL rdx, no_rax_rdx_RegL div,
8168 rFlagsReg cr)
8169 %{
8170 match(DivModL rax div);
8171 effect(KILL cr);
8173 ins_cost(30*100+10*100); // XXX
8174 format %{ "movq rdx, 0x8000000000000000\t# ldiv\n\t"
8175 "cmpq rax, rdx\n\t"
8176 "jne,s normal\n\t"
8177 "xorl rdx, rdx\n\t"
8178 "cmpq $div, -1\n\t"
8179 "je,s done\n"
8180 "normal: cdqq\n\t"
8181 "idivq $div\n"
8182 "done:" %}
8183 opcode(0xF7, 0x7); /* Opcode F7 /7 */
8184 ins_encode(cdqq_enc(div), REX_reg_wide(div), OpcP, reg_opc(div));
8185 ins_pipe(pipe_slow);
8186 %}
8188 //----------- DivL-By-Constant-Expansions--------------------------------------
8189 // DivI cases are handled by the compiler
8191 // Magic constant, reciprocal of 10
// Load the magic fixed-point reciprocal of 10 (0x6666666666666667) used
// by the divL_10 expansion. Expansion-only rule: no match, just DEF dst.
// FIX: the format string printed #0x666666666666667 (15 hex digits),
// disagreeing with the 16-digit constant actually encoded by load_immL.
8192 instruct loadConL_0x6666666666666667(rRegL dst)
8193 %{
8194 effect(DEF dst);
8196 format %{ "movq $dst, #0x6666666666666667\t# Used in div-by-10" %}
8197 ins_encode(load_immL(dst, 0x6666666666666667));
8198 ins_pipe(ialu_reg);
8199 %}
// Expansion-only helper for divL_10: one-operand imulq (F7 /5) leaving
// the high 64 bits of rax * src in rdx (dst); rax is clobbered.
8201 instruct mul_hi(rdx_RegL dst, no_rax_RegL src, rax_RegL rax, rFlagsReg cr)
8202 %{
8203 effect(DEF dst, USE src, USE_KILL rax, KILL cr);
8205 format %{ "imulq rdx:rax, rax, $src\t# Used in div-by-10" %}
8206 opcode(0xF7, 0x5); /* Opcode F7 /5 */
8207 ins_encode(REX_reg_wide(src), OpcP, reg_opc(src));
8208 ins_pipe(ialu_reg_reg_alu0);
8209 %}
// Expansion-only helper: sarq dst, 63 — extracts the sign bit of dst
// (0 or -1) for the divL_10 expansion. C1 /7 ib with imm 0x3F.
8211 instruct sarL_rReg_63(rRegL dst, rFlagsReg cr)
8212 %{
8213 effect(USE_DEF dst, KILL cr);
8215 format %{ "sarq $dst, #63\t# Used in div-by-10" %}
8216 opcode(0xC1, 0x7); /* C1 /7 ib */
8217 ins_encode(reg_opc_imm_wide(dst, 0x3F));
8218 ins_pipe(ialu_reg);
8219 %}
// Expansion-only helper: sarq dst, 2 — final shift of the high product
// in the divL_10 expansion.
8221 instruct sarL_rReg_2(rRegL dst, rFlagsReg cr)
8222 %{
8223 effect(USE_DEF dst, KILL cr);
8225 format %{ "sarq $dst, #2\t# Used in div-by-10" %}
8226 opcode(0xC1, 0x7); /* C1 /7 ib */
8227 ins_encode(reg_opc_imm_wide(dst, 0x2));
8228 ins_pipe(ialu_reg);
8229 %}
// Long divide by constant 10, expanded to a multiply by the fixed-point
// reciprocal 0x6666666666666667 plus shifts and a sign correction —
// avoids idivq entirely. NOTE: the expansion writes src (sarq src, 63),
// so src is consumed by this rule.
8231 instruct divL_10(rdx_RegL dst, no_rax_RegL src, immL10 div)
8232 %{
8233 match(Set dst (DivL src div));
8235 ins_cost((5+8)*100);
8236 expand %{
8237 rax_RegL rax; // Killed temp
8238 rFlagsReg cr; // Killed
8239 loadConL_0x6666666666666667(rax); // movq rax, 0x6666666666666667
8240 mul_hi(dst, src, rax, cr); // mulq rdx:rax <= rax * $src
8241 sarL_rReg_63(src, cr); // sarq src, 63
8242 sarL_rReg_2(dst, cr); // sarq rdx, 2
8243 subL_rReg(dst, src, cr); // subl rdx, src
8244 %}
8245 %}
8247 //-----------------------------------------------------------------------------
// Signed 32-bit remainder: idivl leaves the remainder in rdx (the
// result operand); rax (the dividend) is clobbered. Same MIN_INT / -1
// guard as divI_rReg (see format text).
8249 instruct modI_rReg(rdx_RegI rdx, rax_RegI rax, no_rax_rdx_RegI div,
8250 rFlagsReg cr)
8251 %{
8252 match(Set rdx (ModI rax div));
8253 effect(KILL rax, KILL cr);
8255 ins_cost(300); // XXX
8256 format %{ "cmpl rax, 0x80000000\t# irem\n\t"
8257 "jne,s normal\n\t"
8258 "xorl rdx, rdx\n\t"
8259 "cmpl $div, -1\n\t"
8260 "je,s done\n"
8261 "normal: cdql\n\t"
8262 "idivl $div\n"
8263 "done:" %}
8264 opcode(0xF7, 0x7); /* Opcode F7 /7 */
8265 ins_encode(cdql_enc(div), REX_reg(div), OpcP, reg_opc(div));
8266 ins_pipe(ialu_reg_reg_alu0);
8267 %}
// Signed 64-bit remainder: idivq leaves the remainder in rdx; rax is
// clobbered. Same MIN_LONG / -1 guard as divL_rReg.
8269 instruct modL_rReg(rdx_RegL rdx, rax_RegL rax, no_rax_rdx_RegL div,
8270 rFlagsReg cr)
8271 %{
8272 match(Set rdx (ModL rax div));
8273 effect(KILL rax, KILL cr);
8275 ins_cost(300); // XXX
8276 format %{ "movq rdx, 0x8000000000000000\t# lrem\n\t"
8277 "cmpq rax, rdx\n\t"
8278 "jne,s normal\n\t"
8279 "xorl rdx, rdx\n\t"
8280 "cmpq $div, -1\n\t"
8281 "je,s done\n"
8282 "normal: cdqq\n\t"
8283 "idivq $div\n"
8284 "done:" %}
8285 opcode(0xF7, 0x7); /* Opcode F7 /7 */
8286 ins_encode(cdqq_enc(div), REX_reg_wide(div), OpcP, reg_opc(div));
8287 ins_pipe(ialu_reg_reg_alu0);
8288 %}
8290 // Integer Shift Instructions
8291 // Shift Left by one
// 32-bit shift-left family: shift-by-1 (D1 /4), by 8-bit immediate
// (C1 /4 ib), and by CL (D3 /4), each in register and memory forms.
// For the CL forms the shift count is implicitly in rcx, so the shift
// operand is not encoded.
8292 instruct salI_rReg_1(rRegI dst, immI1 shift, rFlagsReg cr)
8293 %{
8294 match(Set dst (LShiftI dst shift));
8295 effect(KILL cr);
8297 format %{ "sall $dst, $shift" %}
8298 opcode(0xD1, 0x4); /* D1 /4 */
8299 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8300 ins_pipe(ialu_reg);
8301 %}
8303 // Shift Left by one
8304 instruct salI_mem_1(memory dst, immI1 shift, rFlagsReg cr)
8305 %{
8306 match(Set dst (StoreI dst (LShiftI (LoadI dst) shift)));
8307 effect(KILL cr);
8309 format %{ "sall $dst, $shift\t" %}
8310 opcode(0xD1, 0x4); /* D1 /4 */
8311 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
8312 ins_pipe(ialu_mem_imm);
8313 %}
8315 // Shift Left by 8-bit immediate
8316 instruct salI_rReg_imm(rRegI dst, immI8 shift, rFlagsReg cr)
8317 %{
8318 match(Set dst (LShiftI dst shift));
8319 effect(KILL cr);
8321 format %{ "sall $dst, $shift" %}
8322 opcode(0xC1, 0x4); /* C1 /4 ib */
8323 ins_encode(reg_opc_imm(dst, shift));
8324 ins_pipe(ialu_reg);
8325 %}
8327 // Shift Left by 8-bit immediate
8328 instruct salI_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
8329 %{
8330 match(Set dst (StoreI dst (LShiftI (LoadI dst) shift)));
8331 effect(KILL cr);
8333 format %{ "sall $dst, $shift" %}
8334 opcode(0xC1, 0x4); /* C1 /4 ib */
8335 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst), Con8or32(shift));
8336 ins_pipe(ialu_mem_imm);
8337 %}
8339 // Shift Left by variable
8340 instruct salI_rReg_CL(rRegI dst, rcx_RegI shift, rFlagsReg cr)
8341 %{
8342 match(Set dst (LShiftI dst shift));
8343 effect(KILL cr);
8345 format %{ "sall $dst, $shift" %}
8346 opcode(0xD3, 0x4); /* D3 /4 */
8347 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8348 ins_pipe(ialu_reg_reg);
8349 %}
8351 // Shift Left by variable
8352 instruct salI_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
8353 %{
8354 match(Set dst (StoreI dst (LShiftI (LoadI dst) shift)));
8355 effect(KILL cr);
8357 format %{ "sall $dst, $shift" %}
8358 opcode(0xD3, 0x4); /* D3 /4 */
8359 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
8360 ins_pipe(ialu_mem_reg);
8361 %}
// 32-bit arithmetic shift-right family (sign-propagating): by 1 (D1 /7),
// by 8-bit immediate (C1 /7 ib), and by CL (D3 /7), register and memory.
8363 // Arithmetic shift right by one
8364 instruct sarI_rReg_1(rRegI dst, immI1 shift, rFlagsReg cr)
8365 %{
8366 match(Set dst (RShiftI dst shift));
8367 effect(KILL cr);
8369 format %{ "sarl $dst, $shift" %}
8370 opcode(0xD1, 0x7); /* D1 /7 */
8371 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8372 ins_pipe(ialu_reg);
8373 %}
8375 // Arithmetic shift right by one
8376 instruct sarI_mem_1(memory dst, immI1 shift, rFlagsReg cr)
8377 %{
8378 match(Set dst (StoreI dst (RShiftI (LoadI dst) shift)));
8379 effect(KILL cr);
8381 format %{ "sarl $dst, $shift" %}
8382 opcode(0xD1, 0x7); /* D1 /7 */
8383 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
8384 ins_pipe(ialu_mem_imm);
8385 %}
8387 // Arithmetic Shift Right by 8-bit immediate
8388 instruct sarI_rReg_imm(rRegI dst, immI8 shift, rFlagsReg cr)
8389 %{
8390 match(Set dst (RShiftI dst shift));
8391 effect(KILL cr);
8393 format %{ "sarl $dst, $shift" %}
8394 opcode(0xC1, 0x7); /* C1 /7 ib */
8395 ins_encode(reg_opc_imm(dst, shift));
8396 ins_pipe(ialu_mem_imm);
8397 %}
8399 // Arithmetic Shift Right by 8-bit immediate
8400 instruct sarI_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
8401 %{
8402 match(Set dst (StoreI dst (RShiftI (LoadI dst) shift)));
8403 effect(KILL cr);
8405 format %{ "sarl $dst, $shift" %}
8406 opcode(0xC1, 0x7); /* C1 /7 ib */
8407 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst), Con8or32(shift));
8408 ins_pipe(ialu_mem_imm);
8409 %}
8411 // Arithmetic Shift Right by variable
8412 instruct sarI_rReg_CL(rRegI dst, rcx_RegI shift, rFlagsReg cr)
8413 %{
8414 match(Set dst (RShiftI dst shift));
8415 effect(KILL cr);
8417 format %{ "sarl $dst, $shift" %}
8418 opcode(0xD3, 0x7); /* D3 /7 */
8419 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8420 ins_pipe(ialu_reg_reg);
8421 %}
8423 // Arithmetic Shift Right by variable
8424 instruct sarI_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
8425 %{
8426 match(Set dst (StoreI dst (RShiftI (LoadI dst) shift)));
8427 effect(KILL cr);
8429 format %{ "sarl $dst, $shift" %}
8430 opcode(0xD3, 0x7); /* D3 /7 */
8431 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
8432 ins_pipe(ialu_mem_reg);
8433 %}
// 32-bit logical shift-right family (zero-filling): by 1 (D1 /5), by
// 8-bit immediate (C1 /5 ib), and by CL (D3 /5), register and memory.
8435 // Logical shift right by one
8436 instruct shrI_rReg_1(rRegI dst, immI1 shift, rFlagsReg cr)
8437 %{
8438 match(Set dst (URShiftI dst shift));
8439 effect(KILL cr);
8441 format %{ "shrl $dst, $shift" %}
8442 opcode(0xD1, 0x5); /* D1 /5 */
8443 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8444 ins_pipe(ialu_reg);
8445 %}
8447 // Logical shift right by one
8448 instruct shrI_mem_1(memory dst, immI1 shift, rFlagsReg cr)
8449 %{
8450 match(Set dst (StoreI dst (URShiftI (LoadI dst) shift)));
8451 effect(KILL cr);
8453 format %{ "shrl $dst, $shift" %}
8454 opcode(0xD1, 0x5); /* D1 /5 */
8455 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
8456 ins_pipe(ialu_mem_imm);
8457 %}
8459 // Logical Shift Right by 8-bit immediate
8460 instruct shrI_rReg_imm(rRegI dst, immI8 shift, rFlagsReg cr)
8461 %{
8462 match(Set dst (URShiftI dst shift));
8463 effect(KILL cr);
8465 format %{ "shrl $dst, $shift" %}
8466 opcode(0xC1, 0x5); /* C1 /5 ib */
8467 ins_encode(reg_opc_imm(dst, shift));
8468 ins_pipe(ialu_reg);
8469 %}
8471 // Logical Shift Right by 8-bit immediate
8472 instruct shrI_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
8473 %{
8474 match(Set dst (StoreI dst (URShiftI (LoadI dst) shift)));
8475 effect(KILL cr);
8477 format %{ "shrl $dst, $shift" %}
8478 opcode(0xC1, 0x5); /* C1 /5 ib */
8479 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst), Con8or32(shift));
8480 ins_pipe(ialu_mem_imm);
8481 %}
8483 // Logical Shift Right by variable
8484 instruct shrI_rReg_CL(rRegI dst, rcx_RegI shift, rFlagsReg cr)
8485 %{
8486 match(Set dst (URShiftI dst shift));
8487 effect(KILL cr);
8489 format %{ "shrl $dst, $shift" %}
8490 opcode(0xD3, 0x5); /* D3 /5 */
8491 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8492 ins_pipe(ialu_reg_reg);
8493 %}
8495 // Logical Shift Right by variable
8496 instruct shrI_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
8497 %{
8498 match(Set dst (StoreI dst (URShiftI (LoadI dst) shift)));
8499 effect(KILL cr);
8501 format %{ "shrl $dst, $shift" %}
8502 opcode(0xD3, 0x5); /* D3 /5 */
8503 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
8504 ins_pipe(ialu_mem_reg);
8505 %}
8507 // Long Shift Instructions
8508 // Shift Left by one
// 64-bit shift-left family: same D1/C1/D3 /4 encodings as the int
// versions but with REX.W (the *_wide encoders).
8509 instruct salL_rReg_1(rRegL dst, immI1 shift, rFlagsReg cr)
8510 %{
8511 match(Set dst (LShiftL dst shift));
8512 effect(KILL cr);
8514 format %{ "salq $dst, $shift" %}
8515 opcode(0xD1, 0x4); /* D1 /4 */
8516 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
8517 ins_pipe(ialu_reg);
8518 %}
8520 // Shift Left by one
8521 instruct salL_mem_1(memory dst, immI1 shift, rFlagsReg cr)
8522 %{
8523 match(Set dst (StoreL dst (LShiftL (LoadL dst) shift)));
8524 effect(KILL cr);
8526 format %{ "salq $dst, $shift" %}
8527 opcode(0xD1, 0x4); /* D1 /4 */
8528 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
8529 ins_pipe(ialu_mem_imm);
8530 %}
8532 // Shift Left by 8-bit immediate
8533 instruct salL_rReg_imm(rRegL dst, immI8 shift, rFlagsReg cr)
8534 %{
8535 match(Set dst (LShiftL dst shift));
8536 effect(KILL cr);
8538 format %{ "salq $dst, $shift" %}
8539 opcode(0xC1, 0x4); /* C1 /4 ib */
8540 ins_encode(reg_opc_imm_wide(dst, shift));
8541 ins_pipe(ialu_reg);
8542 %}
8544 // Shift Left by 8-bit immediate
8545 instruct salL_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
8546 %{
8547 match(Set dst (StoreL dst (LShiftL (LoadL dst) shift)));
8548 effect(KILL cr);
8550 format %{ "salq $dst, $shift" %}
8551 opcode(0xC1, 0x4); /* C1 /4 ib */
8552 ins_encode(REX_mem_wide(dst), OpcP,
8553 RM_opc_mem(secondary, dst), Con8or32(shift));
8554 ins_pipe(ialu_mem_imm);
8555 %}
8557 // Shift Left by variable
8558 instruct salL_rReg_CL(rRegL dst, rcx_RegI shift, rFlagsReg cr)
8559 %{
8560 match(Set dst (LShiftL dst shift));
8561 effect(KILL cr);
8563 format %{ "salq $dst, $shift" %}
8564 opcode(0xD3, 0x4); /* D3 /4 */
8565 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
8566 ins_pipe(ialu_reg_reg);
8567 %}
8569 // Shift Left by variable
8570 instruct salL_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
8571 %{
8572 match(Set dst (StoreL dst (LShiftL (LoadL dst) shift)));
8573 effect(KILL cr);
8575 format %{ "salq $dst, $shift" %}
8576 opcode(0xD3, 0x4); /* D3 /4 */
8577 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
8578 ins_pipe(ialu_mem_reg);
8579 %}
// 64-bit arithmetic shift-right family: D1/C1/D3 /7 with REX.W.
8581 // Arithmetic shift right by one
8582 instruct sarL_rReg_1(rRegL dst, immI1 shift, rFlagsReg cr)
8583 %{
8584 match(Set dst (RShiftL dst shift));
8585 effect(KILL cr);
8587 format %{ "sarq $dst, $shift" %}
8588 opcode(0xD1, 0x7); /* D1 /7 */
8589 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
8590 ins_pipe(ialu_reg);
8591 %}
8593 // Arithmetic shift right by one
8594 instruct sarL_mem_1(memory dst, immI1 shift, rFlagsReg cr)
8595 %{
8596 match(Set dst (StoreL dst (RShiftL (LoadL dst) shift)));
8597 effect(KILL cr);
8599 format %{ "sarq $dst, $shift" %}
8600 opcode(0xD1, 0x7); /* D1 /7 */
8601 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
8602 ins_pipe(ialu_mem_imm);
8603 %}
8605 // Arithmetic Shift Right by 8-bit immediate
8606 instruct sarL_rReg_imm(rRegL dst, immI8 shift, rFlagsReg cr)
8607 %{
8608 match(Set dst (RShiftL dst shift));
8609 effect(KILL cr);
8611 format %{ "sarq $dst, $shift" %}
8612 opcode(0xC1, 0x7); /* C1 /7 ib */
8613 ins_encode(reg_opc_imm_wide(dst, shift));
8614 ins_pipe(ialu_mem_imm);
8615 %}
8617 // Arithmetic Shift Right by 8-bit immediate
8618 instruct sarL_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
8619 %{
8620 match(Set dst (StoreL dst (RShiftL (LoadL dst) shift)));
8621 effect(KILL cr);
8623 format %{ "sarq $dst, $shift" %}
8624 opcode(0xC1, 0x7); /* C1 /7 ib */
8625 ins_encode(REX_mem_wide(dst), OpcP,
8626 RM_opc_mem(secondary, dst), Con8or32(shift));
8627 ins_pipe(ialu_mem_imm);
8628 %}
8630 // Arithmetic Shift Right by variable
8631 instruct sarL_rReg_CL(rRegL dst, rcx_RegI shift, rFlagsReg cr)
8632 %{
8633 match(Set dst (RShiftL dst shift));
8634 effect(KILL cr);
8636 format %{ "sarq $dst, $shift" %}
8637 opcode(0xD3, 0x7); /* D3 /7 */
8638 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
8639 ins_pipe(ialu_reg_reg);
8640 %}
8642 // Arithmetic Shift Right by variable
8643 instruct sarL_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
8644 %{
8645 match(Set dst (StoreL dst (RShiftL (LoadL dst) shift)));
8646 effect(KILL cr);
8648 format %{ "sarq $dst, $shift" %}
8649 opcode(0xD3, 0x7); /* D3 /7 */
8650 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
8651 ins_pipe(ialu_mem_reg);
8652 %}
// 64-bit logical shift-right family: D1/C1/D3 /5 with REX.W.
8654 // Logical shift right by one
8655 instruct shrL_rReg_1(rRegL dst, immI1 shift, rFlagsReg cr)
8656 %{
8657 match(Set dst (URShiftL dst shift));
8658 effect(KILL cr);
8660 format %{ "shrq $dst, $shift" %}
8661 opcode(0xD1, 0x5); /* D1 /5 */
8662 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst ));
8663 ins_pipe(ialu_reg);
8664 %}
8666 // Logical shift right by one
8667 instruct shrL_mem_1(memory dst, immI1 shift, rFlagsReg cr)
8668 %{
8669 match(Set dst (StoreL dst (URShiftL (LoadL dst) shift)));
8670 effect(KILL cr);
8672 format %{ "shrq $dst, $shift" %}
8673 opcode(0xD1, 0x5); /* D1 /5 */
8674 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
8675 ins_pipe(ialu_mem_imm);
8676 %}
8678 // Logical Shift Right by 8-bit immediate
8679 instruct shrL_rReg_imm(rRegL dst, immI8 shift, rFlagsReg cr)
8680 %{
8681 match(Set dst (URShiftL dst shift));
8682 effect(KILL cr);
8684 format %{ "shrq $dst, $shift" %}
8685 opcode(0xC1, 0x5); /* C1 /5 ib */
8686 ins_encode(reg_opc_imm_wide(dst, shift));
8687 ins_pipe(ialu_reg);
8688 %}
8690 // Logical Shift Right by 8-bit immediate
8691 instruct shrL_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
8692 %{
8693 match(Set dst (StoreL dst (URShiftL (LoadL dst) shift)));
8694 effect(KILL cr);
8696 format %{ "shrq $dst, $shift" %}
8697 opcode(0xC1, 0x5); /* C1 /5 ib */
8698 ins_encode(REX_mem_wide(dst), OpcP,
8699 RM_opc_mem(secondary, dst), Con8or32(shift));
8700 ins_pipe(ialu_mem_imm);
8701 %}
8703 // Logical Shift Right by variable
8704 instruct shrL_rReg_CL(rRegL dst, rcx_RegI shift, rFlagsReg cr)
8705 %{
8706 match(Set dst (URShiftL dst shift));
8707 effect(KILL cr);
8709 format %{ "shrq $dst, $shift" %}
8710 opcode(0xD3, 0x5); /* D3 /5 */
8711 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
8712 ins_pipe(ialu_reg_reg);
8713 %}
8715 // Logical Shift Right by variable
8716 instruct shrL_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
8717 %{
8718 match(Set dst (StoreL dst (URShiftL (LoadL dst) shift)));
8719 effect(KILL cr);
8721 format %{ "shrq $dst, $shift" %}
8722 opcode(0xD3, 0x5); /* D3 /5 */
8723 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
8724 ins_pipe(ialu_mem_reg);
8725 %}
8727 // Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
8728 // This idiom is used by the compiler for the i2b bytecode.
// i2b: (src << 24) >> 24 is recognized and emitted as a single
// sign-extending byte move, movsbl (0F BE /r).
8729 instruct i2b(rRegI dst, rRegI src, immI_24 twentyfour)
8730 %{
8731 match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));
8733 format %{ "movsbl $dst, $src\t# i2b" %}
8734 opcode(0x0F, 0xBE);
8735 ins_encode(REX_reg_breg(dst, src), OpcP, OpcS, reg_reg(dst, src));
8736 ins_pipe(ialu_reg_reg);
8737 %}
8739 // Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
8740 // This idiom is used by the compiler for the i2s bytecode.
// i2s: (src << 16) >> 16 is recognized and emitted as a single
// sign-extending word move, movswl (0F BF /r).
8741 instruct i2s(rRegI dst, rRegI src, immI_16 sixteen)
8742 %{
8743 match(Set dst (RShiftI (LShiftI src sixteen) sixteen));
8745 format %{ "movswl $dst, $src\t# i2s" %}
8746 opcode(0x0F, 0xBF);
8747 ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
8748 ins_pipe(ialu_reg_reg);
8749 %}
8751 // ROL/ROR instructions
8753 // ROL expand
// 32-bit rotate support. The rol*/ror* *_imm1/_imm8/_CL rules below are
// expansion-only helpers (effect, no match); the *_i1/_i8/_Var_* rules
// match the OrI-of-opposite-shifts idiom and expand into them.
8754 instruct rolI_rReg_imm1(rRegI dst, rFlagsReg cr) %{
8755 effect(KILL cr, USE_DEF dst);
8757 format %{ "roll $dst" %}
8758 opcode(0xD1, 0x0); /* Opcode D1 /0 */
8759 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8760 ins_pipe(ialu_reg);
8761 %}
8763 instruct rolI_rReg_imm8(rRegI dst, immI8 shift, rFlagsReg cr) %{
8764 effect(USE_DEF dst, USE shift, KILL cr);
8766 format %{ "roll $dst, $shift" %}
8767 opcode(0xC1, 0x0); /* Opcode C1 /0 ib */
8768 ins_encode( reg_opc_imm(dst, shift) );
8769 ins_pipe(ialu_reg);
8770 %}
8772 instruct rolI_rReg_CL(no_rcx_RegI dst, rcx_RegI shift, rFlagsReg cr)
8773 %{
8774 effect(USE_DEF dst, USE shift, KILL cr);
8776 format %{ "roll $dst, $shift" %}
8777 opcode(0xD3, 0x0); /* Opcode D3 /0 */
8778 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8779 ins_pipe(ialu_reg_reg);
8780 %}
8781 // end of ROL expand
8783 // Rotate Left by one
// Matches (dst << 1) | (dst >>> -1), i.e. rotate left by 1.
8784 instruct rolI_rReg_i1(rRegI dst, immI1 lshift, immI_M1 rshift, rFlagsReg cr)
8785 %{
8786 match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
8788 expand %{
8789 rolI_rReg_imm1(dst, cr);
8790 %}
8791 %}
8793 // Rotate Left by 8-bit immediate
// Predicate requires the two shift counts to sum to 0 mod 32, which is
// exactly when the Or-of-shifts is a rotate.
8794 instruct rolI_rReg_i8(rRegI dst, immI8 lshift, immI8 rshift, rFlagsReg cr)
8795 %{
8796 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
8797 match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
8799 expand %{
8800 rolI_rReg_imm8(dst, lshift, cr);
8801 %}
8802 %}
8804 // Rotate Left by variable
8805 instruct rolI_rReg_Var_C0(no_rcx_RegI dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
8806 %{
8807 match(Set dst (OrI (LShiftI dst shift) (URShiftI dst (SubI zero shift))));
8809 expand %{
8810 rolI_rReg_CL(dst, shift, cr);
8811 %}
8812 %}
8814 // Rotate Left by variable
8815 instruct rolI_rReg_Var_C32(no_rcx_RegI dst, rcx_RegI shift, immI_32 c32, rFlagsReg cr)
8816 %{
8817 match(Set dst (OrI (LShiftI dst shift) (URShiftI dst (SubI c32 shift))));
8819 expand %{
8820 rolI_rReg_CL(dst, shift, cr);
8821 %}
8822 %}
8824 // ROR expand
8825 instruct rorI_rReg_imm1(rRegI dst, rFlagsReg cr)
8826 %{
8827 effect(USE_DEF dst, KILL cr);
8829 format %{ "rorl $dst" %}
8830 opcode(0xD1, 0x1); /* D1 /1 */
8831 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8832 ins_pipe(ialu_reg);
8833 %}
8835 instruct rorI_rReg_imm8(rRegI dst, immI8 shift, rFlagsReg cr)
8836 %{
8837 effect(USE_DEF dst, USE shift, KILL cr);
8839 format %{ "rorl $dst, $shift" %}
8840 opcode(0xC1, 0x1); /* C1 /1 ib */
8841 ins_encode(reg_opc_imm(dst, shift));
8842 ins_pipe(ialu_reg);
8843 %}
8845 instruct rorI_rReg_CL(no_rcx_RegI dst, rcx_RegI shift, rFlagsReg cr)
8846 %{
8847 effect(USE_DEF dst, USE shift, KILL cr);
8849 format %{ "rorl $dst, $shift" %}
8850 opcode(0xD3, 0x1); /* D3 /1 */
8851 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8852 ins_pipe(ialu_reg_reg);
8853 %}
8854 // end of ROR expand
8856 // Rotate Right by one
8857 instruct rorI_rReg_i1(rRegI dst, immI1 rshift, immI_M1 lshift, rFlagsReg cr)
8858 %{
8859 match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
8861 expand %{
8862 rorI_rReg_imm1(dst, cr);
8863 %}
8864 %}
8866 // Rotate Right by 8-bit immediate
// Same sum-to-0-mod-32 predicate as the rotate-left-by-immediate rule.
8867 instruct rorI_rReg_i8(rRegI dst, immI8 rshift, immI8 lshift, rFlagsReg cr)
8868 %{
8869 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
8870 match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
8872 expand %{
8873 rorI_rReg_imm8(dst, rshift, cr);
8874 %}
8875 %}
8877 // Rotate Right by variable
8878 instruct rorI_rReg_Var_C0(no_rcx_RegI dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
8879 %{
8880 match(Set dst (OrI (URShiftI dst shift) (LShiftI dst (SubI zero shift))));
8882 expand %{
8883 rorI_rReg_CL(dst, shift, cr);
8884 %}
8885 %}
8887 // Rotate Right by variable
8888 instruct rorI_rReg_Var_C32(no_rcx_RegI dst, rcx_RegI shift, immI_32 c32, rFlagsReg cr)
8889 %{
8890 match(Set dst (OrI (URShiftI dst shift) (LShiftI dst (SubI c32 shift))));
8892 expand %{
8893 rorI_rReg_CL(dst, shift, cr);
8894 %}
8895 %}
8897 // for long rotate
8898 // ROL expand
// 64-bit rotate-left support: REX.W forms of the same D1/C1/D3 /0
// encodings. The *_imm1/_imm8/_CL rules are expansion-only helpers;
// the *_i1/_i8/_Var_* rules match OrL-of-opposite-shifts and expand
// into them (counts must sum to 0 mod 64 for the immediate form).
8899 instruct rolL_rReg_imm1(rRegL dst, rFlagsReg cr) %{
8900 effect(USE_DEF dst, KILL cr);
8902 format %{ "rolq $dst" %}
8903 opcode(0xD1, 0x0); /* Opcode D1 /0 */
8904 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
8905 ins_pipe(ialu_reg);
8906 %}
8908 instruct rolL_rReg_imm8(rRegL dst, immI8 shift, rFlagsReg cr) %{
8909 effect(USE_DEF dst, USE shift, KILL cr);
8911 format %{ "rolq $dst, $shift" %}
8912 opcode(0xC1, 0x0); /* Opcode C1 /0 ib */
8913 ins_encode( reg_opc_imm_wide(dst, shift) );
8914 ins_pipe(ialu_reg);
8915 %}
8917 instruct rolL_rReg_CL(no_rcx_RegL dst, rcx_RegI shift, rFlagsReg cr)
8918 %{
8919 effect(USE_DEF dst, USE shift, KILL cr);
8921 format %{ "rolq $dst, $shift" %}
8922 opcode(0xD3, 0x0); /* Opcode D3 /0 */
8923 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
8924 ins_pipe(ialu_reg_reg);
8925 %}
8926 // end of ROL expand
8928 // Rotate Left by one
8929 instruct rolL_rReg_i1(rRegL dst, immI1 lshift, immI_M1 rshift, rFlagsReg cr)
8930 %{
8931 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
8933 expand %{
8934 rolL_rReg_imm1(dst, cr);
8935 %}
8936 %}
8938 // Rotate Left by 8-bit immediate
8939 instruct rolL_rReg_i8(rRegL dst, immI8 lshift, immI8 rshift, rFlagsReg cr)
8940 %{
8941 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
8942 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
8944 expand %{
8945 rolL_rReg_imm8(dst, lshift, cr);
8946 %}
8947 %}
8949 // Rotate Left by variable
8950 instruct rolL_rReg_Var_C0(no_rcx_RegL dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
8951 %{
8952 match(Set dst (OrL (LShiftL dst shift) (URShiftL dst (SubI zero shift))));
8954 expand %{
8955 rolL_rReg_CL(dst, shift, cr);
8956 %}
8957 %}
8959 // Rotate Left by variable
8960 instruct rolL_rReg_Var_C64(no_rcx_RegL dst, rcx_RegI shift, immI_64 c64, rFlagsReg cr)
8961 %{
8962 match(Set dst (OrL (LShiftL dst shift) (URShiftL dst (SubI c64 shift))));
8964 expand %{
8965 rolL_rReg_CL(dst, shift, cr);
8966 %}
8967 %}
8969 // ROR expand
// Expand-only targets for 64-bit rotate-right (mirror of the ROL set above).
8970 instruct rorL_rReg_imm1(rRegL dst, rFlagsReg cr)
8971 %{
8972 effect(USE_DEF dst, KILL cr);
8974 format %{ "rorq $dst" %}
8975 opcode(0xD1, 0x1); /* D1 /1 */
8976 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
8977 ins_pipe(ialu_reg);
8978 %}
8980 instruct rorL_rReg_imm8(rRegL dst, immI8 shift, rFlagsReg cr)
8981 %{
8982 effect(USE_DEF dst, USE shift, KILL cr);
8984 format %{ "rorq $dst, $shift" %}
8985 opcode(0xC1, 0x1); /* C1 /1 ib */
8986 ins_encode(reg_opc_imm_wide(dst, shift));
8987 ins_pipe(ialu_reg);
8988 %}
// Variable count in CL; dst excluded from RCX.
8990 instruct rorL_rReg_CL(no_rcx_RegL dst, rcx_RegI shift, rFlagsReg cr)
8991 %{
8992 effect(USE_DEF dst, USE shift, KILL cr);
8994 format %{ "rorq $dst, $shift" %}
8995 opcode(0xD3, 0x1); /* D3 /1 */
8996 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
8997 ins_pipe(ialu_reg_reg);
8998 %}
8999 // end of ROR expand
9001 // Rotate Right by one
// (x >>> 1) | (x << -1) with the -1 count masked to 63.
9002 instruct rorL_rReg_i1(rRegL dst, immI1 rshift, immI_M1 lshift, rFlagsReg cr)
9003 %{
9004 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
9006 expand %{
9007 rorL_rReg_imm1(dst, cr);
9008 %}
9009 %}
9011 // Rotate Right by 8-bit immediate
// Predicate: counts must sum to 0 mod 64.
9012 instruct rorL_rReg_i8(rRegL dst, immI8 rshift, immI8 lshift, rFlagsReg cr)
9013 %{
9014 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
9015 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
9017 expand %{
9018 rorL_rReg_imm8(dst, rshift, cr);
9019 %}
9020 %}
9022 // Rotate Right by variable
// (x >>> s) | (x << (0 - s))
9023 instruct rorL_rReg_Var_C0(no_rcx_RegL dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
9024 %{
9025 match(Set dst (OrL (URShiftL dst shift) (LShiftL dst (SubI zero shift))));
9027 expand %{
9028 rorL_rReg_CL(dst, shift, cr);
9029 %}
9030 %}
9032 // Rotate Right by variable
// Same idiom written as (x >>> s) | (x << (64 - s)).
9033 instruct rorL_rReg_Var_C64(no_rcx_RegL dst, rcx_RegI shift, immI_64 c64, rFlagsReg cr)
9034 %{
9035 match(Set dst (OrL (URShiftL dst shift) (LShiftL dst (SubI c64 shift))));
9037 expand %{
9038 rorL_rReg_CL(dst, shift, cr);
9039 %}
9040 %}
9042 // Logical Instructions
9044 // Integer Logical Instructions
9046 // And Instructions
9047 // And Register with Register
9048 instruct andI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
9049 %{
9050 match(Set dst (AndI dst src));
9051 effect(KILL cr);
9053 format %{ "andl $dst, $src\t# int" %}
9054 opcode(0x23);
9055 ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
9056 ins_pipe(ialu_reg_reg);
9057 %}
9059 // And Register with Immediate 255
// Strength-reduced to movzbl (zero-extend low byte of dst into dst);
// does not touch flags, so no KILL cr effect.
9060 instruct andI_rReg_imm255(rRegI dst, immI_255 src)
9061 %{
9062 match(Set dst (AndI dst src));
9064 format %{ "movzbl $dst, $dst\t# int & 0xFF" %}
9065 opcode(0x0F, 0xB6);
9066 ins_encode(REX_reg_breg(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
9067 ins_pipe(ialu_reg);
9068 %}
9070 // And Register with Immediate 255 and promote to long
// movzbl writes a zero-extended 32-bit result, which on x86-64 also
// zeroes the upper 32 bits — so the ConvI2L comes for free.
9071 instruct andI2L_rReg_imm255(rRegL dst, rRegI src, immI_255 mask)
9072 %{
9073 match(Set dst (ConvI2L (AndI src mask)));
9075 format %{ "movzbl $dst, $src\t# int & 0xFF -> long" %}
9076 opcode(0x0F, 0xB6);
9077 ins_encode(REX_reg_breg(dst, src), OpcP, OpcS, reg_reg(dst, src));
9078 ins_pipe(ialu_reg);
9079 %}
9081 // And Register with Immediate 65535
// Strength-reduced to movzwl; no flags clobbered.
9082 instruct andI_rReg_imm65535(rRegI dst, immI_65535 src)
9083 %{
9084 match(Set dst (AndI dst src));
9086 format %{ "movzwl $dst, $dst\t# int & 0xFFFF" %}
9087 opcode(0x0F, 0xB7);
9088 ins_encode(REX_reg_reg(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
9089 ins_pipe(ialu_reg);
9090 %}
9092 // And Register with Immediate 65535 and promote to long
9093 instruct andI2L_rReg_imm65535(rRegL dst, rRegI src, immI_65535 mask)
9094 %{
9095 match(Set dst (ConvI2L (AndI src mask)));
9097 format %{ "movzwl $dst, $src\t# int & 0xFFFF -> long" %}
9098 opcode(0x0F, 0xB7);
9099 ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
9100 ins_pipe(ialu_reg);
9101 %}
9103 // And Register with Immediate
// Con8or32 picks the sign-extended imm8 form (83 /4) or imm32 form (81 /4).
9104 instruct andI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
9105 %{
9106 match(Set dst (AndI dst src));
9107 effect(KILL cr);
9109 format %{ "andl $dst, $src\t# int" %}
9110 opcode(0x81, 0x04); /* Opcode 81 /4 */
9111 ins_encode(OpcSErm(dst, src), Con8or32(src));
9112 ins_pipe(ialu_reg);
9113 %}
9115 // And Register with Memory
9116 instruct andI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
9117 %{
9118 match(Set dst (AndI dst (LoadI src)));
9119 effect(KILL cr);
9121 ins_cost(125);
9122 format %{ "andl $dst, $src\t# int" %}
9123 opcode(0x23);
9124 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
9125 ins_pipe(ialu_reg_mem);
9126 %}
9128 // And Memory with Register
// Read-modify-write form: matches load / and / store of the same address.
9129 instruct andI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
9130 %{
9131 match(Set dst (StoreI dst (AndI (LoadI dst) src)));
9132 effect(KILL cr);
9134 ins_cost(150);
9135 format %{ "andl $dst, $src\t# int" %}
9136 opcode(0x21); /* Opcode 21 /r */
9137 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
9138 ins_pipe(ialu_mem_reg);
9139 %}
9141 // And Memory with Immediate
9142 instruct andI_mem_imm(memory dst, immI src, rFlagsReg cr)
9143 %{
9144 match(Set dst (StoreI dst (AndI (LoadI dst) src)));
9145 effect(KILL cr);
9147 ins_cost(125);
9148 format %{ "andl $dst, $src\t# int" %}
9149 opcode(0x81, 0x4); /* Opcode 81 /4 id */
9150 ins_encode(REX_mem(dst), OpcSE(src),
9151 RM_opc_mem(secondary, dst), Con8or32(src));
9152 ins_pipe(ialu_mem_imm);
9153 %}
9155 // Or Instructions
9156 // Or Register with Register
9157 instruct orI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
9158 %{
9159 match(Set dst (OrI dst src));
9160 effect(KILL cr);
9162 format %{ "orl $dst, $src\t# int" %}
9163 opcode(0x0B);
9164 ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
9165 ins_pipe(ialu_reg_reg);
9166 %}
9168 // Or Register with Immediate
// /1 is the OR extension of the 81/83 immediate group.
9169 instruct orI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
9170 %{
9171 match(Set dst (OrI dst src));
9172 effect(KILL cr);
9174 format %{ "orl $dst, $src\t# int" %}
9175 opcode(0x81, 0x01); /* Opcode 81 /1 id */
9176 ins_encode(OpcSErm(dst, src), Con8or32(src));
9177 ins_pipe(ialu_reg);
9178 %}
9180 // Or Register with Memory
9181 instruct orI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
9182 %{
9183 match(Set dst (OrI dst (LoadI src)));
9184 effect(KILL cr);
9186 ins_cost(125);
9187 format %{ "orl $dst, $src\t# int" %}
9188 opcode(0x0B);
9189 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
9190 ins_pipe(ialu_reg_mem);
9191 %}
9193 // Or Memory with Register
// Read-modify-write form (load / or / store of the same address).
9194 instruct orI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
9195 %{
9196 match(Set dst (StoreI dst (OrI (LoadI dst) src)));
9197 effect(KILL cr);
9199 ins_cost(150);
9200 format %{ "orl $dst, $src\t# int" %}
9201 opcode(0x09); /* Opcode 09 /r */
9202 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
9203 ins_pipe(ialu_mem_reg);
9204 %}
9206 // Or Memory with Immediate
9207 instruct orI_mem_imm(memory dst, immI src, rFlagsReg cr)
9208 %{
9209 match(Set dst (StoreI dst (OrI (LoadI dst) src)));
9210 effect(KILL cr);
9212 ins_cost(125);
9213 format %{ "orl $dst, $src\t# int" %}
9214 opcode(0x81, 0x1); /* Opcode 81 /1 id */
9215 ins_encode(REX_mem(dst), OpcSE(src),
9216 RM_opc_mem(secondary, dst), Con8or32(src));
9217 ins_pipe(ialu_mem_imm);
9218 %}
9220 // Xor Instructions
9221 // Xor Register with Register
9222 instruct xorI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
9223 %{
9224 match(Set dst (XorI dst src));
9225 effect(KILL cr);
9227 format %{ "xorl $dst, $src\t# int" %}
9228 opcode(0x33);
9229 ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
9230 ins_pipe(ialu_reg_reg);
9231 %}
9233 // Xor Register with Immediate
// /6 is the XOR extension of the 81/83 immediate group.
9234 instruct xorI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
9235 %{
9236 match(Set dst (XorI dst src));
9237 effect(KILL cr);
9239 format %{ "xorl $dst, $src\t# int" %}
9240 opcode(0x81, 0x06); /* Opcode 81 /6 id */
9241 ins_encode(OpcSErm(dst, src), Con8or32(src));
9242 ins_pipe(ialu_reg);
9243 %}
9245 // Xor Register with Memory
9246 instruct xorI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
9247 %{
9248 match(Set dst (XorI dst (LoadI src)));
9249 effect(KILL cr);
9251 ins_cost(125);
9252 format %{ "xorl $dst, $src\t# int" %}
9253 opcode(0x33);
9254 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
9255 ins_pipe(ialu_reg_mem);
9256 %}
9258 // Xor Memory with Register
// Read-modify-write form (load / xor / store of the same address).
9259 instruct xorI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
9260 %{
9261 match(Set dst (StoreI dst (XorI (LoadI dst) src)));
9262 effect(KILL cr);
9264 ins_cost(150);
9265 format %{ "xorl $dst, $src\t# int" %}
9266 opcode(0x31); /* Opcode 31 /r */
9267 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
9268 ins_pipe(ialu_mem_reg);
9269 %}
9271 // Xor Memory with Immediate
9272 instruct xorI_mem_imm(memory dst, immI src, rFlagsReg cr)
9273 %{
9274 match(Set dst (StoreI dst (XorI (LoadI dst) src)));
9275 effect(KILL cr);
9277 ins_cost(125);
9278 format %{ "xorl $dst, $src\t# int" %}
9279 opcode(0x81, 0x6); /* Opcode 81 /6 id */
9280 ins_encode(REX_mem(dst), OpcSE(src),
9281 RM_opc_mem(secondary, dst), Con8or32(src));
9282 ins_pipe(ialu_mem_imm);
9283 %}
9286 // Long Logical Instructions
9288 // And Instructions
9289 // And Register with Register
// 64-bit form: the _wide REX encoder emits REX.W for a quadword operation.
9290 instruct andL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
9291 %{
9292 match(Set dst (AndL dst src));
9293 effect(KILL cr);
9295 format %{ "andq $dst, $src\t# long" %}
9296 opcode(0x23);
9297 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
9298 ins_pipe(ialu_reg_reg);
9299 %}
9301 // And Register with Immediate 255
// Strength-reduced to a zero-extending byte move: dst's low byte is
// zero-extended back into dst, so no AND and no flags clobber is needed.
// Fixed the format string: the encoding is reg_reg(dst, dst) — src is
// never read — so the disassembly comment must print $dst, $dst
// (consistent with andI_rReg_imm255 above).
9302 instruct andL_rReg_imm255(rRegL dst, immL_255 src)
9303 %{
9304 match(Set dst (AndL dst src));
9306 format %{ "movzbq $dst, $dst\t# long & 0xFF" %}
9307 opcode(0x0F, 0xB6);
9308 ins_encode(REX_reg_reg_wide(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
9309 ins_pipe(ialu_reg);
9310 %}
9312 // And Register with Immediate 65535
// Strength-reduced to a zero-extending word move; no flags clobbered.
// Fixed the dst register class: this is a long (AndL) operation, so dst
// must be rRegL, not rRegI (consistent with andL_rReg_imm255 and the
// other andL_* rules).
9313 instruct andL_rReg_imm65535(rRegL dst, immL_65535 src)
9314 %{
9315 match(Set dst (AndL dst src));
9317 format %{ "movzwq $dst, $dst\t# long & 0xFFFF" %}
9318 opcode(0x0F, 0xB7);
9319 ins_encode(REX_reg_reg_wide(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
9320 ins_pipe(ialu_reg);
9321 %}
9323 // And Register with Immediate
// immL32: only 32-bit sign-extendable immediates fit the 81 /4 encoding.
9324 instruct andL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
9325 %{
9326 match(Set dst (AndL dst src));
9327 effect(KILL cr);
9329 format %{ "andq $dst, $src\t# long" %}
9330 opcode(0x81, 0x04); /* Opcode 81 /4 */
9331 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
9332 ins_pipe(ialu_reg);
9333 %}
9335 // And Register with Memory
9336 instruct andL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
9337 %{
9338 match(Set dst (AndL dst (LoadL src)));
9339 effect(KILL cr);
9341 ins_cost(125);
9342 format %{ "andq $dst, $src\t# long" %}
9343 opcode(0x23);
9344 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
9345 ins_pipe(ialu_reg_mem);
9346 %}
9348 // And Memory with Register
// Read-modify-write form (load / and / store of the same address).
9349 instruct andL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
9350 %{
9351 match(Set dst (StoreL dst (AndL (LoadL dst) src)));
9352 effect(KILL cr);
9354 ins_cost(150);
9355 format %{ "andq $dst, $src\t# long" %}
9356 opcode(0x21); /* Opcode 21 /r */
9357 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
9358 ins_pipe(ialu_mem_reg);
9359 %}
9361 // And Memory with Immediate
9362 instruct andL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
9363 %{
9364 match(Set dst (StoreL dst (AndL (LoadL dst) src)));
9365 effect(KILL cr);
9367 ins_cost(125);
9368 format %{ "andq $dst, $src\t# long" %}
9369 opcode(0x81, 0x4); /* Opcode 81 /4 id */
9370 ins_encode(REX_mem_wide(dst), OpcSE(src),
9371 RM_opc_mem(secondary, dst), Con8or32(src));
9372 ins_pipe(ialu_mem_imm);
9373 %}
9375 // Or Instructions
9376 // Or Register with Register
// 64-bit OR variants; mirror the 32-bit orI_* rules with REX.W encoders.
9377 instruct orL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
9378 %{
9379 match(Set dst (OrL dst src));
9380 effect(KILL cr);
9382 format %{ "orq $dst, $src\t# long" %}
9383 opcode(0x0B);
9384 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
9385 ins_pipe(ialu_reg_reg);
9386 %}
9388 // Or Register with Immediate
9389 instruct orL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
9390 %{
9391 match(Set dst (OrL dst src));
9392 effect(KILL cr);
9394 format %{ "orq $dst, $src\t# long" %}
9395 opcode(0x81, 0x01); /* Opcode 81 /1 id */
9396 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
9397 ins_pipe(ialu_reg);
9398 %}
9400 // Or Register with Memory
9401 instruct orL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
9402 %{
9403 match(Set dst (OrL dst (LoadL src)));
9404 effect(KILL cr);
9406 ins_cost(125);
9407 format %{ "orq $dst, $src\t# long" %}
9408 opcode(0x0B);
9409 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
9410 ins_pipe(ialu_reg_mem);
9411 %}
9413 // Or Memory with Register
// Read-modify-write form (load / or / store of the same address).
9414 instruct orL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
9415 %{
9416 match(Set dst (StoreL dst (OrL (LoadL dst) src)));
9417 effect(KILL cr);
9419 ins_cost(150);
9420 format %{ "orq $dst, $src\t# long" %}
9421 opcode(0x09); /* Opcode 09 /r */
9422 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
9423 ins_pipe(ialu_mem_reg);
9424 %}
9426 // Or Memory with Immediate
9427 instruct orL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
9428 %{
9429 match(Set dst (StoreL dst (OrL (LoadL dst) src)));
9430 effect(KILL cr);
9432 ins_cost(125);
9433 format %{ "orq $dst, $src\t# long" %}
9434 opcode(0x81, 0x1); /* Opcode 81 /1 id */
9435 ins_encode(REX_mem_wide(dst), OpcSE(src),
9436 RM_opc_mem(secondary, dst), Con8or32(src));
9437 ins_pipe(ialu_mem_imm);
9438 %}
9440 // Xor Instructions
9441 // Xor Register with Register
// 64-bit XOR variants; mirror the 32-bit xorI_* rules with REX.W encoders.
9442 instruct xorL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
9443 %{
9444 match(Set dst (XorL dst src));
9445 effect(KILL cr);
9447 format %{ "xorq $dst, $src\t# long" %}
9448 opcode(0x33);
9449 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
9450 ins_pipe(ialu_reg_reg);
9451 %}
9453 // Xor Register with Immediate
9454 instruct xorL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
9455 %{
9456 match(Set dst (XorL dst src));
9457 effect(KILL cr);
9459 format %{ "xorq $dst, $src\t# long" %}
9460 opcode(0x81, 0x06); /* Opcode 81 /6 id */
9461 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
9462 ins_pipe(ialu_reg);
9463 %}
9465 // Xor Register with Memory
9466 instruct xorL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
9467 %{
9468 match(Set dst (XorL dst (LoadL src)));
9469 effect(KILL cr);
9471 ins_cost(125);
9472 format %{ "xorq $dst, $src\t# long" %}
9473 opcode(0x33);
9474 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
9475 ins_pipe(ialu_reg_mem);
9476 %}
9478 // Xor Memory with Register
// Read-modify-write form (load / xor / store of the same address).
9479 instruct xorL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
9480 %{
9481 match(Set dst (StoreL dst (XorL (LoadL dst) src)));
9482 effect(KILL cr);
9484 ins_cost(150);
9485 format %{ "xorq $dst, $src\t# long" %}
9486 opcode(0x31); /* Opcode 31 /r */
9487 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
9488 ins_pipe(ialu_mem_reg);
9489 %}
9491 // Xor Memory with Immediate
9492 instruct xorL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
9493 %{
9494 match(Set dst (StoreL dst (XorL (LoadL dst) src)));
9495 effect(KILL cr);
9497 ins_cost(125);
9498 format %{ "xorq $dst, $src\t# long" %}
9499 opcode(0x81, 0x6); /* Opcode 81 /6 id */
9500 ins_encode(REX_mem_wide(dst), OpcSE(src),
9501 RM_opc_mem(secondary, dst), Con8or32(src));
9502 ins_pipe(ialu_mem_imm);
9503 %}
9505 // Convert Int to Boolean
// dst = (src != 0) ? 1 : 0, materialized as test/setnz/movzbl.
9506 instruct convI2B(rRegI dst, rRegI src, rFlagsReg cr)
9507 %{
9508 match(Set dst (Conv2B src));
9509 effect(KILL cr);
9511 format %{ "testl $src, $src\t# ci2b\n\t"
9512 "setnz $dst\n\t"
9513 "movzbl $dst, $dst" %}
9514 ins_encode(REX_reg_reg(src, src), opc_reg_reg(0x85, src, src), // testl
9515 setNZ_reg(dst),
9516 REX_reg_breg(dst, dst), // movzbl
9517 Opcode(0x0F), Opcode(0xB6), reg_reg(dst, dst));
9518 ins_pipe(pipe_slow); // XXX
9519 %}
9521 // Convert Pointer to Boolean
// Same pattern as convI2B but with a 64-bit test for the pointer.
9522 instruct convP2B(rRegI dst, rRegP src, rFlagsReg cr)
9523 %{
9524 match(Set dst (Conv2B src));
9525 effect(KILL cr);
9527 format %{ "testq $src, $src\t# cp2b\n\t"
9528 "setnz $dst\n\t"
9529 "movzbl $dst, $dst" %}
9530 ins_encode(REX_reg_reg_wide(src, src), opc_reg_reg(0x85, src, src), // testq
9531 setNZ_reg(dst),
9532 REX_reg_breg(dst, dst), // movzbl
9533 Opcode(0x0F), Opcode(0xB6), reg_reg(dst, dst));
9534 ins_pipe(pipe_slow); // XXX
9535 %}
// dst = (p < q) ? -1 : 0: cmp/setlt/movzbl produces 0 or 1, neg turns 1 into -1.
9537 instruct cmpLTMask(rRegI dst, rRegI p, rRegI q, rFlagsReg cr)
9538 %{
9539 match(Set dst (CmpLTMask p q));
9540 effect(KILL cr);
9542 ins_cost(400); // XXX
9543 format %{ "cmpl $p, $q\t# cmpLTMask\n\t"
9544 "setlt $dst\n\t"
9545 "movzbl $dst, $dst\n\t"
9546 "negl $dst" %}
9547 ins_encode(REX_reg_reg(p, q), opc_reg_reg(0x3B, p, q), // cmpl
9548 setLT_reg(dst),
9549 REX_reg_breg(dst, dst), // movzbl
9550 Opcode(0x0F), Opcode(0xB6), reg_reg(dst, dst),
9551 neg_reg(dst));
9552 ins_pipe(pipe_slow);
9553 %}
// Special case q == 0: arithmetic right shift by 31 smears the sign bit,
// yielding -1 when dst < 0 and 0 otherwise, in a single instruction.
9555 instruct cmpLTMask0(rRegI dst, immI0 zero, rFlagsReg cr)
9556 %{
9557 match(Set dst (CmpLTMask dst zero));
9558 effect(KILL cr);
9560 ins_cost(100); // XXX
9561 format %{ "sarl $dst, #31\t# cmpLTMask0" %}
9562 opcode(0xC1, 0x7); /* C1 /7 ib */
9563 ins_encode(reg_opc_imm(dst, 0x1F));
9564 ins_pipe(ialu_reg);
9565 %}
// Fused conditional add: p = (p - q) + ((p < q) ? y : 0), using sbb to
// build the borrow mask in tmp without a branch.
9568 instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y,
9569 rRegI tmp,
9570 rFlagsReg cr)
9571 %{
9572 match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)));
9573 effect(TEMP tmp, KILL cr);
9575 ins_cost(400); // XXX
9576 format %{ "subl $p, $q\t# cadd_cmpLTMask1\n\t"
9577 "sbbl $tmp, $tmp\n\t"
9578 "andl $tmp, $y\n\t"
9579 "addl $p, $tmp" %}
9580 ins_encode(enc_cmpLTP(p, q, y, tmp));
9581 ins_pipe(pipe_cmplt);
9582 %}
9584 /* If I enable this, I encourage spilling in the inner loop of compress.
9585 instruct cadd_cmpLTMask_mem( rRegI p, rRegI q, memory y, rRegI tmp, rFlagsReg cr )
9586 %{
9587 match(Set p (AddI (AndI (CmpLTMask p q) (LoadI y)) (SubI p q)));
9588 effect( TEMP tmp, KILL cr );
9589 ins_cost(400);
9591 format %{ "SUB $p,$q\n\t"
9592 "SBB RCX,RCX\n\t"
9593 "AND RCX,$y\n\t"
9594 "ADD $p,RCX" %}
9595 ins_encode( enc_cmpLTP_mem(p,q,y,tmp) );
9596 %}
9597 */
9599 //---------- FP Instructions------------------------------------------------
// FP compares setting the condition codes. ucomiss/ucomisd sets PF on an
// unordered (NaN) comparison; the cmpfp_fixup stub then rewrites the saved
// flags on the stack so NaN compares as "less than" for Java semantics.
9601 instruct cmpF_cc_reg(rFlagsRegU cr, regF src1, regF src2)
9602 %{
9603 match(Set cr (CmpF src1 src2));
9605 ins_cost(145);
9606 format %{ "ucomiss $src1, $src2\n\t"
9607 "jnp,s exit\n\t"
9608 "pushfq\t# saw NaN, set CF\n\t"
9609 "andq [rsp], #0xffffff2b\n\t"
9610 "popfq\n"
9611 "exit: nop\t# avoid branch to branch" %}
9612 opcode(0x0F, 0x2E);
9613 ins_encode(REX_reg_reg(src1, src2), OpcP, OpcS, reg_reg(src1, src2),
9614 cmpfp_fixup);
9615 ins_pipe(pipe_slow);
9616 %}
9618 instruct cmpF_cc_mem(rFlagsRegU cr, regF src1, memory src2)
9619 %{
9620 match(Set cr (CmpF src1 (LoadF src2)));
9622 ins_cost(145);
9623 format %{ "ucomiss $src1, $src2\n\t"
9624 "jnp,s exit\n\t"
9625 "pushfq\t# saw NaN, set CF\n\t"
9626 "andq [rsp], #0xffffff2b\n\t"
9627 "popfq\n"
9628 "exit: nop\t# avoid branch to branch" %}
9629 opcode(0x0F, 0x2E);
9630 ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, reg_mem(src1, src2),
9631 cmpfp_fixup);
9632 ins_pipe(pipe_slow);
9633 %}
// Immediate operand: the constant is materialized in the constant table
// and compared via a RIP-relative load (load_immF).
9635 instruct cmpF_cc_imm(rFlagsRegU cr, regF src1, immF src2)
9636 %{
9637 match(Set cr (CmpF src1 src2));
9639 ins_cost(145);
9640 format %{ "ucomiss $src1, $src2\n\t"
9641 "jnp,s exit\n\t"
9642 "pushfq\t# saw NaN, set CF\n\t"
9643 "andq [rsp], #0xffffff2b\n\t"
9644 "popfq\n"
9645 "exit: nop\t# avoid branch to branch" %}
9646 opcode(0x0F, 0x2E);
9647 ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2),
9648 cmpfp_fixup);
9649 ins_pipe(pipe_slow);
9650 %}
// Double variants: 0x66 prefix selects ucomisd.
9652 instruct cmpD_cc_reg(rFlagsRegU cr, regD src1, regD src2)
9653 %{
9654 match(Set cr (CmpD src1 src2));
9656 ins_cost(145);
9657 format %{ "ucomisd $src1, $src2\n\t"
9658 "jnp,s exit\n\t"
9659 "pushfq\t# saw NaN, set CF\n\t"
9660 "andq [rsp], #0xffffff2b\n\t"
9661 "popfq\n"
9662 "exit: nop\t# avoid branch to branch" %}
9663 opcode(0x66, 0x0F, 0x2E);
9664 ins_encode(OpcP, REX_reg_reg(src1, src2), OpcS, OpcT, reg_reg(src1, src2),
9665 cmpfp_fixup);
9666 ins_pipe(pipe_slow);
9667 %}
9669 instruct cmpD_cc_mem(rFlagsRegU cr, regD src1, memory src2)
9670 %{
9671 match(Set cr (CmpD src1 (LoadD src2)));
9673 ins_cost(145);
9674 format %{ "ucomisd $src1, $src2\n\t"
9675 "jnp,s exit\n\t"
9676 "pushfq\t# saw NaN, set CF\n\t"
9677 "andq [rsp], #0xffffff2b\n\t"
9678 "popfq\n"
9679 "exit: nop\t# avoid branch to branch" %}
9680 opcode(0x66, 0x0F, 0x2E);
9681 ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, reg_mem(src1, src2),
9682 cmpfp_fixup);
9683 ins_pipe(pipe_slow);
9684 %}
9686 instruct cmpD_cc_imm(rFlagsRegU cr, regD src1, immD src2)
9687 %{
9688 match(Set cr (CmpD src1 src2));
9690 ins_cost(145);
9691 format %{ "ucomisd $src1, [$src2]\n\t"
9692 "jnp,s exit\n\t"
9693 "pushfq\t# saw NaN, set CF\n\t"
9694 "andq [rsp], #0xffffff2b\n\t"
9695 "popfq\n"
9696 "exit: nop\t# avoid branch to branch" %}
9697 opcode(0x66, 0x0F, 0x2E);
9698 ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2),
9699 cmpfp_fixup);
9700 ins_pipe(pipe_slow);
9701 %}
9703 // Compare into -1,0,1
// Three-way FP compare (CmpF3/CmpD3): dst starts at -1 (also the NaN and
// below cases), and the cmpfp3 tail sets 0/1 for equal/greater.
9704 instruct cmpF_reg(rRegI dst, regF src1, regF src2, rFlagsReg cr)
9705 %{
9706 match(Set dst (CmpF3 src1 src2));
9707 effect(KILL cr);
9709 ins_cost(275);
9710 format %{ "ucomiss $src1, $src2\n\t"
9711 "movl $dst, #-1\n\t"
9712 "jp,s done\n\t"
9713 "jb,s done\n\t"
9714 "setne $dst\n\t"
9715 "movzbl $dst, $dst\n"
9716 "done:" %}
9718 opcode(0x0F, 0x2E);
9719 ins_encode(REX_reg_reg(src1, src2), OpcP, OpcS, reg_reg(src1, src2),
9720 cmpfp3(dst));
9721 ins_pipe(pipe_slow);
9722 %}
9724 // Compare into -1,0,1
9725 instruct cmpF_mem(rRegI dst, regF src1, memory src2, rFlagsReg cr)
9726 %{
9727 match(Set dst (CmpF3 src1 (LoadF src2)));
9728 effect(KILL cr);
9730 ins_cost(275);
9731 format %{ "ucomiss $src1, $src2\n\t"
9732 "movl $dst, #-1\n\t"
9733 "jp,s done\n\t"
9734 "jb,s done\n\t"
9735 "setne $dst\n\t"
9736 "movzbl $dst, $dst\n"
9737 "done:" %}
9739 opcode(0x0F, 0x2E);
9740 ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, reg_mem(src1, src2),
9741 cmpfp3(dst));
9742 ins_pipe(pipe_slow);
9743 %}
9745 // Compare into -1,0,1
// Constant operand loaded RIP-relative from the constant table (load_immF).
9746 instruct cmpF_imm(rRegI dst, regF src1, immF src2, rFlagsReg cr)
9747 %{
9748 match(Set dst (CmpF3 src1 src2));
9749 effect(KILL cr);
9751 ins_cost(275);
9752 format %{ "ucomiss $src1, [$src2]\n\t"
9753 "movl $dst, #-1\n\t"
9754 "jp,s done\n\t"
9755 "jb,s done\n\t"
9756 "setne $dst\n\t"
9757 "movzbl $dst, $dst\n"
9758 "done:" %}
9760 opcode(0x0F, 0x2E);
9761 ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2),
9762 cmpfp3(dst));
9763 ins_pipe(pipe_slow);
9764 %}
9766 // Compare into -1,0,1
// Double variants: 0x66 prefix selects ucomisd.
9767 instruct cmpD_reg(rRegI dst, regD src1, regD src2, rFlagsReg cr)
9768 %{
9769 match(Set dst (CmpD3 src1 src2));
9770 effect(KILL cr);
9772 ins_cost(275);
9773 format %{ "ucomisd $src1, $src2\n\t"
9774 "movl $dst, #-1\n\t"
9775 "jp,s done\n\t"
9776 "jb,s done\n\t"
9777 "setne $dst\n\t"
9778 "movzbl $dst, $dst\n"
9779 "done:" %}
9781 opcode(0x66, 0x0F, 0x2E);
9782 ins_encode(OpcP, REX_reg_reg(src1, src2), OpcS, OpcT, reg_reg(src1, src2),
9783 cmpfp3(dst));
9784 ins_pipe(pipe_slow);
9785 %}
9787 // Compare into -1,0,1
9788 instruct cmpD_mem(rRegI dst, regD src1, memory src2, rFlagsReg cr)
9789 %{
9790 match(Set dst (CmpD3 src1 (LoadD src2)));
9791 effect(KILL cr);
9793 ins_cost(275);
9794 format %{ "ucomisd $src1, $src2\n\t"
9795 "movl $dst, #-1\n\t"
9796 "jp,s done\n\t"
9797 "jb,s done\n\t"
9798 "setne $dst\n\t"
9799 "movzbl $dst, $dst\n"
9800 "done:" %}
9802 opcode(0x66, 0x0F, 0x2E);
9803 ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, reg_mem(src1, src2),
9804 cmpfp3(dst));
9805 ins_pipe(pipe_slow);
9806 %}
9808 // Compare into -1,0,1
9809 instruct cmpD_imm(rRegI dst, regD src1, immD src2, rFlagsReg cr)
9810 %{
9811 match(Set dst (CmpD3 src1 src2));
9812 effect(KILL cr);
9814 ins_cost(275);
9815 format %{ "ucomisd $src1, [$src2]\n\t"
9816 "movl $dst, #-1\n\t"
9817 "jp,s done\n\t"
9818 "jb,s done\n\t"
9819 "setne $dst\n\t"
9820 "movzbl $dst, $dst\n"
9821 "done:" %}
9823 opcode(0x66, 0x0F, 0x2E);
9824 ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2),
9825 cmpfp3(dst));
9826 ins_pipe(pipe_slow);
9827 %}
// Scalar SSE arithmetic. Prefix 0xF3 selects the single-precision *ss
// forms, 0xF2 the double-precision *sd forms. Each operation comes in
// reg/reg, reg/mem, and reg/constant-table (load_imm*) variants.
9829 instruct addF_reg(regF dst, regF src)
9830 %{
9831 match(Set dst (AddF dst src));
9833 format %{ "addss $dst, $src" %}
9834 ins_cost(150); // XXX
9835 opcode(0xF3, 0x0F, 0x58);
9836 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
9837 ins_pipe(pipe_slow);
9838 %}
9840 instruct addF_mem(regF dst, memory src)
9841 %{
9842 match(Set dst (AddF dst (LoadF src)));
9844 format %{ "addss $dst, $src" %}
9845 ins_cost(150); // XXX
9846 opcode(0xF3, 0x0F, 0x58);
9847 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
9848 ins_pipe(pipe_slow);
9849 %}
9851 instruct addF_imm(regF dst, immF src)
9852 %{
9853 match(Set dst (AddF dst src));
9855 format %{ "addss $dst, [$src]" %}
9856 ins_cost(150); // XXX
9857 opcode(0xF3, 0x0F, 0x58);
9858 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
9859 ins_pipe(pipe_slow);
9860 %}
9862 instruct addD_reg(regD dst, regD src)
9863 %{
9864 match(Set dst (AddD dst src));
9866 format %{ "addsd $dst, $src" %}
9867 ins_cost(150); // XXX
9868 opcode(0xF2, 0x0F, 0x58);
9869 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
9870 ins_pipe(pipe_slow);
9871 %}
9873 instruct addD_mem(regD dst, memory src)
9874 %{
9875 match(Set dst (AddD dst (LoadD src)));
9877 format %{ "addsd $dst, $src" %}
9878 ins_cost(150); // XXX
9879 opcode(0xF2, 0x0F, 0x58);
9880 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
9881 ins_pipe(pipe_slow);
9882 %}
9884 instruct addD_imm(regD dst, immD src)
9885 %{
9886 match(Set dst (AddD dst src));
9888 format %{ "addsd $dst, [$src]" %}
9889 ins_cost(150); // XXX
9890 opcode(0xF2, 0x0F, 0x58);
9891 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
9892 ins_pipe(pipe_slow);
9893 %}
// Subtraction: opcode 0x5C (subss/subsd).
9895 instruct subF_reg(regF dst, regF src)
9896 %{
9897 match(Set dst (SubF dst src));
9899 format %{ "subss $dst, $src" %}
9900 ins_cost(150); // XXX
9901 opcode(0xF3, 0x0F, 0x5C);
9902 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
9903 ins_pipe(pipe_slow);
9904 %}
9906 instruct subF_mem(regF dst, memory src)
9907 %{
9908 match(Set dst (SubF dst (LoadF src)));
9910 format %{ "subss $dst, $src" %}
9911 ins_cost(150); // XXX
9912 opcode(0xF3, 0x0F, 0x5C);
9913 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
9914 ins_pipe(pipe_slow);
9915 %}
9917 instruct subF_imm(regF dst, immF src)
9918 %{
9919 match(Set dst (SubF dst src));
9921 format %{ "subss $dst, [$src]" %}
9922 ins_cost(150); // XXX
9923 opcode(0xF3, 0x0F, 0x5C);
9924 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
9925 ins_pipe(pipe_slow);
9926 %}
9928 instruct subD_reg(regD dst, regD src)
9929 %{
9930 match(Set dst (SubD dst src));
9932 format %{ "subsd $dst, $src" %}
9933 ins_cost(150); // XXX
9934 opcode(0xF2, 0x0F, 0x5C);
9935 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
9936 ins_pipe(pipe_slow);
9937 %}
9939 instruct subD_mem(regD dst, memory src)
9940 %{
9941 match(Set dst (SubD dst (LoadD src)));
9943 format %{ "subsd $dst, $src" %}
9944 ins_cost(150); // XXX
9945 opcode(0xF2, 0x0F, 0x5C);
9946 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
9947 ins_pipe(pipe_slow);
9948 %}
9950 instruct subD_imm(regD dst, immD src)
9951 %{
9952 match(Set dst (SubD dst src));
9954 format %{ "subsd $dst, [$src]" %}
9955 ins_cost(150); // XXX
9956 opcode(0xF2, 0x0F, 0x5C);
9957 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
9958 ins_pipe(pipe_slow);
9959 %}
// Scalar SSE multiply (0x59, mulss/mulsd) and divide (0x5E, divss/divsd),
// each with reg/reg, reg/mem, and reg/constant-table variants.
9961 instruct mulF_reg(regF dst, regF src)
9962 %{
9963 match(Set dst (MulF dst src));
9965 format %{ "mulss $dst, $src" %}
9966 ins_cost(150); // XXX
9967 opcode(0xF3, 0x0F, 0x59);
9968 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
9969 ins_pipe(pipe_slow);
9970 %}
9972 instruct mulF_mem(regF dst, memory src)
9973 %{
9974 match(Set dst (MulF dst (LoadF src)));
9976 format %{ "mulss $dst, $src" %}
9977 ins_cost(150); // XXX
9978 opcode(0xF3, 0x0F, 0x59);
9979 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
9980 ins_pipe(pipe_slow);
9981 %}
9983 instruct mulF_imm(regF dst, immF src)
9984 %{
9985 match(Set dst (MulF dst src));
9987 format %{ "mulss $dst, [$src]" %}
9988 ins_cost(150); // XXX
9989 opcode(0xF3, 0x0F, 0x59);
9990 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
9991 ins_pipe(pipe_slow);
9992 %}
9994 instruct mulD_reg(regD dst, regD src)
9995 %{
9996 match(Set dst (MulD dst src));
9998 format %{ "mulsd $dst, $src" %}
9999 ins_cost(150); // XXX
10000 opcode(0xF2, 0x0F, 0x59);
10001 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10002 ins_pipe(pipe_slow);
10003 %}
10005 instruct mulD_mem(regD dst, memory src)
10006 %{
10007 match(Set dst (MulD dst (LoadD src)));
10009 format %{ "mulsd $dst, $src" %}
10010 ins_cost(150); // XXX
10011 opcode(0xF2, 0x0F, 0x59);
10012 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10013 ins_pipe(pipe_slow);
10014 %}
10016 instruct mulD_imm(regD dst, immD src)
10017 %{
10018 match(Set dst (MulD dst src));
10020 format %{ "mulsd $dst, [$src]" %}
10021 ins_cost(150); // XXX
10022 opcode(0xF2, 0x0F, 0x59);
10023 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
10024 ins_pipe(pipe_slow);
10025 %}
10027 instruct divF_reg(regF dst, regF src)
10028 %{
10029 match(Set dst (DivF dst src));
10031 format %{ "divss $dst, $src" %}
10032 ins_cost(150); // XXX
10033 opcode(0xF3, 0x0F, 0x5E);
10034 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10035 ins_pipe(pipe_slow);
10036 %}
10038 instruct divF_mem(regF dst, memory src)
10039 %{
10040 match(Set dst (DivF dst (LoadF src)));
10042 format %{ "divss $dst, $src" %}
10043 ins_cost(150); // XXX
10044 opcode(0xF3, 0x0F, 0x5E);
10045 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10046 ins_pipe(pipe_slow);
10047 %}
10049 instruct divF_imm(regF dst, immF src)
10050 %{
10051 match(Set dst (DivF dst src));
10053 format %{ "divss $dst, [$src]" %}
10054 ins_cost(150); // XXX
10055 opcode(0xF3, 0x0F, 0x5E);
10056 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
10057 ins_pipe(pipe_slow);
10058 %}
10060 instruct divD_reg(regD dst, regD src)
10061 %{
10062 match(Set dst (DivD dst src));
10064 format %{ "divsd $dst, $src" %}
10065 ins_cost(150); // XXX
10066 opcode(0xF2, 0x0F, 0x5E);
10067 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10068 ins_pipe(pipe_slow);
10069 %}
10071 instruct divD_mem(regD dst, memory src)
10072 %{
10073 match(Set dst (DivD dst (LoadD src)));
10075 format %{ "divsd $dst, $src" %}
10076 ins_cost(150); // XXX
10077 opcode(0xF2, 0x0F, 0x5E);
10078 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10079 ins_pipe(pipe_slow);
10080 %}
10082 instruct divD_imm(regD dst, immD src)
10083 %{
10084 match(Set dst (DivD dst src));
10086 format %{ "divsd $dst, [$src]" %}
10087 ins_cost(150); // XXX
10088 opcode(0xF2, 0x0F, 0x5E);
10089 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
10090 ins_pipe(pipe_slow);
10091 %}
// ---- Scalar SSE square root (sqrtss = F3 0F 51, sqrtsd = F2 0F 51) ----
// Note the float forms match the pattern ConvD2F(SqrtD(ConvF2D(x)))
// — i.e. a float sqrt expressed through double sqrt in the ideal
// graph — and collapse it to a single sqrtss on the float value.
10093 instruct sqrtF_reg(regF dst, regF src)
10094 %{
10095 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
10097 format %{ "sqrtss $dst, $src" %}
10098 ins_cost(150); // XXX
10099 opcode(0xF3, 0x0F, 0x51);
10100 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10101 ins_pipe(pipe_slow);
10102 %}
10104 instruct sqrtF_mem(regF dst, memory src)
10105 %{
10106 match(Set dst (ConvD2F (SqrtD (ConvF2D (LoadF src)))));
10108 format %{ "sqrtss $dst, $src" %}
10109 ins_cost(150); // XXX
10110 opcode(0xF3, 0x0F, 0x51);
10111 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10112 ins_pipe(pipe_slow);
10113 %}
// Float sqrt of an in-memory float constant.
10115 instruct sqrtF_imm(regF dst, immF src)
10116 %{
10117 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
10119 format %{ "sqrtss $dst, [$src]" %}
10120 ins_cost(150); // XXX
10121 opcode(0xF3, 0x0F, 0x51);
10122 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
10123 ins_pipe(pipe_slow);
10124 %}
// Double sqrt: reg-reg, reg-mem, and constant-from-memory forms.
10126 instruct sqrtD_reg(regD dst, regD src)
10127 %{
10128 match(Set dst (SqrtD src));
10130 format %{ "sqrtsd $dst, $src" %}
10131 ins_cost(150); // XXX
10132 opcode(0xF2, 0x0F, 0x51);
10133 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10134 ins_pipe(pipe_slow);
10135 %}
10137 instruct sqrtD_mem(regD dst, memory src)
10138 %{
10139 match(Set dst (SqrtD (LoadD src)));
10141 format %{ "sqrtsd $dst, $src" %}
10142 ins_cost(150); // XXX
10143 opcode(0xF2, 0x0F, 0x51);
10144 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10145 ins_pipe(pipe_slow);
10146 %}
10148 instruct sqrtD_imm(regD dst, immD src)
10149 %{
10150 match(Set dst (SqrtD src));
10152 format %{ "sqrtsd $dst, [$src]" %}
10153 ins_cost(150); // XXX
10154 opcode(0xF2, 0x0F, 0x51);
10155 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
10156 ins_pipe(pipe_slow);
10157 %}
// ---- Absolute value and negation by sign-bit masking/flipping ----
// Per the format strings: abs ANDs away the sign bit
// (andps/andpd with a mask clearing the top bit), neg XORs the
// sign bit (xorps/xorpd).  Encoding details live in the
// absF/absD/negF/negD encoding classes.
10159 instruct absF_reg(regF dst)
10160 %{
10161 match(Set dst (AbsF dst));
10163 format %{ "andps $dst, [0x7fffffff]\t# abs float by sign masking" %}
10164 ins_encode(absF_encoding(dst));
10165 ins_pipe(pipe_slow);
10166 %}
10168 instruct absD_reg(regD dst)
10169 %{
10170 match(Set dst (AbsD dst));
10172 format %{ "andpd $dst, [0x7fffffffffffffff]\t"
10173 "# abs double by sign masking" %}
10174 ins_encode(absD_encoding(dst));
10175 ins_pipe(pipe_slow);
10176 %}
10178 instruct negF_reg(regF dst)
10179 %{
10180 match(Set dst (NegF dst));
10182 format %{ "xorps $dst, [0x80000000]\t# neg float by sign flipping" %}
10183 ins_encode(negF_encoding(dst));
10184 ins_pipe(pipe_slow);
10185 %}
10187 instruct negD_reg(regD dst)
10188 %{
10189 match(Set dst (NegD dst));
10191 format %{ "xorpd $dst, [0x8000000000000000]\t"
10192 "# neg double by sign flipping" %}
10193 ins_encode(negD_encoding(dst));
10194 ins_pipe(pipe_slow);
10195 %}
10197 // -----------Trig and Transcendental Instructions------------------------------
// These route the XMM operand through the x87 FPU: Push_SrcXD moves
// the double onto the FPU stack, the raw x87 opcode bytes do the
// work (fcos = D9 FF, fsin = D9 FE, fptan = D9 F2, fyl2x = D9 F1,
// fldlg2 = D9 EC, fldln2 = D9 ED), and Push_ResultXD moves the
// result back to the XMM register.
10198 instruct cosD_reg(regD dst) %{
10199 match(Set dst (CosD dst));
10201 format %{ "dcos $dst\n\t" %}
10202 opcode(0xD9, 0xFF);
10203 ins_encode( Push_SrcXD(dst), OpcP, OpcS, Push_ResultXD(dst) );
10204 ins_pipe( pipe_slow );
10205 %}
10207 instruct sinD_reg(regD dst) %{
10208 match(Set dst (SinD dst));
10210 format %{ "dsin $dst\n\t" %}
10211 opcode(0xD9, 0xFE);
10212 ins_encode( Push_SrcXD(dst), OpcP, OpcS, Push_ResultXD(dst) );
10213 ins_pipe( pipe_slow );
10214 %}
10216 instruct tanD_reg(regD dst) %{
10217 match(Set dst (TanD dst));
10219 format %{ "dtan $dst\n\t" %}
10220 ins_encode( Push_SrcXD(dst),
10221 Opcode(0xD9), Opcode(0xF2), //fptan
10222 Opcode(0xDD), Opcode(0xD8), //fstp st
10223 Push_ResultXD(dst) );
10224 ins_pipe( pipe_slow );
10225 %}
10227 instruct log10D_reg(regD dst) %{
10228 // The source and result Double operands in XMM registers
10229 match(Set dst (Log10D dst));
10230 // fldlg2 ; push log_10(2) on the FPU stack; full 80-bit number
10231 // fyl2x ; compute log_10(2) * log_2(x)
10232 format %{ "fldlg2\t\t\t#Log10\n\t"
10233 "fyl2x\t\t\t# Q=Log10*Log_2(x)\n\t"
10234 %}
10235 ins_encode(Opcode(0xD9), Opcode(0xEC), // fldlg2
10236 Push_SrcXD(dst),
10237 Opcode(0xD9), Opcode(0xF1), // fyl2x
10238 Push_ResultXD(dst));
10240 ins_pipe( pipe_slow );
10241 %}
10243 instruct logD_reg(regD dst) %{
10244 // The source and result Double operands in XMM registers
10245 match(Set dst (LogD dst));
10246 // fldln2 ; push log_e(2) on the FPU stack; full 80-bit number
10247 // fyl2x ; compute log_e(2) * log_2(x)
10248 format %{ "fldln2\t\t\t#Log_e\n\t"
10249 "fyl2x\t\t\t# Q=Log_e*Log_2(x)\n\t"
10250 %}
10251 ins_encode( Opcode(0xD9), Opcode(0xED), // fldln2
10252 Push_SrcXD(dst),
10253 Opcode(0xD9), Opcode(0xF1), // fyl2x
10254 Push_ResultXD(dst));
10255 ins_pipe( pipe_slow );
10256 %}
10260 //----------Arithmetic Conversion Instructions---------------------------------
// RoundFloat/RoundDouble are no-ops on x86-64: SSE arithmetic already
// produces correctly rounded 32/64-bit results, so both emit nothing.
10262 instruct roundFloat_nop(regF dst)
10263 %{
10264 match(Set dst (RoundFloat dst));
10266 ins_cost(0);
10267 ins_encode();
10268 ins_pipe(empty);
10269 %}
10271 instruct roundDouble_nop(regD dst)
10272 %{
10273 match(Set dst (RoundDouble dst));
10275 ins_cost(0);
10276 ins_encode();
10277 ins_pipe(empty);
10278 %}
// Float <-> double conversions: cvtss2sd (F3 0F 5A) widens,
// cvtsd2ss (F2 0F 5A) narrows; each with a reg and a folded-load form.
10280 instruct convF2D_reg_reg(regD dst, regF src)
10281 %{
10282 match(Set dst (ConvF2D src));
10284 format %{ "cvtss2sd $dst, $src" %}
10285 opcode(0xF3, 0x0F, 0x5A);
10286 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10287 ins_pipe(pipe_slow); // XXX
10288 %}
10290 instruct convF2D_reg_mem(regD dst, memory src)
10291 %{
10292 match(Set dst (ConvF2D (LoadF src)));
10294 format %{ "cvtss2sd $dst, $src" %}
10295 opcode(0xF3, 0x0F, 0x5A);
10296 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10297 ins_pipe(pipe_slow); // XXX
10298 %}
10300 instruct convD2F_reg_reg(regF dst, regD src)
10301 %{
10302 match(Set dst (ConvD2F src));
10304 format %{ "cvtsd2ss $dst, $src" %}
10305 opcode(0xF2, 0x0F, 0x5A);
10306 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10307 ins_pipe(pipe_slow); // XXX
10308 %}
10310 instruct convD2F_reg_mem(regF dst, memory src)
10311 %{
10312 match(Set dst (ConvD2F (LoadD src)));
10314 format %{ "cvtsd2ss $dst, $src" %}
10315 opcode(0xF2, 0x0F, 0x5A);
10316 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10317 ins_pipe(pipe_slow); // XXX
10318 %}
10320 // XXX do mem variants
// ---- Float/double -> integer/long conversions ----
// cvttss2si/cvttsd2si truncate toward zero and, per the format
// strings, return the sentinel 0x80000000 (or 0x80...00 for long) on
// overflow/NaN.  Each instruction compares the result against that
// sentinel and, if it matches, spills the source and calls the
// matching *_fixup stub to compute the Java-specified result.
// KILL cr: the compare clobbers the flags.
10321 instruct convF2I_reg_reg(rRegI dst, regF src, rFlagsReg cr)
10322 %{
10323 match(Set dst (ConvF2I src));
10324 effect(KILL cr);
10326 format %{ "cvttss2sil $dst, $src\t# f2i\n\t"
10327 "cmpl $dst, #0x80000000\n\t"
10328 "jne,s done\n\t"
10329 "subq rsp, #8\n\t"
10330 "movss [rsp], $src\n\t"
10331 "call f2i_fixup\n\t"
10332 "popq $dst\n"
10333 "done: "%}
10334 opcode(0xF3, 0x0F, 0x2C);
10335 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src),
10336 f2i_fixup(dst, src));
10337 ins_pipe(pipe_slow);
10338 %}
10340 instruct convF2L_reg_reg(rRegL dst, regF src, rFlagsReg cr)
10341 %{
10342 match(Set dst (ConvF2L src));
10343 effect(KILL cr);
10345 format %{ "cvttss2siq $dst, $src\t# f2l\n\t"
10346 "cmpq $dst, [0x8000000000000000]\n\t"
10347 "jne,s done\n\t"
10348 "subq rsp, #8\n\t"
10349 "movss [rsp], $src\n\t"
10350 "call f2l_fixup\n\t"
10351 "popq $dst\n"
10352 "done: "%}
10353 opcode(0xF3, 0x0F, 0x2C);
10354 ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src),
10355 f2l_fixup(dst, src));
10356 ins_pipe(pipe_slow);
10357 %}
10359 instruct convD2I_reg_reg(rRegI dst, regD src, rFlagsReg cr)
10360 %{
10361 match(Set dst (ConvD2I src));
10362 effect(KILL cr);
10364 format %{ "cvttsd2sil $dst, $src\t# d2i\n\t"
10365 "cmpl $dst, #0x80000000\n\t"
10366 "jne,s done\n\t"
10367 "subq rsp, #8\n\t"
10368 "movsd [rsp], $src\n\t"
10369 "call d2i_fixup\n\t"
10370 "popq $dst\n"
10371 "done: "%}
10372 opcode(0xF2, 0x0F, 0x2C);
10373 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src),
10374 d2i_fixup(dst, src));
10375 ins_pipe(pipe_slow);
10376 %}
10378 instruct convD2L_reg_reg(rRegL dst, regD src, rFlagsReg cr)
10379 %{
10380 match(Set dst (ConvD2L src));
10381 effect(KILL cr);
10383 format %{ "cvttsd2siq $dst, $src\t# d2l\n\t"
10384 "cmpq $dst, [0x8000000000000000]\n\t"
10385 "jne,s done\n\t"
10386 "subq rsp, #8\n\t"
10387 "movsd [rsp], $src\n\t"
10388 "call d2l_fixup\n\t"
10389 "popq $dst\n"
10390 "done: "%}
10391 opcode(0xF2, 0x0F, 0x2C);
10392 ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src),
10393 d2l_fixup(dst, src));
10394 ins_pipe(pipe_slow);
10395 %}
// ---- Integer/long -> float/double conversions ----
// Default forms use scalar cvtsi2ss/cvtsi2sd (0F 2A, with REX.W for
// long sources).  When UseXmmI2F/UseXmmI2D is set, the convXI2*
// variants instead move the int into an XMM register and use the
// packed cvtdq2ps/cvtdq2pd form (predicates keep the two sets of
// rules mutually exclusive).
10397 instruct convI2F_reg_reg(regF dst, rRegI src)
10398 %{
10399 predicate(!UseXmmI2F);
10400 match(Set dst (ConvI2F src));
10402 format %{ "cvtsi2ssl $dst, $src\t# i2f" %}
10403 opcode(0xF3, 0x0F, 0x2A);
10404 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10405 ins_pipe(pipe_slow); // XXX
10406 %}
10408 instruct convI2F_reg_mem(regF dst, memory src)
10409 %{
10410 match(Set dst (ConvI2F (LoadI src)));
10412 format %{ "cvtsi2ssl $dst, $src\t# i2f" %}
10413 opcode(0xF3, 0x0F, 0x2A);
10414 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10415 ins_pipe(pipe_slow); // XXX
10416 %}
10418 instruct convI2D_reg_reg(regD dst, rRegI src)
10419 %{
10420 predicate(!UseXmmI2D);
10421 match(Set dst (ConvI2D src));
10423 format %{ "cvtsi2sdl $dst, $src\t# i2d" %}
10424 opcode(0xF2, 0x0F, 0x2A);
10425 ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
10426 ins_pipe(pipe_slow); // XXX
10427 %}
10429 instruct convI2D_reg_mem(regD dst, memory src)
10430 %{
10431 match(Set dst (ConvI2D (LoadI src)));
10433 format %{ "cvtsi2sdl $dst, $src\t# i2d" %}
10434 opcode(0xF2, 0x0F, 0x2A);
10435 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10436 ins_pipe(pipe_slow); // XXX
10437 %}
10439 instruct convXI2F_reg(regF dst, rRegI src)
10440 %{
10441 predicate(UseXmmI2F);
10442 match(Set dst (ConvI2F src));
10444 format %{ "movdl $dst, $src\n\t"
10445 "cvtdq2psl $dst, $dst\t# i2f" %}
10446 ins_encode %{
10447 __ movdl($dst$$XMMRegister, $src$$Register);
10448 __ cvtdq2ps($dst$$XMMRegister, $dst$$XMMRegister);
10449 %}
10450 ins_pipe(pipe_slow); // XXX
10451 %}
10453 instruct convXI2D_reg(regD dst, rRegI src)
10454 %{
10455 predicate(UseXmmI2D);
10456 match(Set dst (ConvI2D src));
10458 format %{ "movdl $dst, $src\n\t"
10459 "cvtdq2pdl $dst, $dst\t# i2d" %}
10460 ins_encode %{
10461 __ movdl($dst$$XMMRegister, $src$$Register);
10462 __ cvtdq2pd($dst$$XMMRegister, $dst$$XMMRegister);
10463 %}
10464 ins_pipe(pipe_slow); // XXX
10465 %}
// Long -> float/double: same cvtsi2* opcodes with REX.W (the _wide
// REX encoders) to consume a 64-bit source.
10467 instruct convL2F_reg_reg(regF dst, rRegL src)
10468 %{
10469 match(Set dst (ConvL2F src));
10471 format %{ "cvtsi2ssq $dst, $src\t# l2f" %}
10472 opcode(0xF3, 0x0F, 0x2A);
10473 ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src));
10474 ins_pipe(pipe_slow); // XXX
10475 %}
10477 instruct convL2F_reg_mem(regF dst, memory src)
10478 %{
10479 match(Set dst (ConvL2F (LoadL src)));
10481 format %{ "cvtsi2ssq $dst, $src\t# l2f" %}
10482 opcode(0xF3, 0x0F, 0x2A);
10483 ins_encode(OpcP, REX_reg_mem_wide(dst, src), OpcS, OpcT, reg_mem(dst, src));
10484 ins_pipe(pipe_slow); // XXX
10485 %}
10487 instruct convL2D_reg_reg(regD dst, rRegL src)
10488 %{
10489 match(Set dst (ConvL2D src));
10491 format %{ "cvtsi2sdq $dst, $src\t# l2d" %}
10492 opcode(0xF2, 0x0F, 0x2A);
10493 ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src));
10494 ins_pipe(pipe_slow); // XXX
10495 %}
10497 instruct convL2D_reg_mem(regD dst, memory src)
10498 %{
10499 match(Set dst (ConvL2D (LoadL src)));
10501 format %{ "cvtsi2sdq $dst, $src\t# l2d" %}
10502 opcode(0xF2, 0x0F, 0x2A);
10503 ins_encode(OpcP, REX_reg_mem_wide(dst, src), OpcS, OpcT, reg_mem(dst, src));
10504 ins_pipe(pipe_slow); // XXX
10505 %}
// ---- int <-> long conversions ----
// Sign-extending i2l uses movslq (opcode 0x63 + REX.W).
10507 instruct convI2L_reg_reg(rRegL dst, rRegI src)
10508 %{
10509 match(Set dst (ConvI2L src));
10511 ins_cost(125);
10512 format %{ "movslq $dst, $src\t# i2l" %}
10513 opcode(0x63); // needs REX.W
10514 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst,src));
10515 ins_pipe(ialu_reg_reg);
10516 %}
// Disabled experiment: treat i2l of a provably non-negative int as a
// plain 32-bit copy.  Left commented out.
10518 // instruct convI2L_reg_reg_foo(rRegL dst, rRegI src)
10519 // %{
10520 // match(Set dst (ConvI2L src));
10521 // // predicate(_kids[0]->_leaf->as_Type()->type()->is_int()->_lo >= 0 &&
10522 // // _kids[0]->_leaf->as_Type()->type()->is_int()->_hi >= 0);
10523 // predicate(((const TypeNode*) n)->type()->is_long()->_hi ==
10524 // (unsigned int) ((const TypeNode*) n)->type()->is_long()->_hi &&
10525 // ((const TypeNode*) n)->type()->is_long()->_lo ==
10526 // (unsigned int) ((const TypeNode*) n)->type()->is_long()->_lo);
10528 // format %{ "movl $dst, $src\t# unsigned i2l" %}
10529 // ins_encode(enc_copy(dst, src));
10530 // // opcode(0x63); // needs REX.W
10531 // // ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst,src));
10532 // // ins_pipe(ialu_reg_reg);
10533 // %}
10535 instruct convI2L_reg_mem(rRegL dst, memory src)
10536 %{
10537 match(Set dst (ConvI2L (LoadI src)));
10539 format %{ "movslq $dst, $src\t# i2l" %}
10540 opcode(0x63); // needs REX.W
10541 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst,src));
10542 ins_pipe(ialu_reg_mem);
10543 %}
10545 // Zero-extend convert int to long
// A plain 32-bit movl suffices: on x86-64 a 32-bit register write
// clears the upper 32 bits of the destination, which implements the
// AndL-with-0xFFFFFFFF mask for free.
10546 instruct convI2L_reg_reg_zex(rRegL dst, rRegI src, immL_32bits mask)
10547 %{
10548 match(Set dst (AndL (ConvI2L src) mask));
10550 format %{ "movl $dst, $src\t# i2l zero-extend\n\t" %}
10551 ins_encode(enc_copy(dst, src));
10552 ins_pipe(ialu_reg_reg);
10553 %}
10555 // Zero-extend convert int to long
10556 instruct convI2L_reg_mem_zex(rRegL dst, memory src, immL_32bits mask)
10557 %{
10558 match(Set dst (AndL (ConvI2L (LoadI src)) mask));
10560 format %{ "movl $dst, $src\t# i2l zero-extend\n\t" %}
10561 opcode(0x8B);
10562 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
10563 ins_pipe(ialu_reg_mem);
10564 %}
// Zero the high half of a long; enc_copy_always forces the movl even
// if dst == src, since the upper-bit clearing is the whole point.
10566 instruct zerox_long_reg_reg(rRegL dst, rRegL src, immL_32bits mask)
10567 %{
10568 match(Set dst (AndL src mask));
10570 format %{ "movl $dst, $src\t# zero-extend long" %}
10571 ins_encode(enc_copy_always(dst, src));
10572 ins_pipe(ialu_reg_reg);
10573 %}
10575 instruct convL2I_reg_reg(rRegI dst, rRegL src)
10576 %{
10577 match(Set dst (ConvL2I src));
10579 format %{ "movl $dst, $src\t# l2i" %}
10580 ins_encode(enc_copy_always(dst, src));
10581 ins_pipe(ialu_reg_reg);
10582 %}
// ---- Bitwise moves between stack slots and registers ----
// MoveF2I/MoveI2F/MoveD2L/MoveL2D reinterpret bits without any
// conversion; these forms read the value from a stack slot.
10585 instruct MoveF2I_stack_reg(rRegI dst, stackSlotF src) %{
10586 match(Set dst (MoveF2I src));
10587 effect(DEF dst, USE src);
10589 ins_cost(125);
10590 format %{ "movl $dst, $src\t# MoveF2I_stack_reg" %}
10591 opcode(0x8B);
10592 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
10593 ins_pipe(ialu_reg_mem);
10594 %}
10596 instruct MoveI2F_stack_reg(regF dst, stackSlotI src) %{
10597 match(Set dst (MoveI2F src));
10598 effect(DEF dst, USE src);
10600 ins_cost(125);
10601 format %{ "movss $dst, $src\t# MoveI2F_stack_reg" %}
10602 opcode(0xF3, 0x0F, 0x10);
10603 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10604 ins_pipe(pipe_slow);
10605 %}
10607 instruct MoveD2L_stack_reg(rRegL dst, stackSlotD src) %{
10608 match(Set dst (MoveD2L src));
10609 effect(DEF dst, USE src);
10611 ins_cost(125);
10612 format %{ "movq $dst, $src\t# MoveD2L_stack_reg" %}
10613 opcode(0x8B);
10614 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
10615 ins_pipe(ialu_reg_mem);
10616 %}
// Two MoveL2D stack->XMM forms selected by UseXmmLoadAndClearUpper:
// movlpd loads only the low 64 bits (partial register), movsd also
// clears the upper half of the XMM register.
10618 instruct MoveL2D_stack_reg_partial(regD dst, stackSlotL src) %{
10619 predicate(!UseXmmLoadAndClearUpper);
10620 match(Set dst (MoveL2D src));
10621 effect(DEF dst, USE src);
10623 ins_cost(125);
10624 format %{ "movlpd $dst, $src\t# MoveL2D_stack_reg" %}
10625 opcode(0x66, 0x0F, 0x12);
10626 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10627 ins_pipe(pipe_slow);
10628 %}
10630 instruct MoveL2D_stack_reg(regD dst, stackSlotL src) %{
10631 predicate(UseXmmLoadAndClearUpper);
10632 match(Set dst (MoveL2D src));
10633 effect(DEF dst, USE src);
10635 ins_cost(125);
10636 format %{ "movsd $dst, $src\t# MoveL2D_stack_reg" %}
10637 opcode(0xF2, 0x0F, 0x10);
10638 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
10639 ins_pipe(pipe_slow);
10640 %}
// Bitwise moves from registers into stack slots (store direction);
// note src/dst are swapped in the encoders because the memory operand
// is the destination.
10643 instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{
10644 match(Set dst (MoveF2I src));
10645 effect(DEF dst, USE src);
10647 ins_cost(95); // XXX
10648 format %{ "movss $dst, $src\t# MoveF2I_reg_stack" %}
10649 opcode(0xF3, 0x0F, 0x11);
10650 ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
10651 ins_pipe(pipe_slow);
10652 %}
10654 instruct MoveI2F_reg_stack(stackSlotF dst, rRegI src) %{
10655 match(Set dst (MoveI2F src));
10656 effect(DEF dst, USE src);
10658 ins_cost(100);
10659 format %{ "movl $dst, $src\t# MoveI2F_reg_stack" %}
10660 opcode(0x89);
10661 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
10662 ins_pipe( ialu_mem_reg );
10663 %}
// Bitwise move of a double from an XMM register into a long stack
// slot (movsd store, no conversion).  The format previously labeled
// this "MoveL2D_reg_stack", which mislabeled the instruction in
// disassembly output; corrected to match the instruct name.
instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);

  ins_cost(95); // XXX
  format %{ "movsd   $dst, $src\t# MoveD2L_reg_stack" %}
  opcode(0xF2, 0x0F, 0x11);
  ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
  ins_pipe(pipe_slow);
%}
// Bitwise move of a long GPR into a double stack slot (64-bit movq).
10676 instruct MoveL2D_reg_stack(stackSlotD dst, rRegL src) %{
10677 match(Set dst (MoveL2D src));
10678 effect(DEF dst, USE src);
10680 ins_cost(100);
10681 format %{ "movq $dst, $src\t# MoveL2D_reg_stack" %}
10682 opcode(0x89);
10683 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
10684 ins_pipe(ialu_mem_reg);
10685 %}
// Register-to-register bitwise moves between XMM and GPR using
// movd/movdq (no stack round trip).
10687 instruct MoveF2I_reg_reg(rRegI dst, regF src) %{
10688 match(Set dst (MoveF2I src));
10689 effect(DEF dst, USE src);
10690 ins_cost(85);
10691 format %{ "movd $dst,$src\t# MoveF2I" %}
10692 ins_encode %{ __ movdl($dst$$Register, $src$$XMMRegister); %}
10693 ins_pipe( pipe_slow );
10694 %}
10696 instruct MoveD2L_reg_reg(rRegL dst, regD src) %{
10697 match(Set dst (MoveD2L src));
10698 effect(DEF dst, USE src);
10699 ins_cost(85);
10700 format %{ "movd $dst,$src\t# MoveD2L" %}
10701 ins_encode %{ __ movdq($dst$$Register, $src$$XMMRegister); %}
10702 ins_pipe( pipe_slow );
10703 %}
10705 // The next instructions have long latency and use Int unit. Set high cost.
10706 instruct MoveI2F_reg_reg(regF dst, rRegI src) %{
10707 match(Set dst (MoveI2F src));
10708 effect(DEF dst, USE src);
10709 ins_cost(300);
10710 format %{ "movd $dst,$src\t# MoveI2F" %}
10711 ins_encode %{ __ movdl($dst$$XMMRegister, $src$$Register); %}
10712 ins_pipe( pipe_slow );
10713 %}
10715 instruct MoveL2D_reg_reg(regD dst, rRegL src) %{
10716 match(Set dst (MoveL2D src));
10717 effect(DEF dst, USE src);
10718 ins_cost(300);
10719 format %{ "movd $dst,$src\t# MoveL2D" %}
10720 ins_encode %{ __ movdq($dst$$XMMRegister, $src$$Register); %}
10721 ins_pipe( pipe_slow );
10722 %}
// ---- Vector replicate: broadcast one scalar across an XMM lane ----
// Each element width has three sources: XMM register, GPR (via
// mov_i2x first), and the zero-immediate special case (pxor dst,dst).
10724 // Replicate scalar to packed byte (1 byte) values in xmm
10725 instruct Repl8B_reg(regD dst, regD src) %{
10726 match(Set dst (Replicate8B src));
10727 format %{ "MOVDQA $dst,$src\n\t"
10728 "PUNPCKLBW $dst,$dst\n\t"
10729 "PSHUFLW $dst,$dst,0x00\t! replicate8B" %}
10730 ins_encode( pshufd_8x8(dst, src));
10731 ins_pipe( pipe_slow );
10732 %}
10734 // Replicate scalar to packed byte (1 byte) values in xmm
10735 instruct Repl8B_rRegI(regD dst, rRegI src) %{
10736 match(Set dst (Replicate8B src));
10737 format %{ "MOVD $dst,$src\n\t"
10738 "PUNPCKLBW $dst,$dst\n\t"
10739 "PSHUFLW $dst,$dst,0x00\t! replicate8B" %}
10740 ins_encode( mov_i2x(dst, src), pshufd_8x8(dst, dst));
10741 ins_pipe( pipe_slow );
10742 %}
10744 // Replicate scalar zero to packed byte (1 byte) values in xmm
10745 instruct Repl8B_immI0(regD dst, immI0 zero) %{
10746 match(Set dst (Replicate8B zero));
10747 format %{ "PXOR $dst,$dst\t! replicate8B" %}
10748 ins_encode( pxor(dst, dst));
10749 ins_pipe( fpu_reg_reg );
10750 %}
10752 // Replicate scalar to packed short (2 byte) values in xmm
10753 instruct Repl4S_reg(regD dst, regD src) %{
10754 match(Set dst (Replicate4S src));
10755 format %{ "PSHUFLW $dst,$src,0x00\t! replicate4S" %}
10756 ins_encode( pshufd_4x16(dst, src));
10757 ins_pipe( fpu_reg_reg );
10758 %}
10760 // Replicate scalar to packed short (2 byte) values in xmm
10761 instruct Repl4S_rRegI(regD dst, rRegI src) %{
10762 match(Set dst (Replicate4S src));
10763 format %{ "MOVD $dst,$src\n\t"
10764 "PSHUFLW $dst,$dst,0x00\t! replicate4S" %}
10765 ins_encode( mov_i2x(dst, src), pshufd_4x16(dst, dst));
10766 ins_pipe( fpu_reg_reg );
10767 %}
10769 // Replicate scalar zero to packed short (2 byte) values in xmm
10770 instruct Repl4S_immI0(regD dst, immI0 zero) %{
10771 match(Set dst (Replicate4S zero));
10772 format %{ "PXOR $dst,$dst\t! replicate4S" %}
10773 ins_encode( pxor(dst, dst));
10774 ins_pipe( fpu_reg_reg );
10775 %}
10777 // Replicate scalar to packed char (2 byte) values in xmm
10778 instruct Repl4C_reg(regD dst, regD src) %{
10779 match(Set dst (Replicate4C src));
10780 format %{ "PSHUFLW $dst,$src,0x00\t! replicate4C" %}
10781 ins_encode( pshufd_4x16(dst, src));
10782 ins_pipe( fpu_reg_reg );
10783 %}
10785 // Replicate scalar to packed char (2 byte) values in xmm
10786 instruct Repl4C_rRegI(regD dst, rRegI src) %{
10787 match(Set dst (Replicate4C src));
10788 format %{ "MOVD $dst,$src\n\t"
10789 "PSHUFLW $dst,$dst,0x00\t! replicate4C" %}
10790 ins_encode( mov_i2x(dst, src), pshufd_4x16(dst, dst));
10791 ins_pipe( fpu_reg_reg );
10792 %}
10794 // Replicate scalar zero to packed char (2 byte) values in xmm
10795 instruct Repl4C_immI0(regD dst, immI0 zero) %{
10796 match(Set dst (Replicate4C zero));
10797 format %{ "PXOR $dst,$dst\t! replicate4C" %}
10798 ins_encode( pxor(dst, dst));
10799 ins_pipe( fpu_reg_reg );
10800 %}
10802 // Replicate scalar to packed integer (4 byte) values in xmm
10803 instruct Repl2I_reg(regD dst, regD src) %{
10804 match(Set dst (Replicate2I src));
10805 format %{ "PSHUFD $dst,$src,0x00\t! replicate2I" %}
10806 ins_encode( pshufd(dst, src, 0x00));
10807 ins_pipe( fpu_reg_reg );
10808 %}
10810 // Replicate scalar to packed integer (4 byte) values in xmm
10811 instruct Repl2I_rRegI(regD dst, rRegI src) %{
10812 match(Set dst (Replicate2I src));
10813 format %{ "MOVD $dst,$src\n\t"
10814 "PSHUFD $dst,$dst,0x00\t! replicate2I" %}
10815 ins_encode( mov_i2x(dst, src), pshufd(dst, dst, 0x00));
10816 ins_pipe( fpu_reg_reg );
10817 %}
10819 // Replicate scalar zero to packed integer (4 byte) values in xmm
10820 instruct Repl2I_immI0(regD dst, immI0 zero) %{
10821 match(Set dst (Replicate2I zero));
10822 format %{ "PXOR $dst,$dst\t! replicate2I" %}
10823 ins_encode( pxor(dst, dst));
10824 ins_pipe( fpu_reg_reg );
10825 %}
10827 // Replicate scalar to packed single precision floating point values in xmm
10828 instruct Repl2F_reg(regD dst, regD src) %{
10829 match(Set dst (Replicate2F src));
10830 format %{ "PSHUFD $dst,$src,0xe0\t! replicate2F" %}
10831 ins_encode( pshufd(dst, src, 0xe0));
10832 ins_pipe( fpu_reg_reg );
10833 %}
10835 // Replicate scalar to packed single precision floating point values in xmm
10836 instruct Repl2F_regF(regD dst, regF src) %{
10837 match(Set dst (Replicate2F src));
10838 format %{ "PSHUFD $dst,$src,0xe0\t! replicate2F" %}
10839 ins_encode( pshufd(dst, src, 0xe0));
10840 ins_pipe( fpu_reg_reg );
10841 %}
10843 // Replicate scalar to packed single precision floating point values in xmm
10844 instruct Repl2F_immF0(regD dst, immF0 zero) %{
10845 match(Set dst (Replicate2F zero));
10846 format %{ "PXOR $dst,$dst\t! replicate2F" %}
10847 ins_encode( pxor(dst, dst));
10848 ins_pipe( fpu_reg_reg );
10849 %}
10852 // =======================================================================
10853 // fast clearing of an array
// ClearArray: rcx = count of 8-byte words, rdi = base address,
// rax is zeroed then stored repeatedly with "rep stosq" (F3 48 AB).
// All three fixed registers plus flags are clobbered (USE_KILL/KILL).
10854 instruct rep_stos(rcx_RegL cnt, rdi_RegP base, rax_RegI zero, Universe dummy,
10855 rFlagsReg cr)
10856 %{
10857 match(Set dummy (ClearArray cnt base));
10858 effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);
10860 format %{ "xorl rax, rax\t# ClearArray:\n\t"
10861 "rep stosq\t# Store rax to *rdi++ while rcx--" %}
10862 ins_encode(opc_reg_reg(0x33, RAX, RAX), // xorl %eax, %eax
10863 Opcode(0xF3), Opcode(0x48), Opcode(0xAB)); // rep REX_W stos
10864 ins_pipe(pipe_slow);
10865 %}
// String comparison stub; operands are pinned to fixed registers
// (rdi/rsi inputs, result in rcx) and rax/rbx are scratch (KILLed).
// The heavy lifting is in the enc_String_Compare encoding class.
10867 instruct string_compare(rdi_RegP str1, rsi_RegP str2, rax_RegI tmp1,
10868 rbx_RegI tmp2, rcx_RegI result, rFlagsReg cr)
10869 %{
10870 match(Set result (StrComp str1 str2));
10871 effect(USE_KILL str1, USE_KILL str2, KILL tmp1, KILL tmp2, KILL cr);
10872 //ins_cost(300);
10874 format %{ "String Compare $str1, $str2 -> $result // XXX KILL RAX, RBX" %}
10875 ins_encode( enc_String_Compare() );
10876 ins_pipe( pipe_slow );
10877 %}
10879 //----------Control Flow Instructions------------------------------------------
10880 // Signed compare Instructions
10882 // XXX more variants!!
// 32-bit signed compares producing rFlagsReg: reg-reg (3B /r),
// reg-imm (81 /7 with 8- or 32-bit immediate), reg-mem, and the
// test-against-zero forms which use TEST (85 /r, F7 /0) so no
// immediate or load of zero is needed.
10883 instruct compI_rReg(rFlagsReg cr, rRegI op1, rRegI op2)
10884 %{
10885 match(Set cr (CmpI op1 op2));
10886 effect(DEF cr, USE op1, USE op2);
10888 format %{ "cmpl $op1, $op2" %}
10889 opcode(0x3B); /* Opcode 3B /r */
10890 ins_encode(REX_reg_reg(op1, op2), OpcP, reg_reg(op1, op2));
10891 ins_pipe(ialu_cr_reg_reg);
10892 %}
10894 instruct compI_rReg_imm(rFlagsReg cr, rRegI op1, immI op2)
10895 %{
10896 match(Set cr (CmpI op1 op2));
10898 format %{ "cmpl $op1, $op2" %}
10899 opcode(0x81, 0x07); /* Opcode 81 /7 */
10900 ins_encode(OpcSErm(op1, op2), Con8or32(op2));
10901 ins_pipe(ialu_cr_reg_imm);
10902 %}
10904 instruct compI_rReg_mem(rFlagsReg cr, rRegI op1, memory op2)
10905 %{
10906 match(Set cr (CmpI op1 (LoadI op2)));
10908 ins_cost(500); // XXX
10909 format %{ "cmpl $op1, $op2" %}
10910 opcode(0x3B); /* Opcode 3B /r */
10911 ins_encode(REX_reg_mem(op1, op2), OpcP, reg_mem(op1, op2));
10912 ins_pipe(ialu_cr_reg_mem);
10913 %}
// Compare against zero: "testl src, src" sets ZF/SF from src itself.
10915 instruct testI_reg(rFlagsReg cr, rRegI src, immI0 zero)
10916 %{
10917 match(Set cr (CmpI src zero));
10919 format %{ "testl $src, $src" %}
10920 opcode(0x85);
10921 ins_encode(REX_reg_reg(src, src), OpcP, reg_reg(src, src));
10922 ins_pipe(ialu_cr_reg_imm);
10923 %}
// (src & con) == 0 folded into a single TEST with immediate (F7 /0).
10925 instruct testI_reg_imm(rFlagsReg cr, rRegI src, immI con, immI0 zero)
10926 %{
10927 match(Set cr (CmpI (AndI src con) zero));
10929 format %{ "testl $src, $con" %}
10930 opcode(0xF7, 0x00);
10931 ins_encode(REX_reg(src), OpcP, reg_opc(src), Con32(con));
10932 ins_pipe(ialu_cr_reg_imm);
10933 %}
10935 instruct testI_reg_mem(rFlagsReg cr, rRegI src, memory mem, immI0 zero)
10936 %{
10937 match(Set cr (CmpI (AndI src (LoadI mem)) zero));
10939 format %{ "testl $src, $mem" %}
10940 opcode(0x85);
10941 ins_encode(REX_reg_mem(src, mem), OpcP, reg_mem(src, mem));
10942 ins_pipe(ialu_cr_reg_mem);
10943 %}
10945 // Unsigned compare Instructions; really, same as signed except they
10946 // produce an rFlagsRegU instead of rFlagsReg.
// Same CMP/TEST encodings as the signed forms above; only the flags
// register class differs, steering the matcher to unsigned branches.
10947 instruct compU_rReg(rFlagsRegU cr, rRegI op1, rRegI op2)
10948 %{
10949 match(Set cr (CmpU op1 op2));
10951 format %{ "cmpl $op1, $op2\t# unsigned" %}
10952 opcode(0x3B); /* Opcode 3B /r */
10953 ins_encode(REX_reg_reg(op1, op2), OpcP, reg_reg(op1, op2));
10954 ins_pipe(ialu_cr_reg_reg);
10955 %}
10957 instruct compU_rReg_imm(rFlagsRegU cr, rRegI op1, immI op2)
10958 %{
10959 match(Set cr (CmpU op1 op2));
10961 format %{ "cmpl $op1, $op2\t# unsigned" %}
10962 opcode(0x81,0x07); /* Opcode 81 /7 */
10963 ins_encode(OpcSErm(op1, op2), Con8or32(op2));
10964 ins_pipe(ialu_cr_reg_imm);
10965 %}
10967 instruct compU_rReg_mem(rFlagsRegU cr, rRegI op1, memory op2)
10968 %{
10969 match(Set cr (CmpU op1 (LoadI op2)));
10971 ins_cost(500); // XXX
10972 format %{ "cmpl $op1, $op2\t# unsigned" %}
10973 opcode(0x3B); /* Opcode 3B /r */
10974 ins_encode(REX_reg_mem(op1, op2), OpcP, reg_mem(op1, op2));
10975 ins_pipe(ialu_cr_reg_mem);
10976 %}
10978 // // // Cisc-spilled version of cmpU_rReg
10979 // //instruct compU_mem_rReg(rFlagsRegU cr, memory op1, rRegI op2)
10980 // //%{
10981 // // match(Set cr (CmpU (LoadI op1) op2));
10982 // //
10983 // // format %{ "CMPu $op1,$op2" %}
10984 // // ins_cost(500);
10985 // // opcode(0x39); /* Opcode 39 /r */
10986 // // ins_encode( OpcP, reg_mem( op1, op2) );
10987 // //%}
10989 instruct testU_reg(rFlagsRegU cr, rRegI src, immI0 zero)
10990 %{
10991 match(Set cr (CmpU src zero));
10993 format %{ "testl $src, $src\t# unsigned" %}
10994 opcode(0x85);
10995 ins_encode(REX_reg_reg(src, src), OpcP, reg_reg(src, src));
10996 ins_pipe(ialu_cr_reg_imm);
10997 %}
// ---- Full-width (64-bit) pointer compares ----
// Pointer compares are unsigned (rFlagsRegU) and use the _wide REX
// encoders for 64-bit operands.
10999 instruct compP_rReg(rFlagsRegU cr, rRegP op1, rRegP op2)
11000 %{
11001 match(Set cr (CmpP op1 op2));
11003 format %{ "cmpq $op1, $op2\t# ptr" %}
11004 opcode(0x3B); /* Opcode 3B /r */
11005 ins_encode(REX_reg_reg_wide(op1, op2), OpcP, reg_reg(op1, op2));
11006 ins_pipe(ialu_cr_reg_reg);
11007 %}
11009 instruct compP_rReg_mem(rFlagsRegU cr, rRegP op1, memory op2)
11010 %{
11011 match(Set cr (CmpP op1 (LoadP op2)));
11013 ins_cost(500); // XXX
11014 format %{ "cmpq $op1, $op2\t# ptr" %}
11015 opcode(0x3B); /* Opcode 3B /r */
11016 ins_encode(REX_reg_mem_wide(op1, op2), OpcP, reg_mem(op1, op2));
11017 ins_pipe(ialu_cr_reg_mem);
11018 %}
11020 // // // Cisc-spilled version of cmpP_rReg
11021 // //instruct compP_mem_rReg(rFlagsRegU cr, memory op1, rRegP op2)
11022 // //%{
11023 // // match(Set cr (CmpP (LoadP op1) op2));
11024 // //
11025 // // format %{ "CMPu $op1,$op2" %}
11026 // // ins_cost(500);
11027 // // opcode(0x39); /* Opcode 39 /r */
11028 // // ins_encode( OpcP, reg_mem( op1, op2) );
11029 // //%}
11031 // XXX this is generalized by compP_rReg_mem???
11032 // Compare raw pointer (used in out-of-heap check).
11033 // Only works because non-oop pointers must be raw pointers
11034 // and raw pointers have no anti-dependencies.
11035 instruct compP_mem_rReg(rFlagsRegU cr, rRegP op1, memory op2)
11036 %{
11037 predicate(!n->in(2)->in(2)->bottom_type()->isa_oop_ptr());
11038 match(Set cr (CmpP op1 (LoadP op2)));
11040 format %{ "cmpq $op1, $op2\t# raw ptr" %}
11041 opcode(0x3B); /* Opcode 3B /r */
11042 ins_encode(REX_reg_mem_wide(op1, op2), OpcP, reg_mem(op1, op2));
11043 ins_pipe(ialu_cr_reg_mem);
11044 %}
11046 // This will generate a signed flags result. This should be OK since
11047 // any compare to a zero should be eq/neq.
11048 instruct testP_reg(rFlagsReg cr, rRegP src, immP0 zero)
11049 %{
11050 match(Set cr (CmpP src zero));
11052 format %{ "testq $src, $src\t# ptr" %}
11053 opcode(0x85);
11054 ins_encode(REX_reg_reg_wide(src, src), OpcP, reg_reg(src, src));
11055 ins_pipe(ialu_cr_reg_imm);
11056 %}
11058 // This will generate a signed flags result. This should be OK since
11059 // any compare to a zero should be eq/neq.
// TEST mem, 0xFFFFFFFF (F7 /0) sets ZF exactly when the loaded value
// is zero — an in-memory null check without loading into a register.
11060 instruct testP_reg_mem(rFlagsReg cr, memory op, immP0 zero)
11061 %{
11062 match(Set cr (CmpP (LoadP op) zero));
11064 ins_cost(500); // XXX
11065 format %{ "testq $op, 0xffffffffffffffff\t# ptr" %}
11066 opcode(0xF7); /* Opcode F7 /0 */
11067 ins_encode(REX_mem_wide(op),
11068 OpcP, RM_opc_mem(0x00, op), Con_d32(0xFFFFFFFF));
11069 ins_pipe(ialu_cr_reg_imm);
11070 %}
// Compare two compressed-oop (narrow) registers; a 32-bit cmpl suffices
// since narrow oops are 32 bits wide.
11073 instruct compN_rReg(rFlagsRegU cr, rRegN op1, rRegN op2)
11074 %{
11075 match(Set cr (CmpN op1 op2));
11077 format %{ "cmpl $op1, $op2\t# compressed ptr" %}
11078 ins_encode %{ __ cmpl(as_Register($op1$$reg), as_Register($op2$$reg)); %}
11079 ins_pipe(ialu_cr_reg_reg);
11080 %}
// Compare a compressed-oop register against a narrow oop loaded from
// memory; the LoadN folds into the 32-bit cmpl.
// Fix: format referenced the literal text "mem" instead of the operand
// "$mem", so PrintOptoAssembly did not show the actual address operand.
11082 instruct compN_rReg_mem(rFlagsRegU cr, rRegN src, memory mem)
11083 %{
11084 match(Set cr (CmpN src (LoadN mem)));
11086 ins_cost(500); // XXX
11087 format %{ "cmpl $src, $mem\t# compressed ptr" %}
11088 ins_encode %{
11089 Address adr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
11090 __ cmpl(as_Register($src$$reg), adr);
11091 %}
11092 ins_pipe(ialu_cr_reg_mem);
11093 %}
// Null test of a compressed-oop register: 32-bit TEST r,r sets ZF iff the
// narrow oop is zero (null).
11095 instruct testN_reg(rFlagsReg cr, rRegN src, immN0 zero) %{
11096 match(Set cr (CmpN src zero));
11098 format %{ "testl $src, $src\t# compressed ptr" %}
11099 ins_encode %{ __ testl($src$$Register, $src$$Register); %}
11100 ins_pipe(ialu_cr_reg_imm);
11101 %}
// Null test of a narrow oop stored in memory.
// NOTE(review): the format claims 'testl' but the encoding emits
// CMP $mem, 0xFFFFFFFF, whose ZF is set iff the loaded value equals -1,
// NOT iff it is zero as the match rule (CmpN (LoadN mem) immN0) suggests.
// A TEST with an all-ones mask (as testP_reg_mem does) or CMP against 0
// would set ZF iff null -- confirm which instruction/flag result is
// actually intended before relying on eq/ne consumers of these flags.
11103 instruct testN_reg_mem(rFlagsReg cr, memory mem, immN0 zero)
11104 %{
11105 match(Set cr (CmpN (LoadN mem) zero));
11107 ins_cost(500); // XXX
11108 format %{ "testl $mem, 0xffffffff\t# compressed ptr" %}
11109 ins_encode %{
11110 Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
11111 __ cmpl(addr, (int)0xFFFFFFFF);
11112 %}
11113 ins_pipe(ialu_cr_reg_mem);
11114 %}
11116 // Yanked all unsigned pointer compare operations.
11117 // Pointer compares are done with CmpP which is already unsigned.
// 64-bit long compare: register vs. register (signed flags).
11119 instruct compL_rReg(rFlagsReg cr, rRegL op1, rRegL op2)
11120 %{
11121 match(Set cr (CmpL op1 op2));
11123 format %{ "cmpq $op1, $op2" %}
11124 opcode(0x3B); /* Opcode 3B /r */
11125 ins_encode(REX_reg_reg_wide(op1, op2), OpcP, reg_reg(op1, op2));
11126 ins_pipe(ialu_cr_reg_reg);
11127 %}
// Long compare: register vs. 32-bit sign-extended immediate.
11129 instruct compL_rReg_imm(rFlagsReg cr, rRegL op1, immL32 op2)
11130 %{
11131 match(Set cr (CmpL op1 op2));
11133 format %{ "cmpq $op1, $op2" %}
11134 opcode(0x81, 0x07); /* Opcode 81 /7 */
11135 ins_encode(OpcSErm_wide(op1, op2), Con8or32(op2));
11136 ins_pipe(ialu_cr_reg_imm);
11137 %}
// Long compare: register vs. memory; the LoadL folds into the cmpq.
11139 instruct compL_rReg_mem(rFlagsReg cr, rRegL op1, memory op2)
11140 %{
11141 match(Set cr (CmpL op1 (LoadL op2)));
11143 ins_cost(500); // XXX
11144 format %{ "cmpq $op1, $op2" %}
11145 opcode(0x3B); /* Opcode 3B /r */
11146 ins_encode(REX_reg_mem_wide(op1, op2), OpcP, reg_mem(op1, op2));
11147 ins_pipe(ialu_cr_reg_mem);
11148 %}
// Long zero test: TEST r,r sets ZF iff the register is zero.
11150 instruct testL_reg(rFlagsReg cr, rRegL src, immL0 zero)
11151 %{
11152 match(Set cr (CmpL src zero));
11154 format %{ "testq $src, $src" %}
11155 opcode(0x85);
11156 ins_encode(REX_reg_reg_wide(src, src), OpcP, reg_reg(src, src));
11157 ins_pipe(ialu_cr_reg_imm);
11158 %}
// Fused mask-and-test: matches (src & con) == 0 without materializing
// the AndL result; TEST r, imm32 sets flags directly.
11160 instruct testL_reg_imm(rFlagsReg cr, rRegL src, immL32 con, immL0 zero)
11161 %{
11162 match(Set cr (CmpL (AndL src con) zero));
11164 format %{ "testq $src, $con\t# long" %}
11165 opcode(0xF7, 0x00);
11166 ins_encode(REX_reg_wide(src), OpcP, reg_opc(src), Con32(con));
11167 ins_pipe(ialu_cr_reg_imm);
11168 %}
// Fused mask-and-test against memory: (src & [mem]) == 0 via TEST r,m.
11170 instruct testL_reg_mem(rFlagsReg cr, rRegL src, memory mem, immL0 zero)
11171 %{
11172 match(Set cr (CmpL (AndL src (LoadL mem)) zero));
11174 format %{ "testq $src, $mem" %}
11175 opcode(0x85);
11176 ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
11177 ins_pipe(ialu_cr_reg_mem);
11178 %}
11180 // Manifest a CmpL result in an integer register. Very painful.
11181 // This is the test to avoid.
// Produces the three-way result of CmpL3 in $dst: -1 if src1 < src2,
// 0 if equal, +1 if greater (per the format's cmp/set/movzbl sequence).
// Clobbers the condition codes, hence KILL flags.
11182 instruct cmpL3_reg_reg(rRegI dst, rRegL src1, rRegL src2, rFlagsReg flags)
11183 %{
11184 match(Set dst (CmpL3 src1 src2));
11185 effect(KILL flags);
11187 ins_cost(275); // XXX
11188 format %{ "cmpq $src1, $src2\t# CmpL3\n\t"
11189 "movl $dst, -1\n\t"
11190 "jl,s done\n\t"
11191 "setne $dst\n\t"
11192 "movzbl $dst, $dst\n\t"
11193 "done:" %}
11194 ins_encode(cmpl3_flag(src1, src2, dst));
11195 ins_pipe(pipe_slow);
11196 %}
11198 //----------Max and Min--------------------------------------------------------
11199 // Min Instructions
// Conditional move used only by minI_rReg's expand below (no match rule):
// CMOVG (0F 4F) copies $src into $dst when the preceding compare set
// 'greater', i.e. keeps the smaller of the two values.
11201 instruct cmovI_reg_g(rRegI dst, rRegI src, rFlagsReg cr)
11202 %{
11203 effect(USE_DEF dst, USE src, USE cr);
11205 format %{ "cmovlgt $dst, $src\t# min" %}
11206 opcode(0x0F, 0x4F);
11207 ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
11208 ins_pipe(pipe_cmov_reg);
11209 %}
// MinI expands into compare + cmovg: dst = (dst > src) ? src : dst.
11212 instruct minI_rReg(rRegI dst, rRegI src)
11213 %{
11214 match(Set dst (MinI dst src));
11216 ins_cost(200);
11217 expand %{
11218 rFlagsReg cr;
11219 compI_rReg(cr, dst, src);
11220 cmovI_reg_g(dst, src, cr);
11221 %}
11222 %}
// Conditional move used only by maxI_rReg's expand below (no match rule):
// CMOVL (0F 4C) copies $src into $dst when the preceding compare set
// 'less', i.e. keeps the larger of the two values.
11224 instruct cmovI_reg_l(rRegI dst, rRegI src, rFlagsReg cr)
11225 %{
11226 effect(USE_DEF dst, USE src, USE cr);
11228 format %{ "cmovllt $dst, $src\t# max" %}
11229 opcode(0x0F, 0x4C);
11230 ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
11231 ins_pipe(pipe_cmov_reg);
11232 %}
// MaxI expands into compare + cmovl: dst = (dst < src) ? src : dst.
11235 instruct maxI_rReg(rRegI dst, rRegI src)
11236 %{
11237 match(Set dst (MaxI dst src));
11239 ins_cost(200);
11240 expand %{
11241 rFlagsReg cr;
11242 compI_rReg(cr, dst, src);
11243 cmovI_reg_l(dst, src, cr);
11244 %}
11245 %}
11247 // ============================================================================
11248 // Branch Instructions
11250 // Jump Direct - Label defines a relative address from JMP+1
// Unconditional jump, long form: 5-byte E9 rel32.
11251 instruct jmpDir(label labl)
11252 %{
11253 match(Goto);
11254 effect(USE labl);
11256 ins_cost(300);
11257 format %{ "jmp $labl" %}
11258 size(5);
11259 opcode(0xE9);
11260 ins_encode(OpcP, Lbl(labl));
11261 ins_pipe(pipe_jmp);
// Marks this instruction as pc-relative for the ADLC/compiler.
11262 ins_pc_relative(1);
11263 %}
11265 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Conditional branch on signed flags, long form: 6-byte 0F 8x rel32.
11266 instruct jmpCon(cmpOp cop, rFlagsReg cr, label labl)
11267 %{
11268 match(If cop cr);
11269 effect(USE labl);
11271 ins_cost(300);
11272 format %{ "j$cop $labl" %}
11273 size(6);
11274 opcode(0x0F, 0x80);
11275 ins_encode(Jcc(cop, labl));
11276 ins_pipe(pipe_jcc);
11277 ins_pc_relative(1);
11278 %}
11280 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Same encoding as jmpCon, but matches the back-branch of a counted loop.
11281 instruct jmpLoopEnd(cmpOp cop, rFlagsReg cr, label labl)
11282 %{
11283 match(CountedLoopEnd cop cr);
11284 effect(USE labl);
11286 ins_cost(300);
11287 format %{ "j$cop $labl\t# loop end" %}
11288 size(6);
11289 opcode(0x0F, 0x80);
11290 ins_encode(Jcc(cop, labl));
11291 ins_pipe(pipe_jcc);
11292 ins_pc_relative(1);
11293 %}
11295 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Counted-loop back-branch on unsigned comparison flags (rFlagsRegU).
11296 instruct jmpLoopEndU(cmpOpU cop, rFlagsRegU cmp, label labl)
11297 %{
11298 match(CountedLoopEnd cop cmp);
11299 effect(USE labl);
11301 ins_cost(300);
11302 format %{ "j$cop,u $labl\t# loop end" %}
11303 size(6);
11304 opcode(0x0F, 0x80);
11305 ins_encode(Jcc(cop, labl));
11306 ins_pipe(pipe_jcc);
11307 ins_pc_relative(1);
11308 %}
11310 // Jump Direct Conditional - using unsigned comparison
// Conditional branch on unsigned comparison flags, long form.
11311 instruct jmpConU(cmpOpU cop, rFlagsRegU cmp, label labl)
11312 %{
11313 match(If cop cmp);
11314 effect(USE labl);
11316 ins_cost(300);
11317 format %{ "j$cop,u $labl" %}
11318 size(6);
11319 opcode(0x0F, 0x80);
11320 ins_encode(Jcc(cop, labl));
11321 ins_pipe(pipe_jcc);
11322 ins_pc_relative(1);
11323 %}
11325 // ============================================================================
11326 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary
11327 // superklass array for an instance of the superklass. Set a hidden
11328 // internal cache on a hit (cache is checked with exposed code in
11329 // gen_subtype_check()). Return NZ for a miss or zero for a hit. The
11330 // encoding ALSO sets flags.
// Register usage is fixed (rsi=sub, rax=super, rdi=result, rcx=scan count)
// to match enc_PartialSubtypeCheck; rcx and the flags are clobbered.
11332 instruct partialSubtypeCheck(rdi_RegP result,
11333 rsi_RegP sub, rax_RegP super, rcx_RegI rcx,
11334 rFlagsReg cr)
11335 %{
11336 match(Set result (PartialSubtypeCheck sub super));
11337 effect(KILL rcx, KILL cr);
11339 ins_cost(1100); // slightly larger than the next version
// Fix: debug format said "arrayOopDex"; the class is arrayOopDesc.
11340 format %{ "cmpq rax, rsi\n\t"
11341 "jeq,s hit\n\t"
11342 "movq rdi, [$sub + (sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())]\n\t"
11343 "movl rcx, [rdi + arrayOopDesc::length_offset_in_bytes()]\t# length to scan\n\t"
11344 "addq rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)\t# Skip to start of data; set NZ in case count is zero\n\t"
11345 "repne scasq\t# Scan *rdi++ for a match with rax while rcx--\n\t"
11346 "jne,s miss\t\t# Missed: rdi not-zero\n\t"
11347 "movq [$sub + (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())], $super\t# Hit: update cache\n\t"
11348 "hit:\n\t"
11349 "xorq $result, $result\t\t Hit: rdi zero\n\t"
11350 "miss:\t" %}
11352 opcode(0x1); // Force a XOR of RDI
11353 ins_encode(enc_PartialSubtypeCheck());
11354 ins_pipe(pipe_slow);
11355 %}
// Variant of partialSubtypeCheck matched directly against a CmpP-vs-null:
// only the flags are needed, so rdi need not be zeroed (opcode 0x0 below).
// Disabled with compressed oops because decoding kills the condition codes.
11357 instruct partialSubtypeCheck_vs_Zero(rFlagsReg cr,
11358 rsi_RegP sub, rax_RegP super, rcx_RegI rcx,
11359 immP0 zero,
11360 rdi_RegP result)
11361 %{
11362 match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
11363 predicate(!UseCompressedOops); // decoding oop kills condition codes
11364 effect(KILL rcx, KILL result);
11366 ins_cost(1000);
// Fix: debug format said "arrayOopDex"; the class is arrayOopDesc.
11367 format %{ "cmpq rax, rsi\n\t"
11368 "jeq,s miss\t# Actually a hit; we are done.\n\t"
11369 "movq rdi, [$sub + (sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())]\n\t"
11370 "movl rcx, [rdi + arrayOopDesc::length_offset_in_bytes()]\t# length to scan\n\t"
11371 "addq rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)\t# Skip to start of data; set NZ in case count is zero\n\t"
11372 "repne scasq\t# Scan *rdi++ for a match with rax while cx-- != 0\n\t"
11373 "jne,s miss\t\t# Missed: flags nz\n\t"
11374 "movq [$sub + (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())], $super\t# Hit: update cache\n\t"
11375 "miss:\t" %}
11377 opcode(0x0); // No need to XOR RDI
11378 ins_encode(enc_PartialSubtypeCheck());
11379 ins_pipe(pipe_slow);
11380 %}
11382 // ============================================================================
11383 // Branch Instructions -- short offset versions
11384 //
11385 // These instructions are used to replace jumps of a long offset (the default
11386 // match) with jumps of a shorter offset. These instructions are all tagged
11387 // with the ins_short_branch attribute, which causes the ADLC to suppress the
11388 // match rules in general matching. Instead, the ADLC generates a conversion
11389 // method in the MachNode which can be used to do in-place replacement of the
11390 // long variant with the shorter variant. The compiler will determine if a
11391 // branch can be taken by the is_short_branch_offset() predicate in the machine
11392 // specific code section of the file.
11394 // Jump Direct - Label defines a relative address from JMP+1
// Short unconditional jump: 2-byte EB rel8.
11395 instruct jmpDir_short(label labl)
11396 %{
11397 match(Goto);
11398 effect(USE labl);
11400 ins_cost(300);
11401 format %{ "jmp,s $labl" %}
11402 size(2);
11403 opcode(0xEB);
11404 ins_encode(OpcP, LblShort(labl));
11405 ins_pipe(pipe_jmp);
11406 ins_pc_relative(1);
11407 ins_short_branch(1);
11408 %}
11410 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Short conditional branch: 2-byte 7x rel8 (signed flags).
11411 instruct jmpCon_short(cmpOp cop, rFlagsReg cr, label labl)
11412 %{
11413 match(If cop cr);
11414 effect(USE labl);
11416 ins_cost(300);
11417 format %{ "j$cop,s $labl" %}
11418 size(2);
11419 opcode(0x70);
11420 ins_encode(JccShort(cop, labl));
11421 ins_pipe(pipe_jcc);
11422 ins_pc_relative(1);
11423 ins_short_branch(1);
11424 %}
11426 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Short counted-loop back-branch (signed flags).
11427 instruct jmpLoopEnd_short(cmpOp cop, rFlagsReg cr, label labl)
11428 %{
11429 match(CountedLoopEnd cop cr);
11430 effect(USE labl);
11432 ins_cost(300);
11433 format %{ "j$cop,s $labl" %}
11434 size(2);
11435 opcode(0x70);
11436 ins_encode(JccShort(cop, labl));
11437 ins_pipe(pipe_jcc);
11438 ins_pc_relative(1);
11439 ins_short_branch(1);
11440 %}
11442 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Short counted-loop back-branch on unsigned comparison flags.
11443 instruct jmpLoopEndU_short(cmpOpU cop, rFlagsRegU cmp, label labl)
11444 %{
11445 match(CountedLoopEnd cop cmp);
11446 effect(USE labl);
11448 ins_cost(300);
11449 format %{ "j$cop,us $labl" %}
11450 size(2);
11451 opcode(0x70);
11452 ins_encode(JccShort(cop, labl));
11453 ins_pipe(pipe_jcc);
11454 ins_pc_relative(1);
11455 ins_short_branch(1);
11456 %}
11458 // Jump Direct Conditional - using unsigned comparison
// Short conditional branch on unsigned comparison flags.
11459 instruct jmpConU_short(cmpOpU cop, rFlagsRegU cmp, label labl)
11460 %{
11461 match(If cop cmp);
11462 effect(USE labl);
11464 ins_cost(300);
11465 format %{ "j$cop,us $labl" %}
11466 size(2);
11467 opcode(0x70);
11468 ins_encode(JccShort(cop, labl));
11469 ins_pipe(pipe_jcc);
11470 ins_pc_relative(1);
11471 ins_short_branch(1);
11472 %}
11474 // ============================================================================
11475 // inlined locking and unlocking
// Inline fast-path monitor enter via the Fast_Lock encoding.
// tmp is pinned to RAX and scr is a scratch register; both are TEMP and
// therefore clobbered. The result is delivered in the flags so the
// following branch can fall into the slow path on failure.
11477 instruct cmpFastLock(rFlagsReg cr,
11478 rRegP object, rRegP box, rax_RegI tmp, rRegP scr)
11479 %{
11480 match(Set cr (FastLock object box));
11481 effect(TEMP tmp, TEMP scr);
11483 ins_cost(300);
11484 format %{ "fastlock $object,$box,$tmp,$scr" %}
11485 ins_encode(Fast_Lock(object, box, tmp, scr));
11486 ins_pipe(pipe_slow);
11487 ins_pc_relative(1);
11488 %}
// Inline fast-path monitor exit via the Fast_Unlock encoding.
// box is pinned to RAX; tmp is TEMP and clobbered; result is in flags.
11490 instruct cmpFastUnlock(rFlagsReg cr,
11491 rRegP object, rax_RegP box, rRegP tmp)
11492 %{
11493 match(Set cr (FastUnlock object box));
11494 effect(TEMP tmp);
11496 ins_cost(300);
11497 format %{ "fastunlock $object, $box, $tmp" %}
11498 ins_encode(Fast_Unlock(object, box, tmp));
11499 ins_pipe(pipe_slow);
11500 ins_pc_relative(1);
11501 %}
11504 // ============================================================================
11505 // Safepoint Instructions
// Safepoint poll: a rip-relative testl against the polling page
// (emitted by enc_safepoint_poll). Kills the flags.
11506 instruct safePoint_poll(rFlagsReg cr)
11507 %{
11508 match(SafePoint);
11509 effect(KILL cr);
11511 format %{ "testl rax, [rip + #offset_to_poll_page]\t"
11512 "# Safepoint: poll for GC" %}
11513 size(6); // Opcode + ModRM + Disp32 == 6 bytes
11514 ins_cost(125);
11515 ins_encode(enc_safepoint_poll);
11516 ins_pipe(ialu_reg_mem);
11517 %}
11519 // ============================================================================
11520 // Procedure Call/Return Instructions
11521 // Call Java Static Instruction
11522 // Note: If this code changes, the corresponding ret_addr_offset() and
11523 // compute_padding() functions will have to be adjusted.
// Direct static Java call: E8 rel32 plus the call epilog.
// ins_alignment(4) keeps the layout stable for the padding/ret-addr
// bookkeeping mentioned in the note above.
11524 instruct CallStaticJavaDirect(method meth)
11525 %{
11526 match(CallStaticJava);
11527 effect(USE meth);
11529 ins_cost(300);
11530 format %{ "call,static " %}
11531 opcode(0xE8); /* E8 cd */
11532 ins_encode(Java_Static_Call(meth), call_epilog);
11533 ins_pipe(pipe_slow);
11534 ins_pc_relative(1);
11535 ins_alignment(4);
11536 %}
11538 // Call Java Dynamic Instruction
11539 // Note: If this code changes, the corresponding ret_addr_offset() and
11540 // compute_padding() functions will have to be adjusted.
// Dynamic (inline-cache) Java call: loads the non_oop_word sentinel into
// rax before the call, per the format, then E8 rel32 plus call epilog.
11541 instruct CallDynamicJavaDirect(method meth)
11542 %{
11543 match(CallDynamicJava);
11544 effect(USE meth);
11546 ins_cost(300);
11547 format %{ "movq rax, #Universe::non_oop_word()\n\t"
11548 "call,dynamic " %}
11549 opcode(0xE8); /* E8 cd */
11550 ins_encode(Java_Dynamic_Call(meth), call_epilog);
11551 ins_pipe(pipe_slow);
11552 ins_pc_relative(1);
11553 ins_alignment(4);
11554 %}
11556 // Call Runtime Instruction
// Runtime call through the Java_To_Runtime encoding.
11557 instruct CallRuntimeDirect(method meth)
11558 %{
11559 match(CallRuntime);
11560 effect(USE meth);
11562 ins_cost(300);
11563 format %{ "call,runtime " %}
11564 opcode(0xE8); /* E8 cd */
11565 ins_encode(Java_To_Runtime(meth));
11566 ins_pipe(pipe_slow);
11567 ins_pc_relative(1);
11568 %}
11570 // Call runtime without safepoint
11571 instruct CallLeafDirect(method meth)
11572 %{
11573 match(CallLeaf);
11574 effect(USE meth);
11576 ins_cost(300);
11577 format %{ "call_leaf,runtime " %}
11578 opcode(0xE8); /* E8 cd */
11579 ins_encode(Java_To_Runtime(meth));
11580 ins_pipe(pipe_slow);
11581 ins_pc_relative(1);
11582 %}
11584 // Call runtime without safepoint
// Leaf runtime call variant for the CallLeafNoFP ideal node.
11585 instruct CallLeafNoFPDirect(method meth)
11586 %{
11587 match(CallLeafNoFP);
11588 effect(USE meth);
11590 ins_cost(300);
11591 format %{ "call_leaf_nofp,runtime " %}
11592 opcode(0xE8); /* E8 cd */
11593 ins_encode(Java_To_Runtime(meth));
11594 ins_pipe(pipe_slow);
11595 ins_pc_relative(1);
11596 %}
11598 // Return Instruction
11599 // Remove the return address & jump to it.
11600 // Notice: We always emit a nop after a ret to make sure there is room
11601 // for safepoint patching
// Plain C3 ret; the trailing nop mentioned above is emitted elsewhere
// (not by this encoding, which is OpcP only).
11602 instruct Ret()
11603 %{
11604 match(Return);
11606 format %{ "ret" %}
11607 opcode(0xC3);
11608 ins_encode(OpcP);
11609 ins_pipe(pipe_jmp);
11610 %}
11612 // Tail Call; Jump from runtime stub to Java code.
11613 // Also known as an 'interprocedural jump'.
11614 // Target of jump will eventually return to caller.
11615 // TailJump below removes the return address.
// Indirect jump (FF /4) through jump_target; rbx carries the method oop.
11616 instruct TailCalljmpInd(no_rbp_RegP jump_target, rbx_RegP method_oop)
11617 %{
11618 match(TailCall jump_target method_oop);
11620 ins_cost(300);
11621 format %{ "jmp $jump_target\t# rbx holds method oop" %}
11622 opcode(0xFF, 0x4); /* Opcode FF /4 */
11623 ins_encode(REX_reg(jump_target), OpcP, reg_opc(jump_target));
11624 ins_pipe(pipe_jmp);
11625 %}
11627 // Tail Jump; remove the return address; jump to target.
11628 // TailCall above leaves the return address around.
// Pops the return address into rdx (single byte 0x5a = pop rdx), then
// jumps indirectly through jump_target; rax carries the exception oop.
11629 instruct tailjmpInd(no_rbp_RegP jump_target, rax_RegP ex_oop)
11630 %{
11631 match(TailJump jump_target ex_oop);
11633 ins_cost(300);
11634 format %{ "popq rdx\t# pop return address\n\t"
11635 "jmp $jump_target" %}
11636 opcode(0xFF, 0x4); /* Opcode FF /4 */
11637 ins_encode(Opcode(0x5a), // popq rdx
11638 REX_reg(jump_target), OpcP, reg_opc(jump_target));
11639 ins_pipe(pipe_jmp);
11640 %}
11642 // Create exception oop: created by stack-crawling runtime code.
11643 // Created exception is now available to this handler, and is setup
11644 // just prior to jumping to this handler. No code emitted.
// Zero-size placeholder: just tells the register allocator the exception
// oop lives in rax at this point.
11645 instruct CreateException(rax_RegP ex_oop)
11646 %{
11647 match(Set ex_oop (CreateEx));
11649 size(0);
11650 // use the following format syntax
11651 format %{ "# exception oop is in rax; no code emitted" %}
11652 ins_encode();
11653 ins_pipe(empty);
11654 %}
11656 // Rethrow exception:
11657 // The exception oop will come in the first argument position.
11658 // Then JUMP (not call) to the rethrow stub code.
11659 instruct RethrowException()
11660 %{
11661 match(Rethrow);
11663 // use the following format syntax
11664 format %{ "jmp rethrow_stub" %}
11665 ins_encode(enc_rethrow);
11666 ins_pipe(pipe_jmp);
11667 %}
11670 //----------PEEPHOLE RULES-----------------------------------------------------
11671 // These must follow all instruction definitions as they use the names
11672 // defined in the instructions definitions.
11673 //
11674 // peepmatch ( root_instr_name [preceding_instruction]* );
11675 //
11676 // peepconstraint %{
11677 // (instruction_number.operand_name relational_op instruction_number.operand_name
11678 // [, ...] );
11679 // // instruction numbers are zero-based using left to right order in peepmatch
11680 //
11681 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
11682 // // provide an instruction_number.operand_name for each operand that appears
11683 // // in the replacement instruction's match rule
11684 //
11685 // ---------VM FLAGS---------------------------------------------------------
11686 //
11687 // All peephole optimizations can be turned off using -XX:-OptoPeephole
11688 //
11689 // Each peephole rule is given an identifying number starting with zero and
11690 // increasing by one in the order seen by the parser. An individual peephole
11691 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
11692 // on the command-line.
11693 //
11694 // ---------CURRENT LIMITATIONS----------------------------------------------
11695 //
11696 // Only match adjacent instructions in same basic block
11697 // Only equality constraints
11698 // Only constraints between operands, not (0.dest_reg == RAX_enc)
11699 // Only one replacement instruction
11700 //
11701 // ---------EXAMPLE----------------------------------------------------------
11702 //
11703 // // pertinent parts of existing instructions in architecture description
11704 // instruct movI(rRegI dst, rRegI src)
11705 // %{
11706 // match(Set dst (CopyI src));
11707 // %}
11708 //
11709 // instruct incI_rReg(rRegI dst, immI1 src, rFlagsReg cr)
11710 // %{
11711 // match(Set dst (AddI dst src));
11712 // effect(KILL cr);
11713 // %}
11714 //
11715 // // Change (inc mov) to lea
11716 // peephole %{
11717 // // increment preceded by register-register move
11718 // peepmatch ( incI_rReg movI );
11719 // // require that the destination register of the increment
11720 // // match the destination register of the move
11721 // peepconstraint ( 0.dst == 1.dst );
11722 // // construct a replacement instruction that sets
11723 // // the destination to ( move's source register + one )
11724 // peepreplace ( leaI_rReg_immI( 0.dst 1.src 0.src ) );
11725 // %}
11726 //
11728 // Implementation no longer uses movX instructions since
11729 // machine-independent system no longer uses CopyX nodes.
11730 //
11731 // peephole
11732 // %{
11733 // peepmatch (incI_rReg movI);
11734 // peepconstraint (0.dst == 1.dst);
11735 // peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
11736 // %}
11738 // peephole
11739 // %{
11740 // peepmatch (decI_rReg movI);
11741 // peepconstraint (0.dst == 1.dst);
11742 // peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
11743 // %}
11745 // peephole
11746 // %{
11747 // peepmatch (addI_rReg_imm movI);
11748 // peepconstraint (0.dst == 1.dst);
11749 // peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
11750 // %}
11752 // peephole
11753 // %{
11754 // peepmatch (incL_rReg movL);
11755 // peepconstraint (0.dst == 1.dst);
11756 // peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
11757 // %}
11759 // peephole
11760 // %{
11761 // peepmatch (decL_rReg movL);
11762 // peepconstraint (0.dst == 1.dst);
11763 // peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
11764 // %}
11766 // peephole
11767 // %{
11768 // peepmatch (addL_rReg_imm movL);
11769 // peepconstraint (0.dst == 1.dst);
11770 // peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
11771 // %}
11773 // peephole
11774 // %{
11775 // peepmatch (addP_rReg_imm movP);
11776 // peepconstraint (0.dst == 1.dst);
11777 // peepreplace (leaP_rReg_imm(0.dst 1.src 0.src));
11778 // %}
11780 // // Change load of spilled value to only a spill
11781 // instruct storeI(memory mem, rRegI src)
11782 // %{
11783 // match(Set mem (StoreI mem src));
11784 // %}
11785 //
11786 // instruct loadI(rRegI dst, memory mem)
11787 // %{
11788 // match(Set dst (LoadI mem));
11789 // %}
11790 //
// Collapse a load that immediately follows a store of the same register
// to the same memory: the register already holds the value, so only the
// store is re-issued (see the commented storeI/loadI shapes above).
11792 peephole
11793 %{
11794 peepmatch (loadI storeI);
11795 peepconstraint (1.src == 0.dst, 1.mem == 0.mem);
11796 peepreplace (storeI(1.mem 1.mem 1.src));
11797 %}
// Same store-forwarding collapse for 64-bit longs.
11799 peephole
11800 %{
11801 peepmatch (loadL storeL);
11802 peepconstraint (1.src == 0.dst, 1.mem == 0.mem);
11803 peepreplace (storeL(1.mem 1.mem 1.src));
11804 %}
11806 //----------SMARTSPILL RULES---------------------------------------------------
11807 // These must follow all instruction definitions as they use the names
11808 // defined in the instructions definitions.