Thu, 12 Mar 2009 10:37:46 -0700
6791178: Specialize for zero as the compressed oop vm heap base
Summary: Use zero based compressed oops if java heap is below 32gb and unscaled compressed oops if java heap is below 4gb.
Reviewed-by: never, twisti, jcoomes, coleenp
1 //
2 // Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 //
5 // This code is free software; you can redistribute it and/or modify it
6 // under the terms of the GNU General Public License version 2 only, as
7 // published by the Free Software Foundation.
8 //
9 // This code is distributed in the hope that it will be useful, but WITHOUT
10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 // version 2 for more details (a copy is included in the LICENSE file that
13 // accompanied this code).
14 //
15 // You should have received a copy of the GNU General Public License version
16 // 2 along with this work; if not, write to the Free Software Foundation,
17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 //
19 // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 // CA 95054 USA or visit www.sun.com if you need additional information or
21 // have any questions.
22 //
23 //
25 // AMD64 Architecture Description File
27 //----------REGISTER DEFINITION BLOCK------------------------------------------
28 // This information is used by the matcher and the register allocator to
29 // describe individual registers and classes of registers within the target
30 // architecture.
32 register %{
33 //----------Architecture Description Register Definitions----------------------
34 // General Registers
35 // "reg_def" name ( register save type, C convention save type,
36 // ideal register type, encoding );
37 // Register Save Types:
38 //
39 // NS = No-Save: The register allocator assumes that these registers
40 // can be used without saving upon entry to the method, &
41 // that they do not need to be saved at call sites.
42 //
43 // SOC = Save-On-Call: The register allocator assumes that these registers
44 // can be used without saving upon entry to the method,
45 // but that they must be saved at call sites.
46 //
47 // SOE = Save-On-Entry: The register allocator assumes that these registers
48 // must be saved before using them upon entry to the
49 // method, but they do not need to be saved at call
50 // sites.
51 //
52 // AS = Always-Save: The register allocator assumes that these registers
53 // must be saved before using them upon entry to the
54 // method, & that they must be saved at call sites.
55 //
56 // Ideal Register Type is used to determine how to save & restore a
57 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
58 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
59 //
60 // The encoding number is the actual bit-pattern placed into the opcodes.
62 // General Registers
63 // R8-R15 must be encoded with REX. (RSP, RBP, RSI, RDI need REX when
64 // used as byte registers)
66 // Previously set RBX, RSI, and RDI as save-on-entry for java code
67 // Turn off SOE in java-code due to frequent use of uncommon-traps.
68 // Now that allocator is better, turn on RSI and RDI as SOE registers.
70 reg_def RAX (SOC, SOC, Op_RegI, 0, rax->as_VMReg());
71 reg_def RAX_H(SOC, SOC, Op_RegI, 0, rax->as_VMReg()->next());
73 reg_def RCX (SOC, SOC, Op_RegI, 1, rcx->as_VMReg());
74 reg_def RCX_H(SOC, SOC, Op_RegI, 1, rcx->as_VMReg()->next());
76 reg_def RDX (SOC, SOC, Op_RegI, 2, rdx->as_VMReg());
77 reg_def RDX_H(SOC, SOC, Op_RegI, 2, rdx->as_VMReg()->next());
79 reg_def RBX (SOC, SOE, Op_RegI, 3, rbx->as_VMReg());
80 reg_def RBX_H(SOC, SOE, Op_RegI, 3, rbx->as_VMReg()->next());
82 reg_def RSP (NS, NS, Op_RegI, 4, rsp->as_VMReg());
83 reg_def RSP_H(NS, NS, Op_RegI, 4, rsp->as_VMReg()->next());
85 // now that adapter frames are gone RBP is always saved and restored by the prolog/epilog code
86 reg_def RBP (NS, SOE, Op_RegI, 5, rbp->as_VMReg());
87 reg_def RBP_H(NS, SOE, Op_RegI, 5, rbp->as_VMReg()->next());
89 #ifdef _WIN64
91 reg_def RSI (SOC, SOE, Op_RegI, 6, rsi->as_VMReg());
92 reg_def RSI_H(SOC, SOE, Op_RegI, 6, rsi->as_VMReg()->next());
94 reg_def RDI (SOC, SOE, Op_RegI, 7, rdi->as_VMReg());
95 reg_def RDI_H(SOC, SOE, Op_RegI, 7, rdi->as_VMReg()->next());
97 #else
99 reg_def RSI (SOC, SOC, Op_RegI, 6, rsi->as_VMReg());
100 reg_def RSI_H(SOC, SOC, Op_RegI, 6, rsi->as_VMReg()->next());
102 reg_def RDI (SOC, SOC, Op_RegI, 7, rdi->as_VMReg());
103 reg_def RDI_H(SOC, SOC, Op_RegI, 7, rdi->as_VMReg()->next());
105 #endif
107 reg_def R8 (SOC, SOC, Op_RegI, 8, r8->as_VMReg());
108 reg_def R8_H (SOC, SOC, Op_RegI, 8, r8->as_VMReg()->next());
110 reg_def R9 (SOC, SOC, Op_RegI, 9, r9->as_VMReg());
111 reg_def R9_H (SOC, SOC, Op_RegI, 9, r9->as_VMReg()->next());
113 reg_def R10 (SOC, SOC, Op_RegI, 10, r10->as_VMReg());
114 reg_def R10_H(SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
116 reg_def R11 (SOC, SOC, Op_RegI, 11, r11->as_VMReg());
117 reg_def R11_H(SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
119 reg_def R12 (SOC, SOE, Op_RegI, 12, r12->as_VMReg());
120 reg_def R12_H(SOC, SOE, Op_RegI, 12, r12->as_VMReg()->next());
122 reg_def R13 (SOC, SOE, Op_RegI, 13, r13->as_VMReg());
123 reg_def R13_H(SOC, SOE, Op_RegI, 13, r13->as_VMReg()->next());
125 reg_def R14 (SOC, SOE, Op_RegI, 14, r14->as_VMReg());
126 reg_def R14_H(SOC, SOE, Op_RegI, 14, r14->as_VMReg()->next());
128 reg_def R15 (SOC, SOE, Op_RegI, 15, r15->as_VMReg());
129 reg_def R15_H(SOC, SOE, Op_RegI, 15, r15->as_VMReg()->next());
132 // Floating Point Registers
134 // XMM registers.  128-bit registers or 4 words each, labeled a-d.
135 // Word a in each register holds a Float, words ab hold a Double. We
136 // currently do not use the SIMD capabilities, so registers cd are
137 // unused at the moment.
138 // XMM8-XMM15 must be encoded with REX.
139 // Linux ABI: No register preserved across function calls
140 // XMM0-XMM7 might hold parameters
141 // Windows ABI: XMM6-XMM15 preserved across function calls
142 // XMM0-XMM3 might hold parameters
144 reg_def XMM0 (SOC, SOC, Op_RegF, 0, xmm0->as_VMReg());
145 reg_def XMM0_H (SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next());
147 reg_def XMM1 (SOC, SOC, Op_RegF, 1, xmm1->as_VMReg());
148 reg_def XMM1_H (SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next());
150 reg_def XMM2 (SOC, SOC, Op_RegF, 2, xmm2->as_VMReg());
151 reg_def XMM2_H (SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next());
153 reg_def XMM3 (SOC, SOC, Op_RegF, 3, xmm3->as_VMReg());
154 reg_def XMM3_H (SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next());
156 reg_def XMM4 (SOC, SOC, Op_RegF, 4, xmm4->as_VMReg());
157 reg_def XMM4_H (SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next());
159 reg_def XMM5 (SOC, SOC, Op_RegF, 5, xmm5->as_VMReg());
160 reg_def XMM5_H (SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next());
162 #ifdef _WIN64
164 reg_def XMM6 (SOC, SOE, Op_RegF, 6, xmm6->as_VMReg());
165 reg_def XMM6_H (SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next());
167 reg_def XMM7 (SOC, SOE, Op_RegF, 7, xmm7->as_VMReg());
168 reg_def XMM7_H (SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next());
170 reg_def XMM8 (SOC, SOE, Op_RegF, 8, xmm8->as_VMReg());
171 reg_def XMM8_H (SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next());
173 reg_def XMM9 (SOC, SOE, Op_RegF, 9, xmm9->as_VMReg());
174 reg_def XMM9_H (SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next());
176 reg_def XMM10 (SOC, SOE, Op_RegF, 10, xmm10->as_VMReg());
177 reg_def XMM10_H(SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next());
179 reg_def XMM11 (SOC, SOE, Op_RegF, 11, xmm11->as_VMReg());
180 reg_def XMM11_H(SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next());
182 reg_def XMM12 (SOC, SOE, Op_RegF, 12, xmm12->as_VMReg());
183 reg_def XMM12_H(SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next());
185 reg_def XMM13 (SOC, SOE, Op_RegF, 13, xmm13->as_VMReg());
186 reg_def XMM13_H(SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next());
188 reg_def XMM14 (SOC, SOE, Op_RegF, 14, xmm14->as_VMReg());
189 reg_def XMM14_H(SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next());
191 reg_def XMM15 (SOC, SOE, Op_RegF, 15, xmm15->as_VMReg());
192 reg_def XMM15_H(SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next());
194 #else
196 reg_def XMM6 (SOC, SOC, Op_RegF, 6, xmm6->as_VMReg());
197 reg_def XMM6_H (SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next());
199 reg_def XMM7 (SOC, SOC, Op_RegF, 7, xmm7->as_VMReg());
200 reg_def XMM7_H (SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next());
202 reg_def XMM8 (SOC, SOC, Op_RegF, 8, xmm8->as_VMReg());
203 reg_def XMM8_H (SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next());
205 reg_def XMM9 (SOC, SOC, Op_RegF, 9, xmm9->as_VMReg());
206 reg_def XMM9_H (SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next());
208 reg_def XMM10 (SOC, SOC, Op_RegF, 10, xmm10->as_VMReg());
209 reg_def XMM10_H(SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next());
211 reg_def XMM11 (SOC, SOC, Op_RegF, 11, xmm11->as_VMReg());
212 reg_def XMM11_H(SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next());
214 reg_def XMM12 (SOC, SOC, Op_RegF, 12, xmm12->as_VMReg());
215 reg_def XMM12_H(SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next());
217 reg_def XMM13 (SOC, SOC, Op_RegF, 13, xmm13->as_VMReg());
218 reg_def XMM13_H(SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next());
220 reg_def XMM14 (SOC, SOC, Op_RegF, 14, xmm14->as_VMReg());
221 reg_def XMM14_H(SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next());
223 reg_def XMM15 (SOC, SOC, Op_RegF, 15, xmm15->as_VMReg());
224 reg_def XMM15_H(SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next());
226 #endif // _WIN64
228 reg_def RFLAGS(SOC, SOC, 0, 16, VMRegImpl::Bad());
230 // Specify priority of register selection within phases of register
231 // allocation. Highest priority is first. A useful heuristic is to
232 // give registers a low priority when they are required by machine
233 // instructions, like EAX and EDX on I486, and choose no-save registers
234 // before save-on-call, & save-on-call before save-on-entry. Registers
235 // which participate in fixed calling sequences should come last.
236 // Registers which are used as pairs must fall on an even boundary.
238 alloc_class chunk0(R10, R10_H,
239 R11, R11_H,
240 R8, R8_H,
241 R9, R9_H,
242 R12, R12_H,
243 RCX, RCX_H,
244 RBX, RBX_H,
245 RDI, RDI_H,
246 RDX, RDX_H,
247 RSI, RSI_H,
248 RAX, RAX_H,
249 RBP, RBP_H,
250 R13, R13_H,
251 R14, R14_H,
252 R15, R15_H,
253 RSP, RSP_H);
255 // XXX probably use 8-15 first on Linux
256 alloc_class chunk1(XMM0, XMM0_H,
257 XMM1, XMM1_H,
258 XMM2, XMM2_H,
259 XMM3, XMM3_H,
260 XMM4, XMM4_H,
261 XMM5, XMM5_H,
262 XMM6, XMM6_H,
263 XMM7, XMM7_H,
264 XMM8, XMM8_H,
265 XMM9, XMM9_H,
266 XMM10, XMM10_H,
267 XMM11, XMM11_H,
268 XMM12, XMM12_H,
269 XMM13, XMM13_H,
270 XMM14, XMM14_H,
271 XMM15, XMM15_H);
273 alloc_class chunk2(RFLAGS);
276 //----------Architecture Description Register Classes--------------------------
277 // Several register classes are automatically defined based upon information in
278 // this architecture description.
279 // 1) reg_class inline_cache_reg          ( /* as def'd in frame section */ )
280 // 2) reg_class compiler_method_oop_reg   ( /* as def'd in frame section */ )
281 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
282 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
283 //
285 // Class for all pointer registers (including RSP)
286 reg_class any_reg(RAX, RAX_H,
287 RDX, RDX_H,
288 RBP, RBP_H,
289 RDI, RDI_H,
290 RSI, RSI_H,
291 RCX, RCX_H,
292 RBX, RBX_H,
293 RSP, RSP_H,
294 R8, R8_H,
295 R9, R9_H,
296 R10, R10_H,
297 R11, R11_H,
298 R12, R12_H,
299 R13, R13_H,
300 R14, R14_H,
301 R15, R15_H);
303 // Class for all pointer registers except RSP
304 reg_class ptr_reg(RAX, RAX_H,
305 RDX, RDX_H,
306 RBP, RBP_H,
307 RDI, RDI_H,
308 RSI, RSI_H,
309 RCX, RCX_H,
310 RBX, RBX_H,
311 R8, R8_H,
312 R9, R9_H,
313 R10, R10_H,
314 R11, R11_H,
315 R13, R13_H,
316 R14, R14_H);
318 // Class for all pointer registers except RAX and RSP
319 reg_class ptr_no_rax_reg(RDX, RDX_H,
320 RBP, RBP_H,
321 RDI, RDI_H,
322 RSI, RSI_H,
323 RCX, RCX_H,
324 RBX, RBX_H,
325 R8, R8_H,
326 R9, R9_H,
327 R10, R10_H,
328 R11, R11_H,
329 R13, R13_H,
330 R14, R14_H);
332 reg_class ptr_no_rbp_reg(RDX, RDX_H,
333 RAX, RAX_H,
334 RDI, RDI_H,
335 RSI, RSI_H,
336 RCX, RCX_H,
337 RBX, RBX_H,
338 R8, R8_H,
339 R9, R9_H,
340 R10, R10_H,
341 R11, R11_H,
342 R13, R13_H,
343 R14, R14_H);
345 // Class for all pointer registers except RAX, RBX and RSP
346 reg_class ptr_no_rax_rbx_reg(RDX, RDX_H,
347 RBP, RBP_H,
348 RDI, RDI_H,
349 RSI, RSI_H,
350 RCX, RCX_H,
351 R8, R8_H,
352 R9, R9_H,
353 R10, R10_H,
354 R11, R11_H,
355 R13, R13_H,
356 R14, R14_H);
358 // Singleton class for RAX pointer register
359 reg_class ptr_rax_reg(RAX, RAX_H);
361 // Singleton class for RBX pointer register
362 reg_class ptr_rbx_reg(RBX, RBX_H);
364 // Singleton class for RSI pointer register
365 reg_class ptr_rsi_reg(RSI, RSI_H);
367 // Singleton class for RDI pointer register
368 reg_class ptr_rdi_reg(RDI, RDI_H);
370 // Singleton class for RBP pointer register
371 reg_class ptr_rbp_reg(RBP, RBP_H);
373 // Singleton class for stack pointer
374 reg_class ptr_rsp_reg(RSP, RSP_H);
376 // Singleton class for TLS pointer
377 reg_class ptr_r15_reg(R15, R15_H);
379 // Class for all long registers (except RSP)
380 reg_class long_reg(RAX, RAX_H,
381 RDX, RDX_H,
382 RBP, RBP_H,
383 RDI, RDI_H,
384 RSI, RSI_H,
385 RCX, RCX_H,
386 RBX, RBX_H,
387 R8, R8_H,
388 R9, R9_H,
389 R10, R10_H,
390 R11, R11_H,
391 R13, R13_H,
392 R14, R14_H);
394 // Class for all long registers except RAX, RDX (and RSP)
395 reg_class long_no_rax_rdx_reg(RBP, RBP_H,
396 RDI, RDI_H,
397 RSI, RSI_H,
398 RCX, RCX_H,
399 RBX, RBX_H,
400 R8, R8_H,
401 R9, R9_H,
402 R10, R10_H,
403 R11, R11_H,
404 R13, R13_H,
405 R14, R14_H);
407 // Class for all long registers except RCX (and RSP)
408 reg_class long_no_rcx_reg(RBP, RBP_H,
409 RDI, RDI_H,
410 RSI, RSI_H,
411 RAX, RAX_H,
412 RDX, RDX_H,
413 RBX, RBX_H,
414 R8, R8_H,
415 R9, R9_H,
416 R10, R10_H,
417 R11, R11_H,
418 R13, R13_H,
419 R14, R14_H);
421 // Class for all long registers except RAX (and RSP)
422 reg_class long_no_rax_reg(RBP, RBP_H,
423 RDX, RDX_H,
424 RDI, RDI_H,
425 RSI, RSI_H,
426 RCX, RCX_H,
427 RBX, RBX_H,
428 R8, R8_H,
429 R9, R9_H,
430 R10, R10_H,
431 R11, R11_H,
432 R13, R13_H,
433 R14, R14_H);
435 // Singleton class for RAX long register
436 reg_class long_rax_reg(RAX, RAX_H);
438 // Singleton class for RCX long register
439 reg_class long_rcx_reg(RCX, RCX_H);
441 // Singleton class for RDX long register
442 reg_class long_rdx_reg(RDX, RDX_H);
444 // Class for all int registers (except RSP)
445 reg_class int_reg(RAX,
446 RDX,
447 RBP,
448 RDI,
449 RSI,
450 RCX,
451 RBX,
452 R8,
453 R9,
454 R10,
455 R11,
456 R13,
457 R14);
459 // Class for all int registers except RCX (and RSP)
460 reg_class int_no_rcx_reg(RAX,
461 RDX,
462 RBP,
463 RDI,
464 RSI,
465 RBX,
466 R8,
467 R9,
468 R10,
469 R11,
470 R13,
471 R14);
473 // Class for all int registers except RAX, RDX (and RSP)
474 reg_class int_no_rax_rdx_reg(RBP,
475 RDI,
476 RSI,
477 RCX,
478 RBX,
479 R8,
480 R9,
481 R10,
482 R11,
483 R13,
484 R14);
486 // Singleton class for RAX int register
487 reg_class int_rax_reg(RAX);
489 // Singleton class for RBX int register
490 reg_class int_rbx_reg(RBX);
492 // Singleton class for RCX int register
493 reg_class int_rcx_reg(RCX);
495 // Singleton class for RDX int register
496 reg_class int_rdx_reg(RDX);
498 // Singleton class for RDI int register
499 reg_class int_rdi_reg(RDI);
501 // Singleton class for instruction pointer
502 // reg_class ip_reg(RIP);
504 // Singleton class for condition codes
505 reg_class int_flags(RFLAGS);
507 // Class for all float registers
508 reg_class float_reg(XMM0,
509 XMM1,
510 XMM2,
511 XMM3,
512 XMM4,
513 XMM5,
514 XMM6,
515 XMM7,
516 XMM8,
517 XMM9,
518 XMM10,
519 XMM11,
520 XMM12,
521 XMM13,
522 XMM14,
523 XMM15);
525 // Class for all double registers
526 reg_class double_reg(XMM0, XMM0_H,
527 XMM1, XMM1_H,
528 XMM2, XMM2_H,
529 XMM3, XMM3_H,
530 XMM4, XMM4_H,
531 XMM5, XMM5_H,
532 XMM6, XMM6_H,
533 XMM7, XMM7_H,
534 XMM8, XMM8_H,
535 XMM9, XMM9_H,
536 XMM10, XMM10_H,
537 XMM11, XMM11_H,
538 XMM12, XMM12_H,
539 XMM13, XMM13_H,
540 XMM14, XMM14_H,
541 XMM15, XMM15_H);
542 %}
545 //----------SOURCE BLOCK-------------------------------------------------------
546 // This is a block of C++ code which provides values, functions, and
547 // definitions necessary in the rest of the architecture description
548 source %{
549 #define RELOC_IMM64 Assembler::imm_operand
550 #define RELOC_DISP32 Assembler::disp32_operand
552 #define __ _masm.
554 // !!!!! Special hack to get all types of calls to specify the byte offset
555 //       from the start of the call to the point where the return address
556 //       will point.
// Static Java call: distance from the first byte of the call instruction
// to the return address pushed by it.
557 int MachCallStaticJavaNode::ret_addr_offset()
558 {
559   return 5; // 5 bytes from start of call to where return address points
560 }
// Dynamic (inline-cache) call: the call is preceded by IC setup code
// (see CallDynamicJavaDirectNode::compute_padding, which skips 11 bytes
// of movq + call opcode), so the return address lies further out.
562 int MachCallDynamicJavaNode::ret_addr_offset()
563 {
564   return 15; // 15 bytes from start of call to where return address points
565 }
567 // In os_cpu .ad file
568 // int MachCallRuntimeNode::ret_addr_offset()
570 // Indicate if the safepoint node needs the polling page as an input.
571 // Since amd64 does not have absolute addressing but RIP-relative
572 // addressing and the polling page is within 2G, it doesn't.
573 bool SafePointNode::needs_polling_address_input()
574 {
575   return false; // poll page is reached RIP-relatively; no extra input edge
576 }
578 //
579 // Compute padding required for nodes which need alignment
580 //
582 // The address of the call instruction needs to be 4-byte aligned to
583 // ensure that it does not span a cache line so that it can be patched.
// Returns the number of padding bytes to emit before this node so the
// call's patchable field lands on an alignment_required() boundary.
584 int CallStaticJavaDirectNode::compute_padding(int current_offset) const
585 {
586   current_offset += 1; // skip call opcode byte
587   return round_to(current_offset, alignment_required()) - current_offset;
588 }
590 // The address of the call instruction needs to be 4-byte aligned to
591 // ensure that it does not span a cache line so that it can be patched.
// Same as above, but the dynamic call sequence has 11 bytes of setup
// (movq + call opcode byte) before the patchable field.
592 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
593 {
594   current_offset += 11; // skip movq instruction + call opcode byte
595   return round_to(current_offset, alignment_required()) - current_offset;
596 }
598 #ifndef PRODUCT
// Debug-only listing: a breakpoint node is a single INT3 instruction.
599 void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const
600 {
601   st->print("INT3");
602 }
603 #endif
605 // EMIT_RM()
// Append one ModRM (or SIB) byte: f1 = mod (2 bits), f2 = reg/opcode
// field (3 bits), f3 = r/m (3 bits), packed as mod:reg:rm.
606 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3)
607 {
608   unsigned char c = (unsigned char) ((f1 << 6) | (f2 << 3) | f3);
609   *(cbuf.code_end()) = c;
610   cbuf.set_code_end(cbuf.code_end() + 1);
611 }
613 // EMIT_CC()
// Append an opcode byte formed by OR-ing a condition-code field f2 into
// the base opcode f1 (e.g. Jcc / SETcc families).
614 void emit_cc(CodeBuffer &cbuf, int f1, int f2)
615 {
616   unsigned char c = (unsigned char) (f1 | f2);
617   *(cbuf.code_end()) = c;
618   cbuf.set_code_end(cbuf.code_end() + 1);
619 }
621 // EMIT_OPCODE()
// Append a single raw opcode byte.
622 void emit_opcode(CodeBuffer &cbuf, int code)
623 {
624   *(cbuf.code_end()) = (unsigned char) code;
625   cbuf.set_code_end(cbuf.code_end() + 1);
626 }
628 // EMIT_OPCODE() w/ relocation information
// As above, but first record a relocation at inst_mark() + offset.
629 void emit_opcode(CodeBuffer &cbuf,
630                  int code, relocInfo::relocType reloc, int offset, int format)
631 {
632   cbuf.relocate(cbuf.inst_mark() + offset, reloc, format);
633   emit_opcode(cbuf, code);
634 }
// Raw data emitters: append an 8/16/32/64-bit value at code_end() and
// advance code_end() past it.  The wider stores assume the buffer
// tolerates unaligned writes (x86 does).
636 // EMIT_D8()
637 void emit_d8(CodeBuffer &cbuf, int d8)
638 {
639   *(cbuf.code_end()) = (unsigned char) d8;
640   cbuf.set_code_end(cbuf.code_end() + 1);
641 }
643 // EMIT_D16()
644 void emit_d16(CodeBuffer &cbuf, int d16)
645 {
646   *((short *)(cbuf.code_end())) = d16;
647   cbuf.set_code_end(cbuf.code_end() + 2);
648 }
650 // EMIT_D32()
651 void emit_d32(CodeBuffer &cbuf, int d32)
652 {
653   *((int *)(cbuf.code_end())) = d32;
654   cbuf.set_code_end(cbuf.code_end() + 4);
655 }
657 // EMIT_D64()
658 void emit_d64(CodeBuffer &cbuf, int64_t d64)
659 {
660   *((int64_t*) (cbuf.code_end())) = d64;
661   cbuf.set_code_end(cbuf.code_end() + 8);
662 }
664 // emit 32 bit value and construct relocation entry from relocInfo::relocType
// The relocation is recorded at inst_mark() (start of the current
// instruction), then the 4-byte payload is appended.
665 void emit_d32_reloc(CodeBuffer& cbuf,
666                     int d32,
667                     relocInfo::relocType reloc,
668                     int format)
669 {
670   assert(reloc != relocInfo::external_word_type, "use 2-arg emit_d32_reloc");
671   cbuf.relocate(cbuf.inst_mark(), reloc, format);
673   *((int*) (cbuf.code_end())) = d32;
674   cbuf.set_code_end(cbuf.code_end() + 4);
675 }
677 // emit 32 bit value and construct relocation entry from RelocationHolder
678 void emit_d32_reloc(CodeBuffer& cbuf,
679                     int d32,
680                     RelocationHolder const& rspec,
681                     int format)
682 {
683 #ifdef ASSERT
// Debug check: any oop embedded directly in code must be a permanent
// oop (never moved by GC), or the sentinel non_oop_word / null.
684   if (rspec.reloc()->type() == relocInfo::oop_type &&
685       d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
686     assert(oop((intptr_t)d32)->is_oop() && oop((intptr_t)d32)->is_perm(), "cannot embed non-perm oops in code");
687   }
688 #endif
689   cbuf.relocate(cbuf.inst_mark(), rspec, format);
691   *((int* )(cbuf.code_end())) = d32;
692   cbuf.set_code_end(cbuf.code_end() + 4);
693 }
// Emit a RIP-relative 32-bit displacement referring to 'addr': the
// displacement is measured from the address of the next instruction
// (i.e. the end of the 4-byte field being written).
695 void emit_d32_reloc(CodeBuffer& cbuf, address addr) {
696   address next_ip = cbuf.code_end() + 4;
697   emit_d32_reloc(cbuf, (int) (addr - next_ip),
698                  external_word_Relocation::spec(addr),
699                  RELOC_DISP32);
700 }
703 // emit 64 bit value and construct relocation entry from relocInfo::relocType
704 void emit_d64_reloc(CodeBuffer& cbuf,
705                     int64_t d64,
706                     relocInfo::relocType reloc,
707                     int format)
708 {
709   cbuf.relocate(cbuf.inst_mark(), reloc, format);
711   *((int64_t*) (cbuf.code_end())) = d64;
712   cbuf.set_code_end(cbuf.code_end() + 8);
713 }
715 // emit 64 bit value and construct relocation entry from RelocationHolder
716 void emit_d64_reloc(CodeBuffer& cbuf,
717                     int64_t d64,
718                     RelocationHolder const& rspec,
719                     int format)
720 {
721 #ifdef ASSERT
// Debug check mirroring the 32-bit variant: embedded oops must be
// permanent, or the non_oop_word sentinel / null.
722   if (rspec.reloc()->type() == relocInfo::oop_type &&
723       d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
724     assert(oop(d64)->is_oop() && oop(d64)->is_perm(),
725            "cannot embed non-perm oops in code");
726   }
727 #endif
728   cbuf.relocate(cbuf.inst_mark(), rspec, format);
730   *((int64_t*) (cbuf.code_end())) = d64;
731   cbuf.set_code_end(cbuf.code_end() + 8);
732 }
734 // Access stack slot for load or store
// Emits 'opcode' with an [RSP+disp] memory operand.  RSP as base always
// requires a SIB byte; the short disp8 form is used when the displacement
// fits in a signed byte, otherwise disp32.
735 void store_to_stackslot(CodeBuffer &cbuf, int opcode, int rm_field, int disp)
736 {
737   emit_opcode(cbuf, opcode); // (e.g., FILD   [RSP+src])
738   if (-0x80 <= disp && disp < 0x80) {
739     emit_rm(cbuf, 0x01, rm_field, RSP_enc); // R/M byte
740     emit_rm(cbuf, 0x00, RSP_enc, RSP_enc); // SIB byte
741     emit_d8(cbuf, disp);     // Displacement  // R/M byte
742   } else {
743     emit_rm(cbuf, 0x02, rm_field, RSP_enc); // R/M byte
744     emit_rm(cbuf, 0x00, RSP_enc, RSP_enc); // SIB byte
745     emit_d32(cbuf, disp);    // Displacement  // R/M byte
746   }
747 }
749 // rRegI ereg, memory mem) %{    // emit_reg_mem
// Encode a register + memory operand (ModRM, optional SIB, optional
// displacement) for base/index/scale/disp addressing.  Only the low 3
// bits of each register number are encoded here; REX prefixes for
// registers >= 8 must be emitted by the caller.
// NOTE(review): disp_is_oop is asserted false on entry, so the
// emit_d32_reloc(oop_type) branches below are currently dead code.
750 void encode_RegMem(CodeBuffer &cbuf,
751                    int reg,
752                    int base, int index, int scale, int disp, bool disp_is_oop)
753 {
754   assert(!disp_is_oop, "cannot have disp");
755   int regenc = reg & 7;
756   int baseenc = base & 7;
757   int indexenc = index & 7;
759   // There is no index & no scale, use form without SIB byte
760   if (index == 0x4 && scale == 0 && base != RSP_enc && base != R12_enc) {
761     // If no displacement, mode is 0x0; unless base is [RBP] or [R13]
762     if (disp == 0 && base != RBP_enc && base != R13_enc) {
763       emit_rm(cbuf, 0x0, regenc, baseenc); // *
764     } else if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
765       // If 8-bit displacement, mode 0x1
766       emit_rm(cbuf, 0x1, regenc, baseenc); // *
767       emit_d8(cbuf, disp);
768     } else {
769       // If 32-bit displacement
770       if (base == -1) { // Special flag for absolute address
771         emit_rm(cbuf, 0x0, regenc, 0x5); // *
772         if (disp_is_oop) {
773           emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
774         } else {
775           emit_d32(cbuf, disp);
776         }
777       } else {
778         // Normal base + offset
779         emit_rm(cbuf, 0x2, regenc, baseenc); // *
780         if (disp_is_oop) {
781           emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
782         } else {
783           emit_d32(cbuf, disp);
784         }
785       }
786     }
787   } else {
788     // Else, encode with the SIB byte
789     // If no displacement, mode is 0x0; unless base is [RBP] or [R13]
790     if (disp == 0 && base != RBP_enc && base != R13_enc) {
791       // If no displacement
792       emit_rm(cbuf, 0x0, regenc, 0x4); // *
793       emit_rm(cbuf, scale, indexenc, baseenc);
794     } else {
795       if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
796         // If 8-bit displacement, mode 0x1
797         emit_rm(cbuf, 0x1, regenc, 0x4); // *
798         emit_rm(cbuf, scale, indexenc, baseenc);
799         emit_d8(cbuf, disp);
800       } else {
801         // If 32-bit displacement
802         if (base == 0x04 ) {
803           emit_rm(cbuf, 0x2, regenc, 0x4);
804           emit_rm(cbuf, scale, indexenc, 0x04); // XXX is this valid???
805         } else {
806           emit_rm(cbuf, 0x2, regenc, 0x4);
807           emit_rm(cbuf, scale, indexenc, baseenc); // *
808         }
809         if (disp_is_oop) {
810           emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
811         } else {
812           emit_d32(cbuf, disp);
813         }
814       }
815     }
816   }
817 }
// Emit an integer register-to-register move (opcode 0x8B = mov r32, r/m32),
// prefixed with the REX byte required when either register encoding is
// >= 8 (REX.R for dst, REX.B for src).  A self-move emits nothing.
819 void encode_copy(CodeBuffer &cbuf, int dstenc, int srcenc)
820 {
821   if (dstenc != srcenc) {
822     if (dstenc < 8) {
823       if (srcenc >= 8) {
824         emit_opcode(cbuf, Assembler::REX_B);
825         srcenc -= 8;
826       }
827     } else {
828       if (srcenc < 8) {
829         emit_opcode(cbuf, Assembler::REX_R);
830       } else {
831         emit_opcode(cbuf, Assembler::REX_RB);
832         srcenc -= 8;
833       }
834       dstenc -= 8;
835     }
// mod=0x3 selects the register-direct form of the ModRM byte.
837     emit_opcode(cbuf, 0x8B);
838     emit_rm(cbuf, 0x3, dstenc, srcenc);
839   }
840 }
// XMM-to-XMM copy via movdqa; a self-copy emits no bytes at all.
842 void encode_CopyXD( CodeBuffer &cbuf, int dst_encoding, int src_encoding ) {
843   if( dst_encoding == src_encoding ) {
844     // reg-reg copy, use an empty encoding
845   } else {
846     MacroAssembler _masm(&cbuf);
848     __ movdqa(as_XMMRegister(dst_encoding), as_XMMRegister(src_encoding));
849   }
850 }
853 //=============================================================================
854 #ifndef PRODUCT
// Debug-only textual listing of the method prologue; must stay in sync
// with the instruction sequence produced by MachPrologNode::emit below.
855 void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const
856 {
857   Compile* C = ra_->C;
859   int framesize = C->frame_slots() << LogBytesPerInt;
860   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
861   // Remove wordSize for return adr already pushed
862   // and another for the RBP we are going to save
863   framesize -= 2*wordSize;
864   bool need_nop = true;
866   // Calls to C2R adapters often do not accept exceptional returns.
867   // We require that their callers must bang for them.  But be
868   // careful, because some VM calls (such as call site linkage) can
869   // use several kilobytes of stack.  But the stack safety zone should
870   // account for that.  See bugs 4446381, 4468289, 4497237.
871   if (C->need_stack_bang(framesize)) {
872     st->print_cr("# stack bang"); st->print("\t");
873     need_nop = false;
874   }
875   st->print_cr("pushq   rbp"); st->print("\t");
877   if (VerifyStackAtCalls) {
878     // Majik cookie to verify stack depth
879     st->print_cr("pushq   0xffffffffbadb100d"
880                  "\t# Majik cookie for stack depth check");
881     st->print("\t");
882     framesize -= wordSize; // Remove 2 for cookie
883     need_nop = false;
884   }
886   if (framesize) {
887     st->print("subq    rsp, #%d\t# Create frame", framesize);
888     if (framesize < 0x80 && need_nop) {
889       st->print("\n\tnop\t# nop for patch_verified_entry");
890     }
891   }
892 }
893 #endif
// Emit the method prologue: optional stack-overflow bang, push rbp,
// optional stack-depth cookie, then the frame allocation (subq rsp).
895 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
896 {
897   Compile* C = ra_->C;
899   // WARNING: Initial instruction MUST be 5 bytes or longer so that
900   // NativeJump::patch_verified_entry will be able to patch out the entry
901   // code safely. The fldcw is ok at 6 bytes, the push to verify stack
902   // depth is ok at 5 bytes, the frame allocation can be either 3 or
903   // 6 bytes. So if we don't do the fldcw or the push then we must
904   // use the 6 byte frame allocation even if we have no frame. :-(
905   // If method sets FPU control word do it now
907   int framesize = C->frame_slots() << LogBytesPerInt;
908   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
909   // Remove wordSize for return adr already pushed
910   // and another for the RBP we are going to save
911   framesize -= 2*wordSize;
912   bool need_nop = true;
914   // Calls to C2R adapters often do not accept exceptional returns.
915   // We require that their callers must bang for them.  But be
916   // careful, because some VM calls (such as call site linkage) can
917   // use several kilobytes of stack.  But the stack safety zone should
918   // account for that.  See bugs 4446381, 4468289, 4497237.
919   if (C->need_stack_bang(framesize)) {
920     MacroAssembler masm(&cbuf);
921     masm.generate_stack_overflow_check(framesize);
922     need_nop = false;
923   }
925   // We always push rbp so that on return to interpreter rbp will be
926   // restored correctly and we can correct the stack.
// 0x50 | reg = one-byte push r64 encoding.
927   emit_opcode(cbuf, 0x50 | RBP_enc);
929   if (VerifyStackAtCalls) {
930     // Majik cookie to verify stack depth
931     emit_opcode(cbuf, 0x68); // pushq (sign-extended) 0xbadb100d
932     emit_d32(cbuf, 0xbadb100d);
933     framesize -= wordSize; // Remove 2 for cookie
934     need_nop = false;
935   }
937   if (framesize) {
938     emit_opcode(cbuf, Assembler::REX_W);
939     if (framesize < 0x80) {
// Short form: sub with imm8.  A nop may follow so that the entry
// sequence stays long enough for patch_verified_entry (see WARNING).
940       emit_opcode(cbuf, 0x83);   // sub  SP,#framesize
941       emit_rm(cbuf, 0x3, 0x05, RSP_enc);
942       emit_d8(cbuf, framesize);
943       if (need_nop) {
944         emit_opcode(cbuf, 0x90); // nop
945       }
946     } else {
947       emit_opcode(cbuf, 0x81); // sub  SP,#framesize
948       emit_rm(cbuf, 0x3, 0x05, RSP_enc);
949       emit_d32(cbuf, framesize);
950     }
951   }
953   C->set_frame_complete(cbuf.code_end() - cbuf.code_begin());
955 #ifdef ASSERT
// Debug-only runtime check that rsp is properly aligned after the
// prologue; rax is saved/restored around the check.
956   if (VerifyStackAtCalls) {
957     Label L;
958     MacroAssembler masm(&cbuf);
959     masm.push(rax);
960     masm.mov(rax, rsp);
961     masm.andptr(rax, StackAlignmentInBytes-1);
962     masm.cmpptr(rax, StackAlignmentInBytes-wordSize);
963     masm.pop(rax);
964     masm.jcc(Assembler::equal, L);
965     masm.stop("Stack is not properly aligned!");
966     masm.bind(L);
967   }
968 #endif
969 }
// Prologue size varies with too many flags to count by hand; let the
// generic MachNode::size compute it from an actual emission.
971 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
972 {
973   return MachNode::size(ra_); // too many variables; just compute it
974                               // the hard way
975 }
// Upper bound on relocation entries this node can create (none).
977 int MachPrologNode::reloc() const
978 {
979   return 0; // a large enough number
980 }
982 //=============================================================================
983 #ifndef PRODUCT
// Debug-only textual listing of the epilogue; must mirror the byte
// sequence produced by MachEpilogNode::emit below.
984 void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
985 {
986   Compile* C = ra_->C;
987   int framesize = C->frame_slots() << LogBytesPerInt;
988   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
989   // Remove word for return adr already pushed
990   // and RBP
991   framesize -= 2*wordSize;
993   if (framesize) {
994     st->print_cr("addq\trsp, %d\t# Destroy frame", framesize);
995     st->print("\t");
996   }
998   st->print_cr("popq\trbp");
999   if (do_polling() && C->is_method_compilation()) {
1000     st->print_cr("\ttestl\trax, [rip + #offset_to_poll_page]\t"
1001                  "# Safepoint: poll for GC");
1002     st->print("\t");
1003   }
1004 }
1005 #endif
// Emit the method epilogue: tear down the frame (addq rsp), pop rbp,
// and, for method compilations, the safepoint return poll.
1007 void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
1008 {
1009   Compile* C = ra_->C;
1010   int framesize = C->frame_slots() << LogBytesPerInt;
1011   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1012   // Remove word for return adr already pushed
1013   // and RBP
1014   framesize -= 2*wordSize;
1016   // Note that VerifyStackAtCalls' Majik cookie does not change the frame size popped here
1018   if (framesize) {
1019     emit_opcode(cbuf, Assembler::REX_W);
1020     if (framesize < 0x80) {
1021       emit_opcode(cbuf, 0x83); // addq rsp, #framesize
1022       emit_rm(cbuf, 0x3, 0x00, RSP_enc);
1023       emit_d8(cbuf, framesize);
1024     } else {
1025       emit_opcode(cbuf, 0x81); // addq rsp, #framesize
1026       emit_rm(cbuf, 0x3, 0x00, RSP_enc);
1027       emit_d32(cbuf, framesize);
1028     }
1029   }
1031   // popq rbp
// 0x58 | reg = one-byte pop r64 encoding.
1032   emit_opcode(cbuf, 0x58 | RBP_enc);
1034   if (do_polling() && C->is_method_compilation()) {
1035     // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes
1036     // XXX reg_mem doesn't support RIP-relative addressing yet
1037     cbuf.set_inst_mark();
1038     cbuf.relocate(cbuf.inst_mark(), relocInfo::poll_return_type, 0); // XXX
1039     emit_opcode(cbuf, 0x85); // testl
1040     emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5
1041     // cbuf.inst_mark() is beginning of instruction
1042     emit_d32_reloc(cbuf, os::get_polling_page());
1043     //                    relocInfo::poll_return_type,
1044   }
1045 }
1047 uint MachEpilogNode::size(PhaseRegAlloc* ra_) const
1048 {
1049 Compile* C = ra_->C;
1050 int framesize = C->frame_slots() << LogBytesPerInt;
1051 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1052 // Remove word for return adr already pushed
1053 // and RBP
1054 framesize -= 2*wordSize;
1056 uint size = 0;
1058 if (do_polling() && C->is_method_compilation()) {
1059 size += 6;
1060 }
1062 // count popq rbp
1063 size++;
1065 if (framesize) {
1066 if (framesize < 0x80) {
1067 size += 4;
1068 } else if (framesize) {
1069 size += 7;
1070 }
1071 }
1073 return size;
1074 }
// Conservative upper bound on relocation entries added by the epilog;
// the safepoint return poll contributes the real ones.
int MachEpilogNode::reloc() const
{
  return 2; // a large enough number
}

// The epilog has no special scheduling needs; use the default
// pipeline description.
const Pipeline* MachEpilogNode::pipeline() const
{
  return MachNode::pipeline_class();
}

// Offset from the start of this node's code to its safepoint poll.
// The poll is found via relocation info, so no adjustment is needed.
int MachEpilogNode::safepoint_offset() const
{
  return 0;
}
1091 //=============================================================================
// Coarse register classes used to select a spill-copy encoding.
enum RC {
  rc_bad,   // not a valid register / location
  rc_int,   // general-purpose register
  rc_float, // XMM register
  rc_stack  // stack slot
};

// Classify an OptoReg into one of the RC classes above.
static enum RC rc_class(OptoReg::Name reg)
{
  if( !OptoReg::is_valid(reg)  ) return rc_bad;

  if (OptoReg::is_stack(reg)) return rc_stack;

  VMReg r = OptoReg::as_VMReg(reg);

  if (r->is_Register()) return rc_int;

  // Only GPRs, XMM registers and stack slots are allocatable here.
  assert(r->is_XMMRegister(), "must be");
  return rc_float;
}
// Workhorse shared by MachSpillCopyNode::format/emit/size.  Handles a
// copy between any pair of locations (stack slot, GPR, XMM) assigned to
// this node's input and output, in 32-bit or 64-bit width.  Exactly one
// of three modes is active per call:
//   cbuf != NULL             -> emit machine code into *cbuf
//   cbuf == NULL && !do_size -> print assembly to st (non-PRODUCT only)
//   cbuf == NULL && do_size  -> compute size only
// Returns the encoded length in bytes (0 for a self-copy).  Every
// return expression below hand-counts the bytes of the corresponding
// encoding (opcode + ModRM/SIB + displacement + optional REX), so it
// must be kept in exact sync with the emission code above it.
uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
                                       PhaseRegAlloc* ra_,
                                       bool do_size,
                                       outputStream* st) const
{
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this);
  OptoReg::Name dst_first = ra_->get_reg_first(this);

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first),
         "must move at least 1 register" );

  if (src_first == dst_first && src_second == dst_second) {
    // Self copy, no move
    return 0;
  } else if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem
      assert(src_second != dst_first, "overlap");
      // An even first register whose pair is the adjacent register
      // denotes a 64-bit value throughout this function.
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit: pushq/popq through the stack, no scratch register.
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          emit_opcode(*cbuf, 0xFF);
          encode_RegMem(*cbuf, RSI_enc, RSP_enc, 0x4, 0, src_offset, false);

          emit_opcode(*cbuf, 0x8F);
          encode_RegMem(*cbuf, RAX_enc, RSP_enc, 0x4, 0, dst_offset, false);

#ifndef PRODUCT
        } else if (!do_size) {
          st->print("pushq [rsp + #%d]\t# 64-bit mem-mem spill\n\t"
                    "popq [rsp + #%d]",
                    src_offset,
                    dst_offset);
#endif
        }
        return
          3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) +
          3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 1 : 4));
      } else {
        // 32-bit: save rax below rsp, bounce the value through it,
        // then restore rax (no 32-bit push/pop on x86_64).
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          // movq [rsp - 8], rax
          emit_opcode(*cbuf, Assembler::REX_W);
          emit_opcode(*cbuf, 0x89);
          emit_opcode(*cbuf, 0x44);
          emit_opcode(*cbuf, 0x24);
          emit_opcode(*cbuf, 0xF8);

          // movl rax, [rsp + src_offset]
          emit_opcode(*cbuf, 0x8B);
          encode_RegMem(*cbuf,
                        RAX_enc,
                        RSP_enc, 0x4, 0, src_offset,
                        false);

          // movl [rsp + dst_offset], rax
          emit_opcode(*cbuf, 0x89);
          encode_RegMem(*cbuf,
                        RAX_enc,
                        RSP_enc, 0x4, 0, dst_offset,
                        false);

          // movq rax, [rsp - 8]
          emit_opcode(*cbuf, Assembler::REX_W);
          emit_opcode(*cbuf, 0x8B);
          emit_opcode(*cbuf, 0x44);
          emit_opcode(*cbuf, 0x24);
          emit_opcode(*cbuf, 0xF8);

#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq [rsp - #8], rax\t# 32-bit mem-mem spill\n\t"
                    "movl rax, [rsp + #%d]\n\t"
                    "movl [rsp + #%d], rax\n\t"
                    "movq rax, [rsp - #8]",
                    src_offset,
                    dst_offset);
#endif
        }
        return
          5 + // movq
          3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) + // movl
          3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 1 : 4)) + // movl
          5; // movq
      }
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          // REX.WR is needed when the destination is r8-r15.
          if (Matcher::_regEncode[dst_first] < 8) {
            emit_opcode(*cbuf, Assembler::REX_W);
          } else {
            emit_opcode(*cbuf, Assembler::REX_WR);
          }
          emit_opcode(*cbuf, 0x8B);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq %s, [rsp + #%d]\t# spill",
                    Matcher::regName[dst_first],
                    offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + 4; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          if (Matcher::_regEncode[dst_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x8B);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movl %s, [rsp + #%d]\t# spill",
                    Matcher::regName[dst_first],
                    offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[dst_first] < 8)
           ? 3
           : 4); // REX
      }
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit: movsd (clears upper half) or movlpd per flag.
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          emit_opcode(*cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
          if (Matcher::_regEncode[dst_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("%s %s, [rsp + #%d]\t# spill",
                    UseXmmLoadAndClearUpper ? "movsd " : "movlpd",
                    Matcher::regName[dst_first],
                    offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[dst_first] < 8)
           ? 5
           : 6); // REX
      } else {
        // 32-bit: movss
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          emit_opcode(*cbuf, 0xF3);
          if (Matcher::_regEncode[dst_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x10);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movss %s, [rsp + #%d]\t# spill",
                    Matcher::regName[dst_first],
                    offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[dst_first] < 8)
           ? 5
           : 6); // REX
      }
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          if (Matcher::_regEncode[src_first] < 8) {
            emit_opcode(*cbuf, Assembler::REX_W);
          } else {
            emit_opcode(*cbuf, Assembler::REX_WR);
          }
          emit_opcode(*cbuf, 0x89);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq [rsp + #%d], %s\t# spill",
                    offset,
                    Matcher::regName[src_first]);
#endif
        }
        return ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + 4; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          if (Matcher::_regEncode[src_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x89);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movl [rsp + #%d], %s\t# spill",
                    offset,
                    Matcher::regName[src_first]);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[src_first] < 8)
           ? 3
           : 4); // REX
      }
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit: REX prefix selected by which operands are r8-r15.
        if (cbuf) {
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_W);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WB);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_WR);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WRB);
            }
          }
          emit_opcode(*cbuf, 0x8B);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq %s, %s\t# spill",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return 3; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x8B);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movl %s, %s\t# spill",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 2
          : 3; // REX
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm (movd/movq, opcode 66 [REX] 0F 6E)
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_W);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WB);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_WR);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WRB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x6E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdq %s, %s\t# spill",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return 5; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x6E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdl %s, %s\t# spill",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 4
          : 5; // REX
      }
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit: movsd store
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          emit_opcode(*cbuf, 0xF2);
          if (Matcher::_regEncode[src_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x11);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movsd [rsp + #%d], %s\t# spill",
                    offset,
                    Matcher::regName[src_first]);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[src_first] < 8)
           ? 5
           : 6); // REX
      } else {
        // 32-bit: movss store
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          emit_opcode(*cbuf, 0xF3);
          if (Matcher::_regEncode[src_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x11);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movss [rsp + #%d], %s\t# spill",
                    offset,
                    Matcher::regName[src_first]);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[src_first] < 8)
           ? 5
           : 6); // REX
      }
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr (movd/movq store form, 66 [REX] 0F 7E; note the
      // operands swap REX roles relative to the 6E load form)
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_W);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WR); // attention!
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_WB); // attention!
            } else {
              emit_opcode(*cbuf, Assembler::REX_WRB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x7E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdq %s, %s\t# spill",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return 5; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_R); // attention!
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_B); // attention!
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x7E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdl %s, %s\t# spill",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 4
          : 5; // REX
      }
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm (movapd/movsd or movaps/movss per flag)
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x66 : 0xF2);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("%s %s, %s\t# spill",
                    UseXmmRegToRegMoveAll ? "movapd" : "movsd ",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 4
          : 5; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          // movaps needs no F3 prefix; movss does.
          if (!UseXmmRegToRegMoveAll)
            emit_opcode(*cbuf, 0xF3);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("%s %s, %s\t# spill",
                    UseXmmRegToRegMoveAll ? "movaps" : "movss ",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? (UseXmmRegToRegMoveAll ? 3 : 4)
          : (UseXmmRegToRegMoveAll ? 4 : 5); // REX
      }
    }
  }

  // Unreachable: every valid src/dst class combination is handled above.
  assert(0," foo ");
  Unimplemented();

  return 0;
}
#ifndef PRODUCT
// Print-only mode: no CodeBuffer, implementation() formats to 'st'.
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const
{
  implementation(NULL, ra_, false, st);
}
#endif

// Emit mode: implementation() writes the copy instructions to 'cbuf'.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
{
  implementation(&cbuf, ra_, false, NULL);
}

// Size-only mode: implementation() returns the encoding length in bytes.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const
{
  return implementation(NULL, ra_, true, NULL);
}
1735 //=============================================================================
#ifndef PRODUCT
void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const
{
  st->print("nop \t# %d bytes pad for loops and calls", _count);
}
#endif

// Emit '_count' bytes of padding via the macro assembler's nop helper.
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const
{
  MacroAssembler _masm(&cbuf);
  __ nop(_count);
}

uint MachNopNode::size(PhaseRegAlloc*) const
{
  return _count; // one byte of padding per count
}
1755 //=============================================================================
#ifndef PRODUCT
// Print the box-lock address computation (leaq of the monitor's stack
// slot) for -XX:+PrintOptoAssembly output.
void BoxLockNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("leaq %s, [rsp + #%d]\t# box lock",
            Matcher::regName[reg], offset);
}
#endif
1766 void BoxLockNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
1767 {
1768 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1769 int reg = ra_->get_encode(this);
1770 if (offset >= 0x80) {
1771 emit_opcode(cbuf, reg < 8 ? Assembler::REX_W : Assembler::REX_WR);
1772 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1773 emit_rm(cbuf, 0x2, reg & 7, 0x04);
1774 emit_rm(cbuf, 0x0, 0x04, RSP_enc);
1775 emit_d32(cbuf, offset);
1776 } else {
1777 emit_opcode(cbuf, reg < 8 ? Assembler::REX_W : Assembler::REX_WR);
1778 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1779 emit_rm(cbuf, 0x1, reg & 7, 0x04);
1780 emit_rm(cbuf, 0x0, 0x04, RSP_enc);
1781 emit_d8(cbuf, offset);
1782 }
1783 }
1785 uint BoxLockNode::size(PhaseRegAlloc *ra_) const
1786 {
1787 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1788 return (offset < 0x80) ? 5 : 8; // REX
1789 }
1791 //=============================================================================
// emit call stub, compiled java to interpreter
// Emits a static-call stub into the stubs section.  The stub is patched
// later when the corresponding call site is switched between compiled
// and interpreted code.
void emit_java_to_interp(CodeBuffer& cbuf)
{
  // Stub is fixed up when the corresponding call is converted from
  // calling compiled code to calling interpreted code.
  // movq rbx, 0
  // jmp -5 # to self

  address mark = cbuf.inst_mark(); // get mark within main instrs section

  // Note that the code buffer's inst_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL) return; // CodeBuffer::expand failed
  // static stub relocation stores the instruction address of the call
  __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM64);
  // static stub relocation also tags the methodOop in the code-stream.
  __ movoop(rbx, (jobject) NULL); // method is zapped till fixup time
  // This is recognized as unresolved by relocs/nativeinst/ic code
  __ jump(RuntimeAddress(__ pc()));

  // Update current stubs pointer and restore code_end.
  __ end_a_stub();
}
// size of call stub, compiled java to interpreter;
// must match the sequence emitted by emit_java_to_interp() above
uint size_java_to_interp()
{
  return 15; // movq (1+1+8); jmp (1+4)
}

// relocation entries for call stub, compiled java to interpreter
uint reloc_java_to_interp()
{
  return 4; // 3 in emit_java_to_interp + 1 in Java_Static_Call
}
1833 //=============================================================================
#ifndef PRODUCT
// Print the unverified entry point: the inline-cache check that
// compares the receiver's klass against the cached klass in rax.
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  if (UseCompressedOops) {
    st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t", oopDesc::klass_offset_in_bytes());
    if (Universe::narrow_oop_shift() != 0) {
      // Decode the compressed klass against the heap base.
      // NOTE(review): the 'r' in this debug string looks like it was
      // meant to be 'rscratch1' — confirm against load_klass().
      st->print_cr("leaq rscratch1, [r12_heapbase, r, Address::times_8, 0]");
    }
    st->print_cr("cmpq rax, rscratch1\t # Inline cache check");
  } else {
    st->print_cr("cmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t"
"# Inline cache check", oopDesc::klass_offset_in_bytes());
  }
  st->print_cr("\tjne SharedRuntime::_ic_miss_stub");
  st->print_cr("\tnop");
  if (!OptoBreakpoint) {
    st->print_cr("\tnop");
  }
}
#endif

// Emit the unverified entry point: load/compare the receiver's klass
// with the expected klass in rax and jump to the IC-miss stub on
// mismatch, then pad so the verified entry point is patchable.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  MacroAssembler masm(&cbuf);
#ifdef ASSERT
  uint code_size = cbuf.code_size();
#endif
  if (UseCompressedOops) {
    // load_klass handles the compressed-klass decode internally.
    masm.load_klass(rscratch1, j_rarg0);
    masm.cmpptr(rax, rscratch1);
  } else {
    masm.cmpptr(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
  }

  masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  /* WARNING these NOPs are critical so that verified entry point is properly
     aligned for patching by NativeJump::patch_verified_entry() */
  int nops_cnt = 1;
  if (!OptoBreakpoint) {
    // Leave space for int3
    nops_cnt += 1;
  }
  if (UseCompressedOops) {
    // ??? divisible by 4 is aligned?
    nops_cnt += 1;
  }
  masm.nop(nops_cnt);

  assert(cbuf.code_size() - code_size == size(ra_),
         "checking code size of inline cache node");
}

// Emitted size of the UEP; must match emit() exactly (checked by the
// assert there).  The compressed-oops variants are larger because of
// the klass load/decode, and OptoBreakpoint drops one padding nop.
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  if (UseCompressedOops) {
    if (Universe::narrow_oop_shift() == 0) {
      // Unscaled (zero-based, heap below 4gb): no decode needed.
      return OptoBreakpoint ? 15 : 16;
    } else {
      return OptoBreakpoint ? 19 : 20;
    }
  } else {
    return OptoBreakpoint ? 11 : 12;
  }
}
1901 //=============================================================================
uint size_exception_handler()
{
  // NativeCall instruction size is the same as NativeJump.
  // Note that this value is also credited (in output.cpp) to
  // the size of the code section.
  return NativeJump::instruction_size;
}

// Emit exception handler code: a single jump to the shared exception
// blob.  Returns the handler's offset within the code buffer.
int emit_exception_handler(CodeBuffer& cbuf)
{

  // Note that the code buffer's inst_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL) return 0; // CodeBuffer::expand failed
  int offset = __ offset();
  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->instructions_begin()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}

uint size_deopt_handler()
{
  // three 5 byte instructions
  return 15;
}

// Emit deopt handler code: push the address of this handler ("the_pc")
// without clobbering any register, then jump to the deopt blob.
// Returns the handler's offset within the code buffer.
int emit_deopt_handler(CodeBuffer& cbuf)
{

  // Note that the code buffer's inst_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());
  if (base == NULL) return 0; // CodeBuffer::expand failed
  int offset = __ offset();
  address the_pc = (address) __ pc();
  Label next;
  // push a "the_pc" on the stack without destroying any registers
  // as they all may be live.

  // push address of "next"
  __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
  __ bind(next);
  // adjust it so it matches "the_pc"
  __ subptr(Address(rsp, 0), __ offset() - offset);
  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
// Place 'x' in the constant area and emit a RIP-relative disp32
// referring to it, with an internal-word relocation so the reference
// survives code movement.
static void emit_double_constant(CodeBuffer& cbuf, double x) {
  int mark = cbuf.insts()->mark_off();
  MacroAssembler _masm(&cbuf);
  address double_address = __ double_constant(x);
  cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift
  emit_d32_reloc(cbuf,
                 (int) (double_address - cbuf.code_end() - 4),
                 internal_word_Relocation::spec(double_address),
                 RELOC_DISP32);
}

// Float variant of emit_double_constant() above.
static void emit_float_constant(CodeBuffer& cbuf, float x) {
  int mark = cbuf.insts()->mark_off();
  MacroAssembler _masm(&cbuf);
  address float_address = __ float_constant(x);
  cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift
  emit_d32_reloc(cbuf,
                 (int) (float_address - cbuf.code_end() - 4),
                 internal_word_Relocation::spec(float_address),
                 RELOC_DISP32);
}
int Matcher::regnum_to_fpu_offset(int regnum)
{
  return regnum - 32; // The FP registers are in the second chunk
}

// Originally an UltraSparc knob; true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}

// Vector width in bytes
const uint Matcher::vector_width_in_bytes(void) {
  return 8;
}

// Vector ideal reg
const uint Matcher::vector_ideal_reg(void) {
  return Op_RegD;
}

// Is this branch offset short enough that a short branch can be used?
//
// NOTE: If the platform does not provide any short branch variants, then
// this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int rule, int offset) {
  // the short version of jmpConUCF2 contains multiple branches,
  // making the reach slightly less
  if (rule == jmpConUCF2_rule)
    return (-126 <= offset && offset <= 125);
  return (-128 <= offset && offset <= 127);
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  //return value == (int) value; // Cf. storeImmL and immL32.

  // Probably always true, even if a temp register is required.
  return true;
}

// The ecx parameter to rep stosq for the ClearArray node is in words.
const bool Matcher::init_array_count_is_in_bytes = false;

// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers? True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = true;

// Is it better to copy float constants, or load them directly from
// memory? Intel can load a float constant from a direct address,
// requiring no extra registers. Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = true; // XXX

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed. Else we split the double into 2 integer pieces
// and move it piece-by-piece. Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// No-op on amd64
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {}

// Advertise here if the CPU requires explicit rounding operations to
// implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = true;

// Do floats take an entire double register or just half?
const bool Matcher::float_in_double = true;
// Do ints take an entire long register or just half?
const bool Matcher::int_in_long = true;
2059 // Return whether or not this register is ever used as an argument.
2060 // This function is used on startup to build the trampoline stubs in
2061 // generateOptoStub. Registers not mentioned will be killed by the VM
2062 // call in the trampoline, and arguments in those registers not be
2063 // available to the callee.
2064 bool Matcher::can_be_java_arg(int reg)
2065 {
2066 return
2067 reg == RDI_num || reg == RDI_H_num ||
2068 reg == RSI_num || reg == RSI_H_num ||
2069 reg == RDX_num || reg == RDX_H_num ||
2070 reg == RCX_num || reg == RCX_H_num ||
2071 reg == R8_num || reg == R8_H_num ||
2072 reg == R9_num || reg == R9_H_num ||
2073 reg == R12_num || reg == R12_H_num ||
2074 reg == XMM0_num || reg == XMM0_H_num ||
2075 reg == XMM1_num || reg == XMM1_H_num ||
2076 reg == XMM2_num || reg == XMM2_H_num ||
2077 reg == XMM3_num || reg == XMM3_H_num ||
2078 reg == XMM4_num || reg == XMM4_H_num ||
2079 reg == XMM5_num || reg == XMM5_H_num ||
2080 reg == XMM6_num || reg == XMM6_H_num ||
2081 reg == XMM7_num || reg == XMM7_H_num;
2082 }
// An argument register is spillable exactly when it can carry a Java
// argument; see can_be_java_arg above.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}

// Register for DIVI projection of divmodI
RegMask Matcher::divI_proj_mask() {
  return INT_RAX_REG_mask;
}

// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
  return INT_RDX_REG_mask;
}

// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
  return LONG_RAX_REG_mask;
}

// Register for MODL projection of divmodL
RegMask Matcher::modL_proj_mask() {
  return LONG_RDX_REG_mask;
}
2109 static Address build_address(int b, int i, int s, int d) {
2110 Register index = as_Register(i);
2111 Address::ScaleFactor scale = (Address::ScaleFactor)s;
2112 if (index == rsp) {
2113 index = noreg;
2114 scale = Address::no_scale;
2115 }
2116 Address addr(as_Register(b), index, scale, d);
2117 return addr;
2118 }
2120 %}
2122 //----------ENCODING BLOCK-----------------------------------------------------
2123 // This block specifies the encoding classes used by the compiler to
2124 // output byte streams. Encoding classes are parameterized macros
2125 // used by Machine Instruction Nodes in order to generate the bit
2126 // encoding of the instruction. Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
2130 // which returns its register number when queried. CONST_INTER causes
2131 // an operand to generate a function which returns the value of the
2132 // constant when queried. MEMORY_INTER causes an operand to generate
2133 // four functions which return the Base Register, the Index Register,
2134 // the Scale Value, and the Offset Value of the operand when queried.
2135 // COND_INTER causes an operand to generate six functions which return
2136 // the encoding code (ie - encoding bits for the instruction)
2137 // associated with each basic boolean condition for a conditional
2138 // instruction.
2139 //
2140 // Instructions specify two basic values for encoding. Again, a
2141 // function is available to check if the constant displacement is an
2142 // oop. They use the ins_encode keyword to specify their encoding
2143 // classes (which must be a sequence of enc_class names, and their
2144 // parameters, specified in the encoding block), and they use the
2145 // opcode keyword to specify, in order, their primary, secondary, and
2146 // tertiary opcode. Only the opcode sections which a particular
2147 // instruction needs for encoding need to be specified.
2148 encode %{
2149 // Build emit functions for each basic byte or larger field in the
2150 // intel encoding scheme (opcode, rm, sib, immediate), and call them
2151 // from C++ code in the enc_class source block. Emit functions will
2152 // live in the main source block for now. In future, we can
2153 // generalize this by adding a syntax that specifies the sizes of
2154 // fields in an order, so that the adlc can build the emit functions
2155 // automagically
  // Emit primary opcode
  enc_class OpcP
  %{
    emit_opcode(cbuf, $primary);
  %}

  // Emit secondary opcode
  enc_class OpcS
  %{
    emit_opcode(cbuf, $secondary);
  %}

  // Emit tertiary opcode
  enc_class OpcT
  %{
    emit_opcode(cbuf, $tertiary);
  %}

  // Emit opcode directly
  enc_class Opcode(immI d8)
  %{
    emit_opcode(cbuf, $d8$$constant);
  %}

  // Emit size prefix (0x66: operand-size override)
  enc_class SizePrefix
  %{
    emit_opcode(cbuf, 0x66);
  %}

  // ModRM byte: mod=11, reg field 0, r/m = low 3 bits of register
  // (any REX prefix is emitted by a separate enc_class).
  enc_class reg(rRegI reg)
  %{
    emit_rm(cbuf, 0x3, 0, $reg$$reg & 7);
  %}

  // ModRM byte for a reg,reg form: mod=11.
  enc_class reg_reg(rRegI dst, rRegI src)
  %{
    emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
  %}

  // Explicit opcode byte followed by a reg,reg ModRM byte.
  enc_class opc_reg_reg(immI opcode, rRegI dst, rRegI src)
  %{
    emit_opcode(cbuf, $opcode$$constant);
    emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
  %}
  // After an FP compare: if PF is set (unordered operands), rewrite
  // selected RFLAGS bits (mask 0xffffff2b) via pushfq/and/popfq;
  // otherwise the jnp skips straight to the trailing nop.
  enc_class cmpfp_fixup()
  %{
    // jnp,s exit
    emit_opcode(cbuf, 0x7B);
    emit_d8(cbuf, 0x0A);

    // pushfq
    emit_opcode(cbuf, 0x9C);

    // andq $0xffffff2b, (%rsp)
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0x81);
    emit_opcode(cbuf, 0x24);
    emit_opcode(cbuf, 0x24);
    emit_d32(cbuf, 0xffffff2b);

    // popfq
    emit_opcode(cbuf, 0x9D);

    // nop (target for branch to avoid branch to branch)
    emit_opcode(cbuf, 0x90);
  %}

  // Materialize a three-way FP compare result in $dst:
  // -1 if unordered (PF) or below (CF), else 0 if equal, 1 if not equal.
  // Short-branch displacements vary because setne/movzbl need a REX
  // prefix when dstenc >= 4 (byte-register access).
  enc_class cmpfp3(rRegI dst)
  %{
    int dstenc = $dst$$reg;

    // movl $dst, -1
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
    }
    emit_opcode(cbuf, 0xB8 | (dstenc & 7));
    emit_d32(cbuf, -1);

    // jp,s done
    emit_opcode(cbuf, 0x7A);
    emit_d8(cbuf, dstenc < 4 ? 0x08 : 0x0A);

    // jb,s done
    emit_opcode(cbuf, 0x72);
    emit_d8(cbuf, dstenc < 4 ? 0x06 : 0x08);

    // setne $dst
    if (dstenc >= 4) {
      emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_B);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0x95);
    emit_opcode(cbuf, 0xC0 | (dstenc & 7));

    // movzbl $dst, $dst
    if (dstenc >= 4) {
      emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_RB);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0xB6);
    emit_rm(cbuf, 0x3, dstenc & 7, dstenc & 7);
  %}
  enc_class cdql_enc(no_rax_rdx_RegI div)
  %{
    // Full implementation of Java idiv and irem; checks for
    // special case as described in JVM spec., p.243 & p.271.
    //
    //         normal case                           special case
    //
    // input : rax: dividend                         min_int
    //         reg: divisor                          -1
    //
    // output: rax: quotient  (= rax idiv reg)       min_int
    //         rdx: remainder (= rax irem reg)       0
    //
    // Code sequence:
    //
    // 0:   3d 00 00 00 80          cmp    $0x80000000,%eax
    // 5:   75 07/08                jne    e <normal>
    // 7:   33 d2                   xor    %edx,%edx
    // [div >= 8 -> offset + 1]
    // [REX_B]
    // 9:   83 f9 ff                cmp    $0xffffffffffffffff,$div
    // c:   74 03/04                je     11 <done>
    // 000000000000000e <normal>:
    // e:   99                      cltd
    // [div >= 8 -> offset + 1]
    // [REX_B]
    // f:   f7 f9                   idiv   $div
    // 0000000000000011 <done>:

    // cmp    $0x80000000,%eax
    emit_opcode(cbuf, 0x3d);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x80);

    // jne    e <normal>
    emit_opcode(cbuf, 0x75);
    // Branch target moves by one byte when $div needs a REX prefix.
    emit_d8(cbuf, $div$$reg < 8 ? 0x07 : 0x08);

    // xor    %edx,%edx
    emit_opcode(cbuf, 0x33);
    emit_d8(cbuf, 0xD2);

    // cmp    $0xffffffffffffffff,$div
    if ($div$$reg >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
    }
    emit_opcode(cbuf, 0x83);
    emit_rm(cbuf, 0x3, 0x7, $div$$reg & 7);
    emit_d8(cbuf, 0xFF);

    // je     11 <done>
    emit_opcode(cbuf, 0x74);
    emit_d8(cbuf, $div$$reg < 8 ? 0x03 : 0x04);

    // <normal>
    // cltd
    emit_opcode(cbuf, 0x99);

    // idivl (note: must be emitted by the user of this rule)
    // <done>
  %}

  enc_class cdqq_enc(no_rax_rdx_RegL div)
  %{
    // Full implementation of Java ldiv and lrem; checks for
    // special case as described in JVM spec., p.243 & p.271.
    //
    //         normal case                           special case
    //
    // input : rax: dividend                         min_long
    //         reg: divisor                          -1
    //
    // output: rax: quotient  (= rax idiv reg)       min_long
    //         rdx: remainder (= rax irem reg)       0
    //
    // Code sequence:
    //
    // 0:   48 ba 00 00 00 00 00    mov    $0x8000000000000000,%rdx
    // 7:   00 00 80
    // a:   48 39 d0                cmp    %rdx,%rax
    // d:   75 08                   jne    17 <normal>
    // f:   33 d2                   xor    %edx,%edx
    // 11:  48 83 f9 ff             cmp    $0xffffffffffffffff,$div
    // 15:  74 05                   je     1c <done>
    // 0000000000000017 <normal>:
    // 17:  48 99                   cqto
    // 19:  48 f7 f9                idiv   $div
    // 000000000000001c <done>:

    // mov    $0x8000000000000000,%rdx
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0xBA);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x00);
    emit_d8(cbuf, 0x80);

    // cmp    %rdx,%rax
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0x39);
    emit_d8(cbuf, 0xD0);

    // jne    17 <normal>
    emit_opcode(cbuf, 0x75);
    emit_d8(cbuf, 0x08);

    // xor    %edx,%edx
    emit_opcode(cbuf, 0x33);
    emit_d8(cbuf, 0xD2);

    // cmp    $0xffffffffffffffff,$div
    emit_opcode(cbuf, $div$$reg < 8 ? Assembler::REX_W : Assembler::REX_WB);
    emit_opcode(cbuf, 0x83);
    emit_rm(cbuf, 0x3, 0x7, $div$$reg & 7);
    emit_d8(cbuf, 0xFF);

    // je     1c <done>
    emit_opcode(cbuf, 0x74);
    emit_d8(cbuf, 0x05);

    // <normal>
    // cqto
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0x99);

    // idivq (note: must be emitted by the user of this rule)
    // <done>
  %}
  // Opcode enc_class for 8/32 bit immediate instructions with sign-extension
  enc_class OpcSE(immI imm)
  %{
    // Emit primary opcode and set sign-extend bit
    // Check for 8-bit immediate, and set sign extend bit in opcode
    if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
      emit_opcode(cbuf, $primary | 0x02);
    } else {
      // 32-bit immediate
      emit_opcode(cbuf, $primary);
    }
  %}

  // As OpcSE, plus the ModRM byte (secondary opcode in the reg field,
  // 32-bit destination register in r/m, with REX_B when dst >= 8).
  enc_class OpcSErm(rRegI dst, immI imm)
  %{
    // OpcSEr/m
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    // Emit primary opcode and set sign-extend bit
    // Check for 8-bit immediate, and set sign extend bit in opcode
    if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
      emit_opcode(cbuf, $primary | 0x02);
    } else {
      // 32-bit immediate
      emit_opcode(cbuf, $primary);
    }
    // Emit r/m byte with secondary opcode, after primary opcode.
    emit_rm(cbuf, 0x3, $secondary, dstenc);
  %}

  // 64-bit variant of OpcSErm: always emits a REX.W prefix.
  enc_class OpcSErm_wide(rRegL dst, immI imm)
  %{
    // OpcSEr/m
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    // Emit primary opcode and set sign-extend bit
    // Check for 8-bit immediate, and set sign extend bit in opcode
    if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
      emit_opcode(cbuf, $primary | 0x02);
    } else {
      // 32-bit immediate
      emit_opcode(cbuf, $primary);
    }
    // Emit r/m byte with secondary opcode, after primary opcode.
    emit_rm(cbuf, 0x3, $secondary, dstenc);
  %}

  // Emit the immediate itself: 1 byte if it fits in a signed byte
  // (matching the sign-extend bit set by OpcSE*), else 4 bytes.
  enc_class Con8or32(immI imm)
  %{
    // Check for 8-bit immediate, and set sign extend bit in opcode
    if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
      $$$emit8$imm$$constant;
    } else {
      // 32-bit immediate
      $$$emit32$imm$$constant;
    }
  %}
  // 32-bit pc-relative displacement to a label (0 if unbound yet;
  // relative to the end of the 4-byte displacement field).
  enc_class Lbl(label labl)
  %{
    // JMP, CALL
    Label* l = $labl$$label;
    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0);
  %}

  // 8-bit pc-relative displacement to a label; asserts it fits.
  enc_class LblShort(label labl)
  %{
    // JMP, CALL
    Label* l = $labl$$label;
    int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
    emit_d8(cbuf, disp);
  %}

  // Secondary opcode with the register folded into its low 3 bits.
  enc_class opc2_reg(rRegI dst)
  %{
    // BSWAP
    emit_cc(cbuf, $secondary, $dst$$reg);
  %}

  // Tertiary opcode with the register folded into its low 3 bits.
  enc_class opc3_reg(rRegI dst)
  %{
    // BSWAP
    emit_cc(cbuf, $tertiary, $dst$$reg);
  %}

  // ModRM byte: secondary opcode in the reg field.
  enc_class reg_opc(rRegI div)
  %{
    // INC, DEC, IDIV, IMOD, JMP indirect, ...
    emit_rm(cbuf, 0x3, $secondary, $div$$reg & 7);
  %}

  // Long-form conditional jump: primary opcode, condition code folded
  // into the secondary opcode, then 32-bit displacement.
  enc_class Jcc(cmpOp cop, label labl)
  %{
    // JCC
    Label* l = $labl$$label;
    $$$emit8$primary;
    emit_cc(cbuf, $secondary, $cop$$cmpcode);
    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0);
  %}

  // Short-form conditional jump with an 8-bit displacement.
  enc_class JccShort (cmpOp cop, label labl)
  %{
    // JCC
    Label *l = $labl$$label;
    emit_cc(cbuf, $primary, $cop$$cmpcode);
    int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
    emit_d8(cbuf, disp);
  %}

  // CMOVcc opcode bytes (ModRM emitted separately).
  enc_class enc_cmov(cmpOp cop)
  %{
    // CMOV
    $$$emit8$primary;
    emit_cc(cbuf, $secondary, $cop$$cmpcode);
  %}
  // Pseudo-cmov for a float register: branch on the inverted condition
  // around a movaps/movss copy.  The branch displacement depends on
  // whether the copy needs a REX prefix and/or an 0xF3 prefix.
  enc_class enc_cmovf_branch(cmpOp cop, regF dst, regF src)
  %{
    // Invert sense of branch from sense of cmov
    emit_cc(cbuf, 0x70, $cop$$cmpcode ^ 1);
    emit_d8(cbuf, ($dst$$reg < 8 && $src$$reg < 8)
                  ? (UseXmmRegToRegMoveAll ? 3 : 4)
                  : (UseXmmRegToRegMoveAll ? 4 : 5) ); // REX
    // UseXmmRegToRegMoveAll ? movaps(dst, src) : movss(dst, src)
    if (!UseXmmRegToRegMoveAll) emit_opcode(cbuf, 0xF3);
    if ($dst$$reg < 8) {
      if ($src$$reg >= 8) {
        emit_opcode(cbuf, Assembler::REX_B);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
      }
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
    emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
  %}

  // Pseudo-cmov for a double register: branch on the inverted
  // condition around a movapd/movsd copy (always has a 0x66/0xF2
  // prefix, hence the fixed 4/5 displacement).
  enc_class enc_cmovd_branch(cmpOp cop, regD dst, regD src)
  %{
    // Invert sense of branch from sense of cmov
    emit_cc(cbuf, 0x70, $cop$$cmpcode ^ 1);
    emit_d8(cbuf, $dst$$reg < 8 && $src$$reg < 8 ? 4 : 5); // REX

    // UseXmmRegToRegMoveAll ? movapd(dst, src) : movsd(dst, src)
    emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x66 : 0xF2);
    if ($dst$$reg < 8) {
      if ($src$$reg >= 8) {
        emit_opcode(cbuf, Assembler::REX_B);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
      }
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
    emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
  %}
  // Scan the sub-class's secondary-supers array for the super class;
  // on a hit the super is cached in the secondary_super_cache.  With
  // compressed oops the super oop is encoded before the narrow-oop
  // scan (repne_scanl).  $primary selects whether rdi is zeroed on a
  // hit (rdi holds the result: 0 on hit when $primary is set).
  enc_class enc_PartialSubtypeCheck()
  %{
    Register Rrdi = as_Register(RDI_enc); // result register
    Register Rrax = as_Register(RAX_enc); // super class
    Register Rrcx = as_Register(RCX_enc); // killed
    Register Rrsi = as_Register(RSI_enc); // sub class
    Label hit, miss, cmiss;

    MacroAssembler _masm(&cbuf);
    // Compare super with sub directly, since super is not in its own SSA.
    // The compiler used to emit this test, but we fold it in here,
    // to allow platform-specific tweaking on sparc.
    __ cmpptr(Rrax, Rrsi);
    __ jcc(Assembler::equal, hit);
#ifndef PRODUCT
    __ lea(Rrcx, ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr));
    __ incrementl(Address(Rrcx, 0));
#endif //PRODUCT
    __ movptr(Rrdi, Address(Rrsi,
                            sizeof(oopDesc) +
                            Klass::secondary_supers_offset_in_bytes()));
    __ movl(Rrcx, Address(Rrdi, arrayOopDesc::length_offset_in_bytes()));
    __ addptr(Rrdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
    if (UseCompressedOops) {
      __ push(Rrax);
      __ encode_heap_oop(Rrax);
      __ repne_scanl();
      __ pop(Rrax);
      __ jccb(Assembler::notEqual, miss);
      __ movptr(Address(Rrsi,
                        sizeof(oopDesc) +
                        Klass::secondary_super_cache_offset_in_bytes()),
                Rrax);
      __ jmp(hit);
    } else {
      __ repne_scan();
      __ jccb(Assembler::notEqual, miss);
      __ movptr(Address(Rrsi,
                        sizeof(oopDesc) +
                        Klass::secondary_super_cache_offset_in_bytes()),
                Rrax);
    }
    __ bind(hit);
    if ($primary) {
      __ xorptr(Rrdi, Rrdi);
    }
    __ bind(miss);
  %}
  enc_class Java_To_Interpreter(method meth)
  %{
    // CALL Java_To_Interpreter
    // This is the instruction starting address for relocation info.
    cbuf.set_inst_mark();
    $$$emit8$primary;
    // CALL directly to the runtime
    emit_d32_reloc(cbuf,
                   (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
                   runtime_call_Relocation::spec(),
                   RELOC_DISP32);
  %}

  enc_class Java_Static_Call(method meth)
  %{
    // JAVA STATIC CALL
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to
    // determine who we intended to call.
    cbuf.set_inst_mark();
    $$$emit8$primary;

    // Relocation type depends on what kind of call this resolves to:
    // runtime stub, optimized virtual, or plain static call.
    if (!_method) {
      emit_d32_reloc(cbuf,
                     (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
                     runtime_call_Relocation::spec(),
                     RELOC_DISP32);
    } else if (_optimized_virtual) {
      emit_d32_reloc(cbuf,
                     (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
                     opt_virtual_call_Relocation::spec(),
                     RELOC_DISP32);
    } else {
      emit_d32_reloc(cbuf,
                     (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
                     static_call_Relocation::spec(),
                     RELOC_DISP32);
    }
    if (_method) {
      // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}

  enc_class Java_Dynamic_Call(method meth)
  %{
    // JAVA DYNAMIC CALL
    // !!!!!
    // Generate  "movq rax, -1", placeholder instruction to load oop-info
    // emit_call_dynamic_prologue( cbuf );
    cbuf.set_inst_mark();

    // movq rax, -1
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0xB8 | RAX_enc);
    emit_d64_reloc(cbuf,
                   (int64_t) Universe::non_oop_word(),
                   oop_Relocation::spec_for_immediate(), RELOC_IMM64);
    address virtual_call_oop_addr = cbuf.inst_mark();
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    cbuf.set_inst_mark();
    $$$emit8$primary;
    emit_d32_reloc(cbuf,
                   (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
                   virtual_call_Relocation::spec(virtual_call_oop_addr),
                   RELOC_DISP32);
  %}

  enc_class Java_Compiled_Call(method meth)
  %{
    // JAVA COMPILED CALL
    int disp = in_bytes(methodOopDesc:: from_compiled_offset());

    // XXX XXX offset is 128 is 1.5 NON-PRODUCT !!!
    // assert(-0x80 <= disp && disp < 0x80, "compiled_code_offset isn't small");

    // callq *disp(%rax)
    cbuf.set_inst_mark();
    $$$emit8$primary;
    // Use the 1-byte displacement form when it fits.
    if (disp < 0x80) {
      emit_rm(cbuf, 0x01, $secondary, RAX_enc); // R/M byte
      emit_d8(cbuf, disp); // Displacement
    } else {
      emit_rm(cbuf, 0x02, $secondary, RAX_enc); // R/M byte
      emit_d32(cbuf, disp); // Displacement
    }
  %}
  // Shift of a 32-bit register by an 8-bit immediate count.
  enc_class reg_opc_imm(rRegI dst, immI8 shift)
  %{
    // SAL, SAR, SHR
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    $$$emit8$primary;
    emit_rm(cbuf, 0x3, $secondary, dstenc);
    $$$emit8$shift$$constant;
  %}

  // Shift of a 64-bit register by an 8-bit immediate count (REX.W).
  enc_class reg_opc_imm_wide(rRegL dst, immI8 shift)
  %{
    // SAL, SAR, SHR
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    $$$emit8$primary;
    emit_rm(cbuf, 0x3, $secondary, dstenc);
    $$$emit8$shift$$constant;
  %}

  // movl $dst, imm32 (0xB8+reg form).
  enc_class load_immI(rRegI dst, immI src)
  %{
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    $$$emit32$src$$constant;
  %}

  // movq $dst, imm64 (REX.W + 0xB8+reg form).
  enc_class load_immL(rRegL dst, immL src)
  %{
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    emit_d64(cbuf, $src$$constant);
  %}

  enc_class load_immUL32(rRegL dst, immUL32 src)
  %{
    // same as load_immI, but this time we care about zeroes in the high word
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    $$$emit32$src$$constant;
  %}

  // movq $dst, imm32: sign-extended 32-bit immediate (0xC7 /0 form).
  enc_class load_immL32(rRegL dst, immL32 src)
  %{
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xC7);
    emit_rm(cbuf, 0x03, 0x00, dstenc);
    $$$emit32$src$$constant;
  %}
  enc_class load_immP31(rRegP dst, immP32 src)
  %{
    // same as load_immI, but this time we care about zeroes in the high word
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    $$$emit32$src$$constant;
  %}

  // movq $dst, imm64 pointer; emits oop relocation info when the
  // constant is an oop.
  enc_class load_immP(rRegP dst, immP src)
  %{
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    emit_opcode(cbuf, 0xB8 | dstenc);
    // This next line should be generated from ADLC
    if ($src->constant_is_oop()) {
      emit_d64_reloc(cbuf, $src$$constant, relocInfo::oop_type, RELOC_IMM64);
    } else {
      emit_d64(cbuf, $src$$constant);
    }
  %}

  // ModRM + RIP-relative reference to a float constant in the
  // constant area (opcode bytes emitted by other enc_classes).
  enc_class load_immF(regF dst, immF con)
  %{
    // XXX reg_mem doesn't support RIP-relative addressing yet
    emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
    emit_float_constant(cbuf, $con$$constant);
  %}

  // As load_immF, but for a double constant.
  enc_class load_immD(regD dst, immD con)
  %{
    // XXX reg_mem doesn't support RIP-relative addressing yet
    emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
    emit_double_constant(cbuf, $con$$constant);
  %}

  enc_class load_conF (regF dst, immF con) %{    // Load float constant
    emit_opcode(cbuf, 0xF3);
    if ($dst$$reg >= 8) {
      emit_opcode(cbuf, Assembler::REX_R);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0x10);
    emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
    emit_float_constant(cbuf, $con$$constant);
  %}

  enc_class load_conD (regD dst, immD con) %{    // Load double constant
    // UseXmmLoadAndClearUpper ? movsd(dst, con) : movlpd(dst, con)
    emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
    if ($dst$$reg >= 8) {
      emit_opcode(cbuf, Assembler::REX_R);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12);
    emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
    emit_double_constant(cbuf, $con$$constant);
  %}
  // Encode a reg-reg copy.  If it is useless, then empty encoding.
  enc_class enc_copy(rRegI dst, rRegI src)
  %{
    encode_copy(cbuf, $dst$$reg, $src$$reg);
  %}

  // Encode xmm reg-reg copy.  If it is useless, then empty encoding.
  enc_class enc_CopyXD( RegD dst, RegD src ) %{
    encode_CopyXD( cbuf, $dst$$reg, $src$$reg );
  %}

  // 32-bit reg-reg move (0x8B), emitted even when dst == src.
  enc_class enc_copy_always(rRegI dst, rRegI src)
  %{
    int srcenc = $src$$reg;
    int dstenc = $dst$$reg;

    // REX prefix covering high source and/or destination registers.
    if (dstenc < 8) {
      if (srcenc >= 8) {
        emit_opcode(cbuf, Assembler::REX_B);
        srcenc -= 8;
      }
    } else {
      if (srcenc < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
        srcenc -= 8;
      }
      dstenc -= 8;
    }

    emit_opcode(cbuf, 0x8B);
    emit_rm(cbuf, 0x3, dstenc, srcenc);
  %}

  // 64-bit reg-reg move (REX.W + 0x8B); elided when dst == src.
  enc_class enc_copy_wide(rRegL dst, rRegL src)
  %{
    int srcenc = $src$$reg;
    int dstenc = $dst$$reg;

    if (dstenc != srcenc) {
      if (dstenc < 8) {
        if (srcenc < 8) {
          emit_opcode(cbuf, Assembler::REX_W);
        } else {
          emit_opcode(cbuf, Assembler::REX_WB);
          srcenc -= 8;
        }
      } else {
        if (srcenc < 8) {
          emit_opcode(cbuf, Assembler::REX_WR);
        } else {
          emit_opcode(cbuf, Assembler::REX_WRB);
          srcenc -= 8;
        }
        dstenc -= 8;
      }
      emit_opcode(cbuf, 0x8B);
      emit_rm(cbuf, 0x3, dstenc, srcenc);
    }
  %}
  // Emit a raw 32-bit immediate.
  enc_class Con32(immI src)
  %{
    // Output immediate
    $$$emit32$src$$constant;
  %}
2922 enc_class Con64(immL src)
2923 %{
2924 // Output immediate
2925 emit_d64($src$$constant);
2926 %}
  // Emit a float constant as its raw 32-bit IEEE bit pattern.
  enc_class Con32F_as_bits(immF src)
  %{
    // Output Float immediate bits
    jfloat jf = $src$$constant;
    jint jf_as_bits = jint_cast(jf);
    emit_d32(cbuf, jf_as_bits);
  %}

  // Emit a raw 16-bit immediate.
  enc_class Con16(immI src)
  %{
    // Output immediate
    $$$emit16$src$$constant;
  %}

  // How is this different from Con32??? XXX
  enc_class Con_d32(immI src)
  %{
    emit_d32(cbuf,$src$$constant);
  %}

  enc_class conmemref (rRegP t1) %{    // Con32(storeImmI)
    // Output immediate memory reference: ModRM with mod=00, r/m=101
    // plus a zero 32-bit displacement.
    emit_rm(cbuf, 0x00, $t1$$reg, 0x05 );
    emit_d32(cbuf, 0x00);
  %}
  // Jump-table dispatch: load the table base into $dest, then do an
  // indirect jump through base + switch value.
  enc_class jump_enc(rRegL switch_val, rRegI dest) %{
    MacroAssembler masm(&cbuf);

    Register switch_reg = as_Register($switch_val$$reg);
    Register dest_reg   = as_Register($dest$$reg);
    address table_base  = masm.address_table_constant(_index2label);

    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    // to do that and the compiler is using that register as one it can allocate.
    // So we build it all by hand.
    // Address index(noreg, switch_reg, Address::times_1);
    // ArrayAddress dispatch(table, index);

    Address dispatch(dest_reg, switch_reg, Address::times_1);

    masm.lea(dest_reg, InternalAddress(table_base));
    masm.jmp(dispatch);
  %}

  // As jump_enc, with an extra scale and constant offset applied to
  // the switch value.
  enc_class jump_enc_addr(rRegL switch_val, immI2 shift, immL32 offset, rRegI dest) %{
    MacroAssembler masm(&cbuf);

    Register switch_reg = as_Register($switch_val$$reg);
    Register dest_reg   = as_Register($dest$$reg);
    address table_base  = masm.address_table_constant(_index2label);

    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    // to do that and the compiler is using that register as one it can allocate.
    // So we build it all by hand.
    // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant);
    // ArrayAddress dispatch(table, index);

    Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant);

    masm.lea(dest_reg, InternalAddress(table_base));
    masm.jmp(dispatch);
  %}

  // As jump_enc, with a scale but no constant offset.
  enc_class jump_enc_offset(rRegL switch_val, immI2 shift, rRegI dest) %{
    MacroAssembler masm(&cbuf);

    Register switch_reg = as_Register($switch_val$$reg);
    Register dest_reg   = as_Register($dest$$reg);
    address table_base  = masm.address_table_constant(_index2label);

    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    // to do that and the compiler is using that register as one it can allocate.
    // So we build it all by hand.
    // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant);
    // ArrayAddress dispatch(table, index);

    Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant);
    masm.lea(dest_reg, InternalAddress(table_base));
    masm.jmp(dispatch);

  %}
  // LOCK prefix, emitted only on multiprocessor systems.
  enc_class lock_prefix()
  %{
    if (os::is_MP()) {
      emit_opcode(cbuf, 0xF0); // lock
    }
  %}

  // REX prefix for a memory operand: B for a high base register, X
  // for a high index register; nothing if neither is high.
  enc_class REX_mem(memory mem)
  %{
    if ($mem$$base >= 8) {
      if ($mem$$index < 8) {
        emit_opcode(cbuf, Assembler::REX_B);
      } else {
        emit_opcode(cbuf, Assembler::REX_XB);
      }
    } else {
      if ($mem$$index >= 8) {
        emit_opcode(cbuf, Assembler::REX_X);
      }
    }
  %}

  // As REX_mem, but for 64-bit operations: REX.W is always present.
  enc_class REX_mem_wide(memory mem)
  %{
    if ($mem$$base >= 8) {
      if ($mem$$index < 8) {
        emit_opcode(cbuf, Assembler::REX_WB);
      } else {
        emit_opcode(cbuf, Assembler::REX_WXB);
      }
    } else {
      if ($mem$$index < 8) {
        emit_opcode(cbuf, Assembler::REX_W);
      } else {
        emit_opcode(cbuf, Assembler::REX_WX);
      }
    }
  %}
  // for byte regs: a plain REX is required to address spl/bpl/sil/dil
  // (encodings 4-7), not just for the high registers.
  enc_class REX_breg(rRegI reg)
  %{
    if ($reg$$reg >= 4) {
      emit_opcode(cbuf, $reg$$reg < 8 ? Assembler::REX : Assembler::REX_B);
    }
  %}

  // for byte regs
  enc_class REX_reg_breg(rRegI dst, rRegI src)
  %{
    if ($dst$$reg < 8) {
      if ($src$$reg >= 4) {
        emit_opcode(cbuf, $src$$reg < 8 ? Assembler::REX : Assembler::REX_B);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
      }
    }
  %}

  // for byte regs
  enc_class REX_breg_mem(rRegI reg, memory mem)
  %{
    if ($reg$$reg < 8) {
      if ($mem$$base < 8) {
        if ($mem$$index >= 8) {
          emit_opcode(cbuf, Assembler::REX_X);
        } else if ($reg$$reg >= 4) {
          // Bare REX needed for byte access to encodings 4-7.
          emit_opcode(cbuf, Assembler::REX);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_B);
        } else {
          emit_opcode(cbuf, Assembler::REX_XB);
        }
      }
    } else {
      if ($mem$$base < 8) {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_R);
        } else {
          emit_opcode(cbuf, Assembler::REX_RX);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_RB);
        } else {
          emit_opcode(cbuf, Assembler::REX_RXB);
        }
      }
    }
  %}
  // REX.B for a high register in the r/m position.
  enc_class REX_reg(rRegI reg)
  %{
    if ($reg$$reg >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
    }
  %}

  // 64-bit form: always REX.W, plus B for a high register.
  enc_class REX_reg_wide(rRegI reg)
  %{
    if ($reg$$reg < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
    }
  %}

  // REX for a reg,reg form: R covers a high dst, B a high src.
  enc_class REX_reg_reg(rRegI dst, rRegI src)
  %{
    if ($dst$$reg < 8) {
      if ($src$$reg >= 8) {
        emit_opcode(cbuf, Assembler::REX_B);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
      }
    }
  %}

  // 64-bit reg,reg form: always REX.W.
  enc_class REX_reg_reg_wide(rRegI dst, rRegI src)
  %{
    if ($dst$$reg < 8) {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_W);
      } else {
        emit_opcode(cbuf, Assembler::REX_WB);
      }
    } else {
      if ($src$$reg < 8) {
        emit_opcode(cbuf, Assembler::REX_WR);
      } else {
        emit_opcode(cbuf, Assembler::REX_WRB);
      }
    }
  %}

  // REX for a reg,mem form: R for a high reg, B/X for a high
  // base/index in the memory operand.
  enc_class REX_reg_mem(rRegI reg, memory mem)
  %{
    if ($reg$$reg < 8) {
      if ($mem$$base < 8) {
        if ($mem$$index >= 8) {
          emit_opcode(cbuf, Assembler::REX_X);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_B);
        } else {
          emit_opcode(cbuf, Assembler::REX_XB);
        }
      }
    } else {
      if ($mem$$base < 8) {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_R);
        } else {
          emit_opcode(cbuf, Assembler::REX_RX);
        }
      } else {
        if ($mem$$index < 8) {
          emit_opcode(cbuf, Assembler::REX_RB);
        } else {
          emit_opcode(cbuf, Assembler::REX_RXB);
        }
      }
    }
  %}
// Emit the REX prefix for a reg,mem form with 64-bit operand size:
// REX.W always, combined with REX.R (register operand), REX.X (SIB
// index) and REX.B (base) as their encodings require (>= 8).
enc_class REX_reg_mem_wide(rRegL reg, memory mem)
%{
  if ($reg$$reg < 8) {
    if ($mem$$base < 8) {
      if ($mem$$index < 8) {
        emit_opcode(cbuf, Assembler::REX_W);
      } else {
        emit_opcode(cbuf, Assembler::REX_WX);
      }
    } else {
      if ($mem$$index < 8) {
        emit_opcode(cbuf, Assembler::REX_WB);
      } else {
        emit_opcode(cbuf, Assembler::REX_WXB);
      }
    }
  } else {
    if ($mem$$base < 8) {
      if ($mem$$index < 8) {
        emit_opcode(cbuf, Assembler::REX_WR);
      } else {
        emit_opcode(cbuf, Assembler::REX_WRX);
      }
    } else {
      if ($mem$$index < 8) {
        emit_opcode(cbuf, Assembler::REX_WRB);
      } else {
        emit_opcode(cbuf, Assembler::REX_WRXB);
      }
    }
  }
%}
// Emit the ModRM/SIB/displacement bytes for a reg,mem form.  The REX
// prefix must already have been emitted by one of the REX_* enc_classes;
// encode_RegMem masks encodings down to their low 3 bits.
enc_class reg_mem(rRegI ereg, memory mem)
%{
  // High registers handled in encode_RegMem
  int reg = $ereg$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  bool disp_is_oop = $mem->disp_is_oop();

  encode_RegMem(cbuf, reg, base, index, scale, disp, disp_is_oop);
%}
// Emit the ModRM/SIB/displacement bytes for an instruction that encodes
// an opcode extension (the /digit from the opcode map) in the ModRM reg
// field instead of a register, e.g. group-1/group-3 immediate forms.
enc_class RM_opc_mem(immI rm_opcode, memory mem)
%{
  int rm_byte_opcode = $rm_opcode$$constant;

  // High registers handled in encode_RegMem
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int displace = $mem$$disp;

  bool disp_is_oop = $mem->disp_is_oop(); // disp-as-oop when
                                          // working with static
                                          // globals
  encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace,
                disp_is_oop);
%}
// Emit the ModRM/displacement bytes for LEA dst, [src0 + src1]:
// base register plus constant displacement, no index, no scale.
enc_class reg_lea(rRegI dst, rRegI src0, immI src1)
%{
  int reg_encoding = $dst$$reg;
  int base = $src0$$reg;          // 0xFFFFFFFF indicates no base
  int index = 0x04;               // 0x04 indicates no index
  int scale = 0x00;               // 0x00 indicates no scale
  int displace = $src1$$constant; // 0x00 indicates no displacement
  bool disp_is_oop = false;
  encode_RegMem(cbuf, reg_encoding, base, index, scale, displace,
                disp_is_oop);
%}
// Emit NEG r32 (F7 /3): two's-complement negation of a 32-bit register,
// with REX.B when the register encoding is >= 8.
enc_class neg_reg(rRegI dst)
%{
  int dstenc = $dst$$reg;
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_B);
    dstenc -= 8;
  }
  // NEG $dst
  emit_opcode(cbuf, 0xF7);
  emit_rm(cbuf, 0x3, 0x03, dstenc);
%}
// Emit NEG r64 (REX.W F7 /3): 64-bit negation; REX.W always, REX.WB
// for register encodings >= 8.
enc_class neg_reg_wide(rRegI dst)
%{
  int dstenc = $dst$$reg;
  if (dstenc < 8) {
    emit_opcode(cbuf, Assembler::REX_W);
  } else {
    emit_opcode(cbuf, Assembler::REX_WB);
    dstenc -= 8;
  }
  // NEG $dst
  emit_opcode(cbuf, 0xF7);
  emit_rm(cbuf, 0x3, 0x03, dstenc);
%}
// Emit SETL dst (0F 9C): set the byte register to 1 if less (SF != OF).
// For encodings 4-7 a plain (empty) REX prefix is required so the byte
// operand selects SPL/BPL/SIL/DIL rather than legacy AH/CH/DH/BH.
enc_class setLT_reg(rRegI dst)
%{
  int dstenc = $dst$$reg;
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_B);
    dstenc -= 8;
  } else if (dstenc >= 4) {
    emit_opcode(cbuf, Assembler::REX);
  }
  // SETLT $dst
  emit_opcode(cbuf, 0x0F);
  emit_opcode(cbuf, 0x9C);
  emit_rm(cbuf, 0x3, 0x0, dstenc);
%}
// Emit SETNZ dst (0F 95): set the byte register to 1 if ZF == 0.
// Same byte-register REX rule as setLT_reg: encodings 4-7 need a bare
// REX prefix to reach SPL/BPL/SIL/DIL.
enc_class setNZ_reg(rRegI dst)
%{
  int dstenc = $dst$$reg;
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_B);
    dstenc -= 8;
  } else if (dstenc >= 4) {
    emit_opcode(cbuf, Assembler::REX);
  }
  // SETNZ $dst
  emit_opcode(cbuf, 0x0F);
  emit_opcode(cbuf, 0x95);
  emit_rm(cbuf, 0x3, 0x0, dstenc);
%}
// Branch-free conditional add (cadd_cmpLT idiom):
//   p -= q; tmp = (p < q before sub) ? -1 : 0 (via SBB); p += (tmp & y)
// tmp is pinned to RCX (encoding < 8), so the SBB and the tmp side of
// the AND/ADD never need a REX extension bit; only p, q and y do.
enc_class enc_cmpLTP(no_rcx_RegI p, no_rcx_RegI q, no_rcx_RegI y,
                     rcx_RegI tmp)
%{
  // cadd_cmpLT

  int tmpReg = $tmp$$reg;

  int penc = $p$$reg;
  int qenc = $q$$reg;
  int yenc = $y$$reg;

  // subl $p,$q
  if (penc < 8) {
    if (qenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
    }
  } else {
    if (qenc < 8) {
      emit_opcode(cbuf, Assembler::REX_R);
    } else {
      emit_opcode(cbuf, Assembler::REX_RB);
    }
  }
  emit_opcode(cbuf, 0x2B);
  emit_rm(cbuf, 0x3, penc & 7, qenc & 7);

  // sbbl $tmp, $tmp   -- materializes 0 or -1 from the borrow flag
  emit_opcode(cbuf, 0x1B);
  emit_rm(cbuf, 0x3, tmpReg, tmpReg);

  // andl $tmp, $y
  if (yenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_B);
  }
  emit_opcode(cbuf, 0x23);
  emit_rm(cbuf, 0x3, tmpReg, yenc & 7);

  // addl $p,$tmp
  if (penc >= 8) {
    emit_opcode(cbuf, Assembler::REX_R);
  }
  emit_opcode(cbuf, 0x03);
  emit_rm(cbuf, 0x3, penc & 7, tmpReg);
%}
// Compare the longs and set -1, 0, or 1 into dst
// Three-way 64-bit compare: dst = (src1 < src2) ? -1 : (src1 != src2) ? 1 : 0.
// Sequence: CMPQ; dst = -1; short JL over the fixup; SETNE dst; MOVZBL dst.
// The short-jump displacement is hand-counted and depends on whether the
// SETNE/MOVZBL below need REX prefixes (dstenc >= 4 adds one byte each).
enc_class cmpl3_flag(rRegL src1, rRegL src2, rRegI dst)
%{
  int src1enc = $src1$$reg;
  int src2enc = $src2$$reg;
  int dstenc = $dst$$reg;

  // cmpq $src1, $src2
  if (src1enc < 8) {
    if (src2enc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
    }
  } else {
    if (src2enc < 8) {
      emit_opcode(cbuf, Assembler::REX_WR);
    } else {
      emit_opcode(cbuf, Assembler::REX_WRB);
    }
  }
  emit_opcode(cbuf, 0x3B);
  emit_rm(cbuf, 0x3, src1enc & 7, src2enc & 7);

  // movl $dst, -1
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_B);
  }
  emit_opcode(cbuf, 0xB8 | (dstenc & 7));
  emit_d32(cbuf, -1);

  // jl,s done    (displacement covers setne+movzbl, +2 if REX bytes needed)
  emit_opcode(cbuf, 0x7C);
  emit_d8(cbuf, dstenc < 4 ? 0x06 : 0x08);

  // setne $dst   (byte reg: encodings 4-7 need bare REX, 8+ need REX.B)
  if (dstenc >= 4) {
    emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_B);
  }
  emit_opcode(cbuf, 0x0F);
  emit_opcode(cbuf, 0x95);
  emit_opcode(cbuf, 0xC0 | (dstenc & 7));

  // movzbl $dst, $dst
  if (dstenc >= 4) {
    emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_RB);
  }
  emit_opcode(cbuf, 0x0F);
  emit_opcode(cbuf, 0xB6);
  emit_rm(cbuf, 0x3, dstenc & 7, dstenc & 7);
%}
// Move an x87 double result into an XMM register via the stack:
// FSTP [rsp]; movsd/movlpd dst,[rsp]; add rsp,8.  Assumes the caller
// has already reserved the 8-byte stack slot that FSTP writes.
enc_class Push_ResultXD(regD dst) %{
  int dstenc = $dst$$reg;

  store_to_stackslot( cbuf, 0xDD, 0x03, 0 ); //FSTP [RSP]

  // UseXmmLoadAndClearUpper ? movsd dst,[rsp] : movlpd dst,[rsp]
  emit_opcode  (cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_R);
  }
  emit_opcode  (cbuf, 0x0F );
  emit_opcode  (cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12 );
  encode_RegMem(cbuf, dstenc, RSP_enc, 0x4, 0, 0, false);

  // add rsp,8
  emit_opcode(cbuf, Assembler::REX_W);
  emit_opcode(cbuf,0x83);
  emit_rm(cbuf,0x3, 0x0, RSP_enc);
  emit_d8(cbuf,0x08);
%}
// Push an XMM double onto the x87 stack via memory:
// sub rsp,8; movsd [rsp],src; fld qword [rsp].  Counterpart of
// Push_ResultXD; the stack slot is released by the result-popping code.
enc_class Push_SrcXD(regD src) %{
  int srcenc = $src$$reg;

  // subq rsp,#8
  emit_opcode(cbuf, Assembler::REX_W);
  emit_opcode(cbuf, 0x83);
  emit_rm(cbuf, 0x3, 0x5, RSP_enc);
  emit_d8(cbuf, 0x8);

  // movsd [rsp],src
  emit_opcode(cbuf, 0xF2);
  if (srcenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_R);
  }
  emit_opcode(cbuf, 0x0F);
  emit_opcode(cbuf, 0x11);
  encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false);

  // fldd [rsp]
  emit_opcode(cbuf, 0x66);
  emit_opcode(cbuf, 0xDD);
  encode_RegMem(cbuf, 0x0, RSP_enc, 0x4, 0, 0, false);
%}
// 64-bit load from memory into an XMM register via MacroAssembler::movq.
enc_class movq_ld(regD dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  __ movq($dst$$XMMRegister, $mem$$Address);
%}
// 64-bit store of an XMM register to memory via MacroAssembler::movq.
enc_class movq_st(memory mem, regD src) %{
  MacroAssembler _masm(&cbuf);
  __ movq($mem$$Address, $src$$XMMRegister);
%}
// Replicate the low byte of src across the low 8 bytes of dst:
// copy src to dst, interleave with itself (punpcklbw), then broadcast
// the low word to all four low words (pshuflw imm 0x00).
enc_class pshufd_8x8(regF dst, regF src) %{
  MacroAssembler _masm(&cbuf);

  encode_CopyXD(cbuf, $dst$$reg, $src$$reg);
  __ punpcklbw(as_XMMRegister($dst$$reg), as_XMMRegister($dst$$reg));
  __ pshuflw(as_XMMRegister($dst$$reg), as_XMMRegister($dst$$reg), 0x00);
%}
// Broadcast the low 16-bit word of src to the four low words of dst
// (pshuflw with shuffle immediate 0x00).
enc_class pshufd_4x16(regF dst, regF src) %{
  MacroAssembler _masm(&cbuf);

  __ pshuflw(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg), 0x00);
%}
// Shuffle the four dwords of src into dst according to the immediate
// shuffle-control byte 'mode'.
enc_class pshufd(regD dst, regD src, int mode) %{
  MacroAssembler _masm(&cbuf);

  __ pshufd(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg), $mode);
%}
// Bitwise XOR of two XMM registers (dst ^= src).
enc_class pxor(regD dst, regD src) %{
  MacroAssembler _masm(&cbuf);

  __ pxor(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg));
%}
// Move a 32-bit general-purpose register into the low dword of an XMM
// register (movd).
enc_class mov_i2x(regD dst, rRegI src) %{
  MacroAssembler _masm(&cbuf);

  __ movdl(as_XMMRegister($dst$$reg), as_Register($src$$reg));
%}
3503 // obj: object to lock
3504 // box: box address (header location) -- killed
3505 // tmp: rax -- killed
3506 // scr: rbx -- killed
3507 //
3508 // What follows is a direct transliteration of fast_lock() and fast_unlock()
3509 // from i486.ad. See that file for comments.
3510 // TODO: where possible switch from movq (r, 0) to movl(r,0) and
3511 // use the shorter encoding. (Movl clears the high-order 32-bits).
// Inline fast-path monitor enter, transliterated from i486.ad.  Leaves
// ZF set on success (lock acquired or recursive) and ZF clear when the
// caller must take the slow path.  Low bits of the EmitSync flag select
// alternate (diagnostic/legacy) code shapes; the final 'else' arm is
// the production path: try stack-lock CAS, detect recursion, and fall
// through to the inflated-monitor owner CAS.
enc_class Fast_Lock(rRegP obj, rRegP box, rax_RegI tmp, rRegP scr)
%{
  Register objReg = as_Register((int)$obj$$reg);
  Register boxReg = as_Register((int)$box$$reg);
  Register tmpReg = as_Register($tmp$$reg);
  Register scrReg = as_Register($scr$$reg);
  MacroAssembler masm(&cbuf);

  // Verify uniqueness of register assignments -- necessary but not sufficient
  assert (objReg != boxReg && objReg != tmpReg &&
          objReg != scrReg && tmpReg != scrReg, "invariant") ;

  if (_counters != NULL) {
    masm.atomic_incl(ExternalAddress((address) _counters->total_entry_count_addr()));
  }
  if (EmitSync & 1) {
    // EmitSync&1: degenerate form -- always report failure (ZF clear).
    // Without cast to int32_t a movptr will destroy r10 which is typically obj
    masm.movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
    masm.cmpptr(rsp, (int32_t)NULL_WORD) ;
  } else
  if (EmitSync & 2) {
    // EmitSync&2: simple stack-lock-only form.
    Label DONE_LABEL;
    if (UseBiasedLocking) {
      // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
      masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
    }
    // QQQ was movl...
    masm.movptr(tmpReg, 0x1);
    masm.orptr(tmpReg, Address(objReg, 0));
    masm.movptr(Address(boxReg, 0), tmpReg);
    if (os::is_MP()) {
      masm.lock();
    }
    masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
    masm.jcc(Assembler::equal, DONE_LABEL);

    // Recursive locking: the CAS failed; if the mark is our own stack
    // address, store 0 in the box to flag a recursive acquisition.
    masm.subptr(tmpReg, rsp);
    masm.andptr(tmpReg, 7 - os::vm_page_size());
    masm.movptr(Address(boxReg, 0), tmpReg);

    masm.bind(DONE_LABEL);
    masm.nop(); // avoid branch to branch
  } else {
    Label DONE_LABEL, IsInflated, Egress;

    masm.movptr(tmpReg, Address(objReg, 0)) ;
    masm.testl (tmpReg, 0x02) ;  // inflated vs stack-locked|neutral|biased
    masm.jcc  (Assembler::notZero, IsInflated) ;

    // it's stack-locked, biased or neutral
    // TODO: optimize markword triage order to reduce the number of
    // conditional branches in the most common cases.
    // Beware -- there's a subtle invariant that fetch of the markword
    // at [FETCH], below, will never observe a biased encoding (*101b).
    // If this invariant is not held we'll suffer exclusion (safety) failure.

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, _counters);
      masm.movptr(tmpReg, Address(objReg, 0)) ;        // [FETCH]
    }

    // was q will it destroy high?
    masm.orl (tmpReg, 1) ;
    masm.movptr(Address(boxReg, 0), tmpReg) ;
    if (os::is_MP()) { masm.lock(); }
    masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
    if (_counters != NULL) {
      masm.cond_inc32(Assembler::equal,
                      ExternalAddress((address) _counters->fast_path_entry_count_addr()));
    }
    masm.jcc  (Assembler::equal, DONE_LABEL);

    // Recursive locking
    masm.subptr(tmpReg, rsp);
    masm.andptr(tmpReg, 7 - os::vm_page_size());
    masm.movptr(Address(boxReg, 0), tmpReg);
    if (_counters != NULL) {
      masm.cond_inc32(Assembler::equal,
                      ExternalAddress((address) _counters->fast_path_entry_count_addr()));
    }
    masm.jmp  (DONE_LABEL) ;

    masm.bind (IsInflated) ;
    // It's inflated

    // TODO: someday avoid the ST-before-CAS penalty by
    // relocating (deferring) the following ST.
    // We should also think about trying a CAS without having
    // fetched _owner.  If the CAS is successful we may
    // avoid an RTO->RTS upgrade on the $line.
    // Without cast to int32_t a movptr will destroy r10 which is typically obj
    masm.movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;

    masm.mov    (boxReg, tmpReg) ;
    masm.movptr (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
    masm.testptr(tmpReg, tmpReg) ;
    masm.jcc    (Assembler::notZero, DONE_LABEL) ;

    // It's inflated and appears unlocked
    if (os::is_MP()) { masm.lock(); }
    masm.cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
    // Intentional fall-through into DONE_LABEL ...

    masm.bind  (DONE_LABEL) ;
    masm.nop () ;     // avoid jmp to jmp
  }
%}
3623 // obj: object to unlock
3624 // box: box address (displaced header location), killed
3625 // RBX: killed tmp; cannot be obj nor box
// Inline fast-path monitor exit.  Leaves ZF set on success and ZF clear
// when the caller must take the slow path.  EmitSync bits 4/8 select
// diagnostic/legacy shapes; the final arm is the production path:
// recursive-unlock check, inflated-monitor exit with successor check,
// then the stack-lock CAS under the Stacked label.
enc_class Fast_Unlock(rRegP obj, rax_RegP box, rRegP tmp)
%{

  Register objReg = as_Register($obj$$reg);
  Register boxReg = as_Register($box$$reg);
  Register tmpReg = as_Register($tmp$$reg);
  MacroAssembler masm(&cbuf);

  if (EmitSync & 4) {
    // EmitSync&4: degenerate form -- always report failure.
    masm.cmpptr(rsp, 0) ;
  } else
  if (EmitSync & 8) {
    // EmitSync&8: simple stack-lock-only form.
    Label DONE_LABEL;
    if (UseBiasedLocking) {
      masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
    }

    // Check whether the displaced header is 0
    //(=> recursive unlock)
    masm.movptr(tmpReg, Address(boxReg, 0));
    masm.testptr(tmpReg, tmpReg);
    masm.jcc(Assembler::zero, DONE_LABEL);

    // If not recursive lock, reset the header to displaced header
    if (os::is_MP()) {
      masm.lock();
    }
    masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
    masm.bind(DONE_LABEL);
    masm.nop(); // avoid branch to branch
  } else {
    Label DONE_LABEL, Stacked, CheckSucc ;

    if (UseBiasedLocking && !UseOptoBiasInlining) {
      masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
    }

    masm.movptr(tmpReg, Address(objReg, 0)) ;
    masm.cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD) ;
    masm.jcc   (Assembler::zero, DONE_LABEL) ;   // recursive unlock -- done
    masm.testl (tmpReg, 0x02) ;
    masm.jcc   (Assembler::zero, Stacked) ;

    // It's inflated: fail fast if we own it recursively or if there are
    // waiters on cxq/EntryList that require successor handling.
    masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
    masm.xorptr(boxReg, r15_thread) ;
    masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
    masm.jcc   (Assembler::notZero, DONE_LABEL) ;
    masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
    masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
    masm.jcc   (Assembler::notZero, CheckSucc) ;
    masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
    masm.jmp   (DONE_LABEL) ;

    if ((EmitSync & 65536) == 0) {
      Label LSuccess, LGoSlowPath ;
      masm.bind  (CheckSucc) ;
      masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
      masm.jcc   (Assembler::zero, LGoSlowPath) ;

      // I'd much rather use lock:andl m->_owner, 0 as it's faster than the
      // the explicit ST;MEMBAR combination, but masm doesn't currently support
      // "ANDQ M,IMM".  Don't use MFENCE here.  lock:add to TOS, xchg, etc
      // are all faster when the write buffer is populated.
      masm.movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
      if (os::is_MP()) {
         masm.lock () ; masm.addl (Address(rsp, 0), 0) ;
      }
      masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
      masm.jcc   (Assembler::notZero, LSuccess) ;

      masm.movptr (boxReg, (int32_t)NULL_WORD) ;                   // box is really EAX
      if (os::is_MP()) { masm.lock(); }
      masm.cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
      masm.jcc  (Assembler::notEqual, LSuccess) ;
      // Intentional fall-through into slow-path

      masm.bind  (LGoSlowPath) ;
      masm.orl   (boxReg, 1) ;                      // set ICC.ZF=0 to indicate failure
      masm.jmp   (DONE_LABEL) ;

      masm.bind  (LSuccess) ;
      masm.testl (boxReg, 0) ;                      // set ICC.ZF=1 to indicate success
      masm.jmp   (DONE_LABEL) ;
    }

    masm.bind (Stacked) ;
    masm.movptr(tmpReg, Address (boxReg, 0)) ;      // re-fetch
    if (os::is_MP()) { masm.lock(); }
    masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box

    if (EmitSync & 65536) {
       masm.bind (CheckSucc) ;
    }
    masm.bind(DONE_LABEL);
    if (EmitSync & 32768) {
       masm.nop();                      // avoid branch to branch
    }
  }
%}
// Lexicographic compare of two java.lang.String objects.
// Fixed register contract (from the matching instruct): rsi = first
// string, rdi = second string, result in rcx.  The length difference is
// parked on the native stack while the character loop runs, and becomes
// the result when one string is a prefix of the other.
enc_class enc_String_Compare()
%{
  Label RCX_GOOD_LABEL, LENGTH_DIFF_LABEL,
        POP_LABEL, DONE_LABEL, CONT_LABEL,
        WHILE_HEAD_LABEL;
  MacroAssembler masm(&cbuf);

  // Get the first character position in both strings
  //         [8] char array, [12] offset, [16] count
  int value_offset  = java_lang_String::value_offset_in_bytes();
  int offset_offset = java_lang_String::offset_offset_in_bytes();
  int count_offset  = java_lang_String::count_offset_in_bytes();
  int base_offset   = arrayOopDesc::base_offset_in_bytes(T_CHAR);

  masm.load_heap_oop(rax, Address(rsi, value_offset));
  masm.movl(rcx, Address(rsi, offset_offset));
  masm.lea(rax, Address(rax, rcx, Address::times_2, base_offset));
  masm.load_heap_oop(rbx, Address(rdi, value_offset));
  masm.movl(rcx, Address(rdi, offset_offset));
  masm.lea(rbx, Address(rbx, rcx, Address::times_2, base_offset));

  // Compute the minimum of the string lengths(rsi) and the
  // difference of the string lengths (stack)

  masm.movl(rdi, Address(rdi, count_offset));
  masm.movl(rsi, Address(rsi, count_offset));
  masm.movl(rcx, rdi);
  masm.subl(rdi, rsi);
  masm.push(rdi);
  masm.cmov(Assembler::lessEqual, rsi, rcx);

  // Is the minimum length zero?
  masm.bind(RCX_GOOD_LABEL);
  masm.testl(rsi, rsi);
  masm.jcc(Assembler::zero, LENGTH_DIFF_LABEL);

  // Load first characters
  masm.load_unsigned_short(rcx, Address(rbx, 0));
  masm.load_unsigned_short(rdi, Address(rax, 0));

  // Compare first characters
  masm.subl(rcx, rdi);
  masm.jcc(Assembler::notZero, POP_LABEL);
  masm.decrementl(rsi);
  masm.jcc(Assembler::zero, LENGTH_DIFF_LABEL);

  {
    // Check after comparing first character to see if strings are equivalent
    Label LSkip2;
    // Check if the strings start at same location
    masm.cmpptr(rbx, rax);
    masm.jcc(Assembler::notEqual, LSkip2);

    // Check if the length difference is zero (from stack)
    masm.cmpl(Address(rsp, 0), 0x0);
    masm.jcc(Assembler::equal, LENGTH_DIFF_LABEL);

    // Strings might not be equivalent
    masm.bind(LSkip2);
  }

  // Shift RAX and RBX to the end of the arrays, negate min
  masm.lea(rax, Address(rax, rsi, Address::times_2, 2));
  masm.lea(rbx, Address(rbx, rsi, Address::times_2, 2));
  masm.negptr(rsi);

  // Compare the rest of the characters
  masm.bind(WHILE_HEAD_LABEL);
  masm.load_unsigned_short(rcx, Address(rbx, rsi, Address::times_2, 0));
  masm.load_unsigned_short(rdi, Address(rax, rsi, Address::times_2, 0));
  masm.subl(rcx, rdi);
  masm.jcc(Assembler::notZero, POP_LABEL);
  masm.increment(rsi);
  masm.jcc(Assembler::notZero, WHILE_HEAD_LABEL);

  // Strings are equal up to min length.  Return the length difference.
  masm.bind(LENGTH_DIFF_LABEL);
  masm.pop(rcx);
  masm.jmp(DONE_LABEL);

  // Discard the stored length difference
  masm.bind(POP_LABEL);
  masm.addptr(rsp, 8);

  // That's it
  masm.bind(DONE_LABEL);
%}
// Element-wise equality of two char arrays; result register gets 1 if
// equal, 0 otherwise.  Handles identical references, null operands and
// length mismatch up front, compares an odd trailing char separately,
// then runs a negative-index 4-byte-at-a-time loop over the remainder.
enc_class enc_Array_Equals(rdi_RegP ary1, rsi_RegP ary2, rax_RegI tmp1, rbx_RegI tmp2, rcx_RegI result) %{
  Label TRUE_LABEL, FALSE_LABEL, DONE_LABEL, COMPARE_LOOP_HDR, COMPARE_LOOP;
  MacroAssembler masm(&cbuf);

  Register ary1Reg   = as_Register($ary1$$reg);
  Register ary2Reg   = as_Register($ary2$$reg);
  Register tmp1Reg   = as_Register($tmp1$$reg);
  Register tmp2Reg   = as_Register($tmp2$$reg);
  Register resultReg = as_Register($result$$reg);

  int length_offset  = arrayOopDesc::length_offset_in_bytes();
  int base_offset    = arrayOopDesc::base_offset_in_bytes(T_CHAR);

  // Check the input args
  masm.cmpq(ary1Reg, ary2Reg);
  masm.jcc(Assembler::equal, TRUE_LABEL);
  masm.testq(ary1Reg, ary1Reg);
  masm.jcc(Assembler::zero, FALSE_LABEL);
  masm.testq(ary2Reg, ary2Reg);
  masm.jcc(Assembler::zero, FALSE_LABEL);

  // Check the lengths
  masm.movl(tmp2Reg, Address(ary1Reg, length_offset));
  masm.movl(resultReg, Address(ary2Reg, length_offset));
  masm.cmpl(tmp2Reg, resultReg);
  masm.jcc(Assembler::notEqual, FALSE_LABEL);
  masm.testl(resultReg, resultReg);
  masm.jcc(Assembler::zero, TRUE_LABEL);

  // Get the number of 4 byte vectors to compare
  masm.shrl(resultReg, 1);

  // Check for odd-length arrays
  masm.andl(tmp2Reg, 1);
  masm.testl(tmp2Reg, tmp2Reg);
  masm.jcc(Assembler::zero, COMPARE_LOOP_HDR);

  // Compare 2-byte "tail" at end of arrays
  masm.load_unsigned_short(tmp1Reg, Address(ary1Reg, resultReg, Address::times_4, base_offset));
  masm.load_unsigned_short(tmp2Reg, Address(ary2Reg, resultReg, Address::times_4, base_offset));
  masm.cmpl(tmp1Reg, tmp2Reg);
  masm.jcc(Assembler::notEqual, FALSE_LABEL);
  masm.testl(resultReg, resultReg);
  masm.jcc(Assembler::zero, TRUE_LABEL);

  // Setup compare loop
  masm.bind(COMPARE_LOOP_HDR);
  // Shift tmp1Reg and tmp2Reg to the last 4-byte boundary of the arrays
  masm.leaq(tmp1Reg, Address(ary1Reg, resultReg, Address::times_4, base_offset));
  masm.leaq(tmp2Reg, Address(ary2Reg, resultReg, Address::times_4, base_offset));
  masm.negq(resultReg);

  // 4-byte-wide compare loop
  masm.bind(COMPARE_LOOP);
  masm.movl(ary1Reg, Address(tmp1Reg, resultReg, Address::times_4, 0));
  masm.movl(ary2Reg, Address(tmp2Reg, resultReg, Address::times_4, 0));
  masm.cmpl(ary1Reg, ary2Reg);
  masm.jcc(Assembler::notEqual, FALSE_LABEL);
  masm.incrementq(resultReg);
  masm.jcc(Assembler::notZero, COMPARE_LOOP);

  masm.bind(TRUE_LABEL);
  masm.movl(resultReg, 1);   // return true
  masm.jmp(DONE_LABEL);

  masm.bind(FALSE_LABEL);
  masm.xorl(resultReg, resultReg); // return false

  // That's it
  masm.bind(DONE_LABEL);
%}
// Emit a direct jump to the rethrow stub (jmp rel32 with a runtime-call
// relocation), used to re-dispatch a pending exception.
enc_class enc_rethrow()
%{
  cbuf.set_inst_mark();
  emit_opcode(cbuf, 0xE9); // jmp entry
  emit_d32_reloc(cbuf,
                 (int) (OptoRuntime::rethrow_stub() - cbuf.code_end() - 4),
                 runtime_call_Relocation::spec(),
                 RELOC_DISP32);
%}
// Absolute value of a float: ANDPS dst with the float sign-mask constant
// via a hand-emitted RIP-relative memory operand (ModRM mod=00 rm=101).
enc_class absF_encoding(regF dst)
%{
  int dstenc = $dst$$reg;
  address signmask_address = (address) StubRoutines::x86::float_sign_mask();

  cbuf.set_inst_mark();
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_R);
    dstenc -= 8;
  }
  // XXX reg_mem doesn't support RIP-relative addressing yet
  emit_opcode(cbuf, 0x0F);
  emit_opcode(cbuf, 0x54);
  emit_rm(cbuf, 0x0, dstenc, 0x5);  // 00 reg 101
  emit_d32_reloc(cbuf, signmask_address);
%}
// Absolute value of a double: ANDPD (66 prefix + 0F 54) dst with the
// double sign-mask constant via a RIP-relative operand.
enc_class absD_encoding(regD dst)
%{
  int dstenc = $dst$$reg;
  address signmask_address = (address) StubRoutines::x86::double_sign_mask();

  cbuf.set_inst_mark();
  emit_opcode(cbuf, 0x66);
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_R);
    dstenc -= 8;
  }
  // XXX reg_mem doesn't support RIP-relative addressing yet
  emit_opcode(cbuf, 0x0F);
  emit_opcode(cbuf, 0x54);
  emit_rm(cbuf, 0x0, dstenc, 0x5);  // 00 reg 101
  emit_d32_reloc(cbuf, signmask_address);
%}
// Negate a float: XORPS (0F 57) dst with the float sign-flip constant
// via a RIP-relative operand.
enc_class negF_encoding(regF dst)
%{
  int dstenc = $dst$$reg;
  address signflip_address = (address) StubRoutines::x86::float_sign_flip();

  cbuf.set_inst_mark();
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_R);
    dstenc -= 8;
  }
  // XXX reg_mem doesn't support RIP-relative addressing yet
  emit_opcode(cbuf, 0x0F);
  emit_opcode(cbuf, 0x57);
  emit_rm(cbuf, 0x0, dstenc, 0x5);  // 00 reg 101
  emit_d32_reloc(cbuf, signflip_address);
%}
// Negate a double: XORPD (66 0F 57) dst with the double sign-flip
// constant via a RIP-relative operand.
enc_class negD_encoding(regD dst)
%{
  int dstenc = $dst$$reg;
  address signflip_address = (address) StubRoutines::x86::double_sign_flip();

  cbuf.set_inst_mark();
  emit_opcode(cbuf, 0x66);
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_R);
    dstenc -= 8;
  }
  // XXX reg_mem doesn't support RIP-relative addressing yet
  emit_opcode(cbuf, 0x0F);
  emit_opcode(cbuf, 0x57);
  emit_rm(cbuf, 0x0, dstenc, 0x5);  // 00 reg 101
  emit_d32_reloc(cbuf, signflip_address);
%}
// Slow-path fixup after cvttss2si: if dst holds the 0x80000000
// "integer indefinite" value the conversion overflowed or saw a NaN,
// so spill src and call the f2i_fixup stub to compute the correct
// Java-semantics result (returned via the stack pop into dst).
// The short jne displacement (0xF/0x10/0x11) is a hand-counted byte
// length of the fixup sequence, which grows by one byte for each REX
// prefix required by src >= 8 and dst >= 8 below.
enc_class f2i_fixup(rRegI dst, regF src)
%{
  int dstenc = $dst$$reg;
  int srcenc = $src$$reg;

  // cmpl $dst, #0x80000000
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_B);
  }
  emit_opcode(cbuf, 0x81);
  emit_rm(cbuf, 0x3, 0x7, dstenc & 7);
  emit_d32(cbuf, 0x80000000);

  // jne,s done
  emit_opcode(cbuf, 0x75);
  if (srcenc < 8 && dstenc < 8) {
    emit_d8(cbuf, 0xF);
  } else if (srcenc >= 8 && dstenc >= 8) {
    emit_d8(cbuf, 0x11);
  } else {
    emit_d8(cbuf, 0x10);
  }

  // subq rsp, #8
  emit_opcode(cbuf, Assembler::REX_W);
  emit_opcode(cbuf, 0x83);
  emit_rm(cbuf, 0x3, 0x5, RSP_enc);
  emit_d8(cbuf, 8);

  // movss [rsp], $src
  emit_opcode(cbuf, 0xF3);
  if (srcenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_R);
  }
  emit_opcode(cbuf, 0x0F);
  emit_opcode(cbuf, 0x11);
  encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes

  // call f2i_fixup
  cbuf.set_inst_mark();
  emit_opcode(cbuf, 0xE8);
  emit_d32_reloc(cbuf,
                 (int)
                 (StubRoutines::x86::f2i_fixup() - cbuf.code_end() - 4),
                 runtime_call_Relocation::spec(),
                 RELOC_DISP32);

  // popq $dst
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_B);
  }
  emit_opcode(cbuf, 0x58 | (dstenc & 7));

  // done:
%}
// Slow-path fixup after cvttss2siq: if dst holds the 64-bit "integer
// indefinite" value (0x8000000000000000, compared RIP-relative against
// the double_sign_flip constant) the conversion overflowed or saw a
// NaN; spill src and call the f2l_fixup stub for the correct result.
// The short jne displacement is hand-counted as in f2i_fixup.
enc_class f2l_fixup(rRegL dst, regF src)
%{
  int dstenc = $dst$$reg;
  int srcenc = $src$$reg;
  address const_address = (address) StubRoutines::x86::double_sign_flip();

  // cmpq $dst, [0x8000000000000000]
  cbuf.set_inst_mark();
  emit_opcode(cbuf, dstenc < 8 ? Assembler::REX_W : Assembler::REX_WR);
  emit_opcode(cbuf, 0x39);
  // XXX reg_mem doesn't support RIP-relative addressing yet
  emit_rm(cbuf, 0x0, dstenc & 7, 0x5); // 00 reg 101
  emit_d32_reloc(cbuf, const_address);


  // jne,s done
  emit_opcode(cbuf, 0x75);
  if (srcenc < 8 && dstenc < 8) {
    emit_d8(cbuf, 0xF);
  } else if (srcenc >= 8 && dstenc >= 8) {
    emit_d8(cbuf, 0x11);
  } else {
    emit_d8(cbuf, 0x10);
  }

  // subq rsp, #8
  emit_opcode(cbuf, Assembler::REX_W);
  emit_opcode(cbuf, 0x83);
  emit_rm(cbuf, 0x3, 0x5, RSP_enc);
  emit_d8(cbuf, 8);

  // movss [rsp], $src
  emit_opcode(cbuf, 0xF3);
  if (srcenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_R);
  }
  emit_opcode(cbuf, 0x0F);
  emit_opcode(cbuf, 0x11);
  encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes

  // call f2l_fixup
  cbuf.set_inst_mark();
  emit_opcode(cbuf, 0xE8);
  emit_d32_reloc(cbuf,
                 (int)
                 (StubRoutines::x86::f2l_fixup() - cbuf.code_end() - 4),
                 runtime_call_Relocation::spec(),
                 RELOC_DISP32);

  // popq $dst
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_B);
  }
  emit_opcode(cbuf, 0x58 | (dstenc & 7));

  // done:
%}
// Slow-path fixup after cvttsd2si: same structure as f2i_fixup, but the
// spill uses movsd (F2 prefix) and the d2i_fixup stub is called.
// The short jne displacement is hand-counted as in f2i_fixup.
enc_class d2i_fixup(rRegI dst, regD src)
%{
  int dstenc = $dst$$reg;
  int srcenc = $src$$reg;

  // cmpl $dst, #0x80000000
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_B);
  }
  emit_opcode(cbuf, 0x81);
  emit_rm(cbuf, 0x3, 0x7, dstenc & 7);
  emit_d32(cbuf, 0x80000000);

  // jne,s done
  emit_opcode(cbuf, 0x75);
  if (srcenc < 8 && dstenc < 8) {
    emit_d8(cbuf, 0xF);
  } else if (srcenc >= 8 && dstenc >= 8) {
    emit_d8(cbuf, 0x11);
  } else {
    emit_d8(cbuf, 0x10);
  }

  // subq rsp, #8
  emit_opcode(cbuf, Assembler::REX_W);
  emit_opcode(cbuf, 0x83);
  emit_rm(cbuf, 0x3, 0x5, RSP_enc);
  emit_d8(cbuf, 8);

  // movsd [rsp], $src
  emit_opcode(cbuf, 0xF2);
  if (srcenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_R);
  }
  emit_opcode(cbuf, 0x0F);
  emit_opcode(cbuf, 0x11);
  encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes

  // call d2i_fixup
  cbuf.set_inst_mark();
  emit_opcode(cbuf, 0xE8);
  emit_d32_reloc(cbuf,
                 (int)
                 (StubRoutines::x86::d2i_fixup() - cbuf.code_end() - 4),
                 runtime_call_Relocation::spec(),
                 RELOC_DISP32);

  // popq $dst
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_B);
  }
  emit_opcode(cbuf, 0x58 | (dstenc & 7));

  // done:
%}
// Slow-path fixup after cvttsd2siq: same structure as f2l_fixup, but
// the spill uses movsd (F2 prefix) and the d2l_fixup stub is called.
// The short jne displacement is hand-counted as in f2i_fixup.
enc_class d2l_fixup(rRegL dst, regD src)
%{
  int dstenc = $dst$$reg;
  int srcenc = $src$$reg;
  address const_address = (address) StubRoutines::x86::double_sign_flip();

  // cmpq $dst, [0x8000000000000000]
  cbuf.set_inst_mark();
  emit_opcode(cbuf, dstenc < 8 ? Assembler::REX_W : Assembler::REX_WR);
  emit_opcode(cbuf, 0x39);
  // XXX reg_mem doesn't support RIP-relative addressing yet
  emit_rm(cbuf, 0x0, dstenc & 7, 0x5); // 00 reg 101
  emit_d32_reloc(cbuf, const_address);


  // jne,s done
  emit_opcode(cbuf, 0x75);
  if (srcenc < 8 && dstenc < 8) {
    emit_d8(cbuf, 0xF);
  } else if (srcenc >= 8 && dstenc >= 8) {
    emit_d8(cbuf, 0x11);
  } else {
    emit_d8(cbuf, 0x10);
  }

  // subq rsp, #8
  emit_opcode(cbuf, Assembler::REX_W);
  emit_opcode(cbuf, 0x83);
  emit_rm(cbuf, 0x3, 0x5, RSP_enc);
  emit_d8(cbuf, 8);

  // movsd [rsp], $src
  emit_opcode(cbuf, 0xF2);
  if (srcenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_R);
  }
  emit_opcode(cbuf, 0x0F);
  emit_opcode(cbuf, 0x11);
  encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes

  // call d2l_fixup
  cbuf.set_inst_mark();
  emit_opcode(cbuf, 0xE8);
  emit_d32_reloc(cbuf,
                 (int)
                 (StubRoutines::x86::d2l_fixup() - cbuf.code_end() - 4),
                 runtime_call_Relocation::spec(),
                 RELOC_DISP32);

  // popq $dst
  if (dstenc >= 8) {
    emit_opcode(cbuf, Assembler::REX_B);
  }
  emit_opcode(cbuf, 0x58 | (dstenc & 7));

  // done:
%}
// Acquire barrier: intentionally emits nothing on x86 (loads are not
// reordered with other loads under the x86 memory model).
enc_class enc_membar_acquire
%{
  // [jk] not needed currently, if you enable this and it really
  // emits code don't forget to the remove the "size(0)" line in
  // membar_acquire()
  // MacroAssembler masm(&cbuf);
  // masm.membar(Assembler::Membar_mask_bits(Assembler::LoadStore |
  //                                         Assembler::LoadLoad));
%}
// Release barrier: intentionally emits nothing on x86 (stores are not
// reordered with prior loads/stores under the x86 memory model).
enc_class enc_membar_release
%{
  // [jk] not needed currently, if you enable this and it really
  // emits code don't forget to the remove the "size(0)" line in
  // membar_release()
  // MacroAssembler masm(&cbuf);
  // masm.membar(Assembler::Membar_mask_bits(Assembler::LoadStore |
  //                                         Assembler::StoreStore));
%}
// Full (StoreLoad) barrier for volatile semantics -- the one ordering
// x86 does not guarantee by itself.
enc_class enc_membar_volatile
%{
  MacroAssembler masm(&cbuf);
  masm.membar(Assembler::Membar_mask_bits(Assembler::StoreLoad |
                                          Assembler::StoreStore));
%}
4222 // Safepoint Poll. This polls the safepoint page, and causes an
4223 // exception if it is not readable. Unfortunately, it kills
4224 // RFLAGS in the process.
4225 enc_class enc_safepoint_poll
4226 %{
// Hand-emits "testl %rax, off(%rip)" byte by byte because the reg_mem
// encoder cannot express RIP-relative addressing (see XXX note below).
// The load faults if the polling page has been protected, triggering the
// safepoint; RFLAGS is clobbered by testl as a side effect.
4227 // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes
4228 // XXX reg_mem doesn't support RIP-relative addressing yet
4229 cbuf.set_inst_mark();
// Mark and relocate at the instruction start so the runtime can identify
// this as a poll site.
4230 cbuf.relocate(cbuf.inst_mark(), relocInfo::poll_type, 0); // XXX
4231 emit_opcode(cbuf, 0x85); // testl
4232 emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5
4233 // cbuf.inst_mark() is beginning of instruction
// 32-bit displacement pointing at the polling page.
4234 emit_d32_reloc(cbuf, os::get_polling_page());
4235 // relocInfo::poll_type,
4236 %}
4237 %}
4241 //----------FRAME--------------------------------------------------------------
4242 // Definition of frame structure and management information.
4243 //
4244 // S T A C K L A Y O U T Allocators stack-slot number
4245 // | (to get allocators register number
4246 // G Owned by | | v add OptoReg::stack0())
4247 // r CALLER | |
4248 // o | +--------+ pad to even-align allocators stack-slot
4249 // w V | pad0 | numbers; owned by CALLER
4250 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
4251 // h ^ | in | 5
4252 // | | args | 4 Holes in incoming args owned by SELF
4253 // | | | | 3
4254 // | | +--------+
4255 // V | | old out| Empty on Intel, window on Sparc
4256 // | old |preserve| Must be even aligned.
4257 // | SP-+--------+----> Matcher::_old_SP, even aligned
4258 // | | in | 3 area for Intel ret address
4259 // Owned by |preserve| Empty on Sparc.
4260 // SELF +--------+
4261 // | | pad2 | 2 pad to align old SP
4262 // | +--------+ 1
4263 // | | locks | 0
4264 // | +--------+----> OptoReg::stack0(), even aligned
4265 // | | pad1 | 11 pad to align new SP
4266 // | +--------+
4267 // | | | 10
4268 // | | spills | 9 spills
4269 // V | | 8 (pad0 slot for callee)
4270 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
4271 // ^ | out | 7
4272 // | | args | 6 Holes in outgoing args owned by CALLEE
4273 // Owned by +--------+
4274 // CALLEE | new out| 6 Empty on Intel, window on Sparc
4275 // | new |preserve| Must be even-aligned.
4276 // | SP-+--------+----> Matcher::_new_SP, even aligned
4277 // | | |
4278 //
4279 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
4280 // known from SELF's arguments and the Java calling convention.
4281 // Region 6-7 is determined per call site.
4282 // Note 2: If the calling convention leaves holes in the incoming argument
4283 // area, those holes are owned by SELF. Holes in the outgoing area
4284 // are owned by the CALLEE. Holes should not be necessary in the
4285 // incoming area, as the Java calling convention is completely under
4286 // the control of the AD file. Doubles can be sorted and packed to
4287 // avoid holes. Holes in the outgoing arguments may be necessary for
4288 // varargs C calling conventions.
4289 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
4290 // even aligned with pad0 as needed.
4291 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
4292 // region 6-11 is even aligned; it may be padded out more so that
4293 // the region from SP to FP meets the minimum stack alignment.
4294 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
4295 // alignment. Region 11, pad1, may be dynamically extended so that
4296 // SP meets the minimum alignment.
4298 frame
4299 %{
4300 // What direction does stack grow in (assumed to be same for C & Java)
4301 stack_direction(TOWARDS_LOW);
4303 // These three registers define part of the calling convention
4304 // between compiled code and the interpreter.
4305 inline_cache_reg(RAX); // Inline Cache Register
4306 interpreter_method_oop_reg(RBX); // Method Oop Register when
4307 // calling interpreter
4309 // Optional: name the operand used by cisc-spilling to access
4310 // [stack_pointer + offset]
4311 cisc_spilling_operand_name(indOffset32);
4313 // Number of stack slots consumed by locking an object
4314 sync_stack_slots(2);
4316 // Compiled code's Frame Pointer
4317 frame_pointer(RSP);
4319 // Interpreter stores its frame pointer in a register which is
4320 // stored to the stack by I2CAdaptors.
4321 // I2CAdaptors convert from interpreted java to compiled java.
4322 interpreter_frame_pointer(RBP);
4324 // Stack alignment requirement
4325 stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)
4327 // Number of stack slots between incoming argument block and the start of
4328 // a new frame. The PROLOG must add this many slots to the stack. The
4329 // EPILOG must remove this many slots. amd64 needs two slots for
4330 // return address.
4331 in_preserve_stack_slots(4 + 2 * VerifyStackAtCalls);
4333 // Number of outgoing stack slots killed above the out_preserve_stack_slots
4334 // for calls to C. Supports the var-args backing area for register parms.
4335 varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);
4337 // The after-PROLOG location of the return address. Location of
4338 // return address specifies a type (REG or STACK) and a number
4339 // representing the register number (i.e. - use a register name) or
4340 // stack slot.
4341 // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
4342 // Otherwise, it is above the locks and verification slot and alignment word
4343 return_addr(STACK - 2 +
4344 round_to(2 + 2 * VerifyStackAtCalls +
4345 Compile::current()->fixed_slots(),
4346 WordsPerLong * 2));
4348 // Body of function which returns an integer array locating
4349 // arguments either in registers or in stack slots. Passed an array
4350 // of ideal registers called "sig" and a "length" count. Stack-slot
4351 // offsets are based on outgoing arguments, i.e. a CALLER setting up
4352 // arguments for a CALLEE. Incoming stack arguments are
4353 // automatically biased by the preserve_stack_slots field above.
4355 calling_convention
4356 %{
4357 // No difference between ingoing/outgoing just pass false
4358 SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
4359 %}
4361 c_calling_convention
4362 %{
4363 // This is obviously always outgoing
4364 (void) SharedRuntime::c_calling_convention(sig_bt, regs, length);
4365 %}
4367 // Location of compiled Java return values. Same as C for now.
4368 return_value
4369 %{
4370 assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
4371 "only return normal values");
// lo/hi give the low and high halves of the return register pair, indexed
// by ideal register type. The two leading zero slots are placeholders for
// the ideal types below Op_RegN — presumably never reached given the
// assert above; confirm against the ideal opcode ordering.
4373 static const int lo[Op_RegL + 1] = {
4374 0,
4375 0,
4376 RAX_num, // Op_RegN
4377 RAX_num, // Op_RegI
4378 RAX_num, // Op_RegP
4379 XMM0_num, // Op_RegF
4380 XMM0_num, // Op_RegD
4381 RAX_num // Op_RegL
4382 };
// OptoReg::Bad marks types whose value fits entirely in the low half.
4383 static const int hi[Op_RegL + 1] = {
4384 0,
4385 0,
4386 OptoReg::Bad, // Op_RegN
4387 OptoReg::Bad, // Op_RegI
4388 RAX_H_num, // Op_RegP
4389 OptoReg::Bad, // Op_RegF
4390 XMM0_H_num, // Op_RegD
4391 RAX_H_num // Op_RegL
4392 };
// Sanity check: the tables cover every machine leaf type.
4393 assert(ARRAY_SIZE(hi) == _last_machine_leaf - 1, "missing type");
4394 return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
4395 %}
4396 %}
4398 //----------ATTRIBUTES---------------------------------------------------------
4399 //----------Operand Attributes-------------------------------------------------
4400 op_attrib op_cost(0); // Required cost attribute
4402 //----------Instruction Attributes---------------------------------------------
4403 ins_attrib ins_cost(100); // Required cost attribute
4404 ins_attrib ins_size(8); // Required size attribute (in bits)
4405 ins_attrib ins_pc_relative(0); // Required PC Relative flag
4406 ins_attrib ins_short_branch(0); // Required flag: is this instruction
4407 // a non-matching short branch variant
4408 // of some long branch?
4409 ins_attrib ins_alignment(1); // Required alignment attribute (must
4410 // be a power of 2) specifies the
4411 // alignment that some part of the
4412 // instruction (not necessarily the
4413 // start) requires. If > 1, a
4414 // compute_padding() function must be
4415 // provided for the instruction
4417 //----------OPERANDS-----------------------------------------------------------
4418 // Operand definitions must precede instruction definitions for correct parsing
4419 // in the ADLC because operands constitute user defined types which are used in
4420 // instruction definitions.
4422 //----------Simple Operands----------------------------------------------------
4423 // Immediate Operands
4424 // Integer Immediate
4425 operand immI()
4426 %{
4427 match(ConI);
4429 op_cost(10);
4430 format %{ %}
4431 interface(CONST_INTER);
4432 %}
4434 // Constant for test vs zero
4435 operand immI0()
4436 %{
4437 predicate(n->get_int() == 0);
4438 match(ConI);
4440 op_cost(0);
4441 format %{ %}
4442 interface(CONST_INTER);
4443 %}
4445 // Constant for increment
4446 operand immI1()
4447 %{
4448 predicate(n->get_int() == 1);
4449 match(ConI);
4451 op_cost(0);
4452 format %{ %}
4453 interface(CONST_INTER);
4454 %}
4456 // Constant for decrement
4457 operand immI_M1()
4458 %{
4459 predicate(n->get_int() == -1);
4460 match(ConI);
4462 op_cost(0);
4463 format %{ %}
4464 interface(CONST_INTER);
4465 %}
4467 // Valid scale values for addressing modes
4468 operand immI2()
4469 %{
4470 predicate(0 <= n->get_int() && (n->get_int() <= 3));
4471 match(ConI);
4473 format %{ %}
4474 interface(CONST_INTER);
4475 %}
// Int Immediate representable as a sign-extended 8-bit value [-128, 127];
// cheaper than a full immI (op_cost 5 vs 10).
4477 operand immI8()
4478 %{
4479 predicate((-0x80 <= n->get_int()) && (n->get_int() < 0x80));
4480 match(ConI);
4482 op_cost(5);
4483 format %{ %}
4484 interface(CONST_INTER);
4485 %}
// Int Immediate representable as a signed 16-bit value [-32768, 32767].
4487 operand immI16()
4488 %{
4489 predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
4490 match(ConI);
4492 op_cost(10);
4493 format %{ %}
4494 interface(CONST_INTER);
4495 %}
4497 // Constant for long shifts
4498 operand immI_32()
4499 %{
4500 predicate( n->get_int() == 32 );
4501 match(ConI);
4503 op_cost(0);
4504 format %{ %}
4505 interface(CONST_INTER);
4506 %}
4508 // Constant for long shifts
4509 operand immI_64()
4510 %{
4511 predicate( n->get_int() == 64 );
4512 match(ConI);
4514 op_cost(0);
4515 format %{ %}
4516 interface(CONST_INTER);
4517 %}
4519 // Pointer Immediate
4520 operand immP()
4521 %{
4522 match(ConP);
4524 op_cost(10);
4525 format %{ %}
4526 interface(CONST_INTER);
4527 %}
4529 // NULL Pointer Immediate
4530 operand immP0()
4531 %{
4532 predicate(n->get_ptr() == 0);
4533 match(ConP);
4535 op_cost(5);
4536 format %{ %}
4537 interface(CONST_INTER);
4538 %}
4540 // Pointer Immediate
4541 operand immN() %{
4542 match(ConN);
4544 op_cost(10);
4545 format %{ %}
4546 interface(CONST_INTER);
4547 %}
4549 // NULL Pointer Immediate
4550 operand immN0() %{
4551 predicate(n->get_narrowcon() == 0);
4552 match(ConN);
4554 op_cost(5);
4555 format %{ %}
4556 interface(CONST_INTER);
4557 %}
// Pointer Immediate with bit 31 clear, i.e. usable as a 32-bit
// zero/sign-extended constant. Oop pointers are excluded by the predicate —
// presumably because oops are relocatable and cannot be baked in as a
// 32-bit immediate; confirm against the users of this operand.
4559 operand immP31()
4560 %{
4561 predicate(!n->as_Type()->type()->isa_oopptr()
4562 && (n->get_ptr() >> 31) == 0);
4563 match(ConP);
4565 op_cost(5);
4566 format %{ %}
4567 interface(CONST_INTER);
4568 %}
4571 // Long Immediate
4572 operand immL()
4573 %{
4574 match(ConL);
4576 op_cost(20);
4577 format %{ %}
4578 interface(CONST_INTER);
4579 %}
4581 // Long Immediate 8-bit
4582 operand immL8()
4583 %{
4584 predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
4585 match(ConL);
4587 op_cost(5);
4588 format %{ %}
4589 interface(CONST_INTER);
4590 %}
4592 // Long Immediate 32-bit unsigned
4593 operand immUL32()
4594 %{
4595 predicate(n->get_long() == (unsigned int) (n->get_long()));
4596 match(ConL);
4598 op_cost(10);
4599 format %{ %}
4600 interface(CONST_INTER);
4601 %}
4603 // Long Immediate 32-bit signed
4604 operand immL32()
4605 %{
4606 predicate(n->get_long() == (int) (n->get_long()));
4607 match(ConL);
4609 op_cost(15);
4610 format %{ %}
4611 interface(CONST_INTER);
4612 %}
4614 // Long Immediate zero
4615 operand immL0()
4616 %{
4617 predicate(n->get_long() == 0L);
4618 match(ConL);
4620 op_cost(10);
4621 format %{ %}
4622 interface(CONST_INTER);
4623 %}
4625 // Constant for increment
4626 operand immL1()
4627 %{
4628 predicate(n->get_long() == 1);
4629 match(ConL);
4631 format %{ %}
4632 interface(CONST_INTER);
4633 %}
4635 // Constant for decrement
4636 operand immL_M1()
4637 %{
4638 predicate(n->get_long() == -1);
4639 match(ConL);
4641 format %{ %}
4642 interface(CONST_INTER);
4643 %}
4645 // Long Immediate: the value 10
4646 operand immL10()
4647 %{
4648 predicate(n->get_long() == 10);
4649 match(ConL);
4651 format %{ %}
4652 interface(CONST_INTER);
4653 %}
4655 // Long immediate from 0 to 127.
4656 // Used for a shorter form of long mul by 10.
4657 operand immL_127()
4658 %{
4659 predicate(0 <= n->get_long() && n->get_long() < 0x80);
4660 match(ConL);
4662 op_cost(10);
4663 format %{ %}
4664 interface(CONST_INTER);
4665 %}
4667 // Long Immediate: low 32-bit mask
4668 operand immL_32bits()
4669 %{
4670 predicate(n->get_long() == 0xFFFFFFFFL);
4671 match(ConL);
4672 op_cost(20);
4674 format %{ %}
4675 interface(CONST_INTER);
4676 %}
4678 // Float Immediate zero
4679 operand immF0()
4680 %{
4681 predicate(jint_cast(n->getf()) == 0);
4682 match(ConF);
4684 op_cost(5);
4685 format %{ %}
4686 interface(CONST_INTER);
4687 %}
4689 // Float Immediate
4690 operand immF()
4691 %{
4692 match(ConF);
4694 op_cost(15);
4695 format %{ %}
4696 interface(CONST_INTER);
4697 %}
4699 // Double Immediate zero
4700 operand immD0()
4701 %{
4702 predicate(jlong_cast(n->getd()) == 0);
4703 match(ConD);
4705 op_cost(5);
4706 format %{ %}
4707 interface(CONST_INTER);
4708 %}
4710 // Double Immediate
4711 operand immD()
4712 %{
4713 match(ConD);
4715 op_cost(15);
4716 format %{ %}
4717 interface(CONST_INTER);
4718 %}
4720 // Immediates for special shifts (sign extend)
4722 // Constants for increment
4723 operand immI_16()
4724 %{
4725 predicate(n->get_int() == 16);
4726 match(ConI);
4728 format %{ %}
4729 interface(CONST_INTER);
4730 %}
4732 operand immI_24()
4733 %{
4734 predicate(n->get_int() == 24);
4735 match(ConI);
4737 format %{ %}
4738 interface(CONST_INTER);
4739 %}
4741 // Constant for byte-wide masking
4742 operand immI_255()
4743 %{
4744 predicate(n->get_int() == 255);
4745 match(ConI);
4747 format %{ %}
4748 interface(CONST_INTER);
4749 %}
4751 // Constant for short-wide masking
4752 operand immI_65535()
4753 %{
4754 predicate(n->get_int() == 65535);
4755 match(ConI);
4757 format %{ %}
4758 interface(CONST_INTER);
4759 %}
4761 // Constant for byte-wide masking
4762 operand immL_255()
4763 %{
4764 predicate(n->get_long() == 255);
4765 match(ConL);
4767 format %{ %}
4768 interface(CONST_INTER);
4769 %}
4771 // Constant for short-wide masking
4772 operand immL_65535()
4773 %{
4774 predicate(n->get_long() == 65535);
4775 match(ConL);
4777 format %{ %}
4778 interface(CONST_INTER);
4779 %}
4781 // Register Operands
4782 // Integer Register
4783 operand rRegI()
4784 %{
4785 constraint(ALLOC_IN_RC(int_reg));
4786 match(RegI);
4788 match(rax_RegI);
4789 match(rbx_RegI);
4790 match(rcx_RegI);
4791 match(rdx_RegI);
4792 match(rdi_RegI);
4794 format %{ %}
4795 interface(REG_INTER);
4796 %}
4798 // Special Registers
4799 operand rax_RegI()
4800 %{
4801 constraint(ALLOC_IN_RC(int_rax_reg));
4802 match(RegI);
4803 match(rRegI);
4805 format %{ "RAX" %}
4806 interface(REG_INTER);
4807 %}
4809 // Special Registers
4810 operand rbx_RegI()
4811 %{
4812 constraint(ALLOC_IN_RC(int_rbx_reg));
4813 match(RegI);
4814 match(rRegI);
4816 format %{ "RBX" %}
4817 interface(REG_INTER);
4818 %}
4820 operand rcx_RegI()
4821 %{
4822 constraint(ALLOC_IN_RC(int_rcx_reg));
4823 match(RegI);
4824 match(rRegI);
4826 format %{ "RCX" %}
4827 interface(REG_INTER);
4828 %}
4830 operand rdx_RegI()
4831 %{
4832 constraint(ALLOC_IN_RC(int_rdx_reg));
4833 match(RegI);
4834 match(rRegI);
4836 format %{ "RDX" %}
4837 interface(REG_INTER);
4838 %}
4840 operand rdi_RegI()
4841 %{
4842 constraint(ALLOC_IN_RC(int_rdi_reg));
4843 match(RegI);
4844 match(rRegI);
4846 format %{ "RDI" %}
4847 interface(REG_INTER);
4848 %}
4850 operand no_rcx_RegI()
4851 %{
4852 constraint(ALLOC_IN_RC(int_no_rcx_reg));
4853 match(RegI);
4854 match(rax_RegI);
4855 match(rbx_RegI);
4856 match(rdx_RegI);
4857 match(rdi_RegI);
4859 format %{ %}
4860 interface(REG_INTER);
4861 %}
4863 operand no_rax_rdx_RegI()
4864 %{
4865 constraint(ALLOC_IN_RC(int_no_rax_rdx_reg));
4866 match(RegI);
4867 match(rbx_RegI);
4868 match(rcx_RegI);
4869 match(rdi_RegI);
4871 format %{ %}
4872 interface(REG_INTER);
4873 %}
4875 // Pointer Register
4876 operand any_RegP()
4877 %{
4878 constraint(ALLOC_IN_RC(any_reg));
4879 match(RegP);
4880 match(rax_RegP);
4881 match(rbx_RegP);
4882 match(rdi_RegP);
4883 match(rsi_RegP);
4884 match(rbp_RegP);
4885 match(r15_RegP);
4886 match(rRegP);
4888 format %{ %}
4889 interface(REG_INTER);
4890 %}
4892 operand rRegP()
4893 %{
4894 constraint(ALLOC_IN_RC(ptr_reg));
4895 match(RegP);
4896 match(rax_RegP);
4897 match(rbx_RegP);
4898 match(rdi_RegP);
4899 match(rsi_RegP);
4900 match(rbp_RegP);
4901 match(r15_RegP); // See Q&A below about r15_RegP.
4903 format %{ %}
4904 interface(REG_INTER);
4905 %}
// Narrow oop (compressed pointer) register operand. Allocates from the
// 32-bit int register class since a RegN value occupies a single 32-bit
// register half.
4907 operand rRegN() %{
4908 constraint(ALLOC_IN_RC(int_reg));
4909 match(RegN);
4911 format %{ %}
4912 interface(REG_INTER);
4913 %}
4915 // Question: Why is r15_RegP (the read-only TLS register) a match for rRegP?
4916 // Answer: Operand match rules govern the DFA as it processes instruction inputs.
4917 // It's fine for an instruction input which expects rRegP to match a r15_RegP.
4918 // The output of an instruction is controlled by the allocator, which respects
4919 // register class masks, not match rules. Unless an instruction mentions
4920 // r15_RegP or any_RegP explicitly as its output, r15 will not be considered
4921 // by the allocator as an input.
4923 operand no_rax_RegP()
4924 %{
4925 constraint(ALLOC_IN_RC(ptr_no_rax_reg));
4926 match(RegP);
4927 match(rbx_RegP);
4928 match(rsi_RegP);
4929 match(rdi_RegP);
4931 format %{ %}
4932 interface(REG_INTER);
4933 %}
4935 operand no_rbp_RegP()
4936 %{
4937 constraint(ALLOC_IN_RC(ptr_no_rbp_reg));
4938 match(RegP);
4939 match(rbx_RegP);
4940 match(rsi_RegP);
4941 match(rdi_RegP);
4943 format %{ %}
4944 interface(REG_INTER);
4945 %}
4947 operand no_rax_rbx_RegP()
4948 %{
4949 constraint(ALLOC_IN_RC(ptr_no_rax_rbx_reg));
4950 match(RegP);
4951 match(rsi_RegP);
4952 match(rdi_RegP);
4954 format %{ %}
4955 interface(REG_INTER);
4956 %}
4958 // Special Registers
4959 // Return a pointer value
4960 operand rax_RegP()
4961 %{
4962 constraint(ALLOC_IN_RC(ptr_rax_reg));
4963 match(RegP);
4964 match(rRegP);
4966 format %{ %}
4967 interface(REG_INTER);
4968 %}
4970 // Special Registers
4971 // Return a compressed pointer value
4972 operand rax_RegN()
4973 %{
4974 constraint(ALLOC_IN_RC(int_rax_reg));
4975 match(RegN);
4976 match(rRegN);
4978 format %{ %}
4979 interface(REG_INTER);
4980 %}
4982 // Used in AtomicAdd
4983 operand rbx_RegP()
4984 %{
4985 constraint(ALLOC_IN_RC(ptr_rbx_reg));
4986 match(RegP);
4987 match(rRegP);
4989 format %{ %}
4990 interface(REG_INTER);
4991 %}
4993 operand rsi_RegP()
4994 %{
4995 constraint(ALLOC_IN_RC(ptr_rsi_reg));
4996 match(RegP);
4997 match(rRegP);
4999 format %{ %}
5000 interface(REG_INTER);
5001 %}
5003 // Used in rep stosq
5004 operand rdi_RegP()
5005 %{
5006 constraint(ALLOC_IN_RC(ptr_rdi_reg));
5007 match(RegP);
5008 match(rRegP);
5010 format %{ %}
5011 interface(REG_INTER);
5012 %}
5014 operand rbp_RegP()
5015 %{
5016 constraint(ALLOC_IN_RC(ptr_rbp_reg));
5017 match(RegP);
5018 match(rRegP);
5020 format %{ %}
5021 interface(REG_INTER);
5022 %}
5024 operand r15_RegP()
5025 %{
5026 constraint(ALLOC_IN_RC(ptr_r15_reg));
5027 match(RegP);
5028 match(rRegP);
5030 format %{ %}
5031 interface(REG_INTER);
5032 %}
5034 operand rRegL()
5035 %{
5036 constraint(ALLOC_IN_RC(long_reg));
5037 match(RegL);
5038 match(rax_RegL);
5039 match(rdx_RegL);
5041 format %{ %}
5042 interface(REG_INTER);
5043 %}
5045 // Special Registers
5046 operand no_rax_rdx_RegL()
5047 %{
5048 constraint(ALLOC_IN_RC(long_no_rax_rdx_reg));
5049 match(RegL);
5050 match(rRegL);
5052 format %{ %}
5053 interface(REG_INTER);
5054 %}
5056 operand no_rax_RegL()
5057 %{
5058 constraint(ALLOC_IN_RC(long_no_rax_rdx_reg));
5059 match(RegL);
5060 match(rRegL);
5061 match(rdx_RegL);
5063 format %{ %}
5064 interface(REG_INTER);
5065 %}
5067 operand no_rcx_RegL()
5068 %{
5069 constraint(ALLOC_IN_RC(long_no_rcx_reg));
5070 match(RegL);
5071 match(rRegL);
5073 format %{ %}
5074 interface(REG_INTER);
5075 %}
5077 operand rax_RegL()
5078 %{
5079 constraint(ALLOC_IN_RC(long_rax_reg));
5080 match(RegL);
5081 match(rRegL);
5083 format %{ "RAX" %}
5084 interface(REG_INTER);
5085 %}
5087 operand rcx_RegL()
5088 %{
5089 constraint(ALLOC_IN_RC(long_rcx_reg));
5090 match(RegL);
5091 match(rRegL);
5093 format %{ %}
5094 interface(REG_INTER);
5095 %}
5097 operand rdx_RegL()
5098 %{
5099 constraint(ALLOC_IN_RC(long_rdx_reg));
5100 match(RegL);
5101 match(rRegL);
5103 format %{ %}
5104 interface(REG_INTER);
5105 %}
5107 // Flags register, used as output of compare instructions
5108 operand rFlagsReg()
5109 %{
5110 constraint(ALLOC_IN_RC(int_flags));
5111 match(RegFlags);
5113 format %{ "RFLAGS" %}
5114 interface(REG_INTER);
5115 %}
5117 // Flags register, used as output of FLOATING POINT compare instructions
5118 operand rFlagsRegU()
5119 %{
5120 constraint(ALLOC_IN_RC(int_flags));
5121 match(RegFlags);
5123 format %{ "RFLAGS_U" %}
5124 interface(REG_INTER);
5125 %}
// Flags register for FP compares whose unordered case needs no fixup.
// predicate(false) keeps the DFA from ever matching this operand on its
// own; instructions must name it explicitly.
5127 operand rFlagsRegUCF() %{
5128 constraint(ALLOC_IN_RC(int_flags));
5129 match(RegFlags);
5130 predicate(false);
5132 format %{ "RFLAGS_U_CF" %}
5133 interface(REG_INTER);
5134 %}
5136 // Float register operands
5137 operand regF()
5138 %{
5139 constraint(ALLOC_IN_RC(float_reg));
5140 match(RegF);
5142 format %{ %}
5143 interface(REG_INTER);
5144 %}
5146 // Double register operands
5147 operand regD()
5148 %{
5149 constraint(ALLOC_IN_RC(double_reg));
5150 match(RegD);
5152 format %{ %}
5153 interface(REG_INTER);
5154 %}
5157 //----------Memory Operands----------------------------------------------------
5158 // Direct Memory Operand
5159 // operand direct(immP addr)
5160 // %{
5161 // match(addr);
5163 // format %{ "[$addr]" %}
5164 // interface(MEMORY_INTER) %{
5165 // base(0xFFFFFFFF);
5166 // index(0x4);
5167 // scale(0x0);
5168 // disp($addr);
5169 // %}
5170 // %}
5172 // Indirect Memory Operand
5173 operand indirect(any_RegP reg)
5174 %{
5175 constraint(ALLOC_IN_RC(ptr_reg));
5176 match(reg);
5178 format %{ "[$reg]" %}
5179 interface(MEMORY_INTER) %{
5180 base($reg);
5181 index(0x4);
5182 scale(0x0);
5183 disp(0x0);
5184 %}
5185 %}
5187 // Indirect Memory Plus Short Offset Operand
5188 operand indOffset8(any_RegP reg, immL8 off)
5189 %{
5190 constraint(ALLOC_IN_RC(ptr_reg));
5191 match(AddP reg off);
5193 format %{ "[$reg + $off (8-bit)]" %}
5194 interface(MEMORY_INTER) %{
5195 base($reg);
5196 index(0x4);
5197 scale(0x0);
5198 disp($off);
5199 %}
5200 %}
5202 // Indirect Memory Plus Long Offset Operand
5203 operand indOffset32(any_RegP reg, immL32 off)
5204 %{
5205 constraint(ALLOC_IN_RC(ptr_reg));
5206 match(AddP reg off);
5208 format %{ "[$reg + $off (32-bit)]" %}
5209 interface(MEMORY_INTER) %{
5210 base($reg);
5211 index(0x4);
5212 scale(0x0);
5213 disp($off);
5214 %}
5215 %}
5217 // Indirect Memory Plus Index Register Plus Offset Operand
5218 operand indIndexOffset(any_RegP reg, rRegL lreg, immL32 off)
5219 %{
5220 constraint(ALLOC_IN_RC(ptr_reg));
5221 match(AddP (AddP reg lreg) off);
5223 op_cost(10);
5224 format %{"[$reg + $off + $lreg]" %}
5225 interface(MEMORY_INTER) %{
5226 base($reg);
5227 index($lreg);
5228 scale(0x0);
5229 disp($off);
5230 %}
5231 %}
5233 // Indirect Memory Plus Index Register Plus Offset Operand
5234 operand indIndex(any_RegP reg, rRegL lreg)
5235 %{
5236 constraint(ALLOC_IN_RC(ptr_reg));
5237 match(AddP reg lreg);
5239 op_cost(10);
5240 format %{"[$reg + $lreg]" %}
5241 interface(MEMORY_INTER) %{
5242 base($reg);
5243 index($lreg);
5244 scale(0x0);
5245 disp(0x0);
5246 %}
5247 %}
5249 // Indirect Memory Times Scale Plus Index Register
5250 operand indIndexScale(any_RegP reg, rRegL lreg, immI2 scale)
5251 %{
5252 constraint(ALLOC_IN_RC(ptr_reg));
5253 match(AddP reg (LShiftL lreg scale));
5255 op_cost(10);
5256 format %{"[$reg + $lreg << $scale]" %}
5257 interface(MEMORY_INTER) %{
5258 base($reg);
5259 index($lreg);
5260 scale($scale);
5261 disp(0x0);
5262 %}
5263 %}
5265 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5266 operand indIndexScaleOffset(any_RegP reg, immL32 off, rRegL lreg, immI2 scale)
5267 %{
5268 constraint(ALLOC_IN_RC(ptr_reg));
5269 match(AddP (AddP reg (LShiftL lreg scale)) off);
5271 op_cost(10);
5272 format %{"[$reg + $off + $lreg << $scale]" %}
5273 interface(MEMORY_INTER) %{
5274 base($reg);
5275 index($lreg);
5276 scale($scale);
5277 disp($off);
5278 %}
5279 %}
5281 // Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand
5282 operand indPosIndexScaleOffset(any_RegP reg, immL32 off, rRegI idx, immI2 scale)
5283 %{
5284 constraint(ALLOC_IN_RC(ptr_reg));
5285 predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5286 match(AddP (AddP reg (LShiftL (ConvI2L idx) scale)) off);
5288 op_cost(10);
5289 format %{"[$reg + $off + $idx << $scale]" %}
5290 interface(MEMORY_INTER) %{
5291 base($reg);
5292 index($idx);
5293 scale($scale);
5294 disp($off);
5295 %}
5296 %}
5298 // Indirect Narrow Oop Plus Offset Operand
5299 // Note: x86 architecture doesn't support "scale * index + offset" without a base
5300 // we can't free r12 even with Universe::narrow_oop_base() == NULL.
5301 operand indCompressedOopOffset(rRegN reg, immL32 off) %{
5302 predicate(UseCompressedOops && (Universe::narrow_oop_shift() != 0));
5303 constraint(ALLOC_IN_RC(ptr_reg));
5304 match(AddP (DecodeN reg) off);
5306 op_cost(10);
5307 format %{"[R12 + $reg << 3 + $off] (compressed oop addressing)" %}
5308 interface(MEMORY_INTER) %{
5309 base(0xc); // R12
5310 index($reg);
5311 scale(0x3);
5312 disp($off);
5313 %}
5314 %}
5316 // Indirect Memory Operand
5317 operand indirectNarrow(rRegN reg)
5318 %{
5319 predicate(Universe::narrow_oop_shift() == 0);
5320 constraint(ALLOC_IN_RC(ptr_reg));
5321 match(DecodeN reg);
5323 format %{ "[$reg]" %}
5324 interface(MEMORY_INTER) %{
5325 base($reg);
5326 index(0x4);
5327 scale(0x0);
5328 disp(0x0);
5329 %}
5330 %}
5332 // Indirect Memory Plus Short Offset Operand
5333 operand indOffset8Narrow(rRegN reg, immL8 off)
5334 %{
5335 predicate(Universe::narrow_oop_shift() == 0);
5336 constraint(ALLOC_IN_RC(ptr_reg));
5337 match(AddP (DecodeN reg) off);
5339 format %{ "[$reg + $off (8-bit)]" %}
5340 interface(MEMORY_INTER) %{
5341 base($reg);
5342 index(0x4);
5343 scale(0x0);
5344 disp($off);
5345 %}
5346 %}
5348 // Indirect Memory Plus Long Offset Operand
5349 operand indOffset32Narrow(rRegN reg, immL32 off)
5350 %{
5351 predicate(Universe::narrow_oop_shift() == 0);
5352 constraint(ALLOC_IN_RC(ptr_reg));
5353 match(AddP (DecodeN reg) off);
5355 format %{ "[$reg + $off (32-bit)]" %}
5356 interface(MEMORY_INTER) %{
5357 base($reg);
5358 index(0x4);
5359 scale(0x0);
5360 disp($off);
5361 %}
5362 %}
5364 // Indirect Memory Plus Index Register Plus Offset Operand
5365 operand indIndexOffsetNarrow(rRegN reg, rRegL lreg, immL32 off)
5366 %{
5367 predicate(Universe::narrow_oop_shift() == 0);
5368 constraint(ALLOC_IN_RC(ptr_reg));
5369 match(AddP (AddP (DecodeN reg) lreg) off);
5371 op_cost(10);
5372 format %{"[$reg + $off + $lreg]" %}
5373 interface(MEMORY_INTER) %{
5374 base($reg);
5375 index($lreg);
5376 scale(0x0);
5377 disp($off);
5378 %}
5379 %}
5381 // Indirect Memory Plus Index Register Plus Offset Operand
5382 operand indIndexNarrow(rRegN reg, rRegL lreg)
5383 %{
5384 predicate(Universe::narrow_oop_shift() == 0);
5385 constraint(ALLOC_IN_RC(ptr_reg));
5386 match(AddP (DecodeN reg) lreg);
5388 op_cost(10);
5389 format %{"[$reg + $lreg]" %}
5390 interface(MEMORY_INTER) %{
5391 base($reg);
5392 index($lreg);
5393 scale(0x0);
5394 disp(0x0);
5395 %}
5396 %}
5398 // Indirect Memory Times Scale Plus Index Register
5399 operand indIndexScaleNarrow(rRegN reg, rRegL lreg, immI2 scale)
5400 %{
5401 predicate(Universe::narrow_oop_shift() == 0);
5402 constraint(ALLOC_IN_RC(ptr_reg));
5403 match(AddP (DecodeN reg) (LShiftL lreg scale));
5405 op_cost(10);
5406 format %{"[$reg + $lreg << $scale]" %}
5407 interface(MEMORY_INTER) %{
5408 base($reg);
5409 index($lreg);
5410 scale($scale);
5411 disp(0x0);
5412 %}
5413 %}
5415 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5416 operand indIndexScaleOffsetNarrow(rRegN reg, immL32 off, rRegL lreg, immI2 scale)
5417 %{
5418 predicate(Universe::narrow_oop_shift() == 0);
5419 constraint(ALLOC_IN_RC(ptr_reg));
5420 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5422 op_cost(10);
5423 format %{"[$reg + $off + $lreg << $scale]" %}
5424 interface(MEMORY_INTER) %{
5425 base($reg);
5426 index($lreg);
5427 scale($scale);
5428 disp($off);
5429 %}
5430 %}
5432 // Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand
5433 operand indPosIndexScaleOffsetNarrow(rRegN reg, immL32 off, rRegI idx, immI2 scale)
5434 %{
5435 constraint(ALLOC_IN_RC(ptr_reg));
5436 predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5437 match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L idx) scale)) off);
5439 op_cost(10);
5440 format %{"[$reg + $off + $idx << $scale]" %}
5441 interface(MEMORY_INTER) %{
5442 base($reg);
5443 index($idx);
5444 scale($scale);
5445 disp($off);
5446 %}
5447 %}
5450 //----------Special Memory Operands--------------------------------------------
5451 // Stack Slot Operand - This operand is used for loading and storing temporary
5452 // values on the stack where a match requires a value to
5453 // flow through memory.
5454 operand stackSlotP(sRegP reg)
5455 %{
5456 constraint(ALLOC_IN_RC(stack_slots));
5457 // No match rule because this operand is only generated in matching
5459 format %{ "[$reg]" %}
5460 interface(MEMORY_INTER) %{
5461 base(0x4); // RSP
5462 index(0x4); // No Index
5463 scale(0x0); // No Scale
5464 disp($reg); // Stack Offset
5465 %}
5466 %}
5468 operand stackSlotI(sRegI reg)
5469 %{
5470 constraint(ALLOC_IN_RC(stack_slots));
5471 // No match rule because this operand is only generated in matching
5473 format %{ "[$reg]" %}
5474 interface(MEMORY_INTER) %{
5475 base(0x4); // RSP
5476 index(0x4); // No Index
5477 scale(0x0); // No Scale
5478 disp($reg); // Stack Offset
5479 %}
5480 %}
5482 operand stackSlotF(sRegF reg)
5483 %{
5484 constraint(ALLOC_IN_RC(stack_slots));
5485 // No match rule because this operand is only generated in matching
5487 format %{ "[$reg]" %}
5488 interface(MEMORY_INTER) %{
5489 base(0x4); // RSP
5490 index(0x4); // No Index
5491 scale(0x0); // No Scale
5492 disp($reg); // Stack Offset
5493 %}
5494 %}
5496 operand stackSlotD(sRegD reg)
5497 %{
5498 constraint(ALLOC_IN_RC(stack_slots));
5499 // No match rule because this operand is only generated in matching
5501 format %{ "[$reg]" %}
5502 interface(MEMORY_INTER) %{
5503 base(0x4); // RSP
5504 index(0x4); // No Index
5505 scale(0x0); // No Scale
5506 disp($reg); // Stack Offset
5507 %}
5508 %}
5509 operand stackSlotL(sRegL reg)
5510 %{
5511 constraint(ALLOC_IN_RC(stack_slots));
5512 // No match rule because this operand is only generated in matching
5514 format %{ "[$reg]" %}
5515 interface(MEMORY_INTER) %{
5516 base(0x4); // RSP
5517 index(0x4); // No Index
5518 scale(0x0); // No Scale
5519 disp($reg); // Stack Offset
5520 %}
5521 %}
5523 //----------Conditional Branch Operands----------------------------------------
5524 // Comparison Op - This is the operation of the comparison, and is limited to
5525 // the following set of codes:
5526 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5527 //
5528 // Other attributes of the comparison, such as unsignedness, are specified
5529 // by the comparison instruction that sets a condition code flags register.
5530 // That result is represented by a flags operand whose subtype is appropriate
5531 // to the unsignedness (etc.) of the comparison.
5532 //
5533 // Later, the instruction which matches both the Comparison Op (a Bool) and
5534 // the flags (produced by the Cmp) specifies the coding of the comparison op
5535 // by matching a specific subtype of Bool operand below, such as cmpOpU.
// Comparison Code
// Signed comparison.  Each entry pairs the x86 condition-code encoding
// (the low nibble of the Jcc/SETcc/CMOVcc opcode) with its assembler suffix.
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x4, "e");
    not_equal(0x5, "ne");
    less(0xC, "l");
    greater_equal(0xD, "ge");
    less_equal(0xE, "le");
    greater(0xF, "g");
  %}
%}
// Comparison Code, unsigned compare.  Used by FP also, with
// C2 (unordered) turned into GT or LT already.  The other bits
// C0 and C3 are turned into Carry & Zero flags.
// Uses the unsigned condition encodings (b/nb/be/nbe) instead of l/ge/le/g.
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x4, "e");
    not_equal(0x5, "ne");
    less(0x2, "b");
    greater_equal(0x3, "nb");
    less_equal(0x6, "be");
    greater(0x7, "nbe");
  %}
%}
// Floating comparisons that don't require any fixup for the unordered case.
// Restricted by the predicate to lt/ge/le/gt tests only; eq/ne need the
// extra-jump form (cmpOpUCF2) to handle NaN correctly.
operand cmpOpUCF() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test == BoolTest::lt ||
            n->as_Bool()->_test._test == BoolTest::ge ||
            n->as_Bool()->_test._test == BoolTest::le ||
            n->as_Bool()->_test._test == BoolTest::gt);
  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x4, "e");
    not_equal(0x5, "ne");
    less(0x2, "b");
    greater_equal(0x3, "nb");
    less_equal(0x6, "be");
    greater(0x7, "nbe");
  %}
%}
// Floating comparisons that can be fixed up with extra conditional jumps.
// Restricted by the predicate to eq/ne tests, where an unordered result
// must be patched up by the matching instruction.
operand cmpOpUCF2() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test == BoolTest::ne ||
            n->as_Bool()->_test._test == BoolTest::eq);
  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x4, "e");
    not_equal(0x5, "ne");
    less(0x2, "b");
    greater_equal(0x3, "nb");
    less_equal(0x6, "be");
    greater(0x7, "nbe");
  %}
%}
5608 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
5610 // instruction definitions by not requiring the AD writer to specify separate
5611 // instructions for every form of operand when the instruction accepts
5612 // multiple operand types with the same basic encoding and format. The classic
5613 // case of this is memory operands.
// All addressing modes an instruction may accept as "memory".  The second
// half lists the narrow-oop (compressed pointer) variants, which are only
// matched when the corresponding instruct predicates allow them.
opclass memory(indirect, indOffset8, indOffset32, indIndexOffset, indIndex,
               indIndexScale, indIndexScaleOffset, indPosIndexScaleOffset,
               indCompressedOopOffset,
               indirectNarrow, indOffset8Narrow, indOffset32Narrow,
               indIndexOffsetNarrow, indIndexNarrow, indIndexScaleNarrow,
               indIndexScaleOffsetNarrow, indPosIndexScaleOffsetNarrow);
5622 //----------PIPELINE-----------------------------------------------------------
5623 // Rules which define the behavior of the target architectures pipeline.
pipeline %{

//----------ATTRIBUTES---------------------------------------------------------
attributes %{
  variable_size_instructions;        // x86 instructions are variable-sized
  max_instructions_per_bundle = 3;   // Up to 3 instructions per bundle
  instruction_unit_size = 1;         // An instruction is 1 bytes long
  instruction_fetch_unit_size = 16;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 16 bytes

  // List of nop instructions
  nops( MachNop );
%}

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// Generic P2/P3 pipeline
// 3 decoders, only D0 handles big operands; a "bundle" is the limit of
// 3 instructions decoded per cycle.
// 2 load/store ops per cycle, 1 branch, 1 FPU,
// 3 ALU op, only ALU0 handles mul instructions.
resources( D0, D1, D2, DECODE = D0 | D1 | D2,
           MS0, MS1, MS2, MEM = MS0 | MS1 | MS2,
           BR, FPU,
           ALU0, ALU1, ALU2, ALU = ALU0 | ALU1 | ALU2);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Generic P2/P3 pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);

//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// Naming convention: ialu or fpu
// Then: _reg
// Then: _reg if there is a 2nd register
// Then: _long if it's a pair of instructions implementing a long
// Then: _fat if it requires the big decoder
//   Or: _mem if it requires the big decoder and a memory unit.

// Integer ALU reg operation
pipe_class ialu_reg(rRegI dst)
%{
    single_instruction;
    dst    : S4(write);
    dst    : S3(read);
    DECODE : S0;        // any decoder
    ALU    : S3;        // any alu
%}

// Long ALU reg operation
pipe_class ialu_reg_long(rRegL dst)
%{
    instruction_count(2);
    dst    : S4(write);
    dst    : S3(read);
    DECODE : S0(2);     // any 2 decoders
    ALU    : S3(2);     // both alus
%}

// Integer ALU reg operation using big decoder
pipe_class ialu_reg_fat(rRegI dst)
%{
    single_instruction;
    dst    : S4(write);
    dst    : S3(read);
    D0     : S0;        // big decoder only
    ALU    : S3;        // any alu
%}

// Long ALU reg operation using big decoder
pipe_class ialu_reg_long_fat(rRegL dst)
%{
    instruction_count(2);
    dst    : S4(write);
    dst    : S3(read);
    D0     : S0(2);     // big decoder only; twice
    ALU    : S3(2);     // any 2 alus
%}

// Integer ALU reg-reg operation
pipe_class ialu_reg_reg(rRegI dst, rRegI src)
%{
    single_instruction;
    dst    : S4(write);
    src    : S3(read);
    DECODE : S0;        // any decoder
    ALU    : S3;        // any alu
%}

// Long ALU reg-reg operation
pipe_class ialu_reg_reg_long(rRegL dst, rRegL src)
%{
    instruction_count(2);
    dst    : S4(write);
    src    : S3(read);
    DECODE : S0(2);     // any 2 decoders
    ALU    : S3(2);     // both alus
%}

// Integer ALU reg-reg operation
pipe_class ialu_reg_reg_fat(rRegI dst, memory src)
%{
    single_instruction;
    dst    : S4(write);
    src    : S3(read);
    D0     : S0;        // big decoder only
    ALU    : S3;        // any alu
%}

// Long ALU reg-reg operation
pipe_class ialu_reg_reg_long_fat(rRegL dst, rRegL src)
%{
    instruction_count(2);
    dst    : S4(write);
    src    : S3(read);
    D0     : S0(2);     // big decoder only; twice
    ALU    : S3(2);     // both alus
%}

// Integer ALU reg-mem operation
pipe_class ialu_reg_mem(rRegI dst, memory mem)
%{
    single_instruction;
    dst    : S5(write);
    mem    : S3(read);
    D0     : S0;        // big decoder only
    ALU    : S4;        // any alu
    MEM    : S3;        // any mem
%}

// Integer mem operation (prefetch)
pipe_class ialu_mem(memory mem)
%{
    single_instruction;
    mem    : S3(read);
    D0     : S0;        // big decoder only
    MEM    : S3;        // any mem
%}

// Integer Store to Memory
pipe_class ialu_mem_reg(memory mem, rRegI src)
%{
    single_instruction;
    mem    : S3(read);
    src    : S5(read);
    D0     : S0;        // big decoder only
    ALU    : S4;        // any alu
    MEM    : S3;
%}

// // Long Store to Memory
// pipe_class ialu_mem_long_reg(memory mem, rRegL src)
// %{
//     instruction_count(2);
//     mem    : S3(read);
//     src    : S5(read);
//     D0     : S0(2);     // big decoder only; twice
//     ALU    : S4(2);     // any 2 alus
//     MEM    : S3(2);     // Both mems
// %}

// Integer Store to Memory
pipe_class ialu_mem_imm(memory mem)
%{
    single_instruction;
    mem    : S3(read);
    D0     : S0;        // big decoder only
    ALU    : S4;        // any alu
    MEM    : S3;
%}

// Integer ALU0 reg-reg operation
pipe_class ialu_reg_reg_alu0(rRegI dst, rRegI src)
%{
    single_instruction;
    dst    : S4(write);
    src    : S3(read);
    D0     : S0;        // Big decoder only
    ALU0   : S3;        // only alu0
%}

// Integer ALU0 reg-mem operation
pipe_class ialu_reg_mem_alu0(rRegI dst, memory mem)
%{
    single_instruction;
    dst    : S5(write);
    mem    : S3(read);
    D0     : S0;        // big decoder only
    ALU0   : S4;        // ALU0 only
    MEM    : S3;        // any mem
%}

// Integer ALU reg-reg operation
pipe_class ialu_cr_reg_reg(rFlagsReg cr, rRegI src1, rRegI src2)
%{
    single_instruction;
    cr     : S4(write);
    src1   : S3(read);
    src2   : S3(read);
    DECODE : S0;        // any decoder
    ALU    : S3;        // any alu
%}

// Integer ALU reg-imm operation
pipe_class ialu_cr_reg_imm(rFlagsReg cr, rRegI src1)
%{
    single_instruction;
    cr     : S4(write);
    src1   : S3(read);
    DECODE : S0;        // any decoder
    ALU    : S3;        // any alu
%}

// Integer ALU reg-mem operation
pipe_class ialu_cr_reg_mem(rFlagsReg cr, rRegI src1, memory src2)
%{
    single_instruction;
    cr     : S4(write);
    src1   : S3(read);
    src2   : S3(read);
    D0     : S0;        // big decoder only
    ALU    : S4;        // any alu
    MEM    : S3;
%}

// Conditional move reg-reg
pipe_class pipe_cmplt( rRegI p, rRegI q, rRegI y)
%{
    instruction_count(4);
    y      : S4(read);
    q      : S3(read);
    p      : S3(read);
    DECODE : S0(4);     // any decoder
%}

// Conditional move reg-reg
pipe_class pipe_cmov_reg( rRegI dst, rRegI src, rFlagsReg cr)
%{
    single_instruction;
    dst    : S4(write);
    src    : S3(read);
    cr     : S3(read);
    DECODE : S0;        // any decoder
%}

// Conditional move reg-mem
pipe_class pipe_cmov_mem( rFlagsReg cr, rRegI dst, memory src)
%{
    single_instruction;
    dst    : S4(write);
    src    : S3(read);
    cr     : S3(read);
    DECODE : S0;        // any decoder
    MEM    : S3;
%}

// Conditional move reg-reg long
pipe_class pipe_cmov_reg_long( rFlagsReg cr, rRegL dst, rRegL src)
%{
    single_instruction;
    dst    : S4(write);
    src    : S3(read);
    cr     : S3(read);
    DECODE : S0(2);     // any 2 decoders
%}

// XXX
// // Conditional move double reg-reg
// pipe_class pipe_cmovD_reg( rFlagsReg cr, regDPR1 dst, regD src)
// %{
//     single_instruction;
//     dst    : S4(write);
//     src    : S3(read);
//     cr     : S3(read);
//     DECODE : S0;        // any decoder
// %}

// Float reg-reg operation
pipe_class fpu_reg(regD dst)
%{
    instruction_count(2);
    dst    : S3(read);
    DECODE : S0(2);     // any 2 decoders
    FPU    : S3;
%}

// Float reg-reg operation
pipe_class fpu_reg_reg(regD dst, regD src)
%{
    instruction_count(2);
    dst    : S4(write);
    src    : S3(read);
    DECODE : S0(2);     // any 2 decoders
    FPU    : S3;
%}

// Float reg-reg operation
pipe_class fpu_reg_reg_reg(regD dst, regD src1, regD src2)
%{
    instruction_count(3);
    dst    : S4(write);
    src1   : S3(read);
    src2   : S3(read);
    DECODE : S0(3);     // any 3 decoders
    FPU    : S3(2);
%}

// Float reg-reg operation
pipe_class fpu_reg_reg_reg_reg(regD dst, regD src1, regD src2, regD src3)
%{
    instruction_count(4);
    dst    : S4(write);
    src1   : S3(read);
    src2   : S3(read);
    src3   : S3(read);
    DECODE : S0(4);     // any 4 decoders
    FPU    : S3(2);
%}

// Float reg-reg operation
pipe_class fpu_reg_mem_reg_reg(regD dst, memory src1, regD src2, regD src3)
%{
    instruction_count(4);
    dst    : S4(write);
    src1   : S3(read);
    src2   : S3(read);
    src3   : S3(read);
    DECODE : S1(3);     // any 3 decoders
    D0     : S0;        // Big decoder only
    FPU    : S3(2);
    MEM    : S3;
%}

// Float reg-mem operation
pipe_class fpu_reg_mem(regD dst, memory mem)
%{
    instruction_count(2);
    dst    : S5(write);
    mem    : S3(read);
    D0     : S0;        // big decoder only
    DECODE : S1;        // any decoder for FPU POP
    FPU    : S4;
    MEM    : S3;        // any mem
%}

// Float reg-mem operation
pipe_class fpu_reg_reg_mem(regD dst, regD src1, memory mem)
%{
    instruction_count(3);
    dst    : S5(write);
    src1   : S3(read);
    mem    : S3(read);
    D0     : S0;        // big decoder only
    DECODE : S1(2);     // any decoder for FPU POP
    FPU    : S4;
    MEM    : S3;        // any mem
%}

// Float mem-reg operation
pipe_class fpu_mem_reg(memory mem, regD src)
%{
    instruction_count(2);
    src    : S5(read);
    mem    : S3(read);
    DECODE : S0;        // any decoder for FPU PUSH
    D0     : S1;        // big decoder only
    FPU    : S4;
    MEM    : S3;        // any mem
%}

pipe_class fpu_mem_reg_reg(memory mem, regD src1, regD src2)
%{
    instruction_count(3);
    src1   : S3(read);
    src2   : S3(read);
    mem    : S3(read);
    DECODE : S0(2);     // any decoder for FPU PUSH
    D0     : S1;        // big decoder only
    FPU    : S4;
    MEM    : S3;        // any mem
%}

pipe_class fpu_mem_reg_mem(memory mem, regD src1, memory src2)
%{
    instruction_count(3);
    src1   : S3(read);
    src2   : S3(read);
    mem    : S4(read);
    DECODE : S0;        // any decoder for FPU PUSH
    D0     : S0(2);     // big decoder only
    FPU    : S4;
    MEM    : S3(2);     // any mem
%}

pipe_class fpu_mem_mem(memory dst, memory src1)
%{
    instruction_count(2);
    src1   : S3(read);
    dst    : S4(read);
    D0     : S0(2);     // big decoder only
    MEM    : S3(2);     // any mem
%}

pipe_class fpu_mem_mem_mem(memory dst, memory src1, memory src2)
%{
    instruction_count(3);
    src1   : S3(read);
    src2   : S3(read);
    dst    : S4(read);
    D0     : S0(3);     // big decoder only
    FPU    : S4;
    MEM    : S3(3);     // any mem
%}

pipe_class fpu_mem_reg_con(memory mem, regD src1)
%{
    instruction_count(3);
    src1   : S4(read);
    mem    : S4(read);
    DECODE : S0;        // any decoder for FPU PUSH
    D0     : S0(2);     // big decoder only
    FPU    : S4;
    MEM    : S3(2);     // any mem
%}

// Float load constant
pipe_class fpu_reg_con(regD dst)
%{
    instruction_count(2);
    dst    : S5(write);
    D0     : S0;        // big decoder only for the load
    DECODE : S1;        // any decoder for FPU POP
    FPU    : S4;
    MEM    : S3;        // any mem
%}

// Float load constant
pipe_class fpu_reg_reg_con(regD dst, regD src)
%{
    instruction_count(3);
    dst    : S5(write);
    src    : S3(read);
    D0     : S0;        // big decoder only for the load
    DECODE : S1(2);     // any decoder for FPU POP
    FPU    : S4;
    MEM    : S3;        // any mem
%}

// UnConditional branch
pipe_class pipe_jmp(label labl)
%{
    single_instruction;
    BR   : S3;
%}

// Conditional branch
pipe_class pipe_jcc(cmpOp cmp, rFlagsReg cr, label labl)
%{
    single_instruction;
    cr    : S1(read);
    BR    : S3;
%}

// Allocation idiom
pipe_class pipe_cmpxchg(rRegP dst, rRegP heap_ptr)
%{
    instruction_count(1); force_serialization;
    fixed_latency(6);
    heap_ptr : S3(read);
    DECODE   : S0(3);
    D0       : S2;
    MEM      : S3;
    ALU      : S3(2);
    dst      : S5(write);
    BR       : S5;
%}

// Generic big/slow expanded idiom
pipe_class pipe_slow()
%{
    instruction_count(10); multiple_bundles; force_serialization;
    fixed_latency(100);
    D0  : S0(2);
    MEM : S3(2);
%}

// The real do-nothing guy
pipe_class empty()
%{
    instruction_count(0);
%}

// Define the class for the Nop node
define
%{
   MachNop = empty;
%}

%}
6129 //----------INSTRUCTIONS-------------------------------------------------------
6130 //
6131 // match -- States which machine-independent subtree may be replaced
6132 // by this instruction.
6133 // ins_cost -- The estimated cost of this instruction is used by instruction
6134 // selection to identify a minimum cost tree of machine
6135 // instructions that matches a tree of machine-independent
6136 // instructions.
6137 // format -- A string providing the disassembly for this instruction.
6138 // The value of an instruction's operand may be inserted
6139 // by referring to it with a '$' prefix.
6140 // opcode -- Three instruction opcodes may be provided. These are referred
6141 // to within an encode class as $primary, $secondary, and $tertiary
//              respectively. The primary opcode is commonly used to
6143 // indicate the type of machine instruction, while secondary
6144 // and tertiary are often used for prefix options or addressing
6145 // modes.
6146 // ins_encode -- A list of encode classes with parameters. The encode class
6147 // name must have been defined in an 'enc_class' specification
6148 // in the encode section of the architecture description.
6151 //----------Load/Store/Move Instructions---------------------------------------
6152 //----------Load Instructions--------------------------------------------------
// ---- Sub-word loads: sign- or zero-extending byte/short/char loads into
// ---- 32-bit (rRegI) or 64-bit (rRegL) registers.

// Load Byte (8 bit signed)
instruct loadB(rRegI dst, memory mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(125);
  format %{ "movsbl $dst, $mem\t# byte" %}

  ins_encode %{
    __ movsbl($dst$$Register, $mem$$Address);
  %}

  ins_pipe(ialu_reg_mem);
%}

// Load Byte (8 bit signed) into Long Register
instruct loadB2L(rRegL dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(125);
  format %{ "movsbq $dst, $mem\t# byte -> long" %}

  ins_encode %{
    __ movsbq($dst$$Register, $mem$$Address);
  %}

  ins_pipe(ialu_reg_mem);
%}

// Load Unsigned Byte (8 bit UNsigned)
instruct loadUB(rRegI dst, memory mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(125);
  format %{ "movzbl $dst, $mem\t# ubyte" %}

  ins_encode %{
    __ movzbl($dst$$Register, $mem$$Address);
  %}

  ins_pipe(ialu_reg_mem);
%}

// Load Unsigned Byte (8 bit UNsigned) into Long Register
instruct loadUB2L(rRegL dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(125);
  format %{ "movzbq $dst, $mem\t# ubyte -> long" %}

  ins_encode %{
    __ movzbq($dst$$Register, $mem$$Address);
  %}

  ins_pipe(ialu_reg_mem);
%}

// Load Short (16 bit signed)
instruct loadS(rRegI dst, memory mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(125);
  format %{ "movswl $dst, $mem\t# short" %}

  ins_encode %{
    __ movswl($dst$$Register, $mem$$Address);
  %}

  ins_pipe(ialu_reg_mem);
%}

// Load Short (16 bit signed) into Long Register
instruct loadS2L(rRegL dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(125);
  format %{ "movswq $dst, $mem\t# short -> long" %}

  ins_encode %{
    __ movswq($dst$$Register, $mem$$Address);
  %}

  ins_pipe(ialu_reg_mem);
%}

// Load Unsigned Short/Char (16 bit UNsigned)
instruct loadUS(rRegI dst, memory mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(125);
  format %{ "movzwl $dst, $mem\t# ushort/char" %}

  ins_encode %{
    __ movzwl($dst$$Register, $mem$$Address);
  %}

  ins_pipe(ialu_reg_mem);
%}

// Load Unsigned Short/Char (16 bit UNsigned) into Long Register
instruct loadUS2L(rRegL dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "movzwq $dst, $mem\t# ushort/char -> long" %}

  ins_encode %{
    __ movzwq($dst$$Register, $mem$$Address);
  %}

  ins_pipe(ialu_reg_mem);
%}
// Load Integer
instruct loadI(rRegI dst, memory mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(125);
  format %{ "movl $dst, $mem\t# int" %}

  ins_encode %{
    __ movl($dst$$Register, $mem$$Address);
  %}

  ins_pipe(ialu_reg_mem);
%}

// Load Integer into Long Register (sign-extending)
instruct loadI2L(rRegL dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));

  ins_cost(125);
  format %{ "movslq $dst, $mem\t# int -> long" %}

  ins_encode %{
    __ movslq($dst$$Register, $mem$$Address);
  %}

  ins_pipe(ialu_reg_mem);
%}

// Load Unsigned Integer into Long Register.
// A plain 32-bit movl suffices: on x86-64 it implicitly zeroes the upper
// 32 bits of the destination register.
instruct loadUI2L(rRegL dst, memory mem)
%{
  match(Set dst (LoadUI2L mem));

  ins_cost(125);
  format %{ "movl $dst, $mem\t# uint -> long" %}

  ins_encode %{
    __ movl($dst$$Register, $mem$$Address);
  %}

  ins_pipe(ialu_reg_mem);
%}

// Load Long
instruct loadL(rRegL dst, memory mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(125);
  format %{ "movq $dst, $mem\t# long" %}

  ins_encode %{
    __ movq($dst$$Register, $mem$$Address);
  %}

  ins_pipe(ialu_reg_mem); // XXX
%}
// Load Range (array length)
instruct loadRange(rRegI dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(125); // XXX
  format %{ "movl $dst, $mem\t# range" %}
  opcode(0x8B);
  ins_encode(REX_reg_mem(dst, mem), OpcP, reg_mem(dst, mem));
  ins_pipe(ialu_reg_mem);
%}

// Load Pointer (full-width 64-bit oop)
instruct loadP(rRegP dst, memory mem)
%{
  match(Set dst (LoadP mem));

  ins_cost(125); // XXX
  format %{ "movq $dst, $mem\t# ptr" %}
  opcode(0x8B);
  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
  ins_pipe(ialu_reg_mem); // XXX
%}

// Load Compressed Pointer.  A 32-bit movl loads the narrow oop; the upper
// 32 bits of the destination are implicitly zeroed.  No decode is done here.
instruct loadN(rRegN dst, memory mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(125); // XXX
  format %{ "movl $dst, $mem\t# compressed ptr" %}
  ins_encode %{
    __ movl($dst$$Register, $mem$$Address);
  %}
  ins_pipe(ialu_reg_mem); // XXX
%}

// Load Klass Pointer
instruct loadKlass(rRegP dst, memory mem)
%{
  match(Set dst (LoadKlass mem));

  ins_cost(125); // XXX
  format %{ "movq $dst, $mem\t# class" %}
  opcode(0x8B);
  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
  ins_pipe(ialu_reg_mem); // XXX
%}

// Load narrow Klass Pointer
instruct loadNKlass(rRegN dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));

  ins_cost(125); // XXX
  format %{ "movl $dst, $mem\t# compressed klass ptr" %}
  ins_encode %{
    __ movl($dst$$Register, $mem$$Address);
  %}
  ins_pipe(ialu_reg_mem); // XXX
%}
// Load Float
instruct loadF(regF dst, memory mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(145); // XXX
  format %{ "movss $dst, $mem\t# float" %}
  opcode(0xF3, 0x0F, 0x10);
  ins_encode(OpcP, REX_reg_mem(dst, mem), OpcS, OpcT, reg_mem(dst, mem));
  ins_pipe(pipe_slow); // XXX
%}

// Load Double.  Two variants selected by UseXmmLoadAndClearUpper:
// movlpd merges into the low half (partial register update), movsd clears
// the upper half.  The predicates make the pair mutually exclusive.
instruct loadD_partial(regD dst, memory mem)
%{
  predicate(!UseXmmLoadAndClearUpper);
  match(Set dst (LoadD mem));

  ins_cost(145); // XXX
  format %{ "movlpd $dst, $mem\t# double" %}
  opcode(0x66, 0x0F, 0x12);
  ins_encode(OpcP, REX_reg_mem(dst, mem), OpcS, OpcT, reg_mem(dst, mem));
  ins_pipe(pipe_slow); // XXX
%}

instruct loadD(regD dst, memory mem)
%{
  predicate(UseXmmLoadAndClearUpper);
  match(Set dst (LoadD mem));

  ins_cost(145); // XXX
  format %{ "movsd $dst, $mem\t# double" %}
  opcode(0xF2, 0x0F, 0x10);
  ins_encode(OpcP, REX_reg_mem(dst, mem), OpcS, OpcT, reg_mem(dst, mem));
  ins_pipe(pipe_slow); // XXX
%}
// ---- Aligned packed (vector) loads: each moves 8 bytes into an XMM
// ---- register via the movq_ld encoding.

// Load Aligned Packed Byte to XMM register
instruct loadA8B(regD dst, memory mem) %{
  match(Set dst (Load8B mem));
  ins_cost(125);
  format %{ "MOVQ  $dst,$mem\t! packed8B" %}
  ins_encode( movq_ld(dst, mem));
  ins_pipe( pipe_slow );
%}

// Load Aligned Packed Short to XMM register
instruct loadA4S(regD dst, memory mem) %{
  match(Set dst (Load4S mem));
  ins_cost(125);
  format %{ "MOVQ  $dst,$mem\t! packed4S" %}
  ins_encode( movq_ld(dst, mem));
  ins_pipe( pipe_slow );
%}

// Load Aligned Packed Char to XMM register
instruct loadA4C(regD dst, memory mem) %{
  match(Set dst (Load4C mem));
  ins_cost(125);
  format %{ "MOVQ  $dst,$mem\t! packed4C" %}
  ins_encode( movq_ld(dst, mem));
  ins_pipe( pipe_slow );
%}

// Load Aligned Packed Integer to XMM register
instruct load2IU(regD dst, memory mem) %{
  match(Set dst (Load2I mem));
  ins_cost(125);
  format %{ "MOVQ  $dst,$mem\t! packed2I" %}
  ins_encode( movq_ld(dst, mem));
  ins_pipe( pipe_slow );
%}

// Load Aligned Packed Single to XMM
instruct loadA2F(regD dst, memory mem) %{
  match(Set dst (Load2F mem));
  ins_cost(145);
  format %{ "MOVQ  $dst,$mem\t! packed2F" %}
  ins_encode( movq_ld(dst, mem));
  ins_pipe( pipe_slow );
%}
// ---- Load Effective Address: fold address arithmetic into a single leaq.
// ---- One instruct per addressing-mode operand.

instruct leaP8(rRegP dst, indOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110); // XXX
  format %{ "leaq $dst, $mem\t# ptr 8" %}
  opcode(0x8D);
  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
  ins_pipe(ialu_reg_reg_fat);
%}

instruct leaP32(rRegP dst, indOffset32 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr 32" %}
  opcode(0x8D);
  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
  ins_pipe(ialu_reg_reg_fat);
%}

// instruct leaPIdx(rRegP dst, indIndex mem)
// %{
//   match(Set dst mem);
//
//   ins_cost(110);
//   format %{ "leaq $dst, $mem\t# ptr idx" %}
//   opcode(0x8D);
//   ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
//   ins_pipe(ialu_reg_reg_fat);
// %}

instruct leaPIdxOff(rRegP dst, indIndexOffset mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr idxoff" %}
  opcode(0x8D);
  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
  ins_pipe(ialu_reg_reg_fat);
%}

instruct leaPIdxScale(rRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr idxscale" %}
  opcode(0x8D);
  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
  ins_pipe(ialu_reg_reg_fat);
%}

instruct leaPIdxScaleOff(rRegP dst, indIndexScaleOffset mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr idxscaleoff" %}
  opcode(0x8D);
  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
  ins_pipe(ialu_reg_reg_fat);
%}

instruct leaPPosIdxScaleOff(rRegP dst, indPosIndexScaleOffset mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr posidxscaleoff" %}
  opcode(0x8D);
  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
  ins_pipe(ialu_reg_reg_fat);
%}
// ---- lea variants for compressed-oop (narrow) addressing modes.
// ---- leaPCompressedOopOffset handles the scaled case (narrow_oop_shift != 0);
// ---- the *Narrow variants below are only legal when the shift is 0, i.e.
// ---- unscaled compressed oops (heap below 4GB — see changeset summary),
// ---- where a narrow oop can be used directly as a 32-bit address term.

// Load Effective Address which uses Narrow (32-bits) oop
instruct leaPCompressedOopOffset(rRegP dst, indCompressedOopOffset mem)
%{
  predicate(UseCompressedOops && (Universe::narrow_oop_shift() != 0));
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr compressedoopoff32" %}
  opcode(0x8D);
  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
  ins_pipe(ialu_reg_reg_fat);
%}

instruct leaP8Narrow(rRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110); // XXX
  format %{ "leaq $dst, $mem\t# ptr off8narrow" %}
  opcode(0x8D);
  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
  ins_pipe(ialu_reg_reg_fat);
%}

instruct leaP32Narrow(rRegP dst, indOffset32Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr off32narrow" %}
  opcode(0x8D);
  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
  ins_pipe(ialu_reg_reg_fat);
%}

instruct leaPIdxOffNarrow(rRegP dst, indIndexOffsetNarrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr idxoffnarrow" %}
  opcode(0x8D);
  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
  ins_pipe(ialu_reg_reg_fat);
%}

instruct leaPIdxScaleNarrow(rRegP dst, indIndexScaleNarrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr idxscalenarrow" %}
  opcode(0x8D);
  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
  ins_pipe(ialu_reg_reg_fat);
%}

instruct leaPIdxScaleOffNarrow(rRegP dst, indIndexScaleOffsetNarrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr idxscaleoffnarrow" %}
  opcode(0x8D);
  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
  ins_pipe(ialu_reg_reg_fat);
%}

instruct leaPPosIdxScaleOffNarrow(rRegP dst, indPosIndexScaleOffsetNarrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr posidxscaleoffnarrow" %}
  opcode(0x8D);
  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
  ins_pipe(ialu_reg_reg_fat);
%}
// ---- Integer/long constant loads.  The *0 forms use xor, which is shorter
// ---- but clobbers the flags register (hence effect(KILL cr)).

instruct loadConI(rRegI dst, immI src)
%{
  match(Set dst src);

  format %{ "movl $dst, $src\t# int" %}
  ins_encode(load_immI(dst, src));
  ins_pipe(ialu_reg_fat); // XXX
%}

instruct loadConI0(rRegI dst, immI0 src, rFlagsReg cr)
%{
  match(Set dst src);
  effect(KILL cr);

  ins_cost(50);
  format %{ "xorl $dst, $dst\t# int" %}
  opcode(0x33); /* + rd */
  ins_encode(REX_reg_reg(dst, dst), OpcP, reg_reg(dst, dst));
  ins_pipe(ialu_reg);
%}

instruct loadConL(rRegL dst, immL src)
%{
  match(Set dst src);

  ins_cost(150);
  format %{ "movq $dst, $src\t# long" %}
  ins_encode(load_immL(dst, src));
  ins_pipe(ialu_reg);
%}

instruct loadConL0(rRegL dst, immL0 src, rFlagsReg cr)
%{
  match(Set dst src);
  effect(KILL cr);

  ins_cost(50);
  // 32-bit xor: the upper 32 bits are implicitly zeroed on x86-64.
  format %{ "xorl $dst, $dst\t# long" %}
  opcode(0x33); /* + rd */
  ins_encode(REX_reg_reg(dst, dst), OpcP, reg_reg(dst, dst));
  ins_pipe(ialu_reg); // XXX
%}

instruct loadConUL32(rRegL dst, immUL32 src)
%{
  match(Set dst src);

  ins_cost(60);
  format %{ "movl $dst, $src\t# long (unsigned 32-bit)" %}
  ins_encode(load_immUL32(dst, src));
  ins_pipe(ialu_reg);
%}

instruct loadConL32(rRegL dst, immL32 src)
%{
  match(Set dst src);

  ins_cost(70);
  format %{ "movq $dst, $src\t# long (32-bit)" %}
  ins_encode(load_immL32(dst, src));
  ins_pipe(ialu_reg);
%}
// Pointer constant (full 64-bit immediate).
instruct loadConP(rRegP dst, immP src)
%{
  match(Set dst src);

  format %{ "movq $dst, $src\t# ptr" %}
  ins_encode(load_immP(dst, src));
  ins_pipe(ialu_reg_fat); // XXX
%}

// NULL pointer constant; xor clobbers flags.
instruct loadConP0(rRegP dst, immP0 src, rFlagsReg cr)
%{
  match(Set dst src);
  effect(KILL cr);

  ins_cost(50);
  format %{ "xorl $dst, $dst\t# ptr" %}
  opcode(0x33); /* + rd */
  ins_encode(REX_reg_reg(dst, dst), OpcP, reg_reg(dst, dst));
  ins_pipe(ialu_reg);
%}

// Pointer constant that fits in a positive 32-bit immediate; the 32-bit
// movl implicitly zeroes the upper half.
instruct loadConP31(rRegP dst, immP31 src, rFlagsReg cr)
%{
  match(Set dst src);
  effect(KILL cr);

  ins_cost(60);
  format %{ "movl $dst, $src\t# ptr (positive 32-bit)" %}
  ins_encode(load_immP31(dst, src));
  ins_pipe(ialu_reg);
%}
// Float constant loaded from the constant table.
instruct loadConF(regF dst, immF src)
%{
  match(Set dst src);
  ins_cost(125);

  format %{ "movss $dst, [$src]" %}
  ins_encode(load_conF(dst, src));
  ins_pipe(pipe_slow);
%}

// Compressed NULL pointer constant.
// NOTE(review): the format text shows "$src" but the encoding xors $dst
// with itself — the format string looks stale; verify against disassembly.
instruct loadConN0(rRegN dst, immN0 src, rFlagsReg cr) %{
  match(Set dst src);
  effect(KILL cr);
  format %{ "xorq $dst, $src\t# compressed NULL ptr" %}
  ins_encode %{
    __ xorq($dst$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Non-NULL compressed oop constant; NULL is handled by loadConN0 above,
// so reaching the NULL branch here is a bug.
instruct loadConN(rRegN dst, immN src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "movl $dst, $src\t# compressed ptr" %}
  ins_encode %{
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      __ set_narrow_oop($dst$$Register, (jobject)$src$$constant);
    }
  %}
  ins_pipe(ialu_reg_fat); // XXX
%}
// Load float 0.0 by xorps of the XMM register with itself — cheaper than a
// constant-table load (cost 100 vs. 125 for loadConF above).
6773 instruct loadConF0(regF dst, immF0 src)
6774 %{
6775 match(Set dst src);
6776 ins_cost(100);
6778 format %{ "xorps $dst, $dst\t# float 0.0" %}
6779 opcode(0x0F, 0x57);
6780 ins_encode(REX_reg_reg(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
6781 ins_pipe(pipe_slow);
6782 %}
6784 // Use the same format since predicate() can not be used here.
// Load a double constant from the constant table (memory operand).
6785 instruct loadConD(regD dst, immD src)
6786 %{
6787 match(Set dst src);
6788 ins_cost(125);
6790 format %{ "movsd $dst, [$src]" %}
6791 ins_encode(load_conD(dst, src));
6792 ins_pipe(pipe_slow);
6793 %}
// Load double 0.0 by xorpd (66 0F 57); the 0x66 operand-size prefix must
// precede the REX byte, hence the OpcP-before-REX encoding order.
6795 instruct loadConD0(regD dst, immD0 src)
6796 %{
6797 match(Set dst src);
6798 ins_cost(100);
6800 format %{ "xorpd $dst, $dst\t# double 0.0" %}
6801 opcode(0x66, 0x0F, 0x57);
6802 ins_encode(OpcP, REX_reg_reg(dst, dst), OpcS, OpcT, reg_reg(dst, dst));
6803 ins_pipe(pipe_slow);
6804 %}
// Stack-slot loads: move a spilled value from a stack slot into a register.
// All use the plain mov forms; the wide (REX.W) variants are used for the
// 64-bit long and pointer loads.
6806 instruct loadSSI(rRegI dst, stackSlotI src)
6807 %{
6808 match(Set dst src);
6810 ins_cost(125);
6811 format %{ "movl $dst, $src\t# int stk" %}
6812 opcode(0x8B);
6813 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
6814 ins_pipe(ialu_reg_mem);
6815 %}
6817 instruct loadSSL(rRegL dst, stackSlotL src)
6818 %{
6819 match(Set dst src);
6821 ins_cost(125);
6822 format %{ "movq $dst, $src\t# long stk" %}
6823 opcode(0x8B);
6824 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
6825 ins_pipe(ialu_reg_mem);
6826 %}
6828 instruct loadSSP(rRegP dst, stackSlotP src)
6829 %{
6830 match(Set dst src);
6832 ins_cost(125);
6833 format %{ "movq $dst, $src\t# ptr stk" %}
6834 opcode(0x8B);
6835 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
6836 ins_pipe(ialu_reg_mem);
6837 %}
// Float stack-slot load via movss (F3 0F 10); the F3 prefix precedes REX.
6839 instruct loadSSF(regF dst, stackSlotF src)
6840 %{
6841 match(Set dst src);
6843 ins_cost(125);
6844 format %{ "movss $dst, $src\t# float stk" %}
6845 opcode(0xF3, 0x0F, 0x10);
6846 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
6847 ins_pipe(pipe_slow); // XXX
6848 %}
6850 // Use the same format since predicate() can not be used here.
// Double stack-slot load; uses the MacroAssembler directly, addressing the
// slot as an rsp-relative displacement.
6851 instruct loadSSD(regD dst, stackSlotD src)
6852 %{
6853 match(Set dst src);
6855 ins_cost(125);
6856 format %{ "movsd $dst, $src\t# double stk" %}
6857 ins_encode %{
6858 __ movdbl($dst$$XMMRegister, Address(rsp, $src$$disp));
6859 %}
6860 ins_pipe(pipe_slow); // XXX
6861 %}
6863 // Prefetch instructions.
6864 // Must be safe to execute with invalid address (cannot fault).
// Read-prefetch variants, selected by the ReadPrefetchInstr flag:
//   3 = PREFETCHR (0F 0D /0), 0 = PREFETCHNTA (0F 18 /0),
//   1 = PREFETCHT0 (0F 18 /1), 2 = PREFETCHT2 (0F 18 /3).
// Exactly one variant matches for a given flag value.
6866 instruct prefetchr( memory mem ) %{
6867 predicate(ReadPrefetchInstr==3);
6868 match(PrefetchRead mem);
6869 ins_cost(125);
6871 format %{ "PREFETCHR $mem\t# Prefetch into level 1 cache" %}
6872 opcode(0x0F, 0x0D); /* Opcode 0F 0D /0 */
6873 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x00, mem));
6874 ins_pipe(ialu_mem);
6875 %}
6877 instruct prefetchrNTA( memory mem ) %{
6878 predicate(ReadPrefetchInstr==0);
6879 match(PrefetchRead mem);
6880 ins_cost(125);
6882 format %{ "PREFETCHNTA $mem\t# Prefetch into non-temporal cache for read" %}
6883 opcode(0x0F, 0x18); /* Opcode 0F 18 /0 */
6884 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x00, mem));
6885 ins_pipe(ialu_mem);
6886 %}
6888 instruct prefetchrT0( memory mem ) %{
6889 predicate(ReadPrefetchInstr==1);
6890 match(PrefetchRead mem);
6891 ins_cost(125);
6893 format %{ "PREFETCHT0 $mem\t# prefetch into L1 and L2 caches for read" %}
6894 opcode(0x0F, 0x18); /* Opcode 0F 18 /1 */
6895 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x01, mem));
6896 ins_pipe(ialu_mem);
6897 %}
6899 instruct prefetchrT2( memory mem ) %{
6900 predicate(ReadPrefetchInstr==2);
6901 match(PrefetchRead mem);
6902 ins_cost(125);
6904 format %{ "PREFETCHT2 $mem\t# prefetch into L2 caches for read" %}
6905 opcode(0x0F, 0x18); /* Opcode 0F 18 /3 */
6906 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x03, mem));
6907 ins_pipe(ialu_mem);
6908 %}
// Write-prefetch (allocation prefetch) variants, selected by the
// AllocatePrefetchInstr flag, mirroring the read-prefetch set above:
//   3 = PREFETCHW (0F 0D /1), 0 = PREFETCHNTA, 1 = PREFETCHT0, 2 = PREFETCHT2.
6910 instruct prefetchw( memory mem ) %{
6911 predicate(AllocatePrefetchInstr==3);
6912 match(PrefetchWrite mem);
6913 ins_cost(125);
6915 format %{ "PREFETCHW $mem\t# Prefetch into level 1 cache and mark modified" %}
6916 opcode(0x0F, 0x0D); /* Opcode 0F 0D /1 */
6917 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x01, mem));
6918 ins_pipe(ialu_mem);
6919 %}
6921 instruct prefetchwNTA( memory mem ) %{
6922 predicate(AllocatePrefetchInstr==0);
6923 match(PrefetchWrite mem);
6924 ins_cost(125);
6926 format %{ "PREFETCHNTA $mem\t# Prefetch to non-temporal cache for write" %}
6927 opcode(0x0F, 0x18); /* Opcode 0F 18 /0 */
6928 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x00, mem));
6929 ins_pipe(ialu_mem);
6930 %}
6932 instruct prefetchwT0( memory mem ) %{
6933 predicate(AllocatePrefetchInstr==1);
6934 match(PrefetchWrite mem);
6935 ins_cost(125);
6937 format %{ "PREFETCHT0 $mem\t# Prefetch to level 1 and 2 caches for write" %}
6938 opcode(0x0F, 0x18); /* Opcode 0F 18 /1 */
6939 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x01, mem));
6940 ins_pipe(ialu_mem);
6941 %}
6943 instruct prefetchwT2( memory mem ) %{
6944 predicate(AllocatePrefetchInstr==2);
6945 match(PrefetchWrite mem);
6946 ins_cost(125);
6948 format %{ "PREFETCHT2 $mem\t# Prefetch to level 2 cache for write" %}
6949 opcode(0x0F, 0x18); /* Opcode 0F 18 /3 */
6950 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x03, mem));
6951 ins_pipe(ialu_mem);
6952 %}
6954 //----------Store Instructions-------------------------------------------------
// Register-to-memory stores for each basic size. Byte stores need the
// byte-register REX form (REX_breg_mem); char/short stores carry the 0x66
// operand-size prefix (SizePrefix); long/pointer stores use REX.W.
6956 // Store Byte
6957 instruct storeB(memory mem, rRegI src)
6958 %{
6959 match(Set mem (StoreB mem src));
6961 ins_cost(125); // XXX
6962 format %{ "movb $mem, $src\t# byte" %}
6963 opcode(0x88);
6964 ins_encode(REX_breg_mem(src, mem), OpcP, reg_mem(src, mem));
6965 ins_pipe(ialu_mem_reg);
6966 %}
6968 // Store Char/Short
6969 instruct storeC(memory mem, rRegI src)
6970 %{
6971 match(Set mem (StoreC mem src));
6973 ins_cost(125); // XXX
6974 format %{ "movw $mem, $src\t# char/short" %}
6975 opcode(0x89);
6976 ins_encode(SizePrefix, REX_reg_mem(src, mem), OpcP, reg_mem(src, mem));
6977 ins_pipe(ialu_mem_reg);
6978 %}
6980 // Store Integer
6981 instruct storeI(memory mem, rRegI src)
6982 %{
6983 match(Set mem (StoreI mem src));
6985 ins_cost(125); // XXX
6986 format %{ "movl $mem, $src\t# int" %}
6987 opcode(0x89);
6988 ins_encode(REX_reg_mem(src, mem), OpcP, reg_mem(src, mem));
6989 ins_pipe(ialu_mem_reg);
6990 %}
6992 // Store Long
6993 instruct storeL(memory mem, rRegL src)
6994 %{
6995 match(Set mem (StoreL mem src));
6997 ins_cost(125); // XXX
6998 format %{ "movq $mem, $src\t# long" %}
6999 opcode(0x89);
7000 ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
7001 ins_pipe(ialu_mem_reg); // XXX
7002 %}
7004 // Store Pointer
7005 instruct storeP(memory mem, any_RegP src)
7006 %{
7007 match(Set mem (StoreP mem src));
7009 ins_cost(125); // XXX
7010 format %{ "movq $mem, $src\t# ptr" %}
7011 opcode(0x89);
7012 ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
7013 ins_pipe(ialu_mem_reg);
7014 %}
// Store NULL pointer by storing R12. When compressed oops are enabled with
// a NULL heap base (see changeset summary), R12 is the heap-base register
// and is known to contain zero, so "movq mem, r12" stores NULL without an
// immediate operand.
7016 instruct storeImmP0(memory mem, immP0 zero)
7017 %{
7018 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
7019 match(Set mem (StoreP mem zero));
7021 ins_cost(125); // XXX
7022 format %{ "movq $mem, R12\t# ptr (R12_heapbase==0)" %}
7023 ins_encode %{
7024 __ movq($mem$$Address, r12);
7025 %}
7026 ins_pipe(ialu_mem_reg);
7027 %}
7029 // Store NULL Pointer, mark word, or other simple pointer constant.
// Fallback immediate pointer store (C7 /0 with sign-extended imm32); used
// when the cheaper R12-based form above does not apply.
7030 instruct storeImmP(memory mem, immP31 src)
7031 %{
7032 match(Set mem (StoreP mem src));
7034 ins_cost(150); // XXX
7035 format %{ "movq $mem, $src\t# ptr" %}
7036 opcode(0xC7); /* C7 /0 */
7037 ins_encode(REX_mem_wide(mem), OpcP, RM_opc_mem(0x00, mem), Con32(src));
7038 ins_pipe(ialu_mem_imm);
7039 %}
7041 // Store Compressed Pointer
// Store a narrow oop register with a 32-bit move.
7042 instruct storeN(memory mem, rRegN src)
7043 %{
7044 match(Set mem (StoreN mem src));
7046 ins_cost(125); // XXX
7047 format %{ "movl $mem, $src\t# compressed ptr" %}
7048 ins_encode %{
7049 __ movl($mem$$Address, $src$$Register);
7050 %}
7051 ins_pipe(ialu_mem_reg);
7052 %}
// Store compressed NULL via R12, which holds zero when the narrow-oop base
// is NULL; saves the immediate operand of a movl.
7054 instruct storeImmN0(memory mem, immN0 zero)
7055 %{
7056 predicate(Universe::narrow_oop_base() == NULL);
7057 match(Set mem (StoreN mem zero));
7059 ins_cost(125); // XXX
7060 format %{ "movl $mem, R12\t# compressed ptr (R12_heapbase==0)" %}
7061 ins_encode %{
7062 __ movl($mem$$Address, r12);
7063 %}
7064 ins_pipe(ialu_mem_reg);
7065 %}
// Store a compressed-oop constant. A NULL constant is stored as literal
// zero; a non-NULL one goes through set_narrow_oop so the immediate can be
// recorded/patched as an oop.
7067 instruct storeImmN(memory mem, immN src)
7068 %{
7069 match(Set mem (StoreN mem src));
7071 ins_cost(150); // XXX
7072 format %{ "movl $mem, $src\t# compressed ptr" %}
7073 ins_encode %{
7074 address con = (address)$src$$constant;
7075 if (con == NULL) {
7076 __ movl($mem$$Address, (int32_t)0);
7077 } else {
7078 __ set_narrow_oop($mem$$Address, (jobject)$src$$constant);
7079 }
7080 %}
7081 ins_pipe(ialu_mem_imm);
7082 %}
7084 // Store Integer Immediate
// Zero stores reuse R12 (guaranteed zero when compressed oops have a NULL
// base) instead of encoding an immediate: shorter code, lower cost (125 vs.
// 150 for the generic immediate forms below).
7085 instruct storeImmI0(memory mem, immI0 zero)
7086 %{
7087 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
7088 match(Set mem (StoreI mem zero));
7090 ins_cost(125); // XXX
7091 format %{ "movl $mem, R12\t# int (R12_heapbase==0)" %}
7092 ins_encode %{
7093 __ movl($mem$$Address, r12);
7094 %}
7095 ins_pipe(ialu_mem_reg);
7096 %}
7098 instruct storeImmI(memory mem, immI src)
7099 %{
7100 match(Set mem (StoreI mem src));
7102 ins_cost(150);
7103 format %{ "movl $mem, $src\t# int" %}
7104 opcode(0xC7); /* C7 /0 */
7105 ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con32(src));
7106 ins_pipe(ialu_mem_imm);
7107 %}
7109 // Store Long Immediate
7110 instruct storeImmL0(memory mem, immL0 zero)
7111 %{
7112 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
7113 match(Set mem (StoreL mem zero));
7115 ins_cost(125); // XXX
7116 format %{ "movq $mem, R12\t# long (R12_heapbase==0)" %}
7117 ins_encode %{
7118 __ movq($mem$$Address, r12);
7119 %}
7120 ins_pipe(ialu_mem_reg);
7121 %}
// Long immediate store limited to sign-extended 32-bit constants (immL32),
// since x86_64 has no 64-bit immediate store.
7123 instruct storeImmL(memory mem, immL32 src)
7124 %{
7125 match(Set mem (StoreL mem src));
7127 ins_cost(150);
7128 format %{ "movq $mem, $src\t# long" %}
7129 opcode(0xC7); /* C7 /0 */
7130 ins_encode(REX_mem_wide(mem), OpcP, RM_opc_mem(0x00, mem), Con32(src));
7131 ins_pipe(ialu_mem_imm);
7132 %}
7134 // Store Short/Char Immediate
// Zero short/char store via R12 (zero when compressed-oop base is NULL).
7135 instruct storeImmC0(memory mem, immI0 zero)
7136 %{
7137 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
7138 match(Set mem (StoreC mem zero));
7140 ins_cost(125); // XXX
7141 format %{ "movw $mem, R12\t# short/char (R12_heapbase==0)" %}
7142 ins_encode %{
7143 __ movw($mem$$Address, r12);
7144 %}
7145 ins_pipe(ialu_mem_reg);
7146 %}
// 16-bit immediate store, gated by UseStoreImmI16 (the 66-prefixed C7 form
// can be slow on some CPUs, so it is flag-controlled).
7148 instruct storeImmI16(memory mem, immI16 src)
7149 %{
7150 predicate(UseStoreImmI16);
7151 match(Set mem (StoreC mem src));
7153 ins_cost(150);
7154 format %{ "movw $mem, $src\t# short/char" %}
7155 opcode(0xC7); /* C7 /0 Same as 32 store immediate with prefix */
7156 ins_encode(SizePrefix, REX_mem(mem), OpcP, RM_opc_mem(0x00, mem),Con16(src));
7157 ins_pipe(ialu_mem_imm);
7158 %}
7160 // Store Byte Immediate
// Store a zero byte via R12, which is guaranteed to hold zero when
// compressed oops are enabled with a NULL heap base; avoids the immediate
// operand of a movb. Fixed the format comment: this is a byte store
// (StoreB / movb), not short/char.
7161 instruct storeImmB0(memory mem, immI0 zero)
7162 %{
7163 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
7164 match(Set mem (StoreB mem zero));
7166 ins_cost(125); // XXX
7167 format %{ "movb $mem, R12\t# byte (R12_heapbase==0)" %}
7168 ins_encode %{
7169 __ movb($mem$$Address, r12);
7170 %}
7171 ins_pipe(ialu_mem_reg);
7172 %}
// Generic byte-immediate store (C6 /0 with imm8).
7174 instruct storeImmB(memory mem, immI8 src)
7175 %{
7176 match(Set mem (StoreB mem src));
7178 ins_cost(150); // XXX
7179 format %{ "movb $mem, $src\t# byte" %}
7180 opcode(0xC6); /* C6 /0 */
7181 ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con8or32(src));
7182 ins_pipe(ialu_mem_imm);
7183 %}
// 64-bit packed vector stores (8 bytes / 4 chars / 2 ints held in the low
// half of an XMM register); all share the movq_st encoding.
7185 // Store Aligned Packed Byte XMM register to memory
7186 instruct storeA8B(memory mem, regD src) %{
7187 match(Set mem (Store8B mem src));
7188 ins_cost(145);
7189 format %{ "MOVQ $mem,$src\t! packed8B" %}
7190 ins_encode( movq_st(mem, src));
7191 ins_pipe( pipe_slow );
7192 %}
7194 // Store Aligned Packed Char/Short XMM register to memory
7195 instruct storeA4C(memory mem, regD src) %{
7196 match(Set mem (Store4C mem src));
7197 ins_cost(145);
7198 format %{ "MOVQ $mem,$src\t! packed4C" %}
7199 ins_encode( movq_st(mem, src));
7200 ins_pipe( pipe_slow );
7201 %}
7203 // Store Aligned Packed Integer XMM register to memory
7204 instruct storeA2I(memory mem, regD src) %{
7205 match(Set mem (Store2I mem src));
7206 ins_cost(145);
7207 format %{ "MOVQ $mem,$src\t! packed2I" %}
7208 ins_encode( movq_st(mem, src));
7209 ins_pipe( pipe_slow );
7210 %}
7212 // Store CMS card-mark Immediate
// Card-mark byte-zero store via R12 (zero when compressed-oop base is
// NULL); preferred over the immediate form below when applicable.
7213 instruct storeImmCM0_reg(memory mem, immI0 zero)
7214 %{
7215 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
7216 match(Set mem (StoreCM mem zero));
7218 ins_cost(125); // XXX
7219 format %{ "movb $mem, R12\t# CMS card-mark byte 0 (R12_heapbase==0)" %}
7220 ins_encode %{
7221 __ movb($mem$$Address, r12);
7222 %}
7223 ins_pipe(ialu_mem_reg);
7224 %}
// Generic card-mark byte store with an immediate (C6 /0).
7226 instruct storeImmCM0(memory mem, immI0 src)
7227 %{
7228 match(Set mem (StoreCM mem src));
7230 ins_cost(150); // XXX
7231 format %{ "movb $mem, $src\t# CMS card-mark byte 0" %}
7232 opcode(0xC6); /* C6 /0 */
7233 ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con8or32(src));
7234 ins_pipe(ialu_mem_imm);
7235 %}
7237 // Store Aligned Packed Single Float XMM register to memory
7238 instruct storeA2F(memory mem, regD src) %{
7239 match(Set mem (Store2F mem src));
7240 ins_cost(145);
7241 format %{ "MOVQ $mem,$src\t! packed2F" %}
7242 ins_encode( movq_st(mem, src));
7243 ins_pipe( pipe_slow );
7244 %}
7246 // Store Float
// XMM-register float store (movss, F3 0F 11).
7247 instruct storeF(memory mem, regF src)
7248 %{
7249 match(Set mem (StoreF mem src));
7251 ins_cost(95); // XXX
7252 format %{ "movss $mem, $src\t# float" %}
7253 opcode(0xF3, 0x0F, 0x11);
7254 ins_encode(OpcP, REX_reg_mem(src, mem), OpcS, OpcT, reg_mem(src, mem));
7255 ins_pipe(pipe_slow); // XXX
7256 %}
7258 // Store immediate Float value (it is faster than store from XMM register)
// Float 0.0 stored from R12 (zero when compressed-oop base is NULL) — a
// 32-bit integer move of the zero bit pattern, cheaper than either the XMM
// store or the immediate form below.
7259 instruct storeF0(memory mem, immF0 zero)
7260 %{
7261 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
7262 match(Set mem (StoreF mem zero));
7264 ins_cost(25); // XXX
7265 format %{ "movl $mem, R12\t# float 0. (R12_heapbase==0)" %}
7266 ins_encode %{
7267 __ movl($mem$$Address, r12);
7268 %}
7269 ins_pipe(ialu_mem_reg);
7270 %}
// Arbitrary float constant stored as its raw 32-bit bit pattern (C7 /0).
7272 instruct storeF_imm(memory mem, immF src)
7273 %{
7274 match(Set mem (StoreF mem src));
7276 ins_cost(50);
7277 format %{ "movl $mem, $src\t# float" %}
7278 opcode(0xC7); /* C7 /0 */
7279 ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con32F_as_bits(src));
7280 ins_pipe(ialu_mem_imm);
7281 %}
7283 // Store Double
// XMM-register double store (movsd, F2 0F 11).
7284 instruct storeD(memory mem, regD src)
7285 %{
7286 match(Set mem (StoreD mem src));
7288 ins_cost(95); // XXX
7289 format %{ "movsd $mem, $src\t# double" %}
7290 opcode(0xF2, 0x0F, 0x11);
7291 ins_encode(OpcP, REX_reg_mem(src, mem), OpcS, OpcT, reg_mem(src, mem));
7292 ins_pipe(pipe_slow); // XXX
7293 %}
7295 // Store immediate double 0.0 (it is faster than store from XMM register)
// Immediate form: only when R12 is NOT known to be zero — the predicate is
// the exact complement of storeD0 below, so exactly one of the two matches.
7296 instruct storeD0_imm(memory mem, immD0 src)
7297 %{
7298 predicate(!UseCompressedOops || (Universe::narrow_oop_base() != NULL));
7299 match(Set mem (StoreD mem src));
7301 ins_cost(50);
7302 format %{ "movq $mem, $src\t# double 0." %}
7303 opcode(0xC7); /* C7 /0 */
7304 ins_encode(REX_mem_wide(mem), OpcP, RM_opc_mem(0x00, mem), Con32F_as_bits(src));
7305 ins_pipe(ialu_mem_imm);
7306 %}
// R12 form: when the compressed-oop base is NULL, R12 holds zero, so a
// plain movq from R12 stores double 0.0 with no immediate bytes.
7308 instruct storeD0(memory mem, immD0 zero)
7309 %{
7310 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
7311 match(Set mem (StoreD mem zero));
7313 ins_cost(25); // XXX
7314 format %{ "movq $mem, R12\t# double 0. (R12_heapbase==0)" %}
7315 ins_encode %{
7316 __ movq($mem$$Address, r12);
7317 %}
7318 ins_pipe(ialu_mem_reg);
7319 %}
// Stack-slot stores (register spills), mirroring the loadSS* instructions
// above; wide (REX.W) forms for long and pointer.
7321 instruct storeSSI(stackSlotI dst, rRegI src)
7322 %{
7323 match(Set dst src);
7325 ins_cost(100);
7326 format %{ "movl $dst, $src\t# int stk" %}
7327 opcode(0x89);
7328 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
7329 ins_pipe( ialu_mem_reg );
7330 %}
7332 instruct storeSSL(stackSlotL dst, rRegL src)
7333 %{
7334 match(Set dst src);
7336 ins_cost(100);
7337 format %{ "movq $dst, $src\t# long stk" %}
7338 opcode(0x89);
7339 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
7340 ins_pipe(ialu_mem_reg);
7341 %}
7343 instruct storeSSP(stackSlotP dst, rRegP src)
7344 %{
7345 match(Set dst src);
7347 ins_cost(100);
7348 format %{ "movq $dst, $src\t# ptr stk" %}
7349 opcode(0x89);
7350 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
7351 ins_pipe(ialu_mem_reg);
7352 %}
7354 instruct storeSSF(stackSlotF dst, regF src)
7355 %{
7356 match(Set dst src);
7358 ins_cost(95); // XXX
7359 format %{ "movss $dst, $src\t# float stk" %}
7360 opcode(0xF3, 0x0F, 0x11);
7361 ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
7362 ins_pipe(pipe_slow); // XXX
7363 %}
7365 instruct storeSSD(stackSlotD dst, regD src)
7366 %{
7367 match(Set dst src);
7369 ins_cost(95); // XXX
7370 format %{ "movsd $dst, $src\t# double stk" %}
7371 opcode(0xF2, 0x0F, 0x11);
7372 ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
7373 ins_pipe(pipe_slow); // XXX
7374 %}
7376 //----------BSWAP Instructions-------------------------------------------------
// In-register byte reversal via BSWAP (0F C8+rd); dst is both input and
// output.
7377 instruct bytes_reverse_int(rRegI dst) %{
7378 match(Set dst (ReverseBytesI dst));
7380 format %{ "bswapl $dst" %}
7381 opcode(0x0F, 0xC8); /*Opcode 0F /C8 */
7382 ins_encode( REX_reg(dst), OpcP, opc2_reg(dst) );
7383 ins_pipe( ialu_reg );
7384 %}
7386 instruct bytes_reverse_long(rRegL dst) %{
7387 match(Set dst (ReverseBytesL dst));
7389 format %{ "bswapq $dst" %}
7391 opcode(0x0F, 0xC8); /* Opcode 0F /C8 */
7392 ins_encode( REX_reg_wide(dst), OpcP, opc2_reg(dst) );
7393 ins_pipe( ialu_reg);
7394 %}
// Fused load+bswap: a mov from memory followed by a BSWAP of the loaded
// register; matches the Load feeding directly into ReverseBytes.
7396 instruct loadI_reversed(rRegI dst, memory src) %{
7397 match(Set dst (ReverseBytesI (LoadI src)));
7399 format %{ "bswap_movl $dst, $src" %}
7400 opcode(0x8B, 0x0F, 0xC8); /* Opcode 8B 0F C8 */
7401 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src), REX_reg(dst), OpcS, opc3_reg(dst));
7402 ins_pipe( ialu_reg_mem );
7403 %}
7405 instruct loadL_reversed(rRegL dst, memory src) %{
7406 match(Set dst (ReverseBytesL (LoadL src)));
7408 format %{ "bswap_movq $dst, $src" %}
7409 opcode(0x8B, 0x0F, 0xC8); /* Opcode 8B 0F C8 */
7410 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src), REX_reg_wide(dst), OpcS, opc3_reg(dst));
7411 ins_pipe( ialu_reg_mem );
7412 %}
// Fused bswap+store: BSWAP of the source register then a mov to memory.
// Note the source register is modified in place before the store.
7414 instruct storeI_reversed(memory dst, rRegI src) %{
7415 match(Set dst (StoreI dst (ReverseBytesI src)));
7417 format %{ "movl_bswap $dst, $src" %}
7418 opcode(0x0F, 0xC8, 0x89); /* Opcode 0F C8 89 */
7419 ins_encode( REX_reg(src), OpcP, opc2_reg(src), REX_reg_mem(src, dst), OpcT, reg_mem(src, dst) );
7420 ins_pipe( ialu_mem_reg );
7421 %}
7423 instruct storeL_reversed(memory dst, rRegL src) %{
7424 match(Set dst (StoreL dst (ReverseBytesL src)));
7426 format %{ "movq_bswap $dst, $src" %}
7427 opcode(0x0F, 0xC8, 0x89); /* Opcode 0F C8 89 */
7428 ins_encode( REX_reg_wide(src), OpcP, opc2_reg(src), REX_reg_mem_wide(src, dst), OpcT, reg_mem(src, dst) );
7429 ins_pipe( ialu_mem_reg );
7430 %}
7432 //----------MemBar Instructions-----------------------------------------------
7433 // Memory barrier flavors
// On x86_64 (TSO) acquire/release barriers need no machine instruction;
// they exist only to constrain the compiler's scheduling, hence size(0)
// and an empty encoding. Only the volatile barrier emits real code.
7435 instruct membar_acquire()
7436 %{
7437 match(MemBarAcquire);
7438 ins_cost(0);
7440 size(0);
7441 format %{ "MEMBAR-acquire" %}
7442 ins_encode();
7443 ins_pipe(empty);
7444 %}
// Acquire barrier immediately following a FastLock: the CMPXCHG in the
// lock already provides the ordering, so this matches the same node with
// a predicate and emits nothing.
7446 instruct membar_acquire_lock()
7447 %{
7448 match(MemBarAcquire);
7449 predicate(Matcher::prior_fast_lock(n));
7450 ins_cost(0);
7452 size(0);
7453 format %{ "MEMBAR-acquire (prior CMPXCHG in FastLock so empty encoding)" %}
7454 ins_encode();
7455 ins_pipe(empty);
7456 %}
7458 instruct membar_release()
7459 %{
7460 match(MemBarRelease);
7461 ins_cost(0);
7463 size(0);
7464 format %{ "MEMBAR-release" %}
7465 ins_encode();
7466 ins_pipe(empty);
7467 %}
// Release barrier immediately preceding a FastUnlock — the unlock provides
// the ordering; empty encoding, selected by predicate.
7469 instruct membar_release_lock()
7470 %{
7471 match(MemBarRelease);
7472 predicate(Matcher::post_fast_unlock(n));
7473 ins_cost(0);
7475 size(0);
7476 format %{ "MEMBAR-release (a FastUnlock follows so empty encoding)" %}
7477 ins_encode();
7478 ins_pipe(empty);
7479 %}
// Full StoreLoad barrier — the only flavor that costs real instructions
// (see enc_membar_volatile).
7481 instruct membar_volatile()
7482 %{
7483 match(MemBarVolatile);
7484 ins_cost(400);
7486 format %{ "MEMBAR-volatile" %}
7487 ins_encode(enc_membar_volatile);
7488 ins_pipe(pipe_slow);
7489 %}
// Volatile barrier elided when a following instruction already provides
// the StoreLoad ordering (Matcher::post_store_load_barrier).
7491 instruct unnecessary_membar_volatile()
7492 %{
7493 match(MemBarVolatile);
7494 predicate(Matcher::post_store_load_barrier(n));
7495 ins_cost(0);
7497 size(0);
7498 format %{ "MEMBAR-volatile (unnecessary so empty encoding)" %}
7499 ins_encode();
7500 ins_pipe(empty);
7501 %}
7503 //----------Move Instructions--------------------------------------------------
// Reinterpret a long as a pointer (and vice versa below); both are plain
// 64-bit register copies with no conversion.
7505 instruct castX2P(rRegP dst, rRegL src)
7506 %{
7507 match(Set dst (CastX2P src));
7509 format %{ "movq $dst, $src\t# long->ptr" %}
7510 ins_encode(enc_copy_wide(dst, src));
7511 ins_pipe(ialu_reg_reg); // XXX
7512 %}
7514 instruct castP2X(rRegL dst, rRegP src)
7515 %{
7516 match(Set dst (CastP2X src));
7518 format %{ "movq $dst, $src\t# ptr -> long" %}
7519 ins_encode(enc_copy_wide(dst, src));
7520 ins_pipe(ialu_reg_reg); // XXX
7521 %}
7524 // Convert oop pointer into compressed form
// General form: the pointer may be NULL, so encode_heap_oop must handle
// that case; the predicate excludes the NotNull type handled below.
// KILL cr: the encode sequence clobbers the flags.
7525 instruct encodeHeapOop(rRegN dst, rRegP src, rFlagsReg cr) %{
7526 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
7527 match(Set dst (EncodeP src));
7528 effect(KILL cr);
7529 format %{ "encode_heap_oop $dst,$src" %}
7530 ins_encode %{
7531 Register s = $src$$Register;
7532 Register d = $dst$$Register;
7533 if (s != d) {
7534 __ movq(d, s);
7535 }
7536 __ encode_heap_oop(d);
7537 %}
7538 ins_pipe(ialu_reg_long);
7539 %}
// Faster path when the type system proves the oop is never NULL.
7541 instruct encodeHeapOop_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{
7542 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
7543 match(Set dst (EncodeP src));
7544 effect(KILL cr);
7545 format %{ "encode_heap_oop_not_null $dst,$src" %}
7546 ins_encode %{
7547 __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
7548 %}
7549 ins_pipe(ialu_reg_long);
7550 %}
// Convert compressed form back to an oop pointer; general (maybe-NULL)
// form — excludes NotNull and Constant types handled below.
7552 instruct decodeHeapOop(rRegP dst, rRegN src, rFlagsReg cr) %{
7553 predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
7554 n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant);
7555 match(Set dst (DecodeN src));
7556 effect(KILL cr);
7557 format %{ "decode_heap_oop $dst,$src" %}
7558 ins_encode %{
7559 Register s = $src$$Register;
7560 Register d = $dst$$Register;
7561 if (s != d) {
7562 __ movq(d, s);
7563 }
7564 __ decode_heap_oop(d);
7565 %}
7566 ins_pipe(ialu_reg_long);
7567 %}
// Not-null/constant decode: no flags effect declared; uses the two-operand
// MacroAssembler form when src and dst differ, in-place form otherwise.
7569 instruct decodeHeapOop_not_null(rRegP dst, rRegN src) %{
7570 predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
7571 n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
7572 match(Set dst (DecodeN src));
7573 format %{ "decode_heap_oop_not_null $dst,$src" %}
7574 ins_encode %{
7575 Register s = $src$$Register;
7576 Register d = $dst$$Register;
7577 if (s != d) {
7578 __ decode_heap_oop_not_null(d, s);
7579 } else {
7580 __ decode_heap_oop_not_null(d);
7581 }
7582 %}
7583 ins_pipe(ialu_reg_long);
7584 %}
7587 //----------Conditional Move---------------------------------------------------
7588 // Jump
7589 // dummy instruction for generating temp registers
// Computed jumps through a jump table: leaq loads the table base into the
// TEMP register, then an indirect jmp indexes it. predicate(false) keeps
// this form from being selected directly (per the comment above).
7590 instruct jumpXtnd_offset(rRegL switch_val, immI2 shift, rRegI dest) %{
7591 match(Jump (LShiftL switch_val shift));
7592 ins_cost(350);
7593 predicate(false);
7594 effect(TEMP dest);
7596 format %{ "leaq $dest, table_base\n\t"
7597 "jmp [$dest + $switch_val << $shift]\n\t" %}
7598 ins_encode(jump_enc_offset(switch_val, shift, dest));
7599 ins_pipe(pipe_jmp);
7600 ins_pc_relative(1);
7601 %}
// Scaled-index-plus-offset table jump.
7603 instruct jumpXtnd_addr(rRegL switch_val, immI2 shift, immL32 offset, rRegI dest) %{
7604 match(Jump (AddL (LShiftL switch_val shift) offset));
7605 ins_cost(350);
7606 effect(TEMP dest);
7608 format %{ "leaq $dest, table_base\n\t"
7609 "jmp [$dest + $switch_val << $shift + $offset]\n\t" %}
7610 ins_encode(jump_enc_addr(switch_val, shift, offset, dest));
7611 ins_pipe(pipe_jmp);
7612 ins_pc_relative(1);
7613 %}
// Unscaled table jump (switch_val is already a byte offset).
7615 instruct jumpXtnd(rRegL switch_val, rRegI dest) %{
7616 match(Jump switch_val);
7617 ins_cost(350);
7618 effect(TEMP dest);
7620 format %{ "leaq $dest, table_base\n\t"
7621 "jmp [$dest + $switch_val]\n\t" %}
7622 ins_encode(jump_enc(switch_val, dest));
7623 ins_pipe(pipe_jmp);
7624 ins_pc_relative(1);
7625 %}
7627 // Conditional move
// Integer CMOVcc (0F 40+cc): dst keeps its value unless the condition
// holds, hence dst appears on both sides of the CMove match.
7628 instruct cmovI_reg(rRegI dst, rRegI src, rFlagsReg cr, cmpOp cop)
7629 %{
7630 match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
7632 ins_cost(200); // XXX
7633 format %{ "cmovl$cop $dst, $src\t# signed, int" %}
7634 opcode(0x0F, 0x40);
7635 ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
7636 ins_pipe(pipe_cmov_reg);
7637 %}
// Unsigned-condition variant (uses the unsigned comparison flags).
7639 instruct cmovI_regU(cmpOpU cop, rFlagsRegU cr, rRegI dst, rRegI src) %{
7640 match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
7642 ins_cost(200); // XXX
7643 format %{ "cmovl$cop $dst, $src\t# unsigned, int" %}
7644 opcode(0x0F, 0x40);
7645 ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
7646 ins_pipe(pipe_cmov_reg);
7647 %}
// Carry-flag-only unsigned condition; expands to the regU form.
7649 instruct cmovI_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegI dst, rRegI src) %{
7650 match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
7651 ins_cost(200);
7652 expand %{
7653 cmovI_regU(cop, cr, dst, src);
7654 %}
7655 %}
7657 // Conditional move
// Memory-source forms: CMOVcc with a load folded in.
7658 instruct cmovI_mem(cmpOp cop, rFlagsReg cr, rRegI dst, memory src) %{
7659 match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
7661 ins_cost(250); // XXX
7662 format %{ "cmovl$cop $dst, $src\t# signed, int" %}
7663 opcode(0x0F, 0x40);
7664 ins_encode(REX_reg_mem(dst, src), enc_cmov(cop), reg_mem(dst, src));
7665 ins_pipe(pipe_cmov_mem);
7666 %}
7668 // Conditional move
7669 instruct cmovI_memU(cmpOpU cop, rFlagsRegU cr, rRegI dst, memory src)
7670 %{
7671 match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
7673 ins_cost(250); // XXX
7674 format %{ "cmovl$cop $dst, $src\t# unsigned, int" %}
7675 opcode(0x0F, 0x40);
7676 ins_encode(REX_reg_mem(dst, src), enc_cmov(cop), reg_mem(dst, src));
7677 ins_pipe(pipe_cmov_mem);
7678 %}
7680 instruct cmovI_memUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegI dst, memory src) %{
7681 match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
7682 ins_cost(250);
7683 expand %{
7684 cmovI_memU(cop, cr, dst, src);
7685 %}
7686 %}
7688 // Conditional move
// Narrow-oop conditional moves: 32-bit CMOVcc since compressed pointers
// are 32 bits wide; signed, unsigned, and carry-flag-only variants.
7689 instruct cmovN_reg(rRegN dst, rRegN src, rFlagsReg cr, cmpOp cop)
7690 %{
7691 match(Set dst (CMoveN (Binary cop cr) (Binary dst src)));
7693 ins_cost(200); // XXX
7694 format %{ "cmovl$cop $dst, $src\t# signed, compressed ptr" %}
7695 opcode(0x0F, 0x40);
7696 ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
7697 ins_pipe(pipe_cmov_reg);
7698 %}
7700 // Conditional move
7701 instruct cmovN_regU(cmpOpU cop, rFlagsRegU cr, rRegN dst, rRegN src)
7702 %{
7703 match(Set dst (CMoveN (Binary cop cr) (Binary dst src)));
7705 ins_cost(200); // XXX
7706 format %{ "cmovl$cop $dst, $src\t# unsigned, compressed ptr" %}
7707 opcode(0x0F, 0x40);
7708 ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
7709 ins_pipe(pipe_cmov_reg);
7710 %}
7712 instruct cmovN_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegN dst, rRegN src) %{
7713 match(Set dst (CMoveN (Binary cop cr) (Binary dst src)));
7714 ins_cost(200);
7715 expand %{
7716 cmovN_regU(cop, cr, dst, src);
7717 %}
7718 %}
7720 // Conditional move
// Pointer conditional moves: 64-bit CMOVcc (REX.W); register-register
// only — the memory forms are disabled below.
7721 instruct cmovP_reg(rRegP dst, rRegP src, rFlagsReg cr, cmpOp cop)
7722 %{
7723 match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
7725 ins_cost(200); // XXX
7726 format %{ "cmovq$cop $dst, $src\t# signed, ptr" %}
7727 opcode(0x0F, 0x40);
7728 ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
7729 ins_pipe(pipe_cmov_reg); // XXX
7730 %}
7732 // Conditional move
7733 instruct cmovP_regU(cmpOpU cop, rFlagsRegU cr, rRegP dst, rRegP src)
7734 %{
7735 match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
7737 ins_cost(200); // XXX
7738 format %{ "cmovq$cop $dst, $src\t# unsigned, ptr" %}
7739 opcode(0x0F, 0x40);
7740 ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
7741 ins_pipe(pipe_cmov_reg); // XXX
7742 %}
7744 instruct cmovP_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegP dst, rRegP src) %{
7745 match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
7746 ins_cost(200);
7747 expand %{
7748 cmovP_regU(cop, cr, dst, src);
7749 %}
7750 %}
7752 // DISABLED: Requires the ADLC to emit a bottom_type call that
7753 // correctly meets the two pointer arguments; one is an incoming
7754 // register but the other is a memory operand. ALSO appears to
7755 // be buggy with implicit null checks.
7756 //
7757 //// Conditional move
7758 //instruct cmovP_mem(cmpOp cop, rFlagsReg cr, rRegP dst, memory src)
7759 //%{
7760 //  match(Set dst (CMoveP (Binary cop cr) (Binary dst (LoadP src))));
7761 //  ins_cost(250);
7762 //  format %{ "CMOV$cop $dst,$src\t# ptr" %}
7763 //  opcode(0x0F,0x40);
7764 //  ins_encode( enc_cmov(cop), reg_mem( dst, src ) );
7765 //  ins_pipe( pipe_cmov_mem );
7766 //%}
7767 //
7768 //// Conditional move
7769 //instruct cmovP_memU(cmpOpU cop, rFlagsRegU cr, rRegP dst, memory src)
7770 //%{
7771 //  match(Set dst (CMoveP (Binary cop cr) (Binary dst (LoadP src))));
7772 //  ins_cost(250);
7773 //  format %{ "CMOV$cop $dst,$src\t# ptr" %}
7774 //  opcode(0x0F,0x40);
7775 //  ins_encode( enc_cmov(cop), reg_mem( dst, src ) );
7776 //  ins_pipe( pipe_cmov_mem );
7777 //%}
// Long conditional moves: 64-bit CMOVcc (REX.W), with register and
// folded-load source forms; signed, unsigned, and carry-flag-only
// (UCF expands to the corresponding U form).
7779 instruct cmovL_reg(cmpOp cop, rFlagsReg cr, rRegL dst, rRegL src)
7780 %{
7781 match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
7783 ins_cost(200); // XXX
7784 format %{ "cmovq$cop $dst, $src\t# signed, long" %}
7785 opcode(0x0F, 0x40);
7786 ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
7787 ins_pipe(pipe_cmov_reg); // XXX
7788 %}
7790 instruct cmovL_mem(cmpOp cop, rFlagsReg cr, rRegL dst, memory src)
7791 %{
7792 match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src))));
7794 ins_cost(200); // XXX
7795 format %{ "cmovq$cop $dst, $src\t# signed, long" %}
7796 opcode(0x0F, 0x40);
7797 ins_encode(REX_reg_mem_wide(dst, src), enc_cmov(cop), reg_mem(dst, src));
7798 ins_pipe(pipe_cmov_mem); // XXX
7799 %}
7801 instruct cmovL_regU(cmpOpU cop, rFlagsRegU cr, rRegL dst, rRegL src)
7802 %{
7803 match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
7805 ins_cost(200); // XXX
7806 format %{ "cmovq$cop $dst, $src\t# unsigned, long" %}
7807 opcode(0x0F, 0x40);
7808 ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
7809 ins_pipe(pipe_cmov_reg); // XXX
7810 %}
7812 instruct cmovL_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegL dst, rRegL src) %{
7813 match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
7814 ins_cost(200);
7815 expand %{
7816 cmovL_regU(cop, cr, dst, src);
7817 %}
7818 %}
7820 instruct cmovL_memU(cmpOpU cop, rFlagsRegU cr, rRegL dst, memory src)
7821 %{
7822 match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src))));
7824 ins_cost(200); // XXX
7825 format %{ "cmovq$cop $dst, $src\t# unsigned, long" %}
7826 opcode(0x0F, 0x40);
7827 ins_encode(REX_reg_mem_wide(dst, src), enc_cmov(cop), reg_mem(dst, src));
7828 ins_pipe(pipe_cmov_mem); // XXX
7829 %}
7831 instruct cmovL_memUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegL dst, memory src) %{
7832 match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src))));
7833 ins_cost(200);
7834 expand %{
7835 cmovL_memU(cop, cr, dst, src);
7836 %}
7837 %}
// Conditional moves of float/double values.  x86 has no CMOV into
// XMM registers, so these are implemented as a short conditional
// branch around a movss/movsd (enc_cmovf_branch / enc_cmovd_branch),
// hence pipe_slow.
7839 instruct cmovF_reg(cmpOp cop, rFlagsReg cr, regF dst, regF src)
7840 %{
7841 match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
7843 ins_cost(200); // XXX
7844 format %{ "jn$cop skip\t# signed cmove float\n\t"
7845 "movss $dst, $src\n"
7846 "skip:" %}
7847 ins_encode(enc_cmovf_branch(cop, dst, src));
7848 ins_pipe(pipe_slow);
7849 %}
// Memory-source float cmove is disabled (note the match rule even
// says LoadL, not LoadF — it was never finished).
7851 // instruct cmovF_mem(cmpOp cop, rFlagsReg cr, regF dst, memory src)
7852 // %{
7853 // match(Set dst (CMoveF (Binary cop cr) (Binary dst (LoadL src))));
7855 // ins_cost(200); // XXX
7856 // format %{ "jn$cop skip\t# signed cmove float\n\t"
7857 // "movss $dst, $src\n"
7858 // "skip:" %}
7859 // ins_encode(enc_cmovf_mem_branch(cop, dst, src));
7860 // ins_pipe(pipe_slow);
7861 // %}
7863 instruct cmovF_regU(cmpOpU cop, rFlagsRegU cr, regF dst, regF src)
7864 %{
7865 match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
7867 ins_cost(200); // XXX
7868 format %{ "jn$cop skip\t# unsigned cmove float\n\t"
7869 "movss $dst, $src\n"
7870 "skip:" %}
7871 ins_encode(enc_cmovf_branch(cop, dst, src));
7872 ins_pipe(pipe_slow);
7873 %}
// CF/PF-flag variant: delegates to cmovF_regU via expand.
7875 instruct cmovF_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, regF dst, regF src) %{
7876 match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
7877 ins_cost(200);
7878 expand %{
7879 cmovF_regU(cop, cr, dst, src);
7880 %}
7881 %}
7883 instruct cmovD_reg(cmpOp cop, rFlagsReg cr, regD dst, regD src)
7884 %{
7885 match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
7887 ins_cost(200); // XXX
7888 format %{ "jn$cop skip\t# signed cmove double\n\t"
7889 "movsd $dst, $src\n"
7890 "skip:" %}
7891 ins_encode(enc_cmovd_branch(cop, dst, src));
7892 ins_pipe(pipe_slow);
7893 %}
7895 instruct cmovD_regU(cmpOpU cop, rFlagsRegU cr, regD dst, regD src)
7896 %{
7897 match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
7899 ins_cost(200); // XXX
7900 format %{ "jn$cop skip\t# unsigned cmove double\n\t"
7901 "movsd $dst, $src\n"
7902 "skip:" %}
7903 ins_encode(enc_cmovd_branch(cop, dst, src));
7904 ins_pipe(pipe_slow);
7905 %}
// CF/PF-flag variant: delegates to cmovD_regU via expand.
7907 instruct cmovD_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, regD dst, regD src) %{
7908 match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
7909 ins_cost(200);
7910 expand %{
7911 cmovD_regU(cop, cr, dst, src);
7912 %}
7913 %}
7915 //----------Arithmetic Instructions--------------------------------------------
7916 //----------Addition Instructions----------------------------------------------
// 32-bit integer addition: reg+reg, reg+imm, reg+mem, mem+reg,
// mem+imm, plus INC/DEC special cases for +1/-1 (guarded by
// UseIncDec since INC/DEC only partially update EFLAGS) and an LEA
// form that adds without clobbering flags.
7918 instruct addI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
7919 %{
7920 match(Set dst (AddI dst src));
7921 effect(KILL cr);
7923 format %{ "addl $dst, $src\t# int" %}
7924 opcode(0x03);
7925 ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
7926 ins_pipe(ialu_reg_reg);
7927 %}
7929 instruct addI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
7930 %{
7931 match(Set dst (AddI dst src));
7932 effect(KILL cr);
7934 format %{ "addl $dst, $src\t# int" %}
7935 opcode(0x81, 0x00); /* /0 id */
7936 ins_encode(OpcSErm(dst, src), Con8or32(src));
7937 ins_pipe( ialu_reg );
7938 %}
7940 instruct addI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
7941 %{
7942 match(Set dst (AddI dst (LoadI src)));
7943 effect(KILL cr);
7945 ins_cost(125); // XXX
7946 format %{ "addl $dst, $src\t# int" %}
7947 opcode(0x03);
7948 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
7949 ins_pipe(ialu_reg_mem);
7950 %}
// Read-modify-write form: add a register directly into memory.
7952 instruct addI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
7953 %{
7954 match(Set dst (StoreI dst (AddI (LoadI dst) src)));
7955 effect(KILL cr);
7957 ins_cost(150); // XXX
7958 format %{ "addl $dst, $src\t# int" %}
7959 opcode(0x01); /* Opcode 01 /r */
7960 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
7961 ins_pipe(ialu_mem_reg);
7962 %}
7964 instruct addI_mem_imm(memory dst, immI src, rFlagsReg cr)
7965 %{
7966 match(Set dst (StoreI dst (AddI (LoadI dst) src)));
7967 effect(KILL cr);
7969 ins_cost(125); // XXX
7970 format %{ "addl $dst, $src\t# int" %}
7971 opcode(0x81); /* Opcode 81 /0 id */
7972 ins_encode(REX_mem(dst), OpcSE(src), RM_opc_mem(0x00, dst), Con8or32(src));
7973 ins_pipe(ialu_mem_imm);
7974 %}
7976 instruct incI_rReg(rRegI dst, immI1 src, rFlagsReg cr)
7977 %{
7978 predicate(UseIncDec);
7979 match(Set dst (AddI dst src));
7980 effect(KILL cr);
7982 format %{ "incl $dst\t# int" %}
7983 opcode(0xFF, 0x00); // FF /0
7984 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
7985 ins_pipe(ialu_reg);
7986 %}
7988 instruct incI_mem(memory dst, immI1 src, rFlagsReg cr)
7989 %{
7990 predicate(UseIncDec);
7991 match(Set dst (StoreI dst (AddI (LoadI dst) src)));
7992 effect(KILL cr);
7994 ins_cost(125); // XXX
7995 format %{ "incl $dst\t# int" %}
7996 opcode(0xFF); /* Opcode FF /0 */
7997 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(0x00, dst));
7998 ins_pipe(ialu_mem_imm);
7999 %}
// Decrement is matched as AddI with a -1 immediate (immI_M1) — the
// ideal graph has no separate SubI-by-one node after canonicalization.
8001 // XXX why does that use AddI
8002 instruct decI_rReg(rRegI dst, immI_M1 src, rFlagsReg cr)
8003 %{
8004 predicate(UseIncDec);
8005 match(Set dst (AddI dst src));
8006 effect(KILL cr);
8008 format %{ "decl $dst\t# int" %}
8009 opcode(0xFF, 0x01); // FF /1
8010 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8011 ins_pipe(ialu_reg);
8012 %}
8014 // XXX why does that use AddI
8015 instruct decI_mem(memory dst, immI_M1 src, rFlagsReg cr)
8016 %{
8017 predicate(UseIncDec);
8018 match(Set dst (StoreI dst (AddI (LoadI dst) src)));
8019 effect(KILL cr);
8021 ins_cost(125); // XXX
8022 format %{ "decl $dst\t# int" %}
8023 opcode(0xFF); /* Opcode FF /1 */
8024 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(0x01, dst));
8025 ins_pipe(ialu_mem_imm);
8026 %}
// Three-operand add via LEA; 0x67 prefix forces a 32-bit address-size
// computation so the result is truncated like addl.  No flags killed.
8028 instruct leaI_rReg_immI(rRegI dst, rRegI src0, immI src1)
8029 %{
8030 match(Set dst (AddI src0 src1));
8032 ins_cost(110);
8033 format %{ "addr32 leal $dst, [$src0 + $src1]\t# int" %}
8034 opcode(0x8D); /* 0x8D /r */
8035 ins_encode(Opcode(0x67), REX_reg_reg(dst, src0), OpcP, reg_lea(dst, src0, src1)); // XXX
8036 ins_pipe(ialu_reg_reg);
8037 %}
// 64-bit long addition: same shapes as the 32-bit group above but
// with REX.W ("_wide") encodings; immediates are limited to 32-bit
// sign-extended values (immL32) because ADD has no imm64 form.
8039 instruct addL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
8040 %{
8041 match(Set dst (AddL dst src));
8042 effect(KILL cr);
8044 format %{ "addq $dst, $src\t# long" %}
8045 opcode(0x03);
8046 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
8047 ins_pipe(ialu_reg_reg);
8048 %}
8050 instruct addL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
8051 %{
8052 match(Set dst (AddL dst src));
8053 effect(KILL cr);
8055 format %{ "addq $dst, $src\t# long" %}
8056 opcode(0x81, 0x00); /* /0 id */
8057 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
8058 ins_pipe( ialu_reg );
8059 %}
8061 instruct addL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
8062 %{
8063 match(Set dst (AddL dst (LoadL src)));
8064 effect(KILL cr);
8066 ins_cost(125); // XXX
8067 format %{ "addq $dst, $src\t# long" %}
8068 opcode(0x03);
8069 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
8070 ins_pipe(ialu_reg_mem);
8071 %}
// Read-modify-write form: add a register directly into memory.
8073 instruct addL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
8074 %{
8075 match(Set dst (StoreL dst (AddL (LoadL dst) src)));
8076 effect(KILL cr);
8078 ins_cost(150); // XXX
8079 format %{ "addq $dst, $src\t# long" %}
8080 opcode(0x01); /* Opcode 01 /r */
8081 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
8082 ins_pipe(ialu_mem_reg);
8083 %}
8085 instruct addL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
8086 %{
8087 match(Set dst (StoreL dst (AddL (LoadL dst) src)));
8088 effect(KILL cr);
8090 ins_cost(125); // XXX
8091 format %{ "addq $dst, $src\t# long" %}
8092 opcode(0x81); /* Opcode 81 /0 id */
8093 ins_encode(REX_mem_wide(dst),
8094 OpcSE(src), RM_opc_mem(0x00, dst), Con8or32(src));
8095 ins_pipe(ialu_mem_imm);
8096 %}
// Increment a long register by one using incq (FF /0).  Guarded by
// UseIncDec because INC/DEC update only a subset of EFLAGS and can
// cause partial-flag stalls on some processors.
// Fix: the destination operand was declared rRegI (32-bit int reg
// class) even though the rule matches 64-bit AddL and the encoding
// is REX.W-wide; declare it rRegL to match decL_rReg/incL_mem and
// keep the register-class typing consistent with the ideal type.
8098 instruct incL_rReg(rRegL dst, immL1 src, rFlagsReg cr)
8099 %{
8100 predicate(UseIncDec);
8101 match(Set dst (AddL dst src));
8102 effect(KILL cr);
8104 format %{ "incq $dst\t# long" %}
8105 opcode(0xFF, 0x00); // FF /0
8106 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
8107 ins_pipe(ialu_reg);
8108 %}
// Long INC/DEC in memory and by-register forms, three-operand long
// add via LEA, and pointer (AddP) addition.  decL_* match AddL with
// a -1 immediate (immL_M1) — see the XXX notes; the ideal graph has
// no separate subtract-by-one node after canonicalization.
8110 instruct incL_mem(memory dst, immL1 src, rFlagsReg cr)
8111 %{
8112 predicate(UseIncDec);
8113 match(Set dst (StoreL dst (AddL (LoadL dst) src)));
8114 effect(KILL cr);
8116 ins_cost(125); // XXX
8117 format %{ "incq $dst\t# long" %}
8118 opcode(0xFF); /* Opcode FF /0 */
8119 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(0x00, dst));
8120 ins_pipe(ialu_mem_imm);
8121 %}
8123 // XXX why does that use AddL
8124 instruct decL_rReg(rRegL dst, immL_M1 src, rFlagsReg cr)
8125 %{
8126 predicate(UseIncDec);
8127 match(Set dst (AddL dst src));
8128 effect(KILL cr);
8130 format %{ "decq $dst\t# long" %}
8131 opcode(0xFF, 0x01); // FF /1
8132 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
8133 ins_pipe(ialu_reg);
8134 %}
8136 // XXX why does that use AddL
8137 instruct decL_mem(memory dst, immL_M1 src, rFlagsReg cr)
8138 %{
8139 predicate(UseIncDec);
8140 match(Set dst (StoreL dst (AddL (LoadL dst) src)));
8141 effect(KILL cr);
8143 ins_cost(125); // XXX
8144 format %{ "decq $dst\t# long" %}
8145 opcode(0xFF); /* Opcode FF /1 */
8146 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(0x01, dst));
8147 ins_pipe(ialu_mem_imm);
8148 %}
// Three-operand long add via LEA — no flags killed.
8150 instruct leaL_rReg_immL(rRegL dst, rRegL src0, immL32 src1)
8151 %{
8152 match(Set dst (AddL src0 src1));
8154 ins_cost(110);
8155 format %{ "leaq $dst, [$src0 + $src1]\t# long" %}
8156 opcode(0x8D); /* 0x8D /r */
8157 ins_encode(REX_reg_reg_wide(dst, src0), OpcP, reg_lea(dst, src0, src1)); // XXX
8158 ins_pipe(ialu_reg_reg);
8159 %}
// Pointer addition (AddP): pointer += long offset.
8161 instruct addP_rReg(rRegP dst, rRegL src, rFlagsReg cr)
8162 %{
8163 match(Set dst (AddP dst src));
8164 effect(KILL cr);
8166 format %{ "addq $dst, $src\t# ptr" %}
8167 opcode(0x03);
8168 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
8169 ins_pipe(ialu_reg_reg);
8170 %}
8172 instruct addP_rReg_imm(rRegP dst, immL32 src, rFlagsReg cr)
8173 %{
8174 match(Set dst (AddP dst src));
8175 effect(KILL cr);
8177 format %{ "addq $dst, $src\t# ptr" %}
8178 opcode(0x81, 0x00); /* /0 id */
8179 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
8180 ins_pipe( ialu_reg );
8181 %}
8183 // XXX addP mem ops ????
// Three-operand pointer add via LEA — no flags killed.
8185 instruct leaP_rReg_imm(rRegP dst, rRegP src0, immL32 src1)
8186 %{
8187 match(Set dst (AddP src0 src1));
8189 ins_cost(110);
8190 format %{ "leaq $dst, [$src0 + $src1]\t# ptr" %}
8191 opcode(0x8D); /* 0x8D /r */
8192 ins_encode(REX_reg_reg_wide(dst, src0), OpcP, reg_lea(dst, src0, src1));// XXX
8193 ins_pipe(ialu_reg_reg);
8194 %}
// Type-system-only cast nodes: CheckCastPP, CastPP and CastII carry
// type information in the ideal graph but emit no machine code —
// size(0), empty encoding, empty pipeline.
8196 instruct checkCastPP(rRegP dst)
8197 %{
8198 match(Set dst (CheckCastPP dst));
8200 size(0);
8201 format %{ "# checkcastPP of $dst" %}
8202 ins_encode(/* empty encoding */);
8203 ins_pipe(empty);
8204 %}
8206 instruct castPP(rRegP dst)
8207 %{
8208 match(Set dst (CastPP dst));
8210 size(0);
8211 format %{ "# castPP of $dst" %}
8212 ins_encode(/* empty encoding */);
8213 ins_pipe(empty);
8214 %}
8216 instruct castII(rRegI dst)
8217 %{
8218 match(Set dst (CastII dst));
8220 size(0);
8221 format %{ "# castII of $dst" %}
8222 ins_encode(/* empty encoding */);
8223 ins_cost(0);
8224 ins_pipe(empty);
8225 %}
8227 // LoadP-locked same as a regular LoadP when used with compare-swap
// Load-locked nodes: on x86 these are ordinary movq loads — the
// atomicity comes from the CMPXCHG in the paired conditional store,
// not from the load itself.
8228 instruct loadPLocked(rRegP dst, memory mem)
8229 %{
8230 match(Set dst (LoadPLocked mem));
8232 ins_cost(125); // XXX
8233 format %{ "movq $dst, $mem\t# ptr locked" %}
8234 opcode(0x8B);
8235 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
8236 ins_pipe(ialu_reg_mem); // XXX
8237 %}
8239 // LoadL-locked - same as a regular LoadL when used with compare-swap
8240 instruct loadLLocked(rRegL dst, memory mem)
8241 %{
8242 match(Set dst (LoadLLocked mem));
8244 ins_cost(125); // XXX
8245 format %{ "movq $dst, $mem\t# long locked" %}
8246 opcode(0x8B);
8247 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
8248 ins_pipe(ialu_reg_mem); // XXX
8249 %}
8251 // Conditional-store of the updated heap-top.
8252 // Used during allocation of the shared heap.
8253 // Sets flags (EQ) on success. Implemented with a CMPXCHG on Intel.
// Conditional stores via lock cmpxchg: the expected old value must
// already be in rax (rax_Reg* operand), success is reported in ZF.
// storePConditional is the heap-top CAS used for shared-heap
// allocation; the I/L forms additionally KILL oldval because
// cmpxchg writes the current memory value back into rax on failure.
8255 instruct storePConditional(memory heap_top_ptr,
8256 rax_RegP oldval, rRegP newval,
8257 rFlagsReg cr)
8258 %{
8259 match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));
8261 format %{ "cmpxchgq $heap_top_ptr, $newval\t# (ptr) "
8262 "If rax == $heap_top_ptr then store $newval into $heap_top_ptr" %}
8263 opcode(0x0F, 0xB1);
8264 ins_encode(lock_prefix,
8265 REX_reg_mem_wide(newval, heap_top_ptr),
8266 OpcP, OpcS,
8267 reg_mem(newval, heap_top_ptr));
8268 ins_pipe(pipe_cmpxchg);
8269 %}
8271 // Conditional-store of an int value.
8272 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
8273 instruct storeIConditional(memory mem, rax_RegI oldval, rRegI newval, rFlagsReg cr)
8274 %{
8275 match(Set cr (StoreIConditional mem (Binary oldval newval)));
8276 effect(KILL oldval);
8278 format %{ "cmpxchgl $mem, $newval\t# If rax == $mem then store $newval into $mem" %}
8279 opcode(0x0F, 0xB1);
8280 ins_encode(lock_prefix,
8281 REX_reg_mem(newval, mem),
8282 OpcP, OpcS,
8283 reg_mem(newval, mem));
8284 ins_pipe(pipe_cmpxchg);
8285 %}
8287 // Conditional-store of a long value.
8288 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
8289 instruct storeLConditional(memory mem, rax_RegL oldval, rRegL newval, rFlagsReg cr)
8290 %{
8291 match(Set cr (StoreLConditional mem (Binary oldval newval)));
8292 effect(KILL oldval);
8294 format %{ "cmpxchgq $mem, $newval\t# If rax == $mem then store $newval into $mem" %}
8295 opcode(0x0F, 0xB1);
8296 ins_encode(lock_prefix,
8297 REX_reg_mem_wide(newval, mem),
8298 OpcP, OpcS,
8299 reg_mem(newval, mem));
8300 ins_pipe(pipe_cmpxchg);
8301 %}
8304 // XXX No flag versions for CompareAndSwap{P,I,L} because matcher can't match them
// CompareAndSwap{P,L,I,N}: lock cmpxchg followed by sete/movzbl to
// materialize the boolean result in an int register.  Expected old
// value goes in rax (and is KILLed — cmpxchg overwrites rax with
// the observed value on failure).  N is the 32-bit compressed-oop
// variant and shares the cmpxchgl encoding with I.
8305 instruct compareAndSwapP(rRegI res,
8306 memory mem_ptr,
8307 rax_RegP oldval, rRegP newval,
8308 rFlagsReg cr)
8309 %{
8310 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
8311 effect(KILL cr, KILL oldval);
8313 format %{ "cmpxchgq $mem_ptr,$newval\t# "
8314 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
8315 "sete $res\n\t"
8316 "movzbl $res, $res" %}
8317 opcode(0x0F, 0xB1);
8318 ins_encode(lock_prefix,
8319 REX_reg_mem_wide(newval, mem_ptr),
8320 OpcP, OpcS,
8321 reg_mem(newval, mem_ptr),
8322 REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
8323 REX_reg_breg(res, res), // movzbl
8324 Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
8325 ins_pipe( pipe_cmpxchg );
8326 %}
8328 instruct compareAndSwapL(rRegI res,
8329 memory mem_ptr,
8330 rax_RegL oldval, rRegL newval,
8331 rFlagsReg cr)
8332 %{
8333 match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
8334 effect(KILL cr, KILL oldval);
8336 format %{ "cmpxchgq $mem_ptr,$newval\t# "
8337 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
8338 "sete $res\n\t"
8339 "movzbl $res, $res" %}
8340 opcode(0x0F, 0xB1);
8341 ins_encode(lock_prefix,
8342 REX_reg_mem_wide(newval, mem_ptr),
8343 OpcP, OpcS,
8344 reg_mem(newval, mem_ptr),
8345 REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
8346 REX_reg_breg(res, res), // movzbl
8347 Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
8348 ins_pipe( pipe_cmpxchg );
8349 %}
8351 instruct compareAndSwapI(rRegI res,
8352 memory mem_ptr,
8353 rax_RegI oldval, rRegI newval,
8354 rFlagsReg cr)
8355 %{
8356 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
8357 effect(KILL cr, KILL oldval);
8359 format %{ "cmpxchgl $mem_ptr,$newval\t# "
8360 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
8361 "sete $res\n\t"
8362 "movzbl $res, $res" %}
8363 opcode(0x0F, 0xB1);
8364 ins_encode(lock_prefix,
8365 REX_reg_mem(newval, mem_ptr),
8366 OpcP, OpcS,
8367 reg_mem(newval, mem_ptr),
8368 REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
8369 REX_reg_breg(res, res), // movzbl
8370 Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
8371 ins_pipe( pipe_cmpxchg );
8372 %}
8375 instruct compareAndSwapN(rRegI res,
8376 memory mem_ptr,
8377 rax_RegN oldval, rRegN newval,
8378 rFlagsReg cr) %{
8379 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
8380 effect(KILL cr, KILL oldval);
8382 format %{ "cmpxchgl $mem_ptr,$newval\t# "
8383 "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
8384 "sete $res\n\t"
8385 "movzbl $res, $res" %}
8386 opcode(0x0F, 0xB1);
8387 ins_encode(lock_prefix,
8388 REX_reg_mem(newval, mem_ptr),
8389 OpcP, OpcS,
8390 reg_mem(newval, mem_ptr),
8391 REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
8392 REX_reg_breg(res, res), // movzbl
8393 Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
8394 ins_pipe( pipe_cmpxchg );
8395 %}
8397 //----------Subtraction Instructions-------------------------------------------
8399 // Integer Subtraction Instructions
// 32-bit integer subtraction (reg-reg, reg-imm, reg-mem, mem-reg,
// mem-imm) and the 64-bit reg-reg form.  Immediate forms use
// opcode 0x81 /5 with sign-extended 8/32-bit immediates.
8400 instruct subI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
8401 %{
8402 match(Set dst (SubI dst src));
8403 effect(KILL cr);
8405 format %{ "subl $dst, $src\t# int" %}
8406 opcode(0x2B);
8407 ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
8408 ins_pipe(ialu_reg_reg);
8409 %}
8411 instruct subI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
8412 %{
8413 match(Set dst (SubI dst src));
8414 effect(KILL cr);
8416 format %{ "subl $dst, $src\t# int" %}
8417 opcode(0x81, 0x05); /* Opcode 81 /5 */
8418 ins_encode(OpcSErm(dst, src), Con8or32(src));
8419 ins_pipe(ialu_reg);
8420 %}
8422 instruct subI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
8423 %{
8424 match(Set dst (SubI dst (LoadI src)));
8425 effect(KILL cr);
8427 ins_cost(125);
8428 format %{ "subl $dst, $src\t# int" %}
8429 opcode(0x2B);
8430 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
8431 ins_pipe(ialu_reg_mem);
8432 %}
// Read-modify-write form: subtract a register directly from memory.
8434 instruct subI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
8435 %{
8436 match(Set dst (StoreI dst (SubI (LoadI dst) src)));
8437 effect(KILL cr);
8439 ins_cost(150);
8440 format %{ "subl $dst, $src\t# int" %}
8441 opcode(0x29); /* Opcode 29 /r */
8442 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
8443 ins_pipe(ialu_mem_reg);
8444 %}
8446 instruct subI_mem_imm(memory dst, immI src, rFlagsReg cr)
8447 %{
8448 match(Set dst (StoreI dst (SubI (LoadI dst) src)));
8449 effect(KILL cr);
8451 ins_cost(125); // XXX
8452 format %{ "subl $dst, $src\t# int" %}
8453 opcode(0x81); /* Opcode 81 /5 id */
8454 ins_encode(REX_mem(dst), OpcSE(src), RM_opc_mem(0x05, dst), Con8or32(src));
8455 ins_pipe(ialu_mem_imm);
8456 %}
8458 instruct subL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
8459 %{
8460 match(Set dst (SubL dst src));
8461 effect(KILL cr);
8463 format %{ "subq $dst, $src\t# long" %}
8464 opcode(0x2B);
8465 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
8466 ins_pipe(ialu_reg_reg);
8467 %}
// Subtract a 32-bit sign-extended immediate from a long register
// (subq, opcode 81 /5 with Con8or32 immediate selection).
// Fix: the destination operand was declared rRegI even though the
// rule matches 64-bit SubL and the encoding is REX.W-wide; declare
// it rRegL to match the other subL_* rules and keep register-class
// typing consistent with the ideal type.
8469 instruct subL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
8470 %{
8471 match(Set dst (SubL dst src));
8472 effect(KILL cr);
8474 format %{ "subq $dst, $src\t# long" %}
8475 opcode(0x81, 0x05); /* Opcode 81 /5 */
8476 ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
8477 ins_pipe(ialu_reg);
8478 %}
// Long subtraction with memory operands: reg-mem, and
// read-modify-write mem-reg / mem-imm forms.
8480 instruct subL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
8481 %{
8482 match(Set dst (SubL dst (LoadL src)));
8483 effect(KILL cr);
8485 ins_cost(125);
8486 format %{ "subq $dst, $src\t# long" %}
8487 opcode(0x2B);
8488 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
8489 ins_pipe(ialu_reg_mem);
8490 %}
8492 instruct subL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
8493 %{
8494 match(Set dst (StoreL dst (SubL (LoadL dst) src)));
8495 effect(KILL cr);
8497 ins_cost(150);
8498 format %{ "subq $dst, $src\t# long" %}
8499 opcode(0x29); /* Opcode 29 /r */
8500 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
8501 ins_pipe(ialu_mem_reg);
8502 %}
8504 instruct subL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
8505 %{
8506 match(Set dst (StoreL dst (SubL (LoadL dst) src)));
8507 effect(KILL cr);
8509 ins_cost(125); // XXX
8510 format %{ "subq $dst, $src\t# long" %}
8511 opcode(0x81); /* Opcode 81 /5 id */
8512 ins_encode(REX_mem_wide(dst),
8513 OpcSE(src), RM_opc_mem(0x05, dst), Con8or32(src));
8514 ins_pipe(ialu_mem_imm);
8515 %}
8517 // Subtract from a pointer
8518 // XXX hmpf???
// Pointer-minus-int, matched as AddP of a negated int (AddP dst
// (SubI 0 src)), and two's-complement negation (NEG, F7 /3) for
// int/long in register and memory forms.
8519 instruct subP_rReg(rRegP dst, rRegI src, immI0 zero, rFlagsReg cr)
8520 %{
8521 match(Set dst (AddP dst (SubI zero src)));
8522 effect(KILL cr);
8524 format %{ "subq $dst, $src\t# ptr - int" %}
8525 opcode(0x2B);
8526 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
8527 ins_pipe(ialu_reg_reg);
8528 %}
// Negation is matched as subtraction from zero (SubI 0 dst).
8530 instruct negI_rReg(rRegI dst, immI0 zero, rFlagsReg cr)
8531 %{
8532 match(Set dst (SubI zero dst));
8533 effect(KILL cr);
8535 format %{ "negl $dst\t# int" %}
8536 opcode(0xF7, 0x03); // Opcode F7 /3
8537 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8538 ins_pipe(ialu_reg);
8539 %}
8541 instruct negI_mem(memory dst, immI0 zero, rFlagsReg cr)
8542 %{
8543 match(Set dst (StoreI dst (SubI zero (LoadI dst))));
8544 effect(KILL cr);
8546 format %{ "negl $dst\t# int" %}
8547 opcode(0xF7, 0x03); // Opcode F7 /3
8548 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
8549 ins_pipe(ialu_reg);
8550 %}
8552 instruct negL_rReg(rRegL dst, immL0 zero, rFlagsReg cr)
8553 %{
8554 match(Set dst (SubL zero dst));
8555 effect(KILL cr);
8557 format %{ "negq $dst\t# long" %}
8558 opcode(0xF7, 0x03); // Opcode F7 /3
8559 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
8560 ins_pipe(ialu_reg);
8561 %}
8563 instruct negL_mem(memory dst, immL0 zero, rFlagsReg cr)
8564 %{
8565 match(Set dst (StoreL dst (SubL zero (LoadL dst))));
8566 effect(KILL cr);
8568 format %{ "negq $dst\t# long" %}
8569 opcode(0xF7, 0x03); // Opcode F7 /3
8570 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
8571 ins_pipe(ialu_reg);
8572 %}
8575 //----------Multiplication/Division Instructions-------------------------------
8576 // Integer Multiplication Instructions
8577 // Multiply Register
// Integer/long multiply: two-operand IMUL (0F AF) for reg/mem
// sources, and three-operand immediate IMUL (69 /r id, with 6B /r
// ib selected via OpcSE/Con8or32 for 8-bit immediates).
8579 instruct mulI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
8580 %{
8581 match(Set dst (MulI dst src));
8582 effect(KILL cr);
8584 ins_cost(300);
8585 format %{ "imull $dst, $src\t# int" %}
8586 opcode(0x0F, 0xAF);
8587 ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
8588 ins_pipe(ialu_reg_reg_alu0);
8589 %}
8591 instruct mulI_rReg_imm(rRegI dst, rRegI src, immI imm, rFlagsReg cr)
8592 %{
8593 match(Set dst (MulI src imm));
8594 effect(KILL cr);
8596 ins_cost(300);
8597 format %{ "imull $dst, $src, $imm\t# int" %}
8598 opcode(0x69); /* 69 /r id */
8599 ins_encode(REX_reg_reg(dst, src),
8600 OpcSE(imm), reg_reg(dst, src), Con8or32(imm));
8601 ins_pipe(ialu_reg_reg_alu0);
8602 %}
8604 instruct mulI_mem(rRegI dst, memory src, rFlagsReg cr)
8605 %{
8606 match(Set dst (MulI dst (LoadI src)));
8607 effect(KILL cr);
8609 ins_cost(350);
8610 format %{ "imull $dst, $src\t# int" %}
8611 opcode(0x0F, 0xAF);
8612 ins_encode(REX_reg_mem(dst, src), OpcP, OpcS, reg_mem(dst, src));
8613 ins_pipe(ialu_reg_mem_alu0);
8614 %}
8616 instruct mulI_mem_imm(rRegI dst, memory src, immI imm, rFlagsReg cr)
8617 %{
8618 match(Set dst (MulI (LoadI src) imm));
8619 effect(KILL cr);
8621 ins_cost(300);
8622 format %{ "imull $dst, $src, $imm\t# int" %}
8623 opcode(0x69); /* 69 /r id */
8624 ins_encode(REX_reg_mem(dst, src),
8625 OpcSE(imm), reg_mem(dst, src), Con8or32(imm));
8626 ins_pipe(ialu_reg_mem_alu0);
8627 %}
8629 instruct mulL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
8630 %{
8631 match(Set dst (MulL dst src));
8632 effect(KILL cr);
8634 ins_cost(300);
8635 format %{ "imulq $dst, $src\t# long" %}
8636 opcode(0x0F, 0xAF);
8637 ins_encode(REX_reg_reg_wide(dst, src), OpcP, OpcS, reg_reg(dst, src));
8638 ins_pipe(ialu_reg_reg_alu0);
8639 %}
8641 instruct mulL_rReg_imm(rRegL dst, rRegL src, immL32 imm, rFlagsReg cr)
8642 %{
8643 match(Set dst (MulL src imm));
8644 effect(KILL cr);
8646 ins_cost(300);
8647 format %{ "imulq $dst, $src, $imm\t# long" %}
8648 opcode(0x69); /* 69 /r id */
8649 ins_encode(REX_reg_reg_wide(dst, src),
8650 OpcSE(imm), reg_reg(dst, src), Con8or32(imm));
8651 ins_pipe(ialu_reg_reg_alu0);
8652 %}
8654 instruct mulL_mem(rRegL dst, memory src, rFlagsReg cr)
8655 %{
8656 match(Set dst (MulL dst (LoadL src)));
8657 effect(KILL cr);
8659 ins_cost(350);
8660 format %{ "imulq $dst, $src\t# long" %}
8661 opcode(0x0F, 0xAF);
8662 ins_encode(REX_reg_mem_wide(dst, src), OpcP, OpcS, reg_mem(dst, src));
8663 ins_pipe(ialu_reg_mem_alu0);
8664 %}
8666 instruct mulL_mem_imm(rRegL dst, memory src, immL32 imm, rFlagsReg cr)
8667 %{
8668 match(Set dst (MulL (LoadL src) imm));
8669 effect(KILL cr);
8671 ins_cost(300);
8672 format %{ "imulq $dst, $src, $imm\t# long" %}
8673 opcode(0x69); /* 69 /r id */
8674 ins_encode(REX_reg_mem_wide(dst, src),
8675 OpcSE(imm), reg_mem(dst, src), Con8or32(imm));
8676 ins_pipe(ialu_reg_mem_alu0);
8677 %}
// High 64 bits of a 64x64-bit signed multiply: one-operand IMUL
// (F7 /5) computes RDX:RAX = RAX * src; the result (high half) is
// pinned to rdx and rax is consumed/clobbered.
8679 instruct mulHiL_rReg(rdx_RegL dst, no_rax_RegL src, rax_RegL rax, rFlagsReg cr)
8680 %{
8681 match(Set dst (MulHiL src rax));
8682 effect(USE_KILL rax, KILL cr);
8684 ins_cost(300);
8685 format %{ "imulq RDX:RAX, RAX, $src\t# mulhi" %}
8686 opcode(0xF7, 0x5); /* Opcode F7 /5 */
8687 ins_encode(REX_reg_wide(src), OpcP, reg_opc(src));
8688 ins_pipe(ialu_reg_reg_alu0);
8689 %}
// Integer/long division via IDIV (F7 /7).  The cdql_enc/cdqq_enc
// prologue special-cases MIN_VALUE / -1, which would otherwise
// raise #DE (IDIV overflow), by producing the correct wrapped
// result directly.  Divisor may not be rax/rdx (no_rax_rdx_Reg*)
// because IDIV uses rdx:rax implicitly.  The DivMod forms define
// both quotient (rax) and remainder (rdx).
8691 instruct divI_rReg(rax_RegI rax, rdx_RegI rdx, no_rax_rdx_RegI div,
8692 rFlagsReg cr)
8693 %{
8694 match(Set rax (DivI rax div));
8695 effect(KILL rdx, KILL cr);
8697 ins_cost(30*100+10*100); // XXX
8698 format %{ "cmpl rax, 0x80000000\t# idiv\n\t"
8699 "jne,s normal\n\t"
8700 "xorl rdx, rdx\n\t"
8701 "cmpl $div, -1\n\t"
8702 "je,s done\n"
8703 "normal: cdql\n\t"
8704 "idivl $div\n"
8705 "done:" %}
8706 opcode(0xF7, 0x7); /* Opcode F7 /7 */
8707 ins_encode(cdql_enc(div), REX_reg(div), OpcP, reg_opc(div));
8708 ins_pipe(ialu_reg_reg_alu0);
8709 %}
8711 instruct divL_rReg(rax_RegL rax, rdx_RegL rdx, no_rax_rdx_RegL div,
8712 rFlagsReg cr)
8713 %{
8714 match(Set rax (DivL rax div));
8715 effect(KILL rdx, KILL cr);
8717 ins_cost(30*100+10*100); // XXX
8718 format %{ "movq rdx, 0x8000000000000000\t# ldiv\n\t"
8719 "cmpq rax, rdx\n\t"
8720 "jne,s normal\n\t"
8721 "xorl rdx, rdx\n\t"
8722 "cmpq $div, -1\n\t"
8723 "je,s done\n"
8724 "normal: cdqq\n\t"
8725 "idivq $div\n"
8726 "done:" %}
8727 opcode(0xF7, 0x7); /* Opcode F7 /7 */
8728 ins_encode(cdqq_enc(div), REX_reg_wide(div), OpcP, reg_opc(div));
8729 ins_pipe(ialu_reg_reg_alu0);
8730 %}
8732 // Integer DIVMOD with Register, both quotient and mod results
8733 instruct divModI_rReg_divmod(rax_RegI rax, rdx_RegI rdx, no_rax_rdx_RegI div,
8734 rFlagsReg cr)
8735 %{
8736 match(DivModI rax div);
8737 effect(KILL cr);
8739 ins_cost(30*100+10*100); // XXX
8740 format %{ "cmpl rax, 0x80000000\t# idiv\n\t"
8741 "jne,s normal\n\t"
8742 "xorl rdx, rdx\n\t"
8743 "cmpl $div, -1\n\t"
8744 "je,s done\n"
8745 "normal: cdql\n\t"
8746 "idivl $div\n"
8747 "done:" %}
8748 opcode(0xF7, 0x7); /* Opcode F7 /7 */
8749 ins_encode(cdql_enc(div), REX_reg(div), OpcP, reg_opc(div));
8750 ins_pipe(pipe_slow);
8751 %}
8753 // Long DIVMOD with Register, both quotient and mod results
8754 instruct divModL_rReg_divmod(rax_RegL rax, rdx_RegL rdx, no_rax_rdx_RegL div,
8755 rFlagsReg cr)
8756 %{
8757 match(DivModL rax div);
8758 effect(KILL cr);
8760 ins_cost(30*100+10*100); // XXX
8761 format %{ "movq rdx, 0x8000000000000000\t# ldiv\n\t"
8762 "cmpq rax, rdx\n\t"
8763 "jne,s normal\n\t"
8764 "xorl rdx, rdx\n\t"
8765 "cmpq $div, -1\n\t"
8766 "je,s done\n"
8767 "normal: cdqq\n\t"
8768 "idivq $div\n"
8769 "done:" %}
8770 opcode(0xF7, 0x7); /* Opcode F7 /7 */
8771 ins_encode(cdqq_enc(div), REX_reg_wide(div), OpcP, reg_opc(div));
8772 ins_pipe(pipe_slow);
8773 %}
8775 //----------- DivL-By-Constant-Expansions--------------------------------------
8776 // DivI cases are handled by the compiler
8778 // Magic constant, reciprocal of 10
// Load the magic reciprocal-of-10 constant used by the divL_10
// expansion (multiply-high by 2/5 scaled, then shift).
// Fix: the format string printed "#0x666666666666667" (15 hex
// digits) but the encoded immediate is the 16-digit
// 0x6666666666666667 — corrected so disassembly output matches the
// actual constant.
8779 instruct loadConL_0x6666666666666667(rRegL dst)
8780 %{
8781 effect(DEF dst);
8783 format %{ "movq $dst, #0x6666666666666667\t# Used in div-by-10" %}
8784 ins_encode(load_immL(dst, 0x6666666666666667));
8785 ins_pipe(ialu_reg);
8786 %}
// Building blocks for the strength-reduced long divide-by-10:
// multiply-high by the 0x6666666666666667 reciprocal, arithmetic
// shifts, then a subtract of the sign correction.  These helper
// instructs have no match rule — they exist only for the divL_10
// expand below.
8788 instruct mul_hi(rdx_RegL dst, no_rax_RegL src, rax_RegL rax, rFlagsReg cr)
8789 %{
8790 effect(DEF dst, USE src, USE_KILL rax, KILL cr);
8792 format %{ "imulq rdx:rax, rax, $src\t# Used in div-by-10" %}
8793 opcode(0xF7, 0x5); /* Opcode F7 /5 */
8794 ins_encode(REX_reg_wide(src), OpcP, reg_opc(src));
8795 ins_pipe(ialu_reg_reg_alu0);
8796 %}
// Extract the sign of the dividend (arithmetic shift by 63).
8798 instruct sarL_rReg_63(rRegL dst, rFlagsReg cr)
8799 %{
8800 effect(USE_DEF dst, KILL cr);
8802 format %{ "sarq $dst, #63\t# Used in div-by-10" %}
8803 opcode(0xC1, 0x7); /* C1 /7 ib */
8804 ins_encode(reg_opc_imm_wide(dst, 0x3F));
8805 ins_pipe(ialu_reg);
8806 %}
8808 instruct sarL_rReg_2(rRegL dst, rFlagsReg cr)
8809 %{
8810 effect(USE_DEF dst, KILL cr);
8812 format %{ "sarq $dst, #2\t# Used in div-by-10" %}
8813 opcode(0xC1, 0x7); /* C1 /7 ib */
8814 ins_encode(reg_opc_imm_wide(dst, 0x2));
8815 ins_pipe(ialu_reg);
8816 %}
// DivL by the constant 10 (immL10): mulhi + shifts + sign fixup,
// much cheaper than idivq.  Note the expansion clobbers src via
// sarL_rReg_63 in addition to rax/flags.
8818 instruct divL_10(rdx_RegL dst, no_rax_RegL src, immL10 div)
8819 %{
8820 match(Set dst (DivL src div));
8822 ins_cost((5+8)*100);
8823 expand %{
8824 rax_RegL rax; // Killed temp
8825 rFlagsReg cr; // Killed
8826 loadConL_0x6666666666666667(rax); // movq rax, 0x6666666666666667
8827 mul_hi(dst, src, rax, cr); // mulq rdx:rax <= rax * $src
8828 sarL_rReg_63(src, cr); // sarq src, 63
8829 sarL_rReg_2(dst, cr); // sarq rdx, 2
8830 subL_rReg(dst, src, cr); // subl rdx, src
8831 %}
8832 %}
8834 //-----------------------------------------------------------------------------
// Integer/long remainder via IDIV: same MIN_VALUE / -1 guard as
// the divide rules, but the result taken is the remainder in rdx
// (rax holds the quotient and is KILLed).
8836 instruct modI_rReg(rdx_RegI rdx, rax_RegI rax, no_rax_rdx_RegI div,
8837 rFlagsReg cr)
8838 %{
8839 match(Set rdx (ModI rax div));
8840 effect(KILL rax, KILL cr);
8842 ins_cost(300); // XXX
8843 format %{ "cmpl rax, 0x80000000\t# irem\n\t"
8844 "jne,s normal\n\t"
8845 "xorl rdx, rdx\n\t"
8846 "cmpl $div, -1\n\t"
8847 "je,s done\n"
8848 "normal: cdql\n\t"
8849 "idivl $div\n"
8850 "done:" %}
8851 opcode(0xF7, 0x7); /* Opcode F7 /7 */
8852 ins_encode(cdql_enc(div), REX_reg(div), OpcP, reg_opc(div));
8853 ins_pipe(ialu_reg_reg_alu0);
8854 %}
8856 instruct modL_rReg(rdx_RegL rdx, rax_RegL rax, no_rax_rdx_RegL div,
8857 rFlagsReg cr)
8858 %{
8859 match(Set rdx (ModL rax div));
8860 effect(KILL rax, KILL cr);
8862 ins_cost(300); // XXX
8863 format %{ "movq rdx, 0x8000000000000000\t# lrem\n\t"
8864 "cmpq rax, rdx\n\t"
8865 "jne,s normal\n\t"
8866 "xorl rdx, rdx\n\t"
8867 "cmpq $div, -1\n\t"
8868 "je,s done\n"
8869 "normal: cdqq\n\t"
8870 "idivq $div\n"
8871 "done:" %}
8872 opcode(0xF7, 0x7); /* Opcode F7 /7 */
8873 ins_encode(cdqq_enc(div), REX_reg_wide(div), OpcP, reg_opc(div));
8874 ins_pipe(ialu_reg_reg_alu0);
8875 %}
8877 // Integer Shift Instructions
8878 // Shift Left by one
// Int shift-left: dedicated shift-by-1 encodings (D1 /4), 8-bit
// immediate shifts (C1 /4 ib), and variable shifts by CL (D3 /4,
// count pinned to rcx).  Each comes in register and
// read-modify-write memory form.
8879 instruct salI_rReg_1(rRegI dst, immI1 shift, rFlagsReg cr)
8880 %{
8881 match(Set dst (LShiftI dst shift));
8882 effect(KILL cr);
8884 format %{ "sall $dst, $shift" %}
8885 opcode(0xD1, 0x4); /* D1 /4 */
8886 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8887 ins_pipe(ialu_reg);
8888 %}
8890 // Shift Left by one
8891 instruct salI_mem_1(memory dst, immI1 shift, rFlagsReg cr)
8892 %{
8893 match(Set dst (StoreI dst (LShiftI (LoadI dst) shift)));
8894 effect(KILL cr);
8896 format %{ "sall $dst, $shift\t" %}
8897 opcode(0xD1, 0x4); /* D1 /4 */
8898 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
8899 ins_pipe(ialu_mem_imm);
8900 %}
8902 // Shift Left by 8-bit immediate
8903 instruct salI_rReg_imm(rRegI dst, immI8 shift, rFlagsReg cr)
8904 %{
8905 match(Set dst (LShiftI dst shift));
8906 effect(KILL cr);
8908 format %{ "sall $dst, $shift" %}
8909 opcode(0xC1, 0x4); /* C1 /4 ib */
8910 ins_encode(reg_opc_imm(dst, shift));
8911 ins_pipe(ialu_reg);
8912 %}
8914 // Shift Left by 8-bit immediate
8915 instruct salI_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
8916 %{
8917 match(Set dst (StoreI dst (LShiftI (LoadI dst) shift)));
8918 effect(KILL cr);
8920 format %{ "sall $dst, $shift" %}
8921 opcode(0xC1, 0x4); /* C1 /4 ib */
8922 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst), Con8or32(shift));
8923 ins_pipe(ialu_mem_imm);
8924 %}
8926 // Shift Left by variable
8927 instruct salI_rReg_CL(rRegI dst, rcx_RegI shift, rFlagsReg cr)
8928 %{
8929 match(Set dst (LShiftI dst shift));
8930 effect(KILL cr);
8932 format %{ "sall $dst, $shift" %}
8933 opcode(0xD3, 0x4); /* D3 /4 */
8934 ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
8935 ins_pipe(ialu_reg_reg);
8936 %}
8938 // Shift Left by variable
8939 instruct salI_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
8940 %{
8941 match(Set dst (StoreI dst (LShiftI (LoadI dst) shift)));
8942 effect(KILL cr);
8944 format %{ "sall $dst, $shift" %}
8945 opcode(0xD3, 0x4); /* D3 /4 */
8946 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
8947 ins_pipe(ialu_mem_reg);
8948 %}
// Arithmetic shift right by one
// In-place register form; clobbers the condition codes.
instruct sarI_rReg_1(rRegI dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (RShiftI dst shift));
  effect(KILL cr);

  format %{ "sarl $dst, $shift" %}
  opcode(0xD1, 0x7); /* D1 /7 */
  ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

// Arithmetic shift right by one
// Read-modify-write memory form.
instruct sarI_mem_1(memory dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (RShiftI (LoadI dst) shift)));
  effect(KILL cr);

  format %{ "sarl $dst, $shift" %}
  opcode(0xD1, 0x7); /* D1 /7 */
  ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_imm);
%}
// Arithmetic Shift Right by 8-bit immediate
// In-place register form; clobbers the condition codes.
instruct sarI_rReg_imm(rRegI dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (RShiftI dst shift));
  effect(KILL cr);

  format %{ "sarl $dst, $shift" %}
  opcode(0xC1, 0x7); /* C1 /7 ib */
  ins_encode(reg_opc_imm(dst, shift));
  // Register-form instruction: use the register ALU pipe class, matching
  // salI_rReg_imm and shrI_rReg_imm (was ialu_mem_imm, a copy-paste slip
  // from the memory form; encoding is unchanged).
  ins_pipe(ialu_reg);
%}
// Arithmetic Shift Right by 8-bit immediate
// Read-modify-write memory form.
instruct sarI_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (RShiftI (LoadI dst) shift)));
  effect(KILL cr);

  format %{ "sarl $dst, $shift" %}
  opcode(0xC1, 0x7); /* C1 /7 ib */
  ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst), Con8or32(shift));
  ins_pipe(ialu_mem_imm);
%}

// Arithmetic Shift Right by variable
// Count comes implicitly in CL (shift operand constrained to rcx_RegI).
instruct sarI_rReg_CL(rRegI dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (RShiftI dst shift));
  effect(KILL cr);

  format %{ "sarl $dst, $shift" %}
  opcode(0xD3, 0x7); /* D3 /7 */
  ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg_reg);
%}

// Arithmetic Shift Right by variable
instruct sarI_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (RShiftI (LoadI dst) shift)));
  effect(KILL cr);

  format %{ "sarl $dst, $shift" %}
  opcode(0xD3, 0x7); /* D3 /7 */
  ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_reg);
%}
// Logical shift right by one
// URShiftI maps to SHR (unsigned/logical); all forms clobber EFLAGS.
instruct shrI_rReg_1(rRegI dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (URShiftI dst shift));
  effect(KILL cr);

  format %{ "shrl $dst, $shift" %}
  opcode(0xD1, 0x5); /* D1 /5 */
  ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

// Logical shift right by one
// Read-modify-write memory form.
instruct shrI_mem_1(memory dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (URShiftI (LoadI dst) shift)));
  effect(KILL cr);

  format %{ "shrl $dst, $shift" %}
  opcode(0xD1, 0x5); /* D1 /5 */
  ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_imm);
%}

// Logical Shift Right by 8-bit immediate
instruct shrI_rReg_imm(rRegI dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (URShiftI dst shift));
  effect(KILL cr);

  format %{ "shrl $dst, $shift" %}
  opcode(0xC1, 0x5); /* C1 /5 ib */
  ins_encode(reg_opc_imm(dst, shift));
  ins_pipe(ialu_reg);
%}

// Logical Shift Right by 8-bit immediate
instruct shrI_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (URShiftI (LoadI dst) shift)));
  effect(KILL cr);

  format %{ "shrl $dst, $shift" %}
  opcode(0xC1, 0x5); /* C1 /5 ib */
  ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst), Con8or32(shift));
  ins_pipe(ialu_mem_imm);
%}

// Logical Shift Right by variable
// Count comes implicitly in CL.
instruct shrI_rReg_CL(rRegI dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (URShiftI dst shift));
  effect(KILL cr);

  format %{ "shrl $dst, $shift" %}
  opcode(0xD3, 0x5); /* D3 /5 */
  ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg_reg);
%}

// Logical Shift Right by variable
instruct shrI_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (URShiftI (LoadI dst) shift)));
  effect(KILL cr);

  format %{ "shrl $dst, $shift" %}
  opcode(0xD3, 0x5); /* D3 /5 */
  ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_reg);
%}
// Long Shift Instructions
//
// 64-bit counterparts of the integer shifts: same opcodes, but the REX.W
// prefix is emitted via the *_wide encoding helpers.

// Shift Left by one
instruct salL_rReg_1(rRegL dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (LShiftL dst shift));
  effect(KILL cr);

  format %{ "salq $dst, $shift" %}
  opcode(0xD1, 0x4); /* D1 /4 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

// Shift Left by one
// Read-modify-write memory form.
instruct salL_mem_1(memory dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (LShiftL (LoadL dst) shift)));
  effect(KILL cr);

  format %{ "salq $dst, $shift" %}
  opcode(0xD1, 0x4); /* D1 /4 */
  ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_imm);
%}

// Shift Left by 8-bit immediate
instruct salL_rReg_imm(rRegL dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (LShiftL dst shift));
  effect(KILL cr);

  format %{ "salq $dst, $shift" %}
  opcode(0xC1, 0x4); /* C1 /4 ib */
  ins_encode(reg_opc_imm_wide(dst, shift));
  ins_pipe(ialu_reg);
%}

// Shift Left by 8-bit immediate
instruct salL_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (LShiftL (LoadL dst) shift)));
  effect(KILL cr);

  format %{ "salq $dst, $shift" %}
  opcode(0xC1, 0x4); /* C1 /4 ib */
  ins_encode(REX_mem_wide(dst), OpcP,
             RM_opc_mem(secondary, dst), Con8or32(shift));
  ins_pipe(ialu_mem_imm);
%}

// Shift Left by variable
// Count comes implicitly in CL.
instruct salL_rReg_CL(rRegL dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (LShiftL dst shift));
  effect(KILL cr);

  format %{ "salq $dst, $shift" %}
  opcode(0xD3, 0x4); /* D3 /4 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg_reg);
%}

// Shift Left by variable
instruct salL_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (LShiftL (LoadL dst) shift)));
  effect(KILL cr);

  format %{ "salq $dst, $shift" %}
  opcode(0xD3, 0x4); /* D3 /4 */
  ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_reg);
%}
// Arithmetic shift right by one
// 64-bit in-place register form; clobbers the condition codes.
instruct sarL_rReg_1(rRegL dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (RShiftL dst shift));
  effect(KILL cr);

  format %{ "sarq $dst, $shift" %}
  opcode(0xD1, 0x7); /* D1 /7 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

// Arithmetic shift right by one
// Read-modify-write memory form.
instruct sarL_mem_1(memory dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (RShiftL (LoadL dst) shift)));
  effect(KILL cr);

  format %{ "sarq $dst, $shift" %}
  opcode(0xD1, 0x7); /* D1 /7 */
  ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_imm);
%}
// Arithmetic Shift Right by 8-bit immediate
// 64-bit in-place register form; clobbers the condition codes.
instruct sarL_rReg_imm(rRegL dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (RShiftL dst shift));
  effect(KILL cr);

  format %{ "sarq $dst, $shift" %}
  opcode(0xC1, 0x7); /* C1 /7 ib */
  ins_encode(reg_opc_imm_wide(dst, shift));
  // Register-form instruction: use the register ALU pipe class, matching
  // salL_rReg_imm and shrL_rReg_imm (was ialu_mem_imm, a copy-paste slip
  // from the memory form; encoding is unchanged).
  ins_pipe(ialu_reg);
%}
// Arithmetic Shift Right by 8-bit immediate
// Read-modify-write memory form.
instruct sarL_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (RShiftL (LoadL dst) shift)));
  effect(KILL cr);

  format %{ "sarq $dst, $shift" %}
  opcode(0xC1, 0x7); /* C1 /7 ib */
  ins_encode(REX_mem_wide(dst), OpcP,
             RM_opc_mem(secondary, dst), Con8or32(shift));
  ins_pipe(ialu_mem_imm);
%}

// Arithmetic Shift Right by variable
// Count comes implicitly in CL.
instruct sarL_rReg_CL(rRegL dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (RShiftL dst shift));
  effect(KILL cr);

  format %{ "sarq $dst, $shift" %}
  opcode(0xD3, 0x7); /* D3 /7 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg_reg);
%}

// Arithmetic Shift Right by variable
instruct sarL_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (RShiftL (LoadL dst) shift)));
  effect(KILL cr);

  format %{ "sarq $dst, $shift" %}
  opcode(0xD3, 0x7); /* D3 /7 */
  ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_reg);
%}
// Logical shift right by one
// 64-bit URShiftL maps to SHR with REX.W; clobbers EFLAGS.
instruct shrL_rReg_1(rRegL dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (URShiftL dst shift));
  effect(KILL cr);

  format %{ "shrq $dst, $shift" %}
  opcode(0xD1, 0x5); /* D1 /5 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

// Logical shift right by one
// Read-modify-write memory form.
instruct shrL_mem_1(memory dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (URShiftL (LoadL dst) shift)));
  effect(KILL cr);

  format %{ "shrq $dst, $shift" %}
  opcode(0xD1, 0x5); /* D1 /5 */
  ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_imm);
%}

// Logical Shift Right by 8-bit immediate
instruct shrL_rReg_imm(rRegL dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (URShiftL dst shift));
  effect(KILL cr);

  format %{ "shrq $dst, $shift" %}
  opcode(0xC1, 0x5); /* C1 /5 ib */
  ins_encode(reg_opc_imm_wide(dst, shift));
  ins_pipe(ialu_reg);
%}

// Logical Shift Right by 8-bit immediate
instruct shrL_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (URShiftL (LoadL dst) shift)));
  effect(KILL cr);

  format %{ "shrq $dst, $shift" %}
  opcode(0xC1, 0x5); /* C1 /5 ib */
  ins_encode(REX_mem_wide(dst), OpcP,
             RM_opc_mem(secondary, dst), Con8or32(shift));
  ins_pipe(ialu_mem_imm);
%}

// Logical Shift Right by variable
// Count comes implicitly in CL.
instruct shrL_rReg_CL(rRegL dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (URShiftL dst shift));
  effect(KILL cr);

  format %{ "shrq $dst, $shift" %}
  opcode(0xD3, 0x5); /* D3 /5 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg_reg);
%}

// Logical Shift Right by variable
instruct shrL_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (URShiftL (LoadL dst) shift)));
  effect(KILL cr);

  format %{ "shrq $dst, $shift" %}
  opcode(0xD3, 0x5); /* D3 /5 */
  ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_reg);
%}
// Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
// This idiom is used by the compiler for the i2b bytecode.
// Matched as a single sign-extending byte move (movsbl), replacing the
// two-shift sequence with one instruction; no flags effect is declared.
instruct i2b(rRegI dst, rRegI src, immI_24 twentyfour)
%{
  match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));

  format %{ "movsbl $dst, $src\t# i2b" %}
  opcode(0x0F, 0xBE);
  ins_encode(REX_reg_breg(dst, src), OpcP, OpcS, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
// This idiom is used by the compiler for the i2s bytecode.
// Matched as a single sign-extending word move (movswl).
instruct i2s(rRegI dst, rRegI src, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI src sixteen) sixteen));

  format %{ "movswl $dst, $src\t# i2s" %}
  opcode(0x0F, 0xBF);
  ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}
// ROL/ROR instructions
//
// Rotates have no ideal-graph node; the matcher recognizes the canonical
// (shift-left OR'ed with shift-right) idiom and expands it into the
// match-less "expand" instructs below, which emit the real ROL/ROR.

// ROL expand
// No match rule: only reachable via expand %{ %} from the rules below.
instruct rolI_rReg_imm1(rRegI dst, rFlagsReg cr) %{
  effect(KILL cr, USE_DEF dst);

  format %{ "roll $dst" %}
  opcode(0xD1, 0x0); /* Opcode D1 /0 */
  ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

instruct rolI_rReg_imm8(rRegI dst, immI8 shift, rFlagsReg cr) %{
  effect(USE_DEF dst, USE shift, KILL cr);

  format %{ "roll $dst, $shift" %}
  opcode(0xC1, 0x0); /* Opcode C1 /0 ib */
  ins_encode(reg_opc_imm(dst, shift));
  ins_pipe(ialu_reg);
%}

// Variable rotate: count lives in RCX, so dst must avoid RCX (no_rcx_RegI).
instruct rolI_rReg_CL(no_rcx_RegI dst, rcx_RegI shift, rFlagsReg cr)
%{
  effect(USE_DEF dst, USE shift, KILL cr);

  format %{ "roll $dst, $shift" %}
  opcode(0xD3, 0x0); /* Opcode D3 /0 */
  ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg_reg);
%}
// end of ROL expand

// Rotate Left by one
// Matches (dst << 1) | (dst >>> -1); -1 masks to 31 for 32-bit shifts.
instruct rolI_rReg_i1(rRegI dst, immI1 lshift, immI_M1 rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));

  expand %{
    rolI_rReg_imm1(dst, cr);
  %}
%}

// Rotate Left by 8-bit immediate
// Predicate requires the two shift counts to sum to 32 (mod 32), i.e. a
// true rotate; only then is the OR-of-shifts equivalent to ROL.
instruct rolI_rReg_i8(rRegI dst, immI8 lshift, immI8 rshift, rFlagsReg cr)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));

  expand %{
    rolI_rReg_imm8(dst, lshift, cr);
  %}
%}

// Rotate Left by variable
// Matches (dst << s) | (dst >>> (0 - s)).
instruct rolI_rReg_Var_C0(no_rcx_RegI dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI dst shift) (URShiftI dst (SubI zero shift))));

  expand %{
    rolI_rReg_CL(dst, shift, cr);
  %}
%}

// Rotate Left by variable
// Matches (dst << s) | (dst >>> (32 - s)).
instruct rolI_rReg_Var_C32(no_rcx_RegI dst, rcx_RegI shift, immI_32 c32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI dst shift) (URShiftI dst (SubI c32 shift))));

  expand %{
    rolI_rReg_CL(dst, shift, cr);
  %}
%}

// ROR expand
// No match rule: only reachable via expand from the rules below.
instruct rorI_rReg_imm1(rRegI dst, rFlagsReg cr)
%{
  effect(USE_DEF dst, KILL cr);

  format %{ "rorl $dst" %}
  opcode(0xD1, 0x1); /* D1 /1 */
  ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

instruct rorI_rReg_imm8(rRegI dst, immI8 shift, rFlagsReg cr)
%{
  effect(USE_DEF dst, USE shift, KILL cr);

  format %{ "rorl $dst, $shift" %}
  opcode(0xC1, 0x1); /* C1 /1 ib */
  ins_encode(reg_opc_imm(dst, shift));
  ins_pipe(ialu_reg);
%}

instruct rorI_rReg_CL(no_rcx_RegI dst, rcx_RegI shift, rFlagsReg cr)
%{
  effect(USE_DEF dst, USE shift, KILL cr);

  format %{ "rorl $dst, $shift" %}
  opcode(0xD3, 0x1); /* D3 /1 */
  ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg_reg);
%}
// end of ROR expand

// Rotate Right by one
instruct rorI_rReg_i1(rRegI dst, immI1 rshift, immI_M1 lshift, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));

  expand %{
    rorI_rReg_imm1(dst, cr);
  %}
%}

// Rotate Right by 8-bit immediate
// Counts must sum to 32 (mod 32) for the idiom to be a rotate.
instruct rorI_rReg_i8(rRegI dst, immI8 rshift, immI8 lshift, rFlagsReg cr)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));

  expand %{
    rorI_rReg_imm8(dst, rshift, cr);
  %}
%}

// Rotate Right by variable
instruct rorI_rReg_Var_C0(no_rcx_RegI dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI dst shift) (LShiftI dst (SubI zero shift))));

  expand %{
    rorI_rReg_CL(dst, shift, cr);
  %}
%}

// Rotate Right by variable
instruct rorI_rReg_Var_C32(no_rcx_RegI dst, rcx_RegI shift, immI_32 c32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI dst shift) (LShiftI dst (SubI c32 shift))));

  expand %{
    rorI_rReg_CL(dst, shift, cr);
  %}
%}
// for long rotate
// 64-bit counterparts of the integer rotates above: same expand pattern,
// REX.W-wide encodings, and the shift-count sum is checked mod 64 (0x3f).

// ROL expand
// No match rule: only reachable via expand from the rules below.
instruct rolL_rReg_imm1(rRegL dst, rFlagsReg cr) %{
  effect(USE_DEF dst, KILL cr);

  format %{ "rolq $dst" %}
  opcode(0xD1, 0x0); /* Opcode D1 /0 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

instruct rolL_rReg_imm8(rRegL dst, immI8 shift, rFlagsReg cr) %{
  effect(USE_DEF dst, USE shift, KILL cr);

  format %{ "rolq $dst, $shift" %}
  opcode(0xC1, 0x0); /* Opcode C1 /0 ib */
  ins_encode(reg_opc_imm_wide(dst, shift));
  ins_pipe(ialu_reg);
%}

// Variable rotate: count lives in RCX, so dst must avoid RCX (no_rcx_RegL).
instruct rolL_rReg_CL(no_rcx_RegL dst, rcx_RegI shift, rFlagsReg cr)
%{
  effect(USE_DEF dst, USE shift, KILL cr);

  format %{ "rolq $dst, $shift" %}
  opcode(0xD3, 0x0); /* Opcode D3 /0 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg_reg);
%}
// end of ROL expand

// Rotate Left by one
instruct rolL_rReg_i1(rRegL dst, immI1 lshift, immI_M1 rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  expand %{
    rolL_rReg_imm1(dst, cr);
  %}
%}

// Rotate Left by 8-bit immediate
// Counts must sum to 64 (mod 64) for the idiom to be a rotate.
instruct rolL_rReg_i8(rRegL dst, immI8 lshift, immI8 rshift, rFlagsReg cr)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  expand %{
    rolL_rReg_imm8(dst, lshift, cr);
  %}
%}

// Rotate Left by variable
instruct rolL_rReg_Var_C0(no_rcx_RegL dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL dst shift) (URShiftL dst (SubI zero shift))));

  expand %{
    rolL_rReg_CL(dst, shift, cr);
  %}
%}

// Rotate Left by variable
instruct rolL_rReg_Var_C64(no_rcx_RegL dst, rcx_RegI shift, immI_64 c64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL dst shift) (URShiftL dst (SubI c64 shift))));

  expand %{
    rolL_rReg_CL(dst, shift, cr);
  %}
%}

// ROR expand
// No match rule: only reachable via expand from the rules below.
instruct rorL_rReg_imm1(rRegL dst, rFlagsReg cr)
%{
  effect(USE_DEF dst, KILL cr);

  format %{ "rorq $dst" %}
  opcode(0xD1, 0x1); /* D1 /1 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

instruct rorL_rReg_imm8(rRegL dst, immI8 shift, rFlagsReg cr)
%{
  effect(USE_DEF dst, USE shift, KILL cr);

  format %{ "rorq $dst, $shift" %}
  opcode(0xC1, 0x1); /* C1 /1 ib */
  ins_encode(reg_opc_imm_wide(dst, shift));
  ins_pipe(ialu_reg);
%}

instruct rorL_rReg_CL(no_rcx_RegL dst, rcx_RegI shift, rFlagsReg cr)
%{
  effect(USE_DEF dst, USE shift, KILL cr);

  format %{ "rorq $dst, $shift" %}
  opcode(0xD3, 0x1); /* D3 /1 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg_reg);
%}
// end of ROR expand

// Rotate Right by one
instruct rorL_rReg_i1(rRegL dst, immI1 rshift, immI_M1 lshift, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  expand %{
    rorL_rReg_imm1(dst, cr);
  %}
%}

// Rotate Right by 8-bit immediate
// Counts must sum to 64 (mod 64) for the idiom to be a rotate.
instruct rorL_rReg_i8(rRegL dst, immI8 rshift, immI8 lshift, rFlagsReg cr)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  expand %{
    rorL_rReg_imm8(dst, rshift, cr);
  %}
%}

// Rotate Right by variable
instruct rorL_rReg_Var_C0(no_rcx_RegL dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL dst shift) (LShiftL dst (SubI zero shift))));

  expand %{
    rorL_rReg_CL(dst, shift, cr);
  %}
%}

// Rotate Right by variable
instruct rorL_rReg_Var_C64(no_rcx_RegL dst, rcx_RegI shift, immI_64 c64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL dst shift) (LShiftL dst (SubI c64 shift))));

  expand %{
    rorL_rReg_CL(dst, shift, cr);
  %}
%}
// Logical Instructions

// Integer Logical Instructions

// And Instructions
// And Register with Register
instruct andI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
%{
  match(Set dst (AndI dst src));
  effect(KILL cr);  // AND updates EFLAGS

  format %{ "andl $dst, $src\t# int" %}
  opcode(0x23);
  ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// And Register with Immediate 255
// Strength-reduced to a zero-extending byte move (movzbl); note no flags
// effect is declared, unlike the general AND forms.
instruct andI_rReg_imm255(rRegI dst, immI_255 src)
%{
  match(Set dst (AndI dst src));

  format %{ "movzbl $dst, $dst\t# int & 0xFF" %}
  opcode(0x0F, 0xB6);
  ins_encode(REX_reg_breg(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
  ins_pipe(ialu_reg);
%}

// And Register with Immediate 255 and promote to long
// movzbl zero-extends to 64 bits, so the AND and the ConvI2L fuse into one
// instruction.
instruct andI2L_rReg_imm255(rRegL dst, rRegI src, immI_255 mask)
%{
  match(Set dst (ConvI2L (AndI src mask)));

  format %{ "movzbl $dst, $src\t# int & 0xFF -> long" %}
  opcode(0x0F, 0xB6);
  ins_encode(REX_reg_breg(dst, src), OpcP, OpcS, reg_reg(dst, src));
  ins_pipe(ialu_reg);
%}

// And Register with Immediate 65535
// Strength-reduced to a zero-extending word move (movzwl).
instruct andI_rReg_imm65535(rRegI dst, immI_65535 src)
%{
  match(Set dst (AndI dst src));

  format %{ "movzwl $dst, $dst\t# int & 0xFFFF" %}
  opcode(0x0F, 0xB7);
  ins_encode(REX_reg_reg(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
  ins_pipe(ialu_reg);
%}

// And Register with Immediate 65535 and promote to long
instruct andI2L_rReg_imm65535(rRegL dst, rRegI src, immI_65535 mask)
%{
  match(Set dst (ConvI2L (AndI src mask)));

  format %{ "movzwl $dst, $src\t# int & 0xFFFF -> long" %}
  opcode(0x0F, 0xB7);
  ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
  ins_pipe(ialu_reg);
%}

// And Register with Immediate
instruct andI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
%{
  match(Set dst (AndI dst src));
  effect(KILL cr);

  format %{ "andl $dst, $src\t# int" %}
  opcode(0x81, 0x04); /* Opcode 81 /4 */
  ins_encode(OpcSErm(dst, src), Con8or32(src));
  ins_pipe(ialu_reg);
%}

// And Register with Memory
instruct andI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
%{
  match(Set dst (AndI dst (LoadI src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "andl $dst, $src\t# int" %}
  opcode(0x23);
  ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// And Memory with Register
// Read-modify-write memory form; note the reg/mem operand order swaps
// and the opcode changes from 0x23 to 0x21 (AND r/m, r).
instruct andI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (AndI (LoadI dst) src)));
  effect(KILL cr);

  ins_cost(150);
  format %{ "andl $dst, $src\t# int" %}
  opcode(0x21); /* Opcode 21 /r */
  ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

// And Memory with Immediate
instruct andI_mem_imm(memory dst, immI src, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (AndI (LoadI dst) src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "andl $dst, $src\t# int" %}
  opcode(0x81, 0x4); /* Opcode 81 /4 id */
  ins_encode(REX_mem(dst), OpcSE(src),
             RM_opc_mem(secondary, dst), Con8or32(src));
  ins_pipe(ialu_mem_imm);
%}
// Or Instructions
// Or Register with Register
instruct orI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
%{
  match(Set dst (OrI dst src));
  effect(KILL cr);  // OR updates EFLAGS

  format %{ "orl $dst, $src\t# int" %}
  opcode(0x0B);
  ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// Or Register with Immediate
instruct orI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
%{
  match(Set dst (OrI dst src));
  effect(KILL cr);

  format %{ "orl $dst, $src\t# int" %}
  opcode(0x81, 0x01); /* Opcode 81 /1 id */
  ins_encode(OpcSErm(dst, src), Con8or32(src));
  ins_pipe(ialu_reg);
%}

// Or Register with Memory
instruct orI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
%{
  match(Set dst (OrI dst (LoadI src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "orl $dst, $src\t# int" %}
  opcode(0x0B);
  ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// Or Memory with Register
// Read-modify-write memory form (OR r/m, r encoding).
instruct orI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (OrI (LoadI dst) src)));
  effect(KILL cr);

  ins_cost(150);
  format %{ "orl $dst, $src\t# int" %}
  opcode(0x09); /* Opcode 09 /r */
  ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

// Or Memory with Immediate
instruct orI_mem_imm(memory dst, immI src, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (OrI (LoadI dst) src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "orl $dst, $src\t# int" %}
  opcode(0x81, 0x1); /* Opcode 81 /1 id */
  ins_encode(REX_mem(dst), OpcSE(src),
             RM_opc_mem(secondary, dst), Con8or32(src));
  ins_pipe(ialu_mem_imm);
%}
// Xor Instructions
// Xor Register with Register
instruct xorI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
%{
  match(Set dst (XorI dst src));
  effect(KILL cr);  // XOR updates EFLAGS

  format %{ "xorl $dst, $src\t# int" %}
  opcode(0x33);
  ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// Xor Register with Immediate -1
// XOR with all-ones is bitwise complement: emitted as NOT, which (unlike
// XOR) is declared with no flags effect here.
instruct xorI_rReg_im1(rRegI dst, immI_M1 imm) %{
  match(Set dst (XorI dst imm));

  format %{ "not $dst" %}
  ins_encode %{
    __ notl($dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Xor Register with Immediate
instruct xorI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
%{
  match(Set dst (XorI dst src));
  effect(KILL cr);

  format %{ "xorl $dst, $src\t# int" %}
  opcode(0x81, 0x06); /* Opcode 81 /6 id */
  ins_encode(OpcSErm(dst, src), Con8or32(src));
  ins_pipe(ialu_reg);
%}

// Xor Register with Memory
instruct xorI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
%{
  match(Set dst (XorI dst (LoadI src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "xorl $dst, $src\t# int" %}
  opcode(0x33);
  ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// Xor Memory with Register
// Read-modify-write memory form (XOR r/m, r encoding).
instruct xorI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (XorI (LoadI dst) src)));
  effect(KILL cr);

  ins_cost(150);
  format %{ "xorl $dst, $src\t# int" %}
  opcode(0x31); /* Opcode 31 /r */
  ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

// Xor Memory with Immediate
instruct xorI_mem_imm(memory dst, immI src, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (XorI (LoadI dst) src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "xorl $dst, $src\t# int" %}
  opcode(0x81, 0x6); /* Opcode 81 /6 id */
  ins_encode(REX_mem(dst), OpcSE(src),
             RM_opc_mem(secondary, dst), Con8or32(src));
  ins_pipe(ialu_mem_imm);
%}
// Long Logical Instructions
// 64-bit counterparts of the integer logical forms; encodings use the
// *_wide helpers to emit the REX.W prefix, and immediates are limited to
// sign-extended 32 bits (immL32).

// And Instructions
// And Register with Register
instruct andL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
%{
  match(Set dst (AndL dst src));
  effect(KILL cr);

  format %{ "andq $dst, $src\t# long" %}
  opcode(0x23);
  ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// And Register with Immediate 255
// Strength-reduced to a zero-extending byte move; no flags effect declared.
instruct andL_rReg_imm255(rRegL dst, immL_255 src)
%{
  match(Set dst (AndL dst src));

  format %{ "movzbq $dst, $dst\t# long & 0xFF" %}
  opcode(0x0F, 0xB6);
  ins_encode(REX_reg_reg_wide(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
  ins_pipe(ialu_reg);
%}

// And Register with Immediate 65535
instruct andL_rReg_imm65535(rRegL dst, immL_65535 src)
%{
  match(Set dst (AndL dst src));

  format %{ "movzwq $dst, $dst\t# long & 0xFFFF" %}
  opcode(0x0F, 0xB7);
  ins_encode(REX_reg_reg_wide(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
  ins_pipe(ialu_reg);
%}

// And Register with Immediate
instruct andL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
%{
  match(Set dst (AndL dst src));
  effect(KILL cr);

  format %{ "andq $dst, $src\t# long" %}
  opcode(0x81, 0x04); /* Opcode 81 /4 */
  ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
  ins_pipe(ialu_reg);
%}

// And Register with Memory
instruct andL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
%{
  match(Set dst (AndL dst (LoadL src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "andq $dst, $src\t# long" %}
  opcode(0x23);
  ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// And Memory with Register
// Read-modify-write memory form (AND r/m, r encoding).
instruct andL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (AndL (LoadL dst) src)));
  effect(KILL cr);

  ins_cost(150);
  format %{ "andq $dst, $src\t# long" %}
  opcode(0x21); /* Opcode 21 /r */
  ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

// And Memory with Immediate
instruct andL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (AndL (LoadL dst) src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "andq $dst, $src\t# long" %}
  opcode(0x81, 0x4); /* Opcode 81 /4 id */
  ins_encode(REX_mem_wide(dst), OpcSE(src),
             RM_opc_mem(secondary, dst), Con8or32(src));
  ins_pipe(ialu_mem_imm);
%}
// Or Instructions
// Or Register with Register
instruct orL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
%{
  match(Set dst (OrL dst src));
  effect(KILL cr);

  format %{ "orq $dst, $src\t# long" %}
  opcode(0x0B);
  ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// Use any_RegP to match R15 (TLS register) without spilling.
instruct orL_rReg_castP2X(rRegL dst, any_RegP src, rFlagsReg cr) %{
  match(Set dst (OrL dst (CastP2X src)));
  effect(KILL cr);

  format %{ "orq $dst, $src\t# long" %}
  opcode(0x0B);
  ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// Or Register with Immediate
instruct orL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
%{
  match(Set dst (OrL dst src));
  effect(KILL cr);

  format %{ "orq $dst, $src\t# long" %}
  opcode(0x81, 0x01); /* Opcode 81 /1 id */
  ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
  ins_pipe(ialu_reg);
%}

// Or Register with Memory
instruct orL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
%{
  match(Set dst (OrL dst (LoadL src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "orq $dst, $src\t# long" %}
  opcode(0x0B);
  ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// Or Memory with Register
// Read-modify-write memory form (OR r/m, r encoding).
instruct orL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (OrL (LoadL dst) src)));
  effect(KILL cr);

  ins_cost(150);
  format %{ "orq $dst, $src\t# long" %}
  opcode(0x09); /* Opcode 09 /r */
  ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

// Or Memory with Immediate
instruct orL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (OrL (LoadL dst) src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "orq $dst, $src\t# long" %}
  opcode(0x81, 0x1); /* Opcode 81 /1 id */
  ins_encode(REX_mem_wide(dst), OpcSE(src),
             RM_opc_mem(secondary, dst), Con8or32(src));
  ins_pipe(ialu_mem_imm);
%}
// Xor Instructions
// All long XOR forms: xorq sets the condition codes, hence KILL cr
// (except the NOT special case below, which uses flag-neutral notq).

// Xor Register with Register
instruct xorL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
%{
  match(Set dst (XorL dst src));
  effect(KILL cr);

  format %{ "xorq $dst, $src\t# long" %}
  opcode(0x33);
  ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// Xor Register with Immediate -1
// XOR with all-ones is bitwise NOT; notq does not touch the flags,
// so no KILL cr effect is needed here.
instruct xorL_rReg_im1(rRegL dst, immL_M1 imm) %{
  match(Set dst (XorL dst imm));

  format %{ "notq $dst" %}
  ins_encode %{
    __ notq($dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Xor Register with Immediate (sign-extended 32-bit)
instruct xorL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
%{
  match(Set dst (XorL dst src));
  effect(KILL cr);

  format %{ "xorq $dst, $src\t# long" %}
  opcode(0x81, 0x06); /* Opcode 81 /6 id */
  ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
  ins_pipe(ialu_reg);
%}

// Xor Register with Memory
instruct xorL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
%{
  match(Set dst (XorL dst (LoadL src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "xorq $dst, $src\t# long" %}
  opcode(0x33);
  ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// Xor Memory with Register (read-modify-write)
instruct xorL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (XorL (LoadL dst) src)));
  effect(KILL cr);

  ins_cost(150);
  format %{ "xorq $dst, $src\t# long" %}
  opcode(0x31); /* Opcode 31 /r */
  ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

// Xor Memory with Immediate (read-modify-write)
instruct xorL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (XorL (LoadL dst) src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "xorq $dst, $src\t# long" %}
  opcode(0x81, 0x6); /* Opcode 81 /6 id */
  ins_encode(REX_mem_wide(dst), OpcSE(src),
             RM_opc_mem(secondary, dst), Con8or32(src));
  ins_pipe(ialu_mem_imm);
%}
// Convert Int to Boolean
// Produces 1 if src is non-zero, 0 otherwise: test / setnz / zero-extend.
instruct convI2B(rRegI dst, rRegI src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);  // testl sets the flags consumed by setnz

  format %{ "testl $src, $src\t# ci2b\n\t"
            "setnz $dst\n\t"
            "movzbl $dst, $dst" %}
  ins_encode(REX_reg_reg(src, src), opc_reg_reg(0x85, src, src), // testl
             setNZ_reg(dst),
             REX_reg_breg(dst, dst), // movzbl
             Opcode(0x0F), Opcode(0xB6), reg_reg(dst, dst));
  ins_pipe(pipe_slow); // XXX
%}

// Convert Pointer to Boolean
// Same pattern as convI2B but with a 64-bit (REX.W) test for the pointer.
instruct convP2B(rRegI dst, rRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{ "testq $src, $src\t# cp2b\n\t"
            "setnz $dst\n\t"
            "movzbl $dst, $dst" %}
  ins_encode(REX_reg_reg_wide(src, src), opc_reg_reg(0x85, src, src), // testq
             setNZ_reg(dst),
             REX_reg_breg(dst, dst), // movzbl
             Opcode(0x0F), Opcode(0xB6), reg_reg(dst, dst));
  ins_pipe(pipe_slow); // XXX
%}
// CmpLTMask: dst = (p < q) ? -1 : 0 (an all-ones/all-zeros mask).

// General form: compare, set-below-as-byte, zero-extend, then negate
// (0 -> 0, 1 -> -1) to produce the mask.
instruct cmpLTMask(rRegI dst, rRegI p, rRegI q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(400); // XXX
  format %{ "cmpl $p, $q\t# cmpLTMask\n\t"
            "setlt $dst\n\t"
            "movzbl $dst, $dst\n\t"
            "negl $dst" %}
  ins_encode(REX_reg_reg(p, q), opc_reg_reg(0x3B, p, q), // cmpl
             setLT_reg(dst),
             REX_reg_breg(dst, dst), // movzbl
             Opcode(0x0F), Opcode(0xB6), reg_reg(dst, dst),
             neg_reg(dst));
  ins_pipe(pipe_slow);
%}

// Special case q == 0: (dst < 0) mask is just an arithmetic shift of the
// sign bit across the whole register.
instruct cmpLTMask0(rRegI dst, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask dst zero));
  effect(KILL cr);

  ins_cost(100); // XXX
  format %{ "sarl $dst, #31\t# cmpLTMask0" %}
  opcode(0xC1, 0x7); /* C1 /7 ib */
  ins_encode(reg_opc_imm(dst, 0x1F));  // shift count 31
  ins_pipe(ialu_reg);
%}

// Fused form: p = (p < q) ? p - q + y : p - q, using sub/sbb so the mask
// comes from the carry flag without a separate compare.
instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y,
                        rRegI tmp,
                        rFlagsReg cr)
%{
  match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)));
  effect(TEMP tmp, KILL cr);

  ins_cost(400); // XXX
  format %{ "subl $p, $q\t# cadd_cmpLTMask1\n\t"
            "sbbl $tmp, $tmp\n\t"
            "andl $tmp, $y\n\t"
            "addl $p, $tmp" %}
  ins_encode(enc_cmpLTP(p, q, y, tmp));
  ins_pipe(pipe_cmplt);
%}

/* If I enable this, I encourage spilling in the inner loop of compress.
instruct cadd_cmpLTMask_mem( rRegI p, rRegI q, memory y, rRegI tmp, rFlagsReg cr )
%{
  match(Set p (AddI (AndI (CmpLTMask p q) (LoadI y)) (SubI p q)));
  effect( TEMP tmp, KILL cr );
  ins_cost(400);

  format %{ "SUB $p,$q\n\t"
            "SBB RCX,RCX\n\t"
            "AND RCX,$y\n\t"
            "ADD $p,RCX" %}
  ins_encode( enc_cmpLTP_mem(p,q,y,tmp) );
%}
*/
//---------- FP Instructions------------------------------------------------

// Float compare setting the flags register.
// ucomiss leaves PF set on an unordered (NaN) compare; the cmpfp_fixup
// stub rewrites the saved flags so NaN behaves as "less than" (CF set),
// giving Java semantics for the ordinary flags register.
instruct cmpF_cc_reg(rFlagsRegU cr, regF src1, regF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(145);
  format %{ "ucomiss $src1, $src2\n\t"
            "jnp,s exit\n\t"
            "pushfq\t# saw NaN, set CF\n\t"
            "andq [rsp], #0xffffff2b\n\t"
            "popfq\n"
            "exit: nop\t# avoid branch to branch" %}
  opcode(0x0F, 0x2E);
  ins_encode(REX_reg_reg(src1, src2), OpcP, OpcS, reg_reg(src1, src2),
             cmpfp_fixup);
  ins_pipe(pipe_slow);
%}
// Float compare into the carry-flag-only flags register (rFlagsRegUCF):
// the consumer only reads CF/ZF, so the NaN fixup of cmpF_cc_reg is not
// needed and a bare ucomiss suffices.
// Cost is 100 (not 145) to match the other *_CF variants so the matcher
// prefers this cheaper form when the fixup-free flags register is legal.
instruct cmpF_cc_reg_CF(rFlagsRegUCF cr, regF src1, regF src2) %{
  match(Set cr (CmpF src1 src2));

  ins_cost(100);
  format %{ "ucomiss $src1, $src2" %}
  ins_encode %{
    __ ucomiss($src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
// Float compare with a memory operand; NaN fixup as in cmpF_cc_reg.
instruct cmpF_cc_mem(rFlagsRegU cr, regF src1, memory src2)
%{
  match(Set cr (CmpF src1 (LoadF src2)));

  ins_cost(145);
  format %{ "ucomiss $src1, $src2\n\t"
            "jnp,s exit\n\t"
            "pushfq\t# saw NaN, set CF\n\t"
            "andq [rsp], #0xffffff2b\n\t"
            "popfq\n"
            "exit: nop\t# avoid branch to branch" %}
  opcode(0x0F, 0x2E);
  ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, reg_mem(src1, src2),
             cmpfp_fixup);
  ins_pipe(pipe_slow);
%}

// Memory form for carry-flag-only consumers: no NaN fixup needed.
instruct cmpF_cc_memCF(rFlagsRegUCF cr, regF src1, memory src2) %{
  match(Set cr (CmpF src1 (LoadF src2)));

  ins_cost(100);
  format %{ "ucomiss $src1, $src2" %}
  opcode(0x0F, 0x2E);
  ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, reg_mem(src1, src2));
  ins_pipe(pipe_slow);
%}

// Float compare against a constant loaded from the constant table.
instruct cmpF_cc_imm(rFlagsRegU cr, regF src1, immF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(145);
  format %{ "ucomiss $src1, $src2\n\t"
            "jnp,s exit\n\t"
            "pushfq\t# saw NaN, set CF\n\t"
            "andq [rsp], #0xffffff2b\n\t"
            "popfq\n"
            "exit: nop\t# avoid branch to branch" %}
  opcode(0x0F, 0x2E);
  ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2),
             cmpfp_fixup);
  ins_pipe(pipe_slow);
%}

// Constant form for carry-flag-only consumers: no NaN fixup needed.
instruct cmpF_cc_immCF(rFlagsRegUCF cr, regF src1, immF src2) %{
  match(Set cr (CmpF src1 src2));

  ins_cost(100);
  format %{ "ucomiss $src1, $src2" %}
  opcode(0x0F, 0x2E);
  ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2));
  ins_pipe(pipe_slow);
%}
// Double compare setting the flags register.
// ucomisd (0x66 prefix + 0F 2E) with the same NaN fixup as the float form:
// on an unordered compare the saved flags are rewritten so NaN reads as
// "less than" for Java semantics.
instruct cmpD_cc_reg(rFlagsRegU cr, regD src1, regD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(145);
  format %{ "ucomisd $src1, $src2\n\t"
            "jnp,s exit\n\t"
            "pushfq\t# saw NaN, set CF\n\t"
            "andq [rsp], #0xffffff2b\n\t"
            "popfq\n"
            "exit: nop\t# avoid branch to branch" %}
  opcode(0x66, 0x0F, 0x2E);
  // 0x66 operand-size prefix must precede the REX byte
  ins_encode(OpcP, REX_reg_reg(src1, src2), OpcS, OpcT, reg_reg(src1, src2),
             cmpfp_fixup);
  ins_pipe(pipe_slow);
%}
// Double compare into the carry-flag-only flags register (rFlagsRegUCF):
// the consumer only reads CF/ZF, so no NaN fixup is required.
// The format string previously read "ucomisd $src1, $src2 test" -- the
// trailing " test" was a debugging leftover that polluted disassembly
// output (-XX:+PrintOptoAssembly); it is removed here to match the
// corresponding cmpF_cc_reg_CF format.
instruct cmpD_cc_reg_CF(rFlagsRegUCF cr, regD src1, regD src2) %{
  match(Set cr (CmpD src1 src2));

  ins_cost(100);
  format %{ "ucomisd $src1, $src2" %}
  ins_encode %{
    __ ucomisd($src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}
// Double compare with a memory operand; NaN fixup as in cmpD_cc_reg.
instruct cmpD_cc_mem(rFlagsRegU cr, regD src1, memory src2)
%{
  match(Set cr (CmpD src1 (LoadD src2)));

  ins_cost(145);
  format %{ "ucomisd $src1, $src2\n\t"
            "jnp,s exit\n\t"
            "pushfq\t# saw NaN, set CF\n\t"
            "andq [rsp], #0xffffff2b\n\t"
            "popfq\n"
            "exit: nop\t# avoid branch to branch" %}
  opcode(0x66, 0x0F, 0x2E);
  ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, reg_mem(src1, src2),
             cmpfp_fixup);
  ins_pipe(pipe_slow);
%}

// Memory form for carry-flag-only consumers: no NaN fixup needed.
instruct cmpD_cc_memCF(rFlagsRegUCF cr, regD src1, memory src2) %{
  match(Set cr (CmpD src1 (LoadD src2)));

  ins_cost(100);
  format %{ "ucomisd $src1, $src2" %}
  opcode(0x66, 0x0F, 0x2E);
  ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, reg_mem(src1, src2));
  ins_pipe(pipe_slow);
%}

// Double compare against a constant loaded from the constant table.
instruct cmpD_cc_imm(rFlagsRegU cr, regD src1, immD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(145);
  format %{ "ucomisd $src1, [$src2]\n\t"
            "jnp,s exit\n\t"
            "pushfq\t# saw NaN, set CF\n\t"
            "andq [rsp], #0xffffff2b\n\t"
            "popfq\n"
            "exit: nop\t# avoid branch to branch" %}
  opcode(0x66, 0x0F, 0x2E);
  ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2),
             cmpfp_fixup);
  ins_pipe(pipe_slow);
%}

// Constant form for carry-flag-only consumers: no NaN fixup needed.
instruct cmpD_cc_immCF(rFlagsRegUCF cr, regD src1, immD src2) %{
  match(Set cr (CmpD src1 src2));

  ins_cost(100);
  format %{ "ucomisd $src1, [$src2]" %}
  opcode(0x66, 0x0F, 0x2E);
  ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2));
  ins_pipe(pipe_slow);
%}
// Three-way FP compares (CmpF3/CmpD3): produce -1, 0, or 1 in an int
// register. The cmpfp3 stub maps the ucomiss/ucomisd flag results to the
// integer, treating unordered (NaN, PF set) as -1 per Java semantics.

// Compare into -1,0,1
instruct cmpF_reg(rRegI dst, regF src1, regF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(275);
  format %{ "ucomiss $src1, $src2\n\t"
            "movl $dst, #-1\n\t"
            "jp,s done\n\t"
            "jb,s done\n\t"
            "setne $dst\n\t"
            "movzbl $dst, $dst\n"
            "done:" %}

  opcode(0x0F, 0x2E);
  ins_encode(REX_reg_reg(src1, src2), OpcP, OpcS, reg_reg(src1, src2),
             cmpfp3(dst));
  ins_pipe(pipe_slow);
%}

// Compare into -1,0,1
instruct cmpF_mem(rRegI dst, regF src1, memory src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 (LoadF src2)));
  effect(KILL cr);

  ins_cost(275);
  format %{ "ucomiss $src1, $src2\n\t"
            "movl $dst, #-1\n\t"
            "jp,s done\n\t"
            "jb,s done\n\t"
            "setne $dst\n\t"
            "movzbl $dst, $dst\n"
            "done:" %}

  opcode(0x0F, 0x2E);
  ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, reg_mem(src1, src2),
             cmpfp3(dst));
  ins_pipe(pipe_slow);
%}

// Compare into -1,0,1 (constant-table operand)
instruct cmpF_imm(rRegI dst, regF src1, immF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(275);
  format %{ "ucomiss $src1, [$src2]\n\t"
            "movl $dst, #-1\n\t"
            "jp,s done\n\t"
            "jb,s done\n\t"
            "setne $dst\n\t"
            "movzbl $dst, $dst\n"
            "done:" %}

  opcode(0x0F, 0x2E);
  ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2),
             cmpfp3(dst));
  ins_pipe(pipe_slow);
%}

// Compare into -1,0,1
instruct cmpD_reg(rRegI dst, regD src1, regD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(275);
  format %{ "ucomisd $src1, $src2\n\t"
            "movl $dst, #-1\n\t"
            "jp,s done\n\t"
            "jb,s done\n\t"
            "setne $dst\n\t"
            "movzbl $dst, $dst\n"
            "done:" %}

  opcode(0x66, 0x0F, 0x2E);
  ins_encode(OpcP, REX_reg_reg(src1, src2), OpcS, OpcT, reg_reg(src1, src2),
             cmpfp3(dst));
  ins_pipe(pipe_slow);
%}

// Compare into -1,0,1
instruct cmpD_mem(rRegI dst, regD src1, memory src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 (LoadD src2)));
  effect(KILL cr);

  ins_cost(275);
  format %{ "ucomisd $src1, $src2\n\t"
            "movl $dst, #-1\n\t"
            "jp,s done\n\t"
            "jb,s done\n\t"
            "setne $dst\n\t"
            "movzbl $dst, $dst\n"
            "done:" %}

  opcode(0x66, 0x0F, 0x2E);
  ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, reg_mem(src1, src2),
             cmpfp3(dst));
  ins_pipe(pipe_slow);
%}

// Compare into -1,0,1 (constant-table operand)
instruct cmpD_imm(rRegI dst, regD src1, immD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(275);
  format %{ "ucomisd $src1, [$src2]\n\t"
            "movl $dst, #-1\n\t"
            "jp,s done\n\t"
            "jb,s done\n\t"
            "setne $dst\n\t"
            "movzbl $dst, $dst\n"
            "done:" %}

  opcode(0x66, 0x0F, 0x2E);
  ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2),
             cmpfp3(dst));
  ins_pipe(pipe_slow);
%}
// Scalar SSE add/subtract for float (ss, F3 prefix) and double (sd, F2
// prefix), each in reg/reg, reg/mem, and reg/constant-table forms.

// addss: float add, register operand
instruct addF_reg(regF dst, regF src)
%{
  match(Set dst (AddF dst src));

  format %{ "addss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x58);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

// addss: float add, memory operand
instruct addF_mem(regF dst, memory src)
%{
  match(Set dst (AddF dst (LoadF src)));

  format %{ "addss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x58);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// addss: float add, constant loaded from the constant table
instruct addF_imm(regF dst, immF src)
%{
  match(Set dst (AddF dst src));

  format %{ "addss $dst, [$src]" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x58);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
  ins_pipe(pipe_slow);
%}

// addsd: double add, register operand
instruct addD_reg(regD dst, regD src)
%{
  match(Set dst (AddD dst src));

  format %{ "addsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x58);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

// addsd: double add, memory operand
instruct addD_mem(regD dst, memory src)
%{
  match(Set dst (AddD dst (LoadD src)));

  format %{ "addsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x58);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// addsd: double add, constant loaded from the constant table
instruct addD_imm(regD dst, immD src)
%{
  match(Set dst (AddD dst src));

  format %{ "addsd $dst, [$src]" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x58);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
  ins_pipe(pipe_slow);
%}

// subss: float subtract, register operand
instruct subF_reg(regF dst, regF src)
%{
  match(Set dst (SubF dst src));

  format %{ "subss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x5C);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

// subss: float subtract, memory operand
instruct subF_mem(regF dst, memory src)
%{
  match(Set dst (SubF dst (LoadF src)));

  format %{ "subss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x5C);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// subss: float subtract, constant loaded from the constant table
instruct subF_imm(regF dst, immF src)
%{
  match(Set dst (SubF dst src));

  format %{ "subss $dst, [$src]" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x5C);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
  ins_pipe(pipe_slow);
%}

// subsd: double subtract, register operand
instruct subD_reg(regD dst, regD src)
%{
  match(Set dst (SubD dst src));

  format %{ "subsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x5C);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

// subsd: double subtract, memory operand
instruct subD_mem(regD dst, memory src)
%{
  match(Set dst (SubD dst (LoadD src)));

  format %{ "subsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x5C);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// subsd: double subtract, constant loaded from the constant table
instruct subD_imm(regD dst, immD src)
%{
  match(Set dst (SubD dst src));

  format %{ "subsd $dst, [$src]" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x5C);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
  ins_pipe(pipe_slow);
%}
// Scalar SSE multiply/divide for float (ss) and double (sd), each in
// reg/reg, reg/mem, and reg/constant-table forms.

// mulss: float multiply, register operand
instruct mulF_reg(regF dst, regF src)
%{
  match(Set dst (MulF dst src));

  format %{ "mulss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x59);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

// mulss: float multiply, memory operand
instruct mulF_mem(regF dst, memory src)
%{
  match(Set dst (MulF dst (LoadF src)));

  format %{ "mulss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x59);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// mulss: float multiply, constant loaded from the constant table
instruct mulF_imm(regF dst, immF src)
%{
  match(Set dst (MulF dst src));

  format %{ "mulss $dst, [$src]" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x59);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
  ins_pipe(pipe_slow);
%}

// mulsd: double multiply, register operand
instruct mulD_reg(regD dst, regD src)
%{
  match(Set dst (MulD dst src));

  format %{ "mulsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x59);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

// mulsd: double multiply, memory operand
instruct mulD_mem(regD dst, memory src)
%{
  match(Set dst (MulD dst (LoadD src)));

  format %{ "mulsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x59);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// mulsd: double multiply, constant loaded from the constant table
instruct mulD_imm(regD dst, immD src)
%{
  match(Set dst (MulD dst src));

  format %{ "mulsd $dst, [$src]" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x59);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
  ins_pipe(pipe_slow);
%}

// divss: float divide, register operand
instruct divF_reg(regF dst, regF src)
%{
  match(Set dst (DivF dst src));

  format %{ "divss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x5E);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

// divss: float divide, memory operand
instruct divF_mem(regF dst, memory src)
%{
  match(Set dst (DivF dst (LoadF src)));

  format %{ "divss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x5E);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// divss: float divide, constant loaded from the constant table
instruct divF_imm(regF dst, immF src)
%{
  match(Set dst (DivF dst src));

  format %{ "divss $dst, [$src]" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x5E);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
  ins_pipe(pipe_slow);
%}

// divsd: double divide, register operand
instruct divD_reg(regD dst, regD src)
%{
  match(Set dst (DivD dst src));

  format %{ "divsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x5E);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

// divsd: double divide, memory operand
instruct divD_mem(regD dst, memory src)
%{
  match(Set dst (DivD dst (LoadD src)));

  format %{ "divsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x5E);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// divsd: double divide, constant loaded from the constant table
instruct divD_imm(regD dst, immD src)
%{
  match(Set dst (DivD dst src));

  format %{ "divsd $dst, [$src]" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x5E);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
  ins_pipe(pipe_slow);
%}
// Scalar SSE square root.
// The float forms match the (ConvD2F (SqrtD (ConvF2D x))) tree that the
// parser builds, because there is no SqrtF ideal node -- a single sqrtss
// gives the same result as widen/sqrt/narrow.

// sqrtss: float sqrt, register operand
instruct sqrtF_reg(regF dst, regF src)
%{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  format %{ "sqrtss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x51);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

// sqrtss: float sqrt, memory operand
instruct sqrtF_mem(regF dst, memory src)
%{
  match(Set dst (ConvD2F (SqrtD (ConvF2D (LoadF src)))));

  format %{ "sqrtss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x51);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// sqrtss: float sqrt, constant loaded from the constant table
instruct sqrtF_imm(regF dst, immF src)
%{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  format %{ "sqrtss $dst, [$src]" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x51);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
  ins_pipe(pipe_slow);
%}

// sqrtsd: double sqrt, register operand
instruct sqrtD_reg(regD dst, regD src)
%{
  match(Set dst (SqrtD src));

  format %{ "sqrtsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x51);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

// sqrtsd: double sqrt, memory operand
instruct sqrtD_mem(regD dst, memory src)
%{
  match(Set dst (SqrtD (LoadD src)));

  format %{ "sqrtsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x51);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// sqrtsd: double sqrt, constant loaded from the constant table
instruct sqrtD_imm(regD dst, immD src)
%{
  match(Set dst (SqrtD src));

  format %{ "sqrtsd $dst, [$src]" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x51);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
  ins_pipe(pipe_slow);
%}
// FP absolute value and negation by bit-masking the IEEE-754 sign bit
// with packed logical ops (andps/andpd, xorps/xorpd) against constants
// supplied by the abs*/neg*_encoding helpers.

instruct absF_reg(regF dst)
%{
  match(Set dst (AbsF dst));

  format %{ "andps $dst, [0x7fffffff]\t# abs float by sign masking" %}
  ins_encode(absF_encoding(dst));
  ins_pipe(pipe_slow);
%}

instruct absD_reg(regD dst)
%{
  match(Set dst (AbsD dst));

  format %{ "andpd $dst, [0x7fffffffffffffff]\t"
            "# abs double by sign masking" %}
  ins_encode(absD_encoding(dst));
  ins_pipe(pipe_slow);
%}

instruct negF_reg(regF dst)
%{
  match(Set dst (NegF dst));

  format %{ "xorps $dst, [0x80000000]\t# neg float by sign flipping" %}
  ins_encode(negF_encoding(dst));
  ins_pipe(pipe_slow);
%}

instruct negD_reg(regD dst)
%{
  match(Set dst (NegD dst));

  format %{ "xorpd $dst, [0x8000000000000000]\t"
            "# neg double by sign flipping" %}
  ins_encode(negD_encoding(dst));
  ins_pipe(pipe_slow);
%}
// -----------Trig and Trancendental Instructions------------------------------
// These fall back to x87: Push_SrcXD moves the XMM operand onto the FPU
// stack, the x87 opcode bytes compute the result, and Push_ResultXD moves
// it back into the XMM register.

instruct cosD_reg(regD dst) %{
  match(Set dst (CosD dst));

  format %{ "dcos $dst\n\t" %}
  opcode(0xD9, 0xFF);  // fcos
  ins_encode( Push_SrcXD(dst), OpcP, OpcS, Push_ResultXD(dst) );
  ins_pipe( pipe_slow );
%}

instruct sinD_reg(regD dst) %{
  match(Set dst (SinD dst));

  format %{ "dsin $dst\n\t" %}
  opcode(0xD9, 0xFE);  // fsin
  ins_encode( Push_SrcXD(dst), OpcP, OpcS, Push_ResultXD(dst) );
  ins_pipe( pipe_slow );
%}

instruct tanD_reg(regD dst) %{
  match(Set dst (TanD dst));

  format %{ "dtan $dst\n\t" %}
  // fptan pushes an extra 1.0 on the stack; the fstp st pops it off.
  ins_encode( Push_SrcXD(dst),
              Opcode(0xD9), Opcode(0xF2),   //fptan
              Opcode(0xDD), Opcode(0xD8),   //fstp st
              Push_ResultXD(dst) );
  ins_pipe( pipe_slow );
%}

instruct log10D_reg(regD dst) %{
  // The source and result Double operands in XMM registers
  match(Set dst (Log10D dst));
  // fldlg2   ; push log_10(2) on the FPU stack; full 80-bit number
  // fyl2x    ; compute log_10(2) * log_2(x)
  format %{ "fldlg2\t\t\t#Log10\n\t"
            "fyl2x\t\t\t# Q=Log10*Log_2(x)\n\t"
         %}
  // fldlg2 is emitted first so it ends up in ST(1) under the source value,
  // as fyl2x computes ST(1) * log2(ST(0)).
  ins_encode(Opcode(0xD9), Opcode(0xEC),   // fldlg2
             Push_SrcXD(dst),
             Opcode(0xD9), Opcode(0xF1),   // fyl2x
             Push_ResultXD(dst));

  ins_pipe( pipe_slow );
%}

instruct logD_reg(regD dst) %{
  // The source and result Double operands in XMM registers
  match(Set dst (LogD dst));
  // fldln2   ; push log_e(2) on the FPU stack; full 80-bit number
  // fyl2x    ; compute log_e(2) * log_2(x)
  format %{ "fldln2\t\t\t#Log_e\n\t"
            "fyl2x\t\t\t# Q=Log_e*Log_2(x)\n\t"
         %}
  ins_encode( Opcode(0xD9), Opcode(0xED),   // fldln2
              Push_SrcXD(dst),
              Opcode(0xD9), Opcode(0xF1),   // fyl2x
              Push_ResultXD(dst));
  ins_pipe( pipe_slow );
%}
//----------Arithmetic Conversion Instructions---------------------------------

// RoundFloat/RoundDouble are no-ops on amd64: SSE arithmetic is already
// performed at the correct precision (unlike x87 80-bit intermediates),
// so these emit no code and cost nothing.
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}

instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
// Float <-> double conversions via cvtss2sd / cvtsd2ss.

instruct convF2D_reg_reg(regD dst, regF src)
%{
  match(Set dst (ConvF2D src));

  format %{ "cvtss2sd $dst, $src" %}
  opcode(0xF3, 0x0F, 0x5A);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

instruct convF2D_reg_mem(regD dst, memory src)
%{
  match(Set dst (ConvF2D (LoadF src)));

  format %{ "cvtss2sd $dst, $src" %}
  opcode(0xF3, 0x0F, 0x5A);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

instruct convD2F_reg_reg(regF dst, regD src)
%{
  match(Set dst (ConvD2F src));

  format %{ "cvtsd2ss $dst, $src" %}
  opcode(0xF2, 0x0F, 0x5A);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

instruct convD2F_reg_mem(regF dst, memory src)
%{
  match(Set dst (ConvD2F (LoadD src)));

  format %{ "cvtsd2ss $dst, $src" %}
  opcode(0xF2, 0x0F, 0x5A);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow); // XXX
%}
// XXX do mem variants
// FP -> integer conversions. cvttss2si/cvttsd2si return the sentinel
// 0x80000000 (or 0x8000000000000000 for longs) on overflow or NaN; the
// result is compared against that sentinel, and if it matches, a fixup
// stub is called to produce the Java-specified saturated/zero result.
instruct convF2I_reg_reg(rRegI dst, regF src, rFlagsReg cr)
%{
  match(Set dst (ConvF2I src));
  effect(KILL cr);  // the sentinel compare clobbers the flags

  format %{ "cvttss2sil $dst, $src\t# f2i\n\t"
            "cmpl $dst, #0x80000000\n\t"
            "jne,s done\n\t"
            "subq rsp, #8\n\t"
            "movss [rsp], $src\n\t"
            "call f2i_fixup\n\t"
            "popq $dst\n"
            "done: "%}
  opcode(0xF3, 0x0F, 0x2C);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src),
             f2i_fixup(dst, src));
  ins_pipe(pipe_slow);
%}

instruct convF2L_reg_reg(rRegL dst, regF src, rFlagsReg cr)
%{
  match(Set dst (ConvF2L src));
  effect(KILL cr);

  format %{ "cvttss2siq $dst, $src\t# f2l\n\t"
            "cmpq $dst, [0x8000000000000000]\n\t"
            "jne,s done\n\t"
            "subq rsp, #8\n\t"
            "movss [rsp], $src\n\t"
            "call f2l_fixup\n\t"
            "popq $dst\n"
            "done: "%}
  opcode(0xF3, 0x0F, 0x2C);
  ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src),
             f2l_fixup(dst, src));
  ins_pipe(pipe_slow);
%}

instruct convD2I_reg_reg(rRegI dst, regD src, rFlagsReg cr)
%{
  match(Set dst (ConvD2I src));
  effect(KILL cr);

  format %{ "cvttsd2sil $dst, $src\t# d2i\n\t"
            "cmpl $dst, #0x80000000\n\t"
            "jne,s done\n\t"
            "subq rsp, #8\n\t"
            "movsd [rsp], $src\n\t"
            "call d2i_fixup\n\t"
            "popq $dst\n"
            "done: "%}
  opcode(0xF2, 0x0F, 0x2C);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src),
             d2i_fixup(dst, src));
  ins_pipe(pipe_slow);
%}

instruct convD2L_reg_reg(rRegL dst, regD src, rFlagsReg cr)
%{
  match(Set dst (ConvD2L src));
  effect(KILL cr);

  format %{ "cvttsd2siq $dst, $src\t# d2l\n\t"
            "cmpq $dst, [0x8000000000000000]\n\t"
            "jne,s done\n\t"
            "subq rsp, #8\n\t"
            "movsd [rsp], $src\n\t"
            "call d2l_fixup\n\t"
            "popq $dst\n"
            "done: "%}
  opcode(0xF2, 0x0F, 0x2C);
  ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src),
             d2l_fixup(dst, src));
  ins_pipe(pipe_slow);
%}
// Int -> FP conversions. Two strategies, selected by the UseXmmI2F /
// UseXmmI2D flags: the cvtsi2ss/cvtsi2sd forms, or (when the flag is set)
// a movd into the XMM register followed by a packed cvtdq2ps/cvtdq2pd,
// which avoids a partial XMM register stall on some processors.

instruct convI2F_reg_reg(regF dst, rRegI src)
%{
  predicate(!UseXmmI2F);
  match(Set dst (ConvI2F src));

  format %{ "cvtsi2ssl $dst, $src\t# i2f" %}
  opcode(0xF3, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

instruct convI2F_reg_mem(regF dst, memory src)
%{
  match(Set dst (ConvI2F (LoadI src)));

  format %{ "cvtsi2ssl $dst, $src\t# i2f" %}
  opcode(0xF3, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

instruct convI2D_reg_reg(regD dst, rRegI src)
%{
  predicate(!UseXmmI2D);
  match(Set dst (ConvI2D src));

  format %{ "cvtsi2sdl $dst, $src\t# i2d" %}
  opcode(0xF2, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

instruct convI2D_reg_mem(regD dst, memory src)
%{
  match(Set dst (ConvI2D (LoadI src)));

  format %{ "cvtsi2sdl $dst, $src\t# i2d" %}
  opcode(0xF2, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

// XMM variant: movd then packed convert (only lane 0 is meaningful).
instruct convXI2F_reg(regF dst, rRegI src)
%{
  predicate(UseXmmI2F);
  match(Set dst (ConvI2F src));

  format %{ "movdl $dst, $src\n\t"
            "cvtdq2psl $dst, $dst\t# i2f" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ cvtdq2ps($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe(pipe_slow); // XXX
%}

// XMM variant: movd then packed convert (only lane 0 is meaningful).
instruct convXI2D_reg(regD dst, rRegI src)
%{
  predicate(UseXmmI2D);
  match(Set dst (ConvI2D src));

  format %{ "movdl $dst, $src\n\t"
            "cvtdq2pdl $dst, $dst\t# i2d" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ cvtdq2pd($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe(pipe_slow); // XXX
%}
// Long -> FP conversions: cvtsi2ss/cvtsi2sd with REX.W for the 64-bit
// integer source operand.

instruct convL2F_reg_reg(regF dst, rRegL src)
%{
  match(Set dst (ConvL2F src));

  format %{ "cvtsi2ssq $dst, $src\t# l2f" %}
  opcode(0xF3, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

instruct convL2F_reg_mem(regF dst, memory src)
%{
  match(Set dst (ConvL2F (LoadL src)));

  format %{ "cvtsi2ssq $dst, $src\t# l2f" %}
  opcode(0xF3, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_mem_wide(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

instruct convL2D_reg_reg(regD dst, rRegL src)
%{
  match(Set dst (ConvL2D src));

  format %{ "cvtsi2sdq $dst, $src\t# l2d" %}
  opcode(0xF2, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

instruct convL2D_reg_mem(regD dst, memory src)
%{
  match(Set dst (ConvL2D (LoadL src)));

  format %{ "cvtsi2sdq $dst, $src\t# l2d" %}
  opcode(0xF2, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_mem_wide(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow); // XXX
%}
// Sign-extend convert int to long (movslq needs REX.W).
instruct convI2L_reg_reg(rRegL dst, rRegI src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(125);
  format %{ "movslq $dst, $src\t# i2l" %}
  opcode(0x63); // needs REX.W
  ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst,src));
  ins_pipe(ialu_reg_reg);
%}

// instruct convI2L_reg_reg_foo(rRegL dst, rRegI src)
// %{
//   match(Set dst (ConvI2L src));
// //   predicate(_kids[0]->_leaf->as_Type()->type()->is_int()->_lo >= 0 &&
// //             _kids[0]->_leaf->as_Type()->type()->is_int()->_hi >= 0);
//   predicate(((const TypeNode*) n)->type()->is_long()->_hi ==
//             (unsigned int) ((const TypeNode*) n)->type()->is_long()->_hi &&
//             ((const TypeNode*) n)->type()->is_long()->_lo ==
//             (unsigned int) ((const TypeNode*) n)->type()->is_long()->_lo);
//
//   format %{ "movl $dst, $src\t# unsigned i2l" %}
//   ins_encode(enc_copy(dst, src));
// //   opcode(0x63); // needs REX.W
// //   ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst,src));
//   ins_pipe(ialu_reg_reg);
// %}

// Zero-extend convert int to long.  A 32-bit movl implicitly clears the
// upper 32 bits on x86-64, so a plain copy implements (ConvI2L src) & 0xFFFFFFFF.
instruct convI2L_reg_reg_zex(rRegL dst, rRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend\n\t" %}
  ins_encode(enc_copy(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// Zero-extend convert int to long, memory operand (movl load zero-extends).
instruct convI2L_reg_mem_zex(rRegL dst, memory src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI src)) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend\n\t" %}
  opcode(0x8B);
  ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// Mask a long to its low 32 bits: a 32-bit register copy zero-extends.
// enc_copy_always emits the move even when src == dst (the upper bits
// must still be cleared in that case).
instruct zerox_long_reg_reg(rRegL dst, rRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl $dst, $src\t# zero-extend long" %}
  ins_encode(enc_copy_always(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// Convert long to int: take the low 32 bits via a 32-bit copy.
instruct convL2I_reg_reg(rRegI dst, rRegL src)
%{
  match(Set dst (ConvL2I src));

  format %{ "movl $dst, $src\t# l2i" %}
  ins_encode(enc_copy_always(dst, src));
  ins_pipe(ialu_reg_reg);
%}
// ----- Raw-bit moves between stack slots and registers -----
// MoveF2I/MoveI2F/MoveD2L/MoveL2D reinterpret bits; no conversion occurs.

// Reinterpret a float stack slot as an int: plain 32-bit load.
instruct MoveF2I_stack_reg(rRegI dst, stackSlotF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);

  ins_cost(125);
  format %{ "movl $dst, $src\t# MoveF2I_stack_reg" %}
  opcode(0x8B);
  ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// Reinterpret an int stack slot as a float: movss load into XMM.
instruct MoveI2F_stack_reg(regF dst, stackSlotI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);

  ins_cost(125);
  format %{ "movss $dst, $src\t# MoveI2F_stack_reg" %}
  opcode(0xF3, 0x0F, 0x10);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// Reinterpret a double stack slot as a long: 64-bit load (REX.W).
instruct MoveD2L_stack_reg(rRegL dst, stackSlotD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);

  ins_cost(125);
  format %{ "movq $dst, $src\t# MoveD2L_stack_reg" %}
  opcode(0x8B);
  ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// Reinterpret a long stack slot as a double via movlpd, which writes only
// the low 64 bits of the XMM register (partial update); used when
// UseXmmLoadAndClearUpper is off.
instruct MoveL2D_stack_reg_partial(regD dst, stackSlotL src) %{
  predicate(!UseXmmLoadAndClearUpper);
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);

  ins_cost(125);
  format %{ "movlpd $dst, $src\t# MoveL2D_stack_reg" %}
  opcode(0x66, 0x0F, 0x12);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// Same reinterpretation via movsd, which also clears the upper half and
// avoids a partial-register stall; used when UseXmmLoadAndClearUpper is on.
instruct MoveL2D_stack_reg(regD dst, stackSlotL src) %{
  predicate(UseXmmLoadAndClearUpper);
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);

  ins_cost(125);
  format %{ "movsd $dst, $src\t# MoveL2D_stack_reg" %}
  opcode(0xF2, 0x0F, 0x10);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}
// Reinterpret a float register as an int stack slot: movss store.
instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);

  ins_cost(95); // XXX
  format %{ "movss $dst, $src\t# MoveF2I_reg_stack" %}
  opcode(0xF3, 0x0F, 0x11);
  ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
  ins_pipe(pipe_slow);
%}

// Reinterpret an int register as a float stack slot: 32-bit store.
instruct MoveI2F_reg_stack(stackSlotF dst, rRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);

  ins_cost(100);
  format %{ "movl $dst, $src\t# MoveI2F_reg_stack" %}
  opcode(0x89);
  ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe( ialu_mem_reg );
%}
// Reinterpret a double register as a long stack slot: movsd store.
// Fix: the format string previously said "MoveL2D_reg_stack" (copy-paste
// from the opposite direction), mislabeling this instruction in
// disassembly/PrintOptoAssembly output.
instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);

  ins_cost(95); // XXX
  format %{ "movsd $dst, $src\t# MoveD2L_reg_stack" %}
  opcode(0xF2, 0x0F, 0x11);
  ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
  ins_pipe(pipe_slow);
%}
// Reinterpret a long register as a double stack slot: 64-bit store (REX.W).
instruct MoveL2D_reg_stack(stackSlotD dst, rRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);

  ins_cost(100);
  format %{ "movq $dst, $src\t# MoveL2D_reg_stack" %}
  opcode(0x89);
  ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

// Direct XMM -> GPR bit move for float-to-int (movd, 32-bit).
instruct MoveF2I_reg_reg(rRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "movd $dst,$src\t# MoveF2I" %}
  ins_encode %{ __ movdl($dst$$Register, $src$$XMMRegister); %}
  ins_pipe( pipe_slow );
%}

// Direct XMM -> GPR bit move for double-to-long (movd/movq, 64-bit).
instruct MoveD2L_reg_reg(rRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "movd $dst,$src\t# MoveD2L" %}
  ins_encode %{ __ movdq($dst$$Register, $src$$XMMRegister); %}
  ins_pipe( pipe_slow );
%}

// The next instructions have long latency and use Int unit. Set high cost.
// GPR -> XMM bit move for int-to-float (movd, 32-bit).
instruct MoveI2F_reg_reg(regF dst, rRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(300);
  format %{ "movd $dst,$src\t# MoveI2F" %}
  ins_encode %{ __ movdl($dst$$XMMRegister, $src$$Register); %}
  ins_pipe( pipe_slow );
%}

// GPR -> XMM bit move for long-to-double (movd/movq, 64-bit).
instruct MoveL2D_reg_reg(regD dst, rRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(300);
  format %{ "movd $dst,$src\t# MoveL2D" %}
  ins_encode %{ __ movdq($dst$$XMMRegister, $src$$Register); %}
  ins_pipe( pipe_slow );
%}
// Replicate scalar to packed byte (1 byte) values in xmm
instruct Repl8B_reg(regD dst, regD src) %{
  match(Set dst (Replicate8B src));
  format %{ "MOVDQA $dst,$src\n\t"
            "PUNPCKLBW $dst,$dst\n\t"
            "PSHUFLW $dst,$dst,0x00\t! replicate8B" %}
  ins_encode( pshufd_8x8(dst, src));
  ins_pipe( pipe_slow );
%}

// Replicate scalar to packed byte (1 byte) values in xmm
instruct Repl8B_rRegI(regD dst, rRegI src) %{
  match(Set dst (Replicate8B src));
  format %{ "MOVD $dst,$src\n\t"
            "PUNPCKLBW $dst,$dst\n\t"
            "PSHUFLW $dst,$dst,0x00\t! replicate8B" %}
  ins_encode( mov_i2x(dst, src), pshufd_8x8(dst, dst));
  ins_pipe( pipe_slow );
%}

// Replicate scalar zero to packed byte (1 byte) values in xmm
instruct Repl8B_immI0(regD dst, immI0 zero) %{
  match(Set dst (Replicate8B zero));
  format %{ "PXOR $dst,$dst\t! replicate8B" %}
  ins_encode( pxor(dst, dst));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar to packed short (2 byte) values in xmm
instruct Repl4S_reg(regD dst, regD src) %{
  match(Set dst (Replicate4S src));
  format %{ "PSHUFLW $dst,$src,0x00\t! replicate4S" %}
  ins_encode( pshufd_4x16(dst, src));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar to packed short (2 byte) values in xmm
instruct Repl4S_rRegI(regD dst, rRegI src) %{
  match(Set dst (Replicate4S src));
  format %{ "MOVD $dst,$src\n\t"
            "PSHUFLW $dst,$dst,0x00\t! replicate4S" %}
  ins_encode( mov_i2x(dst, src), pshufd_4x16(dst, dst));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar zero to packed short (2 byte) values in xmm
instruct Repl4S_immI0(regD dst, immI0 zero) %{
  match(Set dst (Replicate4S zero));
  format %{ "PXOR $dst,$dst\t! replicate4S" %}
  ins_encode( pxor(dst, dst));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar to packed char (2 byte) values in xmm
instruct Repl4C_reg(regD dst, regD src) %{
  match(Set dst (Replicate4C src));
  format %{ "PSHUFLW $dst,$src,0x00\t! replicate4C" %}
  ins_encode( pshufd_4x16(dst, src));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar to packed char (2 byte) values in xmm
instruct Repl4C_rRegI(regD dst, rRegI src) %{
  match(Set dst (Replicate4C src));
  format %{ "MOVD $dst,$src\n\t"
            "PSHUFLW $dst,$dst,0x00\t! replicate4C" %}
  ins_encode( mov_i2x(dst, src), pshufd_4x16(dst, dst));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar zero to packed char (2 byte) values in xmm
instruct Repl4C_immI0(regD dst, immI0 zero) %{
  match(Set dst (Replicate4C zero));
  format %{ "PXOR $dst,$dst\t! replicate4C" %}
  ins_encode( pxor(dst, dst));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar to packed integer (4 byte) values in xmm
instruct Repl2I_reg(regD dst, regD src) %{
  match(Set dst (Replicate2I src));
  format %{ "PSHUFD $dst,$src,0x00\t! replicate2I" %}
  ins_encode( pshufd(dst, src, 0x00));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar to packed integer (4 byte) values in xmm
instruct Repl2I_rRegI(regD dst, rRegI src) %{
  match(Set dst (Replicate2I src));
  format %{ "MOVD $dst,$src\n\t"
            "PSHUFD $dst,$dst,0x00\t! replicate2I" %}
  ins_encode( mov_i2x(dst, src), pshufd(dst, dst, 0x00));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar zero to packed integer (4 byte) values in xmm
instruct Repl2I_immI0(regD dst, immI0 zero) %{
  match(Set dst (Replicate2I zero));
  format %{ "PXOR $dst,$dst\t! replicate2I" %}
  ins_encode( pxor(dst, dst));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar to packed single precision floating point values in xmm
instruct Repl2F_reg(regD dst, regD src) %{
  match(Set dst (Replicate2F src));
  format %{ "PSHUFD $dst,$src,0xe0\t! replicate2F" %}
  ins_encode( pshufd(dst, src, 0xe0));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar to packed single precision floating point values in xmm
instruct Repl2F_regF(regD dst, regF src) %{
  match(Set dst (Replicate2F src));
  format %{ "PSHUFD $dst,$src,0xe0\t! replicate2F" %}
  ins_encode( pshufd(dst, src, 0xe0));
  ins_pipe( fpu_reg_reg );
%}

// Replicate scalar zero to packed single precision floating point values in xmm
instruct Repl2F_immF0(regD dst, immF0 zero) %{
  match(Set dst (Replicate2F zero));
  format %{ "PXOR $dst,$dst\t! replicate2F" %}
  ins_encode( pxor(dst, dst));
  ins_pipe( fpu_reg_reg );
%}
// =======================================================================
// fast clearing of an array
// Zero $cnt quadwords starting at $base with "rep stosq"; rax is forced
// to zero first.  rcx (count) and rdi (base) are consumed by the string
// instruction, hence USE_KILL.
instruct rep_stos(rcx_RegL cnt, rdi_RegP base, rax_RegI zero, Universe dummy,
                  rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);

  format %{ "xorl rax, rax\t# ClearArray:\n\t"
            "rep stosq\t# Store rax to *rdi++ while rcx--" %}
  ins_encode(opc_reg_reg(0x33, RAX, RAX), // xorl %eax, %eax
             Opcode(0xF3), Opcode(0x48), Opcode(0xAB)); // rep REX_W stos
  ins_pipe(pipe_slow);
%}

// String comparison: fixed register calling convention (rdi/rsi in,
// result in rcx); the heavy lifting is in the enc_String_Compare
// encoding class defined elsewhere in this file.
instruct string_compare(rdi_RegP str1, rsi_RegP str2, rax_RegI tmp1,
                        rbx_RegI tmp2, rcx_RegI result, rFlagsReg cr)
%{
  match(Set result (StrComp str1 str2));
  effect(USE_KILL str1, USE_KILL str2, KILL tmp1, KILL tmp2, KILL cr);
  //ins_cost(300);

  format %{ "String Compare $str1, $str2 -> $result // XXX KILL RAX, RBX" %}
  ins_encode( enc_String_Compare() );
  ins_pipe( pipe_slow );
%}

// fast array equals
instruct array_equals(rdi_RegP ary1, rsi_RegP ary2, rax_RegI tmp1,
                      rbx_RegI tmp2, rcx_RegI result, rFlagsReg cr) %{
  match(Set result (AryEq ary1 ary2));
  effect(USE_KILL ary1, USE_KILL ary2, KILL tmp1, KILL tmp2, KILL cr);
  //ins_cost(300);

  format %{ "Array Equals $ary1,$ary2 -> $result // KILL RAX, RBX" %}
  ins_encode( enc_Array_Equals(ary1, ary2, tmp1, tmp2, result) );
  ins_pipe( pipe_slow );
%}
//----------Control Flow Instructions------------------------------------------
// Signed compare Instructions

// XXX more variants!!

// Signed int compare, register-register.
instruct compI_rReg(rFlagsReg cr, rRegI op1, rRegI op2)
%{
  match(Set cr (CmpI op1 op2));
  effect(DEF cr, USE op1, USE op2);

  format %{ "cmpl $op1, $op2" %}
  opcode(0x3B);  /* Opcode 3B /r */
  ins_encode(REX_reg_reg(op1, op2), OpcP, reg_reg(op1, op2));
  ins_pipe(ialu_cr_reg_reg);
%}

// Signed int compare against an immediate (8- or 32-bit form chosen by
// Con8or32).
instruct compI_rReg_imm(rFlagsReg cr, rRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  format %{ "cmpl $op1, $op2" %}
  opcode(0x81, 0x07); /* Opcode 81 /7 */
  ins_encode(OpcSErm(op1, op2), Con8or32(op2));
  ins_pipe(ialu_cr_reg_imm);
%}

// Signed int compare against memory.
instruct compI_rReg_mem(rFlagsReg cr, rRegI op1, memory op2)
%{
  match(Set cr (CmpI op1 (LoadI op2)));

  ins_cost(500); // XXX
  format %{ "cmpl $op1, $op2" %}
  opcode(0x3B); /* Opcode 3B /r */
  ins_encode(REX_reg_mem(op1, op2), OpcP, reg_mem(op1, op2));
  ins_pipe(ialu_cr_reg_mem);
%}

// Compare int against zero: "test reg, reg" is shorter than "cmp reg, 0".
instruct testI_reg(rFlagsReg cr, rRegI src, immI0 zero)
%{
  match(Set cr (CmpI src zero));

  format %{ "testl $src, $src" %}
  opcode(0x85);
  ins_encode(REX_reg_reg(src, src), OpcP, reg_reg(src, src));
  ins_pipe(ialu_cr_reg_imm);
%}

// (src & con) compared against zero, folded into a single testl.
instruct testI_reg_imm(rFlagsReg cr, rRegI src, immI con, immI0 zero)
%{
  match(Set cr (CmpI (AndI src con) zero));

  format %{ "testl $src, $con" %}
  opcode(0xF7, 0x00);
  ins_encode(REX_reg(src), OpcP, reg_opc(src), Con32(con));
  ins_pipe(ialu_cr_reg_imm);
%}

// (src & mem) compared against zero, folded into testl reg, mem.
instruct testI_reg_mem(rFlagsReg cr, rRegI src, memory mem, immI0 zero)
%{
  match(Set cr (CmpI (AndI src (LoadI mem)) zero));

  format %{ "testl $src, $mem" %}
  opcode(0x85);
  ins_encode(REX_reg_mem(src, mem), OpcP, reg_mem(src, mem));
  ins_pipe(ialu_cr_reg_mem);
%}
// Unsigned compare Instructions; really, same as signed except they
// produce an rFlagsRegU instead of rFlagsReg.

// Unsigned int compare, register-register (same cmpl encoding; only the
// flags-register class differs so unsigned branches are selected).
instruct compU_rReg(rFlagsRegU cr, rRegI op1, rRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  format %{ "cmpl $op1, $op2\t# unsigned" %}
  opcode(0x3B); /* Opcode 3B /r */
  ins_encode(REX_reg_reg(op1, op2), OpcP, reg_reg(op1, op2));
  ins_pipe(ialu_cr_reg_reg);
%}

// Unsigned int compare against an immediate.
instruct compU_rReg_imm(rFlagsRegU cr, rRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  format %{ "cmpl $op1, $op2\t# unsigned" %}
  opcode(0x81,0x07); /* Opcode 81 /7 */
  ins_encode(OpcSErm(op1, op2), Con8or32(op2));
  ins_pipe(ialu_cr_reg_imm);
%}

// Unsigned int compare against memory.
instruct compU_rReg_mem(rFlagsRegU cr, rRegI op1, memory op2)
%{
  match(Set cr (CmpU op1 (LoadI op2)));

  ins_cost(500); // XXX
  format %{ "cmpl $op1, $op2\t# unsigned" %}
  opcode(0x3B); /* Opcode 3B /r */
  ins_encode(REX_reg_mem(op1, op2), OpcP, reg_mem(op1, op2));
  ins_pipe(ialu_cr_reg_mem);
%}

// // // Cisc-spilled version of cmpU_rReg
// //instruct compU_mem_rReg(rFlagsRegU cr, memory op1, rRegI op2)
// //%{
// //  match(Set cr (CmpU (LoadI op1) op2));
// //
// //  format %{ "CMPu $op1,$op2" %}
// //  ins_cost(500);
// //  opcode(0x39); /* Opcode 39 /r */
// //  ins_encode( OpcP, reg_mem( op1, op2) );
// //%}

// Unsigned compare against zero via testl.
instruct testU_reg(rFlagsRegU cr, rRegI src, immI0 zero)
%{
  match(Set cr (CmpU src zero));

  format %{ "testl $src, $src\t# unsigned" %}
  opcode(0x85);
  ins_encode(REX_reg_reg(src, src), OpcP, reg_reg(src, src));
  ins_pipe(ialu_cr_reg_imm);
%}
// Pointer compare, register-register (64-bit cmpq; pointer compares are
// unsigned, hence rFlagsRegU).
instruct compP_rReg(rFlagsRegU cr, rRegP op1, rRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  format %{ "cmpq $op1, $op2\t# ptr" %}
  opcode(0x3B); /* Opcode 3B /r */
  ins_encode(REX_reg_reg_wide(op1, op2), OpcP, reg_reg(op1, op2));
  ins_pipe(ialu_cr_reg_reg);
%}

// Pointer compare against memory.
instruct compP_rReg_mem(rFlagsRegU cr, rRegP op1, memory op2)
%{
  match(Set cr (CmpP op1 (LoadP op2)));

  ins_cost(500); // XXX
  format %{ "cmpq $op1, $op2\t# ptr" %}
  opcode(0x3B); /* Opcode 3B /r */
  ins_encode(REX_reg_mem_wide(op1, op2), OpcP, reg_mem(op1, op2));
  ins_pipe(ialu_cr_reg_mem);
%}

// // // Cisc-spilled version of cmpP_rReg
// //instruct compP_mem_rReg(rFlagsRegU cr, memory op1, rRegP op2)
// //%{
// //  match(Set cr (CmpP (LoadP op1) op2));
// //
// //  format %{ "CMPu $op1,$op2" %}
// //  ins_cost(500);
// //  opcode(0x39); /* Opcode 39 /r */
// //  ins_encode( OpcP, reg_mem( op1, op2) );
// //%}

// XXX this is generalized by compP_rReg_mem???
// Compare raw pointer (used in out-of-heap check).
// Only works because non-oop pointers must be raw pointers
// and raw pointers have no anti-dependencies.
instruct compP_mem_rReg(rFlagsRegU cr, rRegP op1, memory op2)
%{
  predicate(!n->in(2)->in(2)->bottom_type()->isa_oop_ptr());
  match(Set cr (CmpP op1 (LoadP op2)));

  format %{ "cmpq $op1, $op2\t# raw ptr" %}
  opcode(0x3B); /* Opcode 3B /r */
  ins_encode(REX_reg_mem_wide(op1, op2), OpcP, reg_mem(op1, op2));
  ins_pipe(ialu_cr_reg_mem);
%}

// This will generate a signed flags result. This should be OK since
// any compare to a zero should be eq/neq.
instruct testP_reg(rFlagsReg cr, rRegP src, immP0 zero)
%{
  match(Set cr (CmpP src zero));

  format %{ "testq $src, $src\t# ptr" %}
  opcode(0x85);
  ins_encode(REX_reg_reg_wide(src, src), OpcP, reg_reg(src, src));
  ins_pipe(ialu_cr_reg_imm);
%}

// This will generate a signed flags result. This should be OK since
// any compare to a zero should be eq/neq.
// Null check of a pointer loaded from memory: testq mem, -1 (the imm32
// sign-extends to all ones, so ZF is set iff the pointer is zero).
// Used unless the zero-base compressed-oops variant below applies.
instruct testP_mem(rFlagsReg cr, memory op, immP0 zero)
%{
  predicate(!UseCompressedOops || (Universe::narrow_oop_base() != NULL));
  match(Set cr (CmpP (LoadP op) zero));

  ins_cost(500); // XXX
  format %{ "testq $op, 0xffffffffffffffff\t# ptr" %}
  opcode(0xF7); /* Opcode F7 /0 */
  ins_encode(REX_mem_wide(op),
             OpcP, RM_opc_mem(0x00, op), Con_d32(0xFFFFFFFF));
  ins_pipe(ialu_cr_reg_imm);
%}

// With zero-based compressed oops, r12 (the heap-base register) holds
// zero, so a null check can be a compact cmpq r12, mem.
instruct testP_mem_reg0(rFlagsReg cr, memory mem, immP0 zero)
%{
  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
  match(Set cr (CmpP (LoadP mem) zero));

  format %{ "cmpq R12, $mem\t# ptr (R12_heapbase==0)" %}
  ins_encode %{
    __ cmpq(r12, $mem$$Address);
  %}
  ins_pipe(ialu_cr_reg_mem);
%}
// Compressed-oop (narrow) compare, register-register: 32-bit cmpl.
instruct compN_rReg(rFlagsRegU cr, rRegN op1, rRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  format %{ "cmpl $op1, $op2\t# compressed ptr" %}
  ins_encode %{ __ cmpl($op1$$Register, $op2$$Register); %}
  ins_pipe(ialu_cr_reg_reg);
%}

// Narrow-oop compare against memory.
instruct compN_rReg_mem(rFlagsRegU cr, rRegN src, memory mem)
%{
  match(Set cr (CmpN src (LoadN mem)));

  format %{ "cmpl $src, $mem\t# compressed ptr" %}
  ins_encode %{
    __ cmpl($src$$Register, $mem$$Address);
  %}
  ins_pipe(ialu_cr_reg_mem);
%}

// Narrow-oop compare against an oop constant; cmp_narrow_oop records the
// relocation for the embedded oop.
instruct compN_rReg_imm(rFlagsRegU cr, rRegN op1, immN op2) %{
  match(Set cr (CmpN op1 op2));

  format %{ "cmpl $op1, $op2\t# compressed ptr" %}
  ins_encode %{
    __ cmp_narrow_oop($op1$$Register, (jobject)$op2$$constant);
  %}
  ins_pipe(ialu_cr_reg_imm);
%}

// Narrow-oop constant compared against memory operand.
instruct compN_mem_imm(rFlagsRegU cr, memory mem, immN src)
%{
  match(Set cr (CmpN src (LoadN mem)));

  format %{ "cmpl $mem, $src\t# compressed ptr" %}
  ins_encode %{
    __ cmp_narrow_oop($mem$$Address, (jobject)$src$$constant);
  %}
  ins_pipe(ialu_cr_reg_mem);
%}

// Narrow-oop null check in a register: testl reg, reg.
instruct testN_reg(rFlagsReg cr, rRegN src, immN0 zero) %{
  match(Set cr (CmpN src zero));

  format %{ "testl $src, $src\t# compressed ptr" %}
  ins_encode %{ __ testl($src$$Register, $src$$Register); %}
  ins_pipe(ialu_cr_reg_imm);
%}

// Narrow-oop null check against memory when the heap base is non-zero.
// NOTE(review): the format shows "testl $mem, 0xffffffff" but the encode
// emits cmpl(mem, 0xFFFFFFFF) — confirm the intended test-vs-compare
// semantics against the assembler definitions.
instruct testN_mem(rFlagsReg cr, memory mem, immN0 zero)
%{
  predicate(Universe::narrow_oop_base() != NULL);
  match(Set cr (CmpN (LoadN mem) zero));

  ins_cost(500); // XXX
  format %{ "testl $mem, 0xffffffff\t# compressed ptr" %}
  ins_encode %{
    __ cmpl($mem$$Address, (int)0xFFFFFFFF);
  %}
  ins_pipe(ialu_cr_reg_mem);
%}

// With a zero heap base, r12 holds zero, so the narrow-oop null check
// can be cmpl r12, mem (shorter than an immediate form).
instruct testN_mem_reg0(rFlagsReg cr, memory mem, immN0 zero)
%{
  predicate(Universe::narrow_oop_base() == NULL);
  match(Set cr (CmpN (LoadN mem) zero));

  format %{ "cmpl R12, $mem\t# compressed ptr (R12_heapbase==0)" %}
  ins_encode %{
    __ cmpl(r12, $mem$$Address);
  %}
  ins_pipe(ialu_cr_reg_mem);
%}
// Yanked all unsigned pointer compare operations.
// Pointer compares are done with CmpP which is already unsigned.

// Signed long compare, register-register (cmpq).
instruct compL_rReg(rFlagsReg cr, rRegL op1, rRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  format %{ "cmpq $op1, $op2" %}
  opcode(0x3B);  /* Opcode 3B /r */
  ins_encode(REX_reg_reg_wide(op1, op2), OpcP, reg_reg(op1, op2));
  ins_pipe(ialu_cr_reg_reg);
%}

// Long compare against a 32-bit-encodable immediate (sign-extended).
instruct compL_rReg_imm(rFlagsReg cr, rRegL op1, immL32 op2)
%{
  match(Set cr (CmpL op1 op2));

  format %{ "cmpq $op1, $op2" %}
  opcode(0x81, 0x07); /* Opcode 81 /7 */
  ins_encode(OpcSErm_wide(op1, op2), Con8or32(op2));
  ins_pipe(ialu_cr_reg_imm);
%}

// Long compare against memory.
instruct compL_rReg_mem(rFlagsReg cr, rRegL op1, memory op2)
%{
  match(Set cr (CmpL op1 (LoadL op2)));

  format %{ "cmpq $op1, $op2" %}
  opcode(0x3B); /* Opcode 3B /r */
  ins_encode(REX_reg_mem_wide(op1, op2), OpcP, reg_mem(op1, op2));
  ins_pipe(ialu_cr_reg_mem);
%}

// Long compare against zero via testq.
instruct testL_reg(rFlagsReg cr, rRegL src, immL0 zero)
%{
  match(Set cr (CmpL src zero));

  format %{ "testq $src, $src" %}
  opcode(0x85);
  ins_encode(REX_reg_reg_wide(src, src), OpcP, reg_reg(src, src));
  ins_pipe(ialu_cr_reg_imm);
%}

// (src & con) compared against zero, folded into testq with imm32.
instruct testL_reg_imm(rFlagsReg cr, rRegL src, immL32 con, immL0 zero)
%{
  match(Set cr (CmpL (AndL src con) zero));

  format %{ "testq $src, $con\t# long" %}
  opcode(0xF7, 0x00);
  ins_encode(REX_reg_wide(src), OpcP, reg_opc(src), Con32(con));
  ins_pipe(ialu_cr_reg_imm);
%}

// (src & mem) compared against zero, folded into testq reg, mem.
instruct testL_reg_mem(rFlagsReg cr, rRegL src, memory mem, immL0 zero)
%{
  match(Set cr (CmpL (AndL src (LoadL mem)) zero));

  format %{ "testq $src, $mem" %}
  opcode(0x85);
  ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
  ins_pipe(ialu_cr_reg_mem);
%}

// Manifest a CmpL result in an integer register. Very painful.
// This is the test to avoid.
// Produces -1/0/+1 in $dst via cmpq + conditional jump + setne + movzbl.
instruct cmpL3_reg_reg(rRegI dst, rRegL src1, rRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(275); // XXX
  format %{ "cmpq $src1, $src2\t# CmpL3\n\t"
            "movl $dst, -1\n\t"
            "jl,s done\n\t"
            "setne $dst\n\t"
            "movzbl $dst, $dst\n\t"
            "done:" %}
  ins_encode(cmpl3_flag(src1, src2, dst));
  ins_pipe(pipe_slow);
%}
//----------Max and Min--------------------------------------------------------
// Min Instructions

// cmovg dst, src: after comparing dst to src, keep the smaller value in
// dst.  Encode-only helper (no match rule); used by the minI expand below.
instruct cmovI_reg_g(rRegI dst, rRegI src, rFlagsReg cr)
%{
  effect(USE_DEF dst, USE src, USE cr);

  format %{ "cmovlgt $dst, $src\t# min" %}
  opcode(0x0F, 0x4F);
  ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg);
%}

// MinI: expand into compare + conditional move (branchless).
instruct minI_rReg(rRegI dst, rRegI src)
%{
  match(Set dst (MinI dst src));

  ins_cost(200);
  expand %{
    rFlagsReg cr;
    compI_rReg(cr, dst, src);
    cmovI_reg_g(dst, src, cr);
  %}
%}

// cmovl dst, src: keep the larger value in dst.  Encode-only helper for
// the maxI expand below.
instruct cmovI_reg_l(rRegI dst, rRegI src, rFlagsReg cr)
%{
  effect(USE_DEF dst, USE src, USE cr);

  format %{ "cmovllt $dst, $src\t# max" %}
  opcode(0x0F, 0x4C);
  ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg);
%}

// MaxI: expand into compare + conditional move (branchless).
instruct maxI_rReg(rRegI dst, rRegI src)
%{
  match(Set dst (MaxI dst src));

  ins_cost(200);
  expand %{
    rFlagsReg cr;
    compI_rReg(cr, dst, src);
    cmovI_reg_l(dst, src, cr);
  %}
%}
// ============================================================================
// Branch Instructions

// Jump Direct - Label defines a relative address from JMP+1
instruct jmpDir(label labl)
%{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "jmp $labl" %}
  size(5);
  opcode(0xE9);
  ins_encode(OpcP, Lbl(labl));
  ins_pipe(pipe_jmp);
  ins_pc_relative(1);
%}

// Jump Direct Conditional - Label defines a relative address from Jcc+1
instruct jmpCon(cmpOp cop, rFlagsReg cr, label labl)
%{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop $labl" %}
  size(6);
  opcode(0x0F, 0x80);
  ins_encode(Jcc(cop, labl));
  ins_pipe(pipe_jcc);
  ins_pc_relative(1);
%}

// Jump Direct Conditional - Label defines a relative address from Jcc+1
instruct jmpLoopEnd(cmpOp cop, rFlagsReg cr, label labl)
%{
  match(CountedLoopEnd cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop $labl\t# loop end" %}
  size(6);
  opcode(0x0F, 0x80);
  ins_encode(Jcc(cop, labl));
  ins_pipe(pipe_jcc);
  ins_pc_relative(1);
%}

// Jump Direct Conditional - Label defines a relative address from Jcc+1
instruct jmpLoopEndU(cmpOpU cop, rFlagsRegU cmp, label labl) %{
  match(CountedLoopEnd cop cmp);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop,u $labl\t# loop end" %}
  size(6);
  opcode(0x0F, 0x80);
  ins_encode(Jcc(cop, labl));
  ins_pipe(pipe_jcc);
  ins_pc_relative(1);
%}

// Loop-end branch on carry-free unsigned flags (from float/UCF compares);
// cheaper cost to prefer this form when applicable.
instruct jmpLoopEndUCF(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
  match(CountedLoopEnd cop cmp);
  effect(USE labl);

  ins_cost(200);
  format %{ "j$cop,u $labl\t# loop end" %}
  size(6);
  opcode(0x0F, 0x80);
  ins_encode(Jcc(cop, labl));
  ins_pipe(pipe_jcc);
  ins_pc_relative(1);
%}

// Jump Direct Conditional - using unsigned comparison
instruct jmpConU(cmpOpU cop, rFlagsRegU cmp, label labl) %{
  match(If cop cmp);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop,u $labl" %}
  size(6);
  opcode(0x0F, 0x80);
  ins_encode(Jcc(cop, labl));
  ins_pipe(pipe_jcc);
  ins_pc_relative(1);
%}

// Conditional branch on UCF flags (no parity fixup needed for this cmpOp class).
instruct jmpConUCF(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
  match(If cop cmp);
  effect(USE labl);

  ins_cost(200);
  format %{ "j$cop,u $labl" %}
  size(6);
  opcode(0x0F, 0x80);
  ins_encode(Jcc(cop, labl));
  ins_pipe(pipe_jcc);
  ins_pc_relative(1);
%}

// Conditional branch that must also handle the parity flag (unordered
// float compare results): emits a jp plus the jcc.  For notEqual, an
// unordered result should also take the branch; for equal, unordered
// must fall through (jp skips over the jcc — hence the fixed 6-byte
// displacement).  Total size is fixed at 12 bytes.
instruct jmpConUCF2(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{
  match(If cop cmp);
  effect(USE labl);

  ins_cost(200);
  format %{ $$template
    if ($cop$$cmpcode == Assembler::notEqual) {
      $$emit$$"jp,u $labl\n\t"
      $$emit$$"j$cop,u $labl"
    } else {
      $$emit$$"jp,u done\n\t"
      $$emit$$"j$cop,u $labl\n\t"
      $$emit$$"done:"
    }
  %}
  size(12);
  opcode(0x0F, 0x80);
  ins_encode %{
    Label* l = $labl$$label;
    $$$emit8$primary;
    emit_cc(cbuf, $secondary, Assembler::parity);
    int parity_disp = -1;
    if ($cop$$cmpcode == Assembler::notEqual) {
      // the two jumps 6 bytes apart so the jump distances are too
      parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0;
    } else if ($cop$$cmpcode == Assembler::equal) {
      parity_disp = 6;
    } else {
      ShouldNotReachHere();
    }
    emit_d32(cbuf, parity_disp);
    $$$emit8$primary;
    emit_cc(cbuf, $secondary, $cop$$cmpcode);
    int disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0;
    emit_d32(cbuf, disp);
  %}
  ins_pipe(pipe_jcc);
  ins_pc_relative(1);
%}
// ============================================================================
// The 2nd slow-half of a subtype check.  Scan the subklass's 2ndary
// superklass array for an instance of the superklass.  Set a hidden
// internal cache on a hit (cache is checked with exposed code in
// gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
// encoding ALSO sets flags.
// Fix: the format strings said "arrayOopDex" — corrected to
// "arrayOopDesc" so disassembly output names the real class.

instruct partialSubtypeCheck(rdi_RegP result,
                             rsi_RegP sub, rax_RegP super, rcx_RegI rcx,
                             rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL rcx, KILL cr);

  ins_cost(1100); // slightly larger than the next version
  format %{ "cmpq rax, rsi\n\t"
            "jeq,s hit\n\t"
            "movq rdi, [$sub + (sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())]\n\t"
            "movl rcx, [rdi + arrayOopDesc::length_offset_in_bytes()]\t# length to scan\n\t"
            "addq rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)\t# Skip to start of data; set NZ in case count is zero\n\t"
            "repne scasq\t# Scan *rdi++ for a match with rax while rcx--\n\t"
            "jne,s miss\t\t# Missed: rdi not-zero\n\t"
            "movq [$sub + (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())], $super\t# Hit: update cache\n\t"
            "hit:\n\t"
            "xorq $result, $result\t\t Hit: rdi zero\n\t"
            "miss:\t" %}

  opcode(0x1); // Force a XOR of RDI
  ins_encode(enc_PartialSubtypeCheck());
  ins_pipe(pipe_slow);
%}

// Variant matched when the subtype-check result is only compared against
// null: result register is clobbered but not defined; flags carry the
// answer (NZ = miss, Z = hit).
instruct partialSubtypeCheck_vs_Zero(rFlagsReg cr,
                                     rsi_RegP sub, rax_RegP super, rcx_RegI rcx,
                                     immP0 zero,
                                     rdi_RegP result)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL rcx, KILL result);

  ins_cost(1000);
  format %{ "cmpq rax, rsi\n\t"
            "jeq,s miss\t# Actually a hit; we are done.\n\t"
            "movq rdi, [$sub + (sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())]\n\t"
            "movl rcx, [rdi + arrayOopDesc::length_offset_in_bytes()]\t# length to scan\n\t"
            "addq rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)\t# Skip to start of data; set NZ in case count is zero\n\t"
            "repne scasq\t# Scan *rdi++ for a match with rax while cx-- != 0\n\t"
            "jne,s miss\t\t# Missed: flags nz\n\t"
            "movq [$sub + (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())], $super\t# Hit: update cache\n\t"
            "miss:\t" %}

  opcode(0x0); // No need to XOR RDI
  ins_encode(enc_PartialSubtypeCheck());
  ins_pipe(pipe_slow);
%}
12173 // ============================================================================
12174 // Branch Instructions -- short offset versions
12175 //
12176 // These instructions are used to replace jumps of a long offset (the default
12177 // match) with jumps of a shorter offset. These instructions are all tagged
12178 // with the ins_short_branch attribute, which causes the ADLC to suppress the
12179 // match rules in general matching. Instead, the ADLC generates a conversion
12180 // method in the MachNode which can be used to do in-place replacement of the
12181 // long variant with the shorter variant. The compiler will determine if a
12182 // branch can be taken by the is_short_branch_offset() predicate in the machine
12183 // specific code section of the file.
// Jump Direct - Label defines a relative address from JMP+1
// Short (rel8) form of the unconditional jump; substituted in place of the
// long form by the ADLC when the target is within disp8 range.
instruct jmpDir_short(label labl) %{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "jmp,s $labl" %}
  size(2);        // 0xEB opcode + 1-byte displacement
  opcode(0xEB);
  ins_encode(OpcP, LblShort(labl));
  ins_pipe(pipe_jmp);
  ins_pc_relative(1);
  ins_short_branch(1);  // suppressed from general matching; used only for in-place replacement
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
// Short (rel8) form of jmpCon for signed-compare branches.
instruct jmpCon_short(cmpOp cop, rFlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop,s $labl" %}
  size(2);        // 0x70+cc opcode + 1-byte displacement
  opcode(0x70);   // base Jcc opcode; condition code is OR'ed in by JccShort
  ins_encode(JccShort(cop, labl));
  ins_pipe(pipe_jcc);
  ins_pc_relative(1);
  ins_short_branch(1);
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
// Short form of the counted-loop back-branch (signed condition codes).
instruct jmpLoopEnd_short(cmpOp cop, rFlagsReg cr, label labl) %{
  match(CountedLoopEnd cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop,s $labl\t# loop end" %}
  size(2);        // 0x70+cc opcode + 1-byte displacement
  opcode(0x70);
  ins_encode(JccShort(cop, labl));
  ins_pipe(pipe_jcc);
  ins_pc_relative(1);
  ins_short_branch(1);
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
// Short form of the counted-loop back-branch using unsigned condition codes.
instruct jmpLoopEndU_short(cmpOpU cop, rFlagsRegU cmp, label labl) %{
  match(CountedLoopEnd cop cmp);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop,us $labl\t# loop end" %}
  size(2);        // 0x70+cc opcode + 1-byte displacement
  opcode(0x70);
  ins_encode(JccShort(cop, labl));
  ins_pipe(pipe_jcc);
  ins_pc_relative(1);
  ins_short_branch(1);
%}
// Short form of the counted-loop back-branch for unsigned flags from a
// float compare (UCF) where a single Jcc suffices for the condition.
instruct jmpLoopEndUCF_short(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
  match(CountedLoopEnd cop cmp);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop,us $labl\t# loop end" %}
  size(2);        // 0x70+cc opcode + 1-byte displacement
  opcode(0x70);
  ins_encode(JccShort(cop, labl));
  ins_pipe(pipe_jcc);
  ins_pc_relative(1);
  ins_short_branch(1);
%}
// Jump Direct Conditional - using unsigned comparison
// Short (rel8) form of jmpConU.
instruct jmpConU_short(cmpOpU cop, rFlagsRegU cmp, label labl) %{
  match(If cop cmp);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop,us $labl" %}
  size(2);        // 0x70+cc opcode + 1-byte displacement
  opcode(0x70);
  ins_encode(JccShort(cop, labl));
  ins_pipe(pipe_jcc);
  ins_pc_relative(1);
  ins_short_branch(1);
%}
// Short branch on unsigned flags from a float compare (UCF) where the
// condition does not need an extra parity check.
instruct jmpConUCF_short(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
  match(If cop cmp);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop,us $labl" %}
  size(2);        // 0x70+cc opcode + 1-byte displacement
  opcode(0x70);
  ins_encode(JccShort(cop, labl));
  ins_pipe(pipe_jcc);
  ins_pc_relative(1);
  ins_short_branch(1);
%}
// Short branch for float-compare conditions (eq/ne) that need an explicit
// parity-flag check to handle the unordered (NaN) case: two short Jcc's.
instruct jmpConUCF2_short(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{
  match(If cop cmp);
  effect(USE labl);

  ins_cost(300);
  format %{ $$template
    if ($cop$$cmpcode == Assembler::notEqual) {
      $$emit$$"jp,u,s $labl\n\t"
      $$emit$$"j$cop,u,s $labl"
    } else {
      $$emit$$"jp,u,s done\n\t"
      $$emit$$"j$cop,u,s $labl\n\t"
      $$emit$$"done:"
    }
  %}
  size(4);        // two short Jcc's, 2 bytes each
  opcode(0x70);
  ins_encode %{
    Label* l = $labl$$label;
    // First short jump tests the parity flag (set on an unordered compare).
    emit_cc(cbuf, $primary, Assembler::parity);
    int parity_disp = -1;
    if ($cop$$cmpcode == Assembler::notEqual) {
      // ne: unordered also takes the branch, so jp goes to the label too.
      // disp8 is relative to the end of the 1-byte displacement field,
      // hence the +1 on the current code size.
      parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
    } else if ($cop$$cmpcode == Assembler::equal) {
      // eq: unordered must NOT branch; hop over the 2-byte je that follows.
      parity_disp = 2;
    } else {
      ShouldNotReachHere();
    }
    emit_d8(cbuf, parity_disp);
    // Second short jump: the actual eq/ne condition.
    emit_cc(cbuf, $primary, $cop$$cmpcode);
    int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
    emit_d8(cbuf, disp);
    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
    assert(-128 <= parity_disp && parity_disp <= 127, "Displacement too large for short jmp");
  %}
  ins_pipe(pipe_jcc);
  ins_pc_relative(1);
  ins_short_branch(1);
%}
// ============================================================================
// inlined locking and unlocking

// Fast-path monitor enter; the detailed lock sequence lives in the
// Fast_Lock encoding class.  Sets the flags for the following branch.
instruct cmpFastLock(rFlagsReg cr,
                     rRegP object, rRegP box, rax_RegI tmp, rRegP scr)
%{
  match(Set cr (FastLock object box));
  // tmp is pinned to rax; tmp and scr are scratch for the encoding.
  effect(TEMP tmp, TEMP scr);

  ins_cost(300);
  format %{ "fastlock $object,$box,$tmp,$scr" %}
  ins_encode(Fast_Lock(object, box, tmp, scr));
  ins_pipe(pipe_slow);
  ins_pc_relative(1);
%}
// Fast-path monitor exit; counterpart of cmpFastLock.  The box (displaced
// header slot) is pinned to rax for the Fast_Unlock encoding.
instruct cmpFastUnlock(rFlagsReg cr,
                       rRegP object, rax_RegP box, rRegP tmp)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp);   // scratch register for the encoding

  ins_cost(300);
  format %{ "fastunlock $object, $box, $tmp" %}
  ins_encode(Fast_Unlock(object, box, tmp));
  ins_pipe(pipe_slow);
  ins_pc_relative(1);
%}
// ============================================================================
// Safepoint Instructions

// Safepoint poll: a rip-relative test against the polling page.  When a
// safepoint is pending the VM protects the page and the load faults.
instruct safePoint_poll(rFlagsReg cr)
%{
  match(SafePoint);
  effect(KILL cr);    // testl sets the flags

  format %{ "testl rax, [rip + #offset_to_poll_page]\t"
            "# Safepoint: poll for GC" %}
  size(6); // Opcode + ModRM + Disp32 == 6 bytes
  ins_cost(125);
  ins_encode(enc_safepoint_poll);
  ins_pipe(ialu_reg_mem);
%}
// ============================================================================
// Procedure Call/Return Instructions
// Call Java Static Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);
  effect(USE meth);

  ins_cost(300);
  format %{ "call,static " %}
  opcode(0xE8); /* E8 cd */
  ins_encode(Java_Static_Call(meth), call_epilog);
  ins_pipe(pipe_slow);
  ins_pc_relative(1);
  ins_alignment(4);   // keep the call aligned so the return address can be patched
%}
// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  // rax is preloaded with a non-oop sentinel (the inline-cache holder slot).
  format %{ "movq rax, #Universe::non_oop_word()\n\t"
            "call,dynamic " %}
  opcode(0xE8); /* E8 cd */
  ins_encode(Java_Dynamic_Call(meth), call_epilog);
  ins_pipe(pipe_slow);
  ins_pc_relative(1);
  ins_alignment(4);
%}
// Call Runtime Instruction
// Call into the VM runtime (may reach a safepoint).
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);
  effect(USE meth);

  ins_cost(300);
  format %{ "call,runtime " %}
  opcode(0xE8); /* E8 cd */
  ins_encode(Java_To_Runtime(meth));
  ins_pipe(pipe_slow);
  ins_pc_relative(1);
%}
// Call runtime without safepoint
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "call_leaf,runtime " %}
  opcode(0xE8); /* E8 cd */
  ins_encode(Java_To_Runtime(meth));
  ins_pipe(pipe_slow);
  ins_pc_relative(1);
%}
// Call runtime without safepoint
// Variant of CallLeafDirect for leaf calls known not to use FP state.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "call_leaf_nofp,runtime " %}
  opcode(0xE8); /* E8 cd */
  ins_encode(Java_To_Runtime(meth));
  ins_pipe(pipe_slow);
  ins_pc_relative(1);
%}
// Return Instruction
// Remove the return address & jump to it.
// Notice: We always emit a nop after a ret to make sure there is room
// for safepoint patching
instruct Ret()
%{
  match(Return);

  format %{ "ret" %}
  opcode(0xC3);   // single-byte near return
  ins_encode(OpcP);
  ins_pipe(pipe_jmp);
%}
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(no_rbp_RegP jump_target, rbx_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(300);
  format %{ "jmp $jump_target\t# rbx holds method oop" %}
  opcode(0xFF, 0x4); /* Opcode FF /4 */   // indirect jmp r/m64
  ins_encode(REX_reg(jump_target), OpcP, reg_opc(jump_target));
  ins_pipe(pipe_jmp);
%}
// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
instruct tailjmpInd(no_rbp_RegP jump_target, rax_RegP ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(300);
  format %{ "popq rdx\t# pop return address\n\t"
            "jmp $jump_target" %}
  opcode(0xFF, 0x4); /* Opcode FF /4 */   // indirect jmp r/m64
  ins_encode(Opcode(0x5a), // popq rdx (0x58 + rdx encoding)
             REX_reg(jump_target), OpcP, reg_opc(jump_target));
  ins_pipe(pipe_jmp);
%}
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
instruct CreateException(rax_RegP ex_oop)
%{
  match(Set ex_oop (CreateEx));

  size(0);    // purely a register-allocation artifact; emits nothing
  // use the following format syntax
  format %{ "# exception oop is in rax; no code emitted" %}
  ins_encode();
  ins_pipe(empty);
%}
// Rethrow exception:
// The exception oop will come in the first argument position.
// Then JUMP (not call) to the rethrow stub code.
instruct RethrowException()
%{
  match(Rethrow);

  // use the following format syntax
  format %{ "jmp rethrow_stub" %}
  ins_encode(enc_rethrow);
  ins_pipe(pipe_jmp);
%}
12524 //----------PEEPHOLE RULES-----------------------------------------------------
12525 // These must follow all instruction definitions as they use the names
12526 // defined in the instructions definitions.
12527 //
12528 // peepmatch ( root_instr_name [preceding_instruction]* );
12529 //
12530 // peepconstraint %{
12531 // (instruction_number.operand_name relational_op instruction_number.operand_name
12532 // [, ...] );
12533 // // instruction numbers are zero-based using left to right order in peepmatch
12534 //
12535 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
12536 // // provide an instruction_number.operand_name for each operand that appears
12537 // // in the replacement instruction's match rule
12538 //
12539 // ---------VM FLAGS---------------------------------------------------------
12540 //
12541 // All peephole optimizations can be turned off using -XX:-OptoPeephole
12542 //
12543 // Each peephole rule is given an identifying number starting with zero and
12544 // increasing by one in the order seen by the parser. An individual peephole
12545 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
12546 // on the command-line.
12547 //
12548 // ---------CURRENT LIMITATIONS----------------------------------------------
12549 //
12550 // Only match adjacent instructions in same basic block
12551 // Only equality constraints
12552 // Only constraints between operands, not (0.dest_reg == RAX_enc)
12553 // Only one replacement instruction
12554 //
12555 // ---------EXAMPLE----------------------------------------------------------
12556 //
12557 // // pertinent parts of existing instructions in architecture description
12558 // instruct movI(rRegI dst, rRegI src)
12559 // %{
12560 // match(Set dst (CopyI src));
12561 // %}
12562 //
12563 // instruct incI_rReg(rRegI dst, immI1 src, rFlagsReg cr)
12564 // %{
12565 // match(Set dst (AddI dst src));
12566 // effect(KILL cr);
12567 // %}
12568 //
12569 // // Change (inc mov) to lea
12570 // peephole %{
// // increment preceded by register-register move
12572 // peepmatch ( incI_rReg movI );
12573 // // require that the destination register of the increment
12574 // // match the destination register of the move
12575 // peepconstraint ( 0.dst == 1.dst );
12576 // // construct a replacement instruction that sets
12577 // // the destination to ( move's source register + one )
12578 // peepreplace ( leaI_rReg_immI( 0.dst 1.src 0.src ) );
12579 // %}
12580 //
12582 // Implementation no longer uses movX instructions since
12583 // machine-independent system no longer uses CopyX nodes.
12584 //
12585 // peephole
12586 // %{
12587 // peepmatch (incI_rReg movI);
12588 // peepconstraint (0.dst == 1.dst);
12589 // peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
12590 // %}
12592 // peephole
12593 // %{
12594 // peepmatch (decI_rReg movI);
12595 // peepconstraint (0.dst == 1.dst);
12596 // peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
12597 // %}
12599 // peephole
12600 // %{
12601 // peepmatch (addI_rReg_imm movI);
12602 // peepconstraint (0.dst == 1.dst);
12603 // peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
12604 // %}
12606 // peephole
12607 // %{
12608 // peepmatch (incL_rReg movL);
12609 // peepconstraint (0.dst == 1.dst);
12610 // peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
12611 // %}
12613 // peephole
12614 // %{
12615 // peepmatch (decL_rReg movL);
12616 // peepconstraint (0.dst == 1.dst);
12617 // peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
12618 // %}
12620 // peephole
12621 // %{
12622 // peepmatch (addL_rReg_imm movL);
12623 // peepconstraint (0.dst == 1.dst);
12624 // peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
12625 // %}
12627 // peephole
12628 // %{
12629 // peepmatch (addP_rReg_imm movP);
12630 // peepconstraint (0.dst == 1.dst);
12631 // peepreplace (leaP_rReg_imm(0.dst 1.src 0.src));
12632 // %}
12634 // // Change load of spilled value to only a spill
12635 // instruct storeI(memory mem, rRegI src)
12636 // %{
12637 // match(Set mem (StoreI mem src));
12638 // %}
12639 //
12640 // instruct loadI(rRegI dst, memory mem)
12641 // %{
12642 // match(Set dst (LoadI mem));
12643 // %}
12644 //
// Peephole: a load of the value just stored to the same memory location
// is redundant — replace the (load, store) pair with the store alone.
peephole
%{
  peepmatch (loadI storeI);
  peepconstraint (1.src == 0.dst, 1.mem == 0.mem);
  peepreplace (storeI(1.mem 1.mem 1.src));
%}
// Same spill-elimination peephole as above, for 64-bit (long) values.
peephole
%{
  peepmatch (loadL storeL);
  peepconstraint (1.src == 0.dst, 1.mem == 0.mem);
  peepreplace (storeL(1.mem 1.mem 1.src));
%}
12660 //----------SMARTSPILL RULES---------------------------------------------------
12661 // These must follow all instruction definitions as they use the names
12662 // defined in the instructions definitions.