Thu, 03 Jan 2013 16:30:47 -0800
8005544: Use 256bit YMM registers in arraycopy stubs on x86
Summary: Use YMM registers in arraycopy and array_fill stubs.
Reviewed-by: roland, twisti
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef CPU_X86_VM_MACROASSEMBLER_X86_HPP
#define CPU_X86_VM_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()
 protected:

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr);

  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
#ifdef CC_INTERP
  // c++ interpreter never wants to use interp_masm version of call_VM
  #define VIRTUAL
#else
  #define VIRTUAL virtual
#endif

  VIRTUAL void call_VM_leaf_base(
    address entry_point,               // the entry point
    int     number_of_arguments        // the number of arguments to pop after the call
  );
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  VIRTUAL void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before     ; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );
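
  // Editorial sketch (not part of the original header): a typical use of the
  // call_VM machinery from generated code, where '__' is the usual masm->
  // shorthand. The entry point and argument register are representative,
  // not prescriptive:
  //
  //   __ call_VM(rax,   // oop result, if any, lands in rax
  //              CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException),
  //              rdx);  // one register argument
  //
  // call_VM_base underneath sets up last_Java_frame, passes the current
  // thread as the implicit first C argument, and by default checks for
  // pending exceptions on return.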
  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
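
  // Editorial example: a load from a field at a small known offset can rely
  // on the hardware trap for accesses through page 0, so
  //
  //   __ null_check(rbx, oopDesc::klass_offset_in_bytes());
  //
  // emits no explicit test, while the default offset of -1 (offset unknown)
  // forces an explicit compare of reg against NULL. The register and offset
  // here are representative only.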
  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = target - (address) &disp[1];
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
      *disp = imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F)? 2: 1];
      int imm32 = target - (address) &disp[1];
      *disp = imm32;
    }
  }
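
  // Editorial worked example: for a short jmp (0xEB) emitted at address p,
  // the displacement byte sits at p + 1 and is relative to the next
  // instruction at p + 2. Patching it to reach 'target' therefore stores
  // imm8 = target - (p + 2), which is exactly target - (address) &disp[1]
  // in the code above.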
  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);
  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void incrementl(AddressLiteral dst);
  void incrementl(ArrayAddress dst);
  // Alignment
  void align(int modulus);

  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);
  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is setup correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.

  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);
  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  void reset_last_Java_frame(Register thread, bool clear_fp, bool clear_pc);

  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp, bool clear_pc);
  // Stores
  void store_check(Register obj);                // store check for obj - register is destroyed afterwards
  void store_check(Register obj, Address dst);   // same as above, dst is exact store location (reg. is destroyed)

#ifndef SERIALGC

  void g1_write_barrier_pre(Register obj,
                            Register pre_val,
                            Register thread,
                            Register tmp,
                            bool tosca_live,
                            bool expand_call);

  void g1_write_barrier_post(Register store_addr,
                             Register new_val,
                             Register thread,
                             Register tmp,
                             Register tmp2);

#endif // SERIALGC

  // split store_check(Register obj) to enhance instruction interleaving
  void store_check_part_1(Register obj);
  void store_check_part_2(Register obj);
  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  // oop manipulations
  void load_klass(Register dst, Register src);
  void store_klass(Register dst, Register src);

  void load_heap_oop(Register dst, Address src);
  void load_heap_oop_not_null(Register dst, Address src);
  void store_heap_oop(Address dst, Register src);
  void cmp_heap_oop(Register src1, Address src2, Register tmp = noreg);

  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void load_prototype_header(Register dst, Register src);
#ifdef _LP64
  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like NULL) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r);
  void decode_klass_not_null(Register r);
  void encode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64
  // Int division/remainder for Java
  // (as idivl, but checks for the special case described in the JVM spec:
  //  min_jint / -1 must yield min_jint without trapping)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);

  void int3();
  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);

  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);
  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // Inlined sin/cos generator for Java; must not use CPU instruction
  // directly on Intel as it does not have high enough precision
  // outside of the range [-pi/4, pi/4]. Extra argument indicates the
  // number of FPU stack slots in use; all but the topmost will
  // require saving if a slow case is necessary. Assumes argument is
  // on FP TOS; result is on FP TOS. No cpu registers are changed by
  // this code.
  void trigfunc(char trig, int num_fpu_regs_in_use = 1);
  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Pop ST (ffree & fincstp combined)
  void fpop();

  // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
  void push_fTOS();

  // pops double TOS element from CPU stack and pushes on FPU stack
  void pop_fTOS();

  void empty_FPU_stack();

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  // Round reg up to a multiple of modulus (modulus must be a power of two)
  void round_to(Register reg, int modulus);

  // Callee saved registers handling
  void push_callee_saved_registers();
  void pop_callee_saved_registers();
  // allocation
  void eden_allocate(
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if   known at compile time
    Register t1,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if   known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address
  void incr_allocated_bytes(Register thread,
                            Register var_size_in_bytes, int con_size_in_bytes,
                            Register t1 = noreg);
  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);
  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);
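
  // Editorial sketch of wiring the two-part check together when the combined
  // version is not flexible enough. Labels and register choices below are
  // hypothetical:
  //
  //   Label L_success, L_failure;
  //   __ check_klass_subtype_fast_path(rsi, rax, rcx,
  //                                    &L_success, &L_failure, NULL);
  //   // fall through means "maybe": finish the decision on the slow path
  //   __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi,
  //                                    &L_success, NULL);
  //   // fall through (like L_failure) means the subtype check failed
  //   __ bind(L_failure);
  //   ...
  //   __ bind(L_success);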
  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  //----
  void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0

  // Debugging

  // only if +VerifyOops
  // TODO: Make these macros with file and line like sparc version!
  void verify_oop(Register reg, const char* s = "broken oop");
  void verify_oop_addr(Address addr, const char* s = "broken oop addr");

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested()                                { stop("untested"); }

  void unimplemented(const char* what = "")      { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); }

  void should_not_reach_here()                   { stop("should not reach here"); }

  void print_CPU_state();
  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with positive offset");
    movl(Address(rsp, (-offset)), rax);
  }
  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);

  // Support for serializing memory accesses between threads
  void serialize_memory(Register thread, Register tmp);

  void verify_tlab();
  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg must be rax, and is killed.
  // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
  // be killed; if not supplied, push/pop will be used internally to
  // allocate a temporary (inefficient, avoid if possible).
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  // Returns offset of first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  int biased_locking_enter(Register lock_reg, Register obj_reg,
                           Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Label& done, Label* slow_case = NULL,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);

  Condition negate_condition(Condition cond);
  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call

  // Arithmetic

  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, (int) src.as_constant());
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

  void cmp8(AddressLiteral src1, int imm);
  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
  void cmpoop(Register dst, jobject obj);
#endif // _LP64

  // NOTE src2 must be the lval. This is NOT a mem-mem compare
  void cmpptr(Address src1, AddressLiteral src2);

  void cmpptr(Register src1, AddressLiteral src2);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src);
  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr);

  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }

  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src)  { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2)  { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }
  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr);
  // Unconditional atomic increment.
  void atomic_incl(AddressLiteral counter_addr);

  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr);
  void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Register dst, AddressLiteral src);

  void orptr(Register dst, Address src)  { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src)  { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }

  void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src)  { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
  void call(AddressLiteral entry);

  // Emit the CompiledIC call idiom
  void ic_call(address entry);

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst);
  void jump_cc(Condition cc, AddressLiteral dst);

  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump transfers to the address
  // contained in the location described by entry (not the address of entry).
  void jump(ArrayAddress entry);
  // Floating

  void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src);

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src)     { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src)     { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src);

  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src)     { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src);

  void fadd_s(Address src)        { Assembler::fadd_s(src); }
  void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }

  void fldcw(Address src) { Assembler::fldcw(src); }
  void fldcw(AddressLiteral src);

  void fld_s(int index)   { Assembler::fld_s(index); }
  void fld_s(Address src) { Assembler::fld_s(src); }
  void fld_s(AddressLiteral src);

  void fld_d(Address src) { Assembler::fld_d(src); }
  void fld_d(AddressLiteral src);

  void fld_x(Address src) { Assembler::fld_x(src); }
  void fld_x(AddressLiteral src);

  void fmul_s(Address src)        { Assembler::fmul_s(src); }
  void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }

  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src);
  // compute pow(x,y) and exp(x) with x86 instructions. These don't cover
  // all corner cases and may produce NaN, requiring a fallback to a
  // runtime call.
  void fast_pow();
  void fast_exp();
  void increase_precision();
  void restore_precision();

  // computes exp(x). Fallback to runtime call included.
  void exp_with_fallback(int num_fpu_regs_in_use) { pow_or_exp(true, num_fpu_regs_in_use); }
  // computes pow(x,y). Fallback to runtime call included.
  void pow_with_fallback(int num_fpu_regs_in_use) { pow_or_exp(false, num_fpu_regs_in_use); }
 private:

  // call runtime as a fallback for trig functions and pow/exp.
  void fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use);

  // computes 2^(Ylog2X); Ylog2X in ST(0)
  void pow_exp_core_encoding();

  // computes pow(x,y) or exp(x). Fallback to runtime call included.
  void pow_or_exp(bool is_exp, int num_fpu_regs_in_use);

  // these are private because users should be doing movflt/movdbl

  void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src);

  void movlpd(XMMRegister dst, Address src)    { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src);

 public:
  void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address src)     { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src);

  void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address src)     { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src);

  void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address src)     { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src);

  void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address src)     { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src)     { Assembler::movdqu(dst, src); }
  void movdqu(XMMRegister dst, Address src)     { Assembler::movdqu(dst, src); }
  void movdqu(XMMRegister dst, XMMRegister src) { Assembler::movdqu(dst, src); }
  void movdqu(XMMRegister dst, AddressLiteral src);

  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, AddressLiteral src);

  void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, Address src)     { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, AddressLiteral src);

  void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, Address src)     { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, AddressLiteral src);

  void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, Address src)     { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, AddressLiteral src);

  void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, Address src)     { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, AddressLiteral src);

  void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, Address src)     { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, AddressLiteral src);

  void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, Address src)     { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, AddressLiteral src);

  void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, Address src)     { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, AddressLiteral src);

  void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, Address src)     { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src) { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, Address src)     { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
  void xorps(XMMRegister dst, XMMRegister src) { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, Address src)     { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, AddressLiteral src);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, Address src)     { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, AddressLiteral src);
  // AVX 3-operand instructions

  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandpd(dst, nds, src, vector256); }
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256)     { Assembler::vandpd(dst, nds, src, vector256); }
  void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);

  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandps(dst, nds, src, vector256); }
  void vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256)     { Assembler::vandps(dst, nds, src, vector256); }
  void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);

  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
  // AVX Vector instructions

  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); }
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256)     { Assembler::vxorpd(dst, nds, src, vector256); }
  void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);

  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); }
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256)     { Assembler::vxorps(dst, nds, src, vector256); }
  void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);

  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
    if (UseAVX > 1 || !vector256) // 256-bit vpxor is available only with AVX2
      Assembler::vpxor(dst, nds, src, vector256);
    else
      Assembler::vxorpd(dst, nds, src, vector256);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
    if (UseAVX > 1 || !vector256) // 256-bit vpxor is available only with AVX2
      Assembler::vpxor(dst, nds, src, vector256);
    else
      Assembler::vxorpd(dst, nds, src, vector256);
  }
  // Move packed integer values from the low 128 bits to the high 128 bits of a 256-bit vector.
  void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    if (UseAVX > 1) // vinserti128h is available only with AVX2
      Assembler::vinserti128h(dst, nds, src);
    else
      Assembler::vinsertf128h(dst, nds, src);
  }
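
  // Editorial note: the two wrappers above exist because plain AVX only has
  // the 256-bit floating-point logicals and inserts (vxorpd, vinsertf128),
  // while the integer forms (vpxor, vinserti128) require AVX2; the bit
  // patterns produced are identical either way. A common idiom they enable
  // is duplicating the low 128 bits of a register into its high half, e.g.
  //
  //   __ vinserti128h(xtmp, xtmp, xtmp);   // ymm xtmp = [xmm xtmp | xmm xtmp]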
  // Data

  void cmov32( Condition cc, Register dst, Address src);
  void cmov32( Condition cc, Register dst, Register src);

  void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }

  void cmovptr(Condition cc, Register dst, Address src)  { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
  void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }

  void movoop(Register dst, jobject obj);
  void movoop(Address dst, jobject obj);

  void mov_metadata(Register dst, Metadata* obj);
  void mov_metadata(Address dst, Metadata* obj);

  void movptr(ArrayAddress dst, Register src);
  // can this do an lea?
  void movptr(Register dst, ArrayAddress src);

  void movptr(Register dst, Address src);

  void movptr(Register dst, AddressLiteral src);

  void movptr(Register dst, intptr_t src);
  void movptr(Register dst, Register src);
  void movptr(Address dst, intptr_t src);

  void movptr(Address dst, Register src);

  void movptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) movptr(dst, src.as_constant());
    else                   movptr(dst, src.as_register());
  }
#ifdef _LP64
  // Generally the next two are only used for moving NULL
  // Although there are situations in initializing the mark word where
  // they could be used. They are dangerous.

  // They only exist on LP64 so that int32_t and intptr_t are not the same
  // and we have ambiguous declarations.

  void movptr(Address dst, int32_t imm32);
  void movptr(Register dst, int32_t imm32);
#endif // _LP64

  // to avoid hiding movl
  void mov32(AddressLiteral dst, Register src);
  void mov32(Register dst, AddressLiteral src);

  // to avoid hiding movb
  void movbyte(ArrayAddress dst, int src);

  // Import other mov() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::movdl;
  using Assembler::movq;
  void movdl(XMMRegister dst, AddressLiteral src);
  void movq(XMMRegister dst, AddressLiteral src);

  // Can push value or effective address
  void pushptr(AddressLiteral src);

  void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
  void popptr(Address src)  { LP64_ONLY(popq(src))  NOT_LP64(popl(src)); }

  void pushoop(jobject obj);
  void pushklass(Metadata* obj);
  // sign-extend a 32-bit (l) value to pointer size as needed
  void movl2ptr(Register dst, Address src)  { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
  void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
  // C2 compiled method's prolog code.
  void verified_entry(int framesize, bool stack_bang, bool fp_mode_24b);

  // clear memory of size 'cnt' qwords, starting at 'base'.
  void clear_mem(Register base, Register cnt, Register rtmp);

  // IndexOf strings.
  // Small strings are loaded through stack if they cross page boundary.
  void string_indexof(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      int int_cnt2,  Register result,
                      XMMRegister vec, Register tmp);

  // IndexOf for constant substrings with size >= 8 elements
  // which don't need to be loaded through stack.
  void string_indexofC8(Register str1, Register str2,
                        Register cnt1, Register cnt2,
                        int int_cnt2,  Register result,
                        XMMRegister vec, Register tmp);

    // Smallest code: we don't need to load through stack,
    // check string tail.

  // Compare strings.
  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2, Register result,
                      XMMRegister vec1);

  // Compare char[] arrays.
  void char_arrays_equals(bool is_array_equ, Register ary1, Register ary2,
                          Register limit, Register result, Register chr,
                          XMMRegister vec1, XMMRegister vec2);

  // Fill primitive arrays
  void generate_fill(BasicType t, bool aligned,
                     Register to, Register value, Register count,
                     Register rtmp, XMMRegister xtmp);
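
  // Editorial sketch, tied to this changeset (8005544): with AVX2 available,
  // generate_fill can replicate a 32-bit fill value across a full YMM
  // register and then store 32 bytes per iteration. Register names are
  // hypothetical:
  //
  //   __ movdl(xtmp, value);               // fill value into the low dword
  //   __ pshufd(xtmp, xtmp, 0);            // splat across the low 128 bits
  //   __ vinserti128h(xtmp, xtmp, xtmp);   // duplicate into the high 128 bits
  //   __ vmovdqu(Address(to, 0), xtmp);    // one 32-byte store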
#undef VIRTUAL

};
/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
   SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
   ~SkipIfEqual();
};
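
// Editorial usage sketch; the flag is representative of how this class is
// typically used (e.g. around DTrace probe code):
//
//   {
//     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
//     // code here is jumped over at run-time when DTraceMethodProbes == false
//   }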
#endif // CPU_X86_VM_MACROASSEMBLER_X86_HPP