Wed, 23 Jan 2013 13:02:39 -0500
8005915: Unify SERIALGC and INCLUDE_ALTERNATE_GCS
Summary: Rename INCLUDE_ALTERNATE_GCS to INCLUDE_ALL_GCS and replace SERIALGC with INCLUDE_ALL_GCS.
Reviewed-by: coleenp, stefank
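The shape of the change, as an illustrative before/after sketch (not an excerpt from any particular file in this changeset): negative SERIALGC guards become positive INCLUDE_ALL_GCS guards, with the macro coming from utilities/macros.hpp. The header listed below (macroAssembler_x86.hpp, going by its include guard) shows the new guard in use around its G1 write-barrier declarations.

    // Before (illustrative):
    #ifndef SERIALGC
      // code needed only by the non-serial collectors (G1, CMS, ParallelGC)
    #endif // SERIALGC

    // After (illustrative):
    #if INCLUDE_ALL_GCS
      // code needed only by the non-serial collectors (G1, CMS, ParallelGC)
    #endif // INCLUDE_ALL_GCS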
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef CPU_X86_VM_MACROASSEMBLER_X86_HPP
#define CPU_X86_VM_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "utilities/macros.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1; // as_Address()

 protected:

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr);
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
#ifdef CC_INTERP
  // c++ interpreter never wants to use interp_masm version of call_VM
  #define VIRTUAL
#else
  #define VIRTUAL virtual
#endif
  VIRTUAL void call_VM_leaf_base(
    address entry_point,          // the entry point
    int number_of_arguments       // the number of arguments to pop after the call
  );

  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  VIRTUAL void call_VM_base(      // returns the register containing the thread upon return
    Register oop_result,          // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,         // the thread if computed before; use noreg otherwise
    Register last_java_sp,        // to set up last_Java_frame in stubs; use noreg otherwise
    address entry_point,          // the entry point
    int number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool check_exceptions         // whether to check for pending exceptions after return
  );
  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);
 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
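  // Illustrative usage sketch (not part of this header): callers typically pass the known
  // field offset so that the later memory access itself serves as the check. '__' stands for
  // the usual '_masm->' shorthand and 'receiver' is just a placeholder register holding an oop.
  //   __ null_check(receiver, oopDesc::klass_offset_in_bytes()); // implicit check via the later access
  //   __ null_check(receiver);                                   // offset unknown: an explicit access is emitted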
  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = target - (address) &disp[1];
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
      *disp = imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F)? 2: 1];
      int imm32 = target - (address) &disp[1];
      *disp = imm32;
    }
  }
  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);
  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void incrementl(AddressLiteral dst);
  void incrementl(ArrayAddress dst);
  // Alignment
  void align(int modulus);

  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);
  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.

  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);
  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
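  // Illustrative usage sketch (not part of this header; the runtime entries named below are
  // hypothetical): a non-leaf call takes the oop-result register, the entry point and up to
  // three register arguments, while a leaf call skips the thread/exception bookkeeping.
  // '__' is the usual '_masm->' shorthand.
  //   __ call_VM(rax,                                              // oop result ends up in rax
  //              CAST_FROM_FN_PTR(address, SomeRuntime::resolve),  // hypothetical entry point
  //              rbx);                                             // single register argument
  //   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SomeRuntime::trace), rdi, rsi);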
  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  void reset_last_Java_frame(Register thread, bool clear_fp, bool clear_pc);

  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp, bool clear_pc);

  // Stores
  void store_check(Register obj);              // store check for obj - register is destroyed afterwards
  void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed)
#if INCLUDE_ALL_GCS

  void g1_write_barrier_pre(Register obj,
                            Register pre_val,
                            Register thread,
                            Register tmp,
                            bool tosca_live,
                            bool expand_call);

  void g1_write_barrier_post(Register store_addr,
                             Register new_val,
                             Register thread,
                             Register tmp,
                             Register tmp2);

#endif // INCLUDE_ALL_GCS
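  // Ordering sketch (illustrative, not this class's code): when G1 is in use, an oop store is
  // bracketed by the two barriers declared above; 'obj', 'offset' and 'new_val' are placeholders.
  //   g1_write_barrier_pre(...);              // log the previous value (SATB) before the store
  //   movptr(Address(obj, offset), new_val);  // the actual oop store
  //   g1_write_barrier_post(...);             // dirty the card for the stored-into location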
  // split store_check(Register obj) to enhance instruction interleaving
  void store_check_part_1(Register obj);
  void store_check_part_2(Register obj);
  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  // oop manipulations
  void load_klass(Register dst, Register src);
  void store_klass(Register dst, Register src);

  void load_heap_oop(Register dst, Address src);
  void load_heap_oop_not_null(Register dst, Address src);
  void store_heap_oop(Address dst, Register src);
  void cmp_heap_oop(Register src1, Address src2, Register tmp = noreg);

  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void load_prototype_header(Register dst, Register src);
#ifdef _LP64
  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like NULL) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r);
  void decode_klass_not_null(Register r);
  void encode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64
  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                              // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);

  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);
  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // Inlined sin/cos generator for Java; must not use CPU instruction
  // directly on Intel as it does not have high enough precision
  // outside of the range [-pi/4, pi/4]. The extra argument indicates the
  // number of FPU stack slots in use; all but the topmost will
  // require saving if a slow case is necessary. Assumes argument is
  // on FP TOS; result is on FP TOS. No cpu registers are changed by
  // this code.
  void trigfunc(char trig, int num_fpu_regs_in_use = 1);
  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Pop ST (ffree & fincstp combined)
  void fpop();

  // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
  void push_fTOS();

  // pops double TOS element from CPU stack and pushes on FPU stack
  void pop_fTOS();

  void empty_FPU_stack();

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  // Round reg up to a multiple of modulus (which must be a power of two)
  void round_to(Register reg, int modulus);

  // Callee saved registers handling
  void push_callee_saved_registers();
  void pop_callee_saved_registers();
  // allocation
  void eden_allocate(
    Register obj,                // result: pointer to object after successful allocation
    Register var_size_in_bytes,  // object size in bytes if unknown at compile time; invalid otherwise
    int con_size_in_bytes,       // object size in bytes if known at compile time
    Register t1,                 // temp register
    Label& slow_case             // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register obj,                // result: pointer to object after successful allocation
    Register var_size_in_bytes,  // object size in bytes if unknown at compile time; invalid otherwise
    int con_size_in_bytes,       // object size in bytes if known at compile time
    Register t1,                 // temp register
    Register t2,                 // temp register
    Label& slow_case             // continuation point if fast allocation fails
  );
  Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address
  void incr_allocated_bytes(Register thread,
                            Register var_size_in_bytes, int con_size_in_bytes,
                            Register t1 = noreg);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);
  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);
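  // Usage sketch for the combined version (illustrative only; the registers and label are
  // placeholders): per the comments above it branches to L_ok on success and falls through
  // on failure, internally wiring the fast and slow paths together.
  //   Label L_ok;
  //   check_klass_subtype(rsi, rax, rcx, L_ok);  // rsi = sub_klass, rax = super_klass, rcx = temp
  //   // ... failure handling goes here (fall-through) ...
  //   bind(L_ok);
  //   // ... success continues here ...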
  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  //----
  void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0

  // Debugging

  // only if +VerifyOops
  // TODO: Make these macros with file and line like sparc version!
  void verify_oop(Register reg, const char* s = "broken oop");
  void verify_oop_addr(Address addr, const char* s = "broken oop addr");

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested() { stop("untested"); }

  void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); }

  void should_not_reach_here() { stop("should not reach here"); }

  void print_CPU_state();
  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);
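  // Illustrative pattern (a sketch, not this class's code): a compiled-method prolog typically
  // bangs one page at a time until the frame size plus the shadow zone is covered; 'page_size'
  // and 'n_pages' are placeholder names here.
  //   for (int i = 1; i <= n_pages; i++) {
  //     bang_stack_with_offset(i * page_size);  // touches rsp - i * page_size
  //   }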
  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);

  // Support for serializing memory accesses between threads
  void serialize_memory(Register thread, Register tmp);

  void verify_tlab();

  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg must be rax, and is killed.
  // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
  // be killed; if not supplied, push/pop will be used internally to
  // allocate a temporary (inefficient, avoid if possible).
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  // Returns offset of first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  int biased_locking_enter(Register lock_reg, Register obj_reg,
                           Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Label& done, Label* slow_case = NULL,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
  Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

  // Arithmetics

  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, (int) src.as_constant());
    else                   addptr(dst, src.as_register());
  }
  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

  void cmp8(AddressLiteral src1, int imm);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
  void cmpoop(Register dst, jobject obj);
#endif // _LP64

  // NOTE src2 must be the lval. This is NOT a mem-mem compare
  void cmpptr(Address src1, AddressLiteral src2);

  void cmpptr(Register src1, AddressLiteral src2);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src);
  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr);

  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }

  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }
  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr);
  // Unconditional atomic increment.
  void atomic_incl(AddressLiteral counter_addr);

  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr);
  void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Register dst, AddressLiteral src);

  void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }

  void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
  void call(AddressLiteral entry);

  // Emit the CompiledIC call idiom
  void ic_call(address entry);

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst);
  void jump_cc(Condition cc, AddressLiteral dst);

  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump will transfer to the address
  // contained in the location described by entry (not the address of entry)
  void jump(ArrayAddress entry);
  // Floating

  void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src);

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src);

  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src);

  void fadd_s(Address src) { Assembler::fadd_s(src); }
  void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }

  void fldcw(Address src) { Assembler::fldcw(src); }
  void fldcw(AddressLiteral src);

  void fld_s(int index) { Assembler::fld_s(index); }
  void fld_s(Address src) { Assembler::fld_s(src); }
  void fld_s(AddressLiteral src);

  void fld_d(Address src) { Assembler::fld_d(src); }
  void fld_d(AddressLiteral src);

  void fld_x(Address src) { Assembler::fld_x(src); }
  void fld_x(AddressLiteral src);

  void fmul_s(Address src) { Assembler::fmul_s(src); }
  void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }

  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src);
  // Compute pow(x,y) and exp(x) with x86 instructions. These do not cover
  // all corner cases and may result in NaN, requiring a fallback to a
  // runtime call.
  void fast_pow();
  void fast_exp();
  void increase_precision();
  void restore_precision();

  // computes exp(x). Fallback to runtime call included.
  void exp_with_fallback(int num_fpu_regs_in_use) { pow_or_exp(true, num_fpu_regs_in_use); }
  // computes pow(x,y). Fallback to runtime call included.
  void pow_with_fallback(int num_fpu_regs_in_use) { pow_or_exp(false, num_fpu_regs_in_use); }

 private:

  // call runtime as a fallback for trig functions and pow/exp.
  void fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use);

  // computes 2^(Ylog2X); Ylog2X in ST(0)
  void pow_exp_core_encoding();

  // computes pow(x,y) or exp(x). Fallback to runtime call included.
  void pow_or_exp(bool is_exp, int num_fpu_regs_in_use);
  // these are private because users should be doing movflt/movdbl

  void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src);

  void movlpd(XMMRegister dst, Address src) { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src);
 public:

  void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src);

  void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src);

  void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src);

  void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src) { Assembler::movdqu(dst, src); }
  void movdqu(XMMRegister dst, Address src) { Assembler::movdqu(dst, src); }
  void movdqu(XMMRegister dst, XMMRegister src) { Assembler::movdqu(dst, src); }
  void movdqu(XMMRegister dst, AddressLiteral src);

  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, AddressLiteral src);

  void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, AddressLiteral src);

  void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, AddressLiteral src);

  void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, AddressLiteral src);

  void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, AddressLiteral src);

  void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, AddressLiteral src);

  void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, AddressLiteral src);

  void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, AddressLiteral src);

  void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src) { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
  void xorps(XMMRegister dst, XMMRegister src) { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, AddressLiteral src);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, AddressLiteral src);
  // AVX 3-operands instructions

  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandpd(dst, nds, src, vector256); }
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vandpd(dst, nds, src, vector256); }
  void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);

  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandps(dst, nds, src, vector256); }
  void vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vandps(dst, nds, src, vector256); }
  void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);

  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  // AVX Vector instructions

  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); }
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); }
  void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);

  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); }
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); }
  void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);

  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
    if (UseAVX > 1 || !vector256) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector256);
    else
      Assembler::vxorpd(dst, nds, src, vector256);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
    if (UseAVX > 1 || !vector256) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector256);
    else
      Assembler::vxorpd(dst, nds, src, vector256);
  }

  // Simple version for AVX2 256bit vectors
  void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
  void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }
  // Move packed integer values from the low 128 bits to the high 128 bits of a 256 bit vector.
  void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    if (UseAVX > 1) // vinserti128h is available only in AVX2
      Assembler::vinserti128h(dst, nds, src);
    else
      Assembler::vinsertf128h(dst, nds, src);
  }
  // Data

  void cmov32( Condition cc, Register dst, Address src);
  void cmov32( Condition cc, Register dst, Register src);

  void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }

  void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
  void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }

  void movoop(Register dst, jobject obj);
  void movoop(Address dst, jobject obj);

  void mov_metadata(Register dst, Metadata* obj);
  void mov_metadata(Address dst, Metadata* obj);

  void movptr(ArrayAddress dst, Register src);
  // can this do an lea?
  void movptr(Register dst, ArrayAddress src);

  void movptr(Register dst, Address src);

  void movptr(Register dst, AddressLiteral src);

  void movptr(Register dst, intptr_t src);
  void movptr(Register dst, Register src);
  void movptr(Address dst, intptr_t src);

  void movptr(Address dst, Register src);

  void movptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) movptr(dst, src.as_constant());
    else                   movptr(dst, src.as_register());
  }

#ifdef _LP64
  // Generally the next two are only used for moving NULL,
  // although there are situations in initializing the mark word where
  // they could be used. They are dangerous.

  // They only exist on LP64 so that int32_t and intptr_t are not the same
  // and we have ambiguous declarations.

  void movptr(Address dst, int32_t imm32);
  void movptr(Register dst, int32_t imm32);
#endif // _LP64

  // to avoid hiding movl
  void mov32(AddressLiteral dst, Register src);
  void mov32(Register dst, AddressLiteral src);

  // to avoid hiding movb
  void movbyte(ArrayAddress dst, int src);

  // Import other mov() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::movdl;
  using Assembler::movq;
  void movdl(XMMRegister dst, AddressLiteral src);
  void movq(XMMRegister dst, AddressLiteral src);

  // Can push value or effective address
  void pushptr(AddressLiteral src);

  void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
  void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }

  void pushoop(jobject obj);
  void pushklass(Metadata* obj);

  // sign extend a 32-bit (l) value to a ptr sized element as needed
  void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
  void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
  // C2 compiled method's prolog code.
  void verified_entry(int framesize, bool stack_bang, bool fp_mode_24b);

  // clear memory of size 'cnt' qwords, starting at 'base'.
  void clear_mem(Register base, Register cnt, Register rtmp);

  // IndexOf strings.
  // Small strings are loaded through the stack if they cross a page boundary.
  void string_indexof(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      int int_cnt2, Register result,
                      XMMRegister vec, Register tmp);

  // IndexOf for constant substrings with size >= 8 elements
  // which don't need to be loaded through stack.
  void string_indexofC8(Register str1, Register str2,
                        Register cnt1, Register cnt2,
                        int int_cnt2, Register result,
                        XMMRegister vec, Register tmp);

    // Smallest code: we don't need to load through stack,
    // check string tail.

  // Compare strings.
  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2, Register result,
                      XMMRegister vec1);

  // Compare char[] arrays.
  void char_arrays_equals(bool is_array_equ, Register ary1, Register ary2,
                          Register limit, Register result, Register chr,
                          XMMRegister vec1, XMMRegister vec2);

  // Fill primitive arrays
  void generate_fill(BasicType t, bool aligned,
                     Register to, Register value, Register count,
                     Register rtmp, XMMRegister xtmp);
#undef VIRTUAL

};

/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
  ~SkipIfEqual();
};
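// Usage sketch (illustrative; the flag shown is only an example of a VM flag whose address
// can be taken): code emitted inside the scope is skipped at run-time whenever the flag's
// value equals the one passed to the constructor.
//   {
//     SkipIfEqual skip(masm, &DTraceAllocProbes, false);
//     // probe code emitted here is jumped over when DTraceAllocProbes is false
//   }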

#endif // CPU_X86_VM_MACROASSEMBLER_X86_HPP