src/cpu/x86/vm/macroAssembler_x86.hpp

changeset 6429:606acabe7b5c
parent    6356:4d4ea046d32a
child     6723:0bf37f737702
--- a/src/cpu/x86/vm/macroAssembler_x86.hpp	Sat Mar 22 00:26:48 2014 +0400
+++ b/src/cpu/x86/vm/macroAssembler_x86.hpp	Thu Mar 20 17:49:27 2014 -0700
@@ -27,6 +27,7 @@
 
 #include "asm/assembler.hpp"
 #include "utilities/macros.hpp"
+#include "runtime/rtmLocking.hpp"
 
 
 // MacroAssembler extends Assembler by frequently used macros.
@@ -111,7 +112,8 @@
         op == 0xE9 /* jmp */ ||
         op == 0xEB /* short jmp */ ||
         (op & 0xF0) == 0x70 /* short jcc */ ||
-        op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */,
+        op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
+        op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
         "Invalid opcode at patch point");
 
     if (op == 0xEB || (op & 0xF0) == 0x70) {
@@ -121,7 +123,7 @@
       guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
       *disp = imm8;
     } else {
-      int* disp = (int*) &branch[(op == 0x0F)? 2: 1];
+      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
      int imm32 = target - (address) &disp[1];
       *disp = imm32;
     }
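
The two hunks above teach the branch-patching path to recognize `xbegin` (encoded `C7 F8 rel32`): the `guarantee` now accepts the opcode pair, and the 32-bit displacement sits two bytes in, exactly as for the two-byte `0F 8x` jcc encodings. A standalone sketch of the arithmetic, with a hypothetical helper name (`patch_xbegin` is not from this changeset):

    #include <cassert>
    typedef unsigned char* address;   // stand-in for HotSpot's typedef

    // Rewrite the rel32 field of an xbegin (C7 F8 rel32) so that an
    // abort transfers control to `target`.
    static void patch_xbegin(address branch, address target) {
      assert(branch[0] == 0xC7 && branch[1] == 0xF8);
      int* disp = (int*) &branch[2];            // rel32 follows C7 F8
      int imm32 = target - (address) &disp[1];  // relative to next instruction
      *disp = imm32;
    }
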
@@ -161,7 +163,6 @@
   void incrementq(Register reg, int value = 1);
   void incrementq(Address dst, int value = 1);
 
-
   // Support optimal SSE move instructions.
   void movflt(XMMRegister dst, XMMRegister src) {
     if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
@@ -187,6 +188,8 @@
   void incrementl(AddressLiteral dst);
   void incrementl(ArrayAddress dst);
 
+  void incrementq(AddressLiteral dst);
+
   // Alignment
   void align(int modulus);
 
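
The new `incrementq(AddressLiteral)` rounds out the 64-bit increment family so that full-word counters (used by the RTM profiling code later in this change) can be bumped through a literal address. A plausible body, mirroring the existing `incrementl(AddressLiteral)` pattern (an assumption; the real definition lives in macroAssembler_x86.cpp):

    void MacroAssembler::incrementq(AddressLiteral dst) {
      if (reachable(dst)) {
        incrementq(as_Address(dst));       // literal is in RIP-relative range
      } else {
        lea(rscratch1, dst);               // otherwise go through scratch
        incrementq(Address(rscratch1, 0));
      }
    }
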
@@ -654,8 +657,36 @@
 #ifdef COMPILER2
   // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
   // See full description in macroAssembler_x86.cpp.
-  void fast_lock(Register obj, Register box, Register tmp, Register scr, BiasedLockingCounters* counters);
-  void fast_unlock(Register obj, Register box, Register tmp);
+  void fast_lock(Register obj, Register box, Register tmp,
+                 Register scr, Register cx1, Register cx2,
+                 BiasedLockingCounters* counters,
+                 RTMLockingCounters* rtm_counters,
+                 RTMLockingCounters* stack_rtm_counters,
+                 Metadata* method_data,
+                 bool use_rtm, bool profile_rtm);
+  void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
+#if INCLUDE_RTM_OPT
+  void rtm_counters_update(Register abort_status, Register rtm_counters);
+  void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
+  void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
+                                   RTMLockingCounters* rtm_counters,
+                                   Metadata* method_data);
+  void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
+                     RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
+  void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
+  void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
+  void rtm_stack_locking(Register obj, Register tmp, Register scr,
+                         Register retry_on_abort_count,
+                         RTMLockingCounters* stack_rtm_counters,
+                         Metadata* method_data, bool profile_rtm,
+                         Label& DONE_LABEL, Label& IsInflated);
+  void rtm_inflated_locking(Register obj, Register box, Register tmp,
+                            Register scr, Register retry_on_busy_count,
+                            Register retry_on_abort_count,
+                            RTMLockingCounters* rtm_counters,
+                            Metadata* method_data, bool profile_rtm,
+                            Label& DONE_LABEL);
+#endif
 #endif
 
   Condition negate_condition(Condition cond);
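
`fast_lock` grows from four registers and one counter block to a signature that threads through two extra temps (`cx1`, `cx2`), both RTM counter blocks, the method's `Metadata*` for per-method profiling, and the `use_rtm`/`profile_rtm` flags; `fast_unlock` only needs to know whether a transaction may have to be ended with `xend` instead of a compare-and-swap release. Of the new helpers, `rtm_retry_lock_on_abort` is the simplest to picture: retry while the abort status says a retry can succeed and the retry budget is not exhausted. A hedged sketch of what it might emit (the status bit tested and the exact sequence are assumptions; rtmLocking.hpp defines the real abort-status layout):

    void MacroAssembler::rtm_retry_lock_on_abort(Register retry_count_Reg,
                                                 Register abort_status_Reg,
                                                 Label& retryLabel) {
      Label doneRetry;
      // Assumption: a set "may retry" bit in the xabort status means the
      // conflict was transient and the transaction is worth retrying.
      testl(abort_status_Reg, 0x2);
      jccb(Assembler::zero, doneRetry);
      testl(retry_count_Reg, retry_count_Reg);   // retry budget exhausted?
      jccb(Assembler::zero, doneRetry);
      pause();                                   // back off briefly
      decrementl(retry_count_Reg);
      jmp(retryLabel);
      bind(doneRetry);
    }
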
@@ -721,6 +752,7 @@
 
 
   void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
+  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }
 
 
   void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
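
The three-operand `imulptr` wraps the register-times-immediate form of `imul`, handy for scaling an index by a stride that is not a power of two and so cannot ride along in an addressing mode. A hypothetical use (the register names are placeholders, not from this changeset):

    __ imulptr(offset_Reg, index_Reg, 24);   // offset = index * 24, either word size
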
@@ -762,7 +794,14 @@
   // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
   void cond_inc32(Condition cond, AddressLiteral counter_addr);
   // Unconditional atomic increment.
-  void atomic_incl(AddressLiteral counter_addr);
+  void atomic_incl(Address counter_addr);
+  void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
+#ifdef _LP64
+  void atomic_incq(Address counter_addr);
+  void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
+#endif
+  void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
+  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
 
   void lea(Register dst, AddressLiteral adr);
   void lea(Address dst, AddressLiteral adr);
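
The single `atomic_incl(AddressLiteral)` becomes a small family: plain `Address` overloads, 64-bit `atomic_incq` variants, and pointer-sized `atomic_incptr` wrappers built from the usual `LP64_ONLY`/`NOT_LP64` macros. The `AddressLiteral` forms take a scratch register for the 64-bit case where the literal sits outside RIP-relative range. The `Address` forms are presumably just a lock-prefixed increment; a minimal sketch (assumed; the real bodies are in macroAssembler_x86.cpp):

    void MacroAssembler::atomic_incl(Address counter_addr) {
      lock();                     // make the increment atomic on MPs
      incrementl(counter_addr);
    }

    #ifdef _LP64
    void MacroAssembler::atomic_incq(Address counter_addr) {
      lock();
      incrementq(counter_addr);
    }
    #endif
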
@@ -1074,7 +1113,11 @@
 
   void movptr(Register dst, Address src);
 
-  void movptr(Register dst, AddressLiteral src);
+#ifdef _LP64
+  void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1);
+#else
+  void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit
+#endif
 
   void movptr(Register dst, intptr_t src);
   void movptr(Register dst, Register src);
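
Finally, `movptr(Register, AddressLiteral)` gains a scratch register on 64-bit for the same reachability concern: when the literal cannot be addressed RIP-relatively, its address must be materialized first. A hedged sketch of the LP64 body (assumed):

    #ifdef _LP64
    void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
      if (reachable(src)) {
        movq(dst, as_Address(src));      // in RIP-relative range
      } else {
        lea(scratch, src);               // materialize the 64-bit address
        movq(dst, Address(scratch, 0));
      }
    }
    #endif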
