src/cpu/x86/vm/assembler_x86.cpp

changeset 3882:8c92982cbbc4
parent    3844:e7715c222897
child     3929:2c368ea3e844
--- a/src/cpu/x86/vm/assembler_x86.cpp	Thu Jun 14 14:59:52 2012 -0700
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Fri Jun 15 01:25:19 2012 -0700
@@ -1637,6 +1637,13 @@
   emit_byte(0xC0 | encode);
 }
 
+void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
+  NOT_LP64(assert(VM_Version::supports_sse(), ""));
+  int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE);
+  emit_byte(0x16);
+  emit_byte(0xC0 | encode);
+}
+
 void Assembler::movb(Register dst, Address src) {
   NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
   InstructionMark im(this);
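The new movlhps emitter encodes MOVLHPS (NP 0F 16 /r), which copies the low quadword of src into the high quadword of dst; src is passed as both the nds and rm arguments, so an eventual VEX encoding reads as vmovlhps dst, src, src. A worked encoding example, assuming the legacy non-VEX path where simd_prefix_and_encode emits the 0x0F escape; the call itself is hypothetical and not part of the changeset:

    movlhps(xmm1, xmm2);
    // ModRM = 0xC0 | (dst_enc << 3) | src_enc = 0xC0 | (1 << 3) | 2 = 0xCA
    // emitted bytes: 0F 16 CA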
@@ -1686,6 +1693,14 @@
   emit_operand(dst, src);
 }
 
+void Assembler::movdl(Address dst, XMMRegister src) {
+  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+  InstructionMark im(this);
+  simd_prefix(dst, src, VEX_SIMD_66);
+  emit_byte(0x7E);
+  emit_operand(src, dst);
+}
+
 void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_66);
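The store form of MOVD (66 0F 7E /r, i.e. MOVD r/m32, xmm) keeps the XMM register in the ModRM reg field, which is why the emit flips to emit_operand(src, dst) even though the C++ signature reads (Address dst, XMMRegister src). A worked example, assuming no REX prefix is needed (all register encodings below 8); the call is hypothetical:

    movdl(Address(rax, 0), xmm0);
    // emitted bytes: 66 0F 7E 00   (ModRM 0x00: reg = xmm0, rm = [rax])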
@@ -1716,6 +1731,35 @@
   emit_operand(src, dst);
 }
 
+// Move Unaligned 256bit Vector
+void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
+  assert(UseAVX, "");
+  bool vector256 = true;
+  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, vector256);
+  emit_byte(0x6F);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::vmovdqu(XMMRegister dst, Address src) {
+  assert(UseAVX, "");
+  InstructionMark im(this);
+  bool vector256 = true;
+  vex_prefix(dst, xnoreg, src, VEX_SIMD_F3, vector256);
+  emit_byte(0x6F);
+  emit_operand(dst, src);
+}
+
+void Assembler::vmovdqu(Address dst, XMMRegister src) {
+  assert(UseAVX, "");
+  InstructionMark im(this);
+  bool vector256 = true;
+  // swap src<->dst for encoding
+  assert(src != xnoreg, "sanity");
+  vex_prefix(src, xnoreg, dst, VEX_SIMD_F3, vector256);
+  emit_byte(0x7F);
+  emit_operand(src, dst);
+}
+
 // Uses zero extension on 64bit
 
 void Assembler::movl(Register dst, int32_t imm32) {
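All three vmovdqu overloads hardwire vector256 = true, so they encode the 256-bit VEX.256.F3.0F forms: opcode 0x6F moves into the register held in ModRM.reg, and 0x7F stores from it. That is also the point of the "swap src<->dst for encoding" comment in the store form: vex_prefix always places its first argument in ModRM.reg, so the XMM source must be passed in the dst position. A worked example, assuming the two-byte VEX form with no REX-extended registers; the call is hypothetical:

    vmovdqu(xmm0, Address(rsi, 0));
    // emitted bytes: C5 FE 6F 06   (VEX byte 0xFE: R=1, vvvv=1111, L=1, pp=F3)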
@@ -3112,6 +3156,13 @@
   emit_operand(dst, src);
 }
 
+void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
+  assert(VM_Version::supports_avx(), "");
+  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256);
+  emit_byte(0x57);
+  emit_byte(0xC0 | encode);
+}
+
 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
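vxorpd is the three-operand AVX form (VEX.66.0F 57 /r): it computes dst = nds XOR src without destroying either source, and the vector256 flag selects 128-bit or 256-bit width. The usual zero-a-register idiom xors a register with itself. A worked 128-bit example; the call is hypothetical:

    vxorpd(xmm0, xmm0, xmm0, /* vector256 */ false);
    // emitted bytes: C5 F9 57 C0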
@@ -3120,6 +3171,30 @@
   emit_operand(dst, src);
 }
 
+void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
+  assert(VM_Version::supports_avx(), "");
+  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_NONE, vector256);
+  emit_byte(0x57);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
+  assert(VM_Version::supports_avx(), "");
+  bool vector256 = true;
+  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
+  emit_byte(0x18);
+  emit_byte(0xC0 | encode);
+  // 0x00 - insert into lower 128 bits
+  // 0x01 - insert into upper 128 bits
+  emit_byte(0x01);
+}
+
+void Assembler::vzeroupper() {
+  assert(VM_Version::supports_avx(), "");
+  (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
+  emit_byte(0x77);
+}
+
 
 #ifndef _LP64
 // 32bit only pieces of the assembler
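vinsertf128h hardwires the immediate to 0x01, so it always inserts the 128-bit src into the upper lane of the 256-bit dst (per the in-code comment, 0x00 would select the lower lane). vzeroupper (C5 F8 77) zeroes the upper 128 bits of every YMM register, which avoids the AVX-to-SSE transition penalty before calling legacy SSE code. A hypothetical instruction sequence, not from this changeset, sketching how these pieces might combine:

    movdqu(xmm0, Address(rsi, 0));    // low 128 bits
    movdqu(xmm1, Address(rsi, 16));   // bits destined for the upper lane
    vinsertf128h(xmm0, xmm0, xmm1);   // xmm1 -> upper 128 bits of ymm0
    vmovdqu(Address(rdi, 0), xmm0);   // unaligned 256-bit store
    vzeroupper();                     // clear upper YMM state before SSE-only code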
