--- a/src/cpu/x86/vm/assembler_x86.cpp	Tue Oct 23 13:06:37 2012 -0700
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Wed Oct 24 14:33:22 2012 -0700
@@ -1007,6 +1007,67 @@
   emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
 }
 
+void Assembler::aesdec(XMMRegister dst, Address src) {
+  assert(VM_Version::supports_aes(), "");
+  InstructionMark im(this);
+  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xde);
+  emit_operand(dst, src);
+}
+
+void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_aes(), "");
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xde);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::aesdeclast(XMMRegister dst, Address src) {
+  assert(VM_Version::supports_aes(), "");
+  InstructionMark im(this);
+  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xdf);
+  emit_operand(dst, src);
+}
+
+void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_aes(), "");
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xdf);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::aesenc(XMMRegister dst, Address src) {
+  assert(VM_Version::supports_aes(), "");
+  InstructionMark im(this);
+  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xdc);
+  emit_operand(dst, src);
+}
+
+void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_aes(), "");
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xdc);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::aesenclast(XMMRegister dst, Address src) {
+  assert(VM_Version::supports_aes(), "");
+  InstructionMark im(this);
+  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xdd);
+  emit_operand(dst, src);
+}
+
+void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_aes(), "");
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0xdd);
+  emit_byte(0xC0 | encode);
+}
+
+
 void Assembler::andl(Address dst, int32_t imm32) {
   InstructionMark im(this);
   prefix(dst);
@@ -2307,6 +2368,22 @@
   a_byte(p);
 }
 
+void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
+  assert(VM_Version::supports_ssse3(), "");
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0x00);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::pshufb(XMMRegister dst, Address src) {
+  assert(VM_Version::supports_ssse3(), "");
+  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
+  InstructionMark im(this);
+  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
+  emit_byte(0x00);
+  emit_operand(dst, src);
+}
+
 void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
   assert(isByte(mode), "invalid value");
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
@@ -8067,6 +8144,15 @@
   LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
 }
 
+void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src) {
+  if (reachable(src)) {
+    Assembler::movdqu(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    Assembler::movdqu(dst, Address(rscratch1, 0));
+  }
+}
+
 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
   if (reachable(src)) {
     Assembler::movsd(dst, as_Address(src));
@@ -8357,6 +8443,17 @@
   }
 }
 
+void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
+  // Used in sign-bit flipping with aligned address.
+  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
+  if (reachable(src)) {
+    Assembler::pshufb(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    Assembler::pshufb(dst, Address(rscratch1, 0));
+  }
+}
+
 // AVX 3-operands instructions
 
 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {