--- a/src/cpu/x86/vm/assembler_x86.cpp	Mon Jul 16 11:14:41 2012 -0700
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Mon Jul 16 17:10:22 2012 -0700
@@ -2573,6 +2573,13 @@
   emit_byte(0xC0 | encode);
 }
 
+void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
+  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66);
+  emit_byte(0x6C);
+  emit_byte(0xC0 | encode);
+}
+
 void Assembler::push(int32_t imm32) {
   // in 64bits we push 64bits onto the stack but only
   // take a 32bit immediate
@@ -3178,6 +3185,13 @@
   emit_byte(0xC0 | encode);
 }
 
+void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
+  assert(VM_Version::supports_avx2() || (!vector256) && VM_Version::supports_avx(), "");
+  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256);
+  emit_byte(0xEF);
+  emit_byte(0xC0 | encode);
+}
+
 void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
   assert(VM_Version::supports_avx(), "");
   bool vector256 = true;
@@ -3189,6 +3203,17 @@
   emit_byte(0x01);
 }
 
+void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
+  assert(VM_Version::supports_avx2(), "");
+  bool vector256 = true;
+  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
+  emit_byte(0x38);
+  emit_byte(0xC0 | encode);
+  // 0x00 - insert into lower 128 bits
+  // 0x01 - insert into upper 128 bits
+  emit_byte(0x01);
+}
+
 void Assembler::vzeroupper() {
   assert(VM_Version::supports_avx(), "");
   (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
@@ -7480,6 +7505,24 @@
   movb(as_Address(dst), src);
 }
 
+void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) {
+  if (reachable(src)) {
+    movdl(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    movdl(dst, Address(rscratch1, 0));
+  }
+}
+
+void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) {
+  if (reachable(src)) {
+    movq(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    movq(dst, Address(rscratch1, 0));
+  }
+}
+
 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
   if (reachable(src)) {
     if (UseXmmLoadAndClearUpper) {