--- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu May 07 19:37:47 2015 -0700
+++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Apr 17 17:39:19 2015 -0700
@@ -6690,7 +6690,7 @@
     subl(cnt2, stride2);
     jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP);
     // clean upper bits of YMM registers
-    vzeroupper();
+    vpxor(vec1, vec1);
 
     // compare wide vectors tail
     bind(COMPARE_WIDE_TAIL);
@@ -6705,7 +6705,7 @@
     // Identifies the mismatching (higher or lower)16-bytes in the 32-byte vectors.
     bind(VECTOR_NOT_EQUAL);
     // clean upper bits of YMM registers
-    vzeroupper();
+    vpxor(vec1, vec1);
     lea(str1, Address(str1, result, scale));
     lea(str2, Address(str2, result, scale));
     jmp(COMPARE_16_CHARS);
@@ -6964,7 +6964,8 @@
   bind(DONE);
   if (UseAVX >= 2) {
     // clean upper bits of YMM registers
-    vzeroupper();
+    vpxor(vec1, vec1);
+    vpxor(vec2, vec2);
   }
 }
 
@@ -7098,7 +7099,8 @@
 
     BIND(L_check_fill_8_bytes);
     // clean upper bits of YMM registers
-    vzeroupper();
+    movdl(xtmp, value);
+    pshufd(xtmp, xtmp, 0);
   } else {
     // Fill 32-byte chunks
     pshufd(xtmp, xtmp, 0);
@@ -7261,7 +7263,11 @@
   bind(L_copy_16_chars_exit);
   if (UseAVX >= 2) {
     // clean upper bits of YMM registers
-    vzeroupper();
+    vpxor(tmp2Reg, tmp2Reg);
+    vpxor(tmp3Reg, tmp3Reg);
+    vpxor(tmp4Reg, tmp4Reg);
+    movdl(tmp1Reg, tmp5);
+    pshufd(tmp1Reg, tmp1Reg, 0);
   }
   subptr(len, 8);
   jccb(Assembler::greater, L_copy_8_chars_exit);