src/cpu/x86/vm/assembler_x86.cpp

changeset 3390:65149e74c706
parent    3388:127b3692c168
child     3391:069ab3f976d3
  simd_prefix(dst, dst, src, VEX_SIMD_NONE);
  emit_byte(0x57);
  emit_operand(dst, src);
}

// AVX 3-operands non destructive source instructions (encoded with VEX prefix)

void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  vex_prefix(dst, nds, src, VEX_SIMD_F2);
  emit_byte(0x58);
  emit_operand(dst, src);
}

void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_F2);
  emit_byte(0x58);
  emit_byte(0xC0 | encode);
}
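As a rough illustration (my reading of the VEX encoding rules, not anything stated in this changeset), the register form above reduces to a two-byte VEX prefix, the opcode byte, and a register-direct ModRM byte:

// Hypothetical walk-through for  vaddsd xmm1, xmm2, xmm3  (dst, nds, src):
//   C5        two-byte VEX escape
//   EB        R=1, vvvv = ~xmm2 = 1101b, L=0 (scalar), pp = 11b (the F2 prefix)
//   58        the opcode passed to emit_byte(0x58)
//   CB        0xC0 | encode: mod = 11b, reg = xmm1, r/m = xmm3
// i.e. the sequence C5 EB 58 CB, assuming the compact two-byte form is chosen.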
void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  vex_prefix(dst, nds, src, VEX_SIMD_F3);
  emit_byte(0x58);
  emit_operand(dst, src);
}

void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_F3);
  emit_byte(0x58);
  emit_byte(0xC0 | encode);
}

void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  vex_prefix(dst, nds, src, VEX_SIMD_66); // 128-bit vector
  emit_byte(0x54);
  emit_operand(dst, src);
}

void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  vex_prefix(dst, nds, src, VEX_SIMD_NONE); // 128-bit vector
  emit_byte(0x54);
  emit_operand(dst, src);
}
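A sketch of the usual client of these packed-AND forms (my illustration, not code from this changeset): absolute value is computed by ANDing against a sign-clearing mask, and with the three-operand form the input register is left untouched. The mask name and layout below are hypothetical.

// Assumed 16-byte-aligned constant: two copies of 0x7FFFFFFFFFFFFFFF (sign bit clear).
// static const jlong double_signmask[] = { CONST64(0x7FFFFFFFFFFFFFFF),
//                                          CONST64(0x7FFFFFFFFFFFFFFF) };
// xmm0 = |xmm1|, xmm1 unchanged:
// masm->vandpd(xmm0, xmm1, ExternalAddress((address)double_signmask));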
void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  vex_prefix(dst, nds, src, VEX_SIMD_F2);
  emit_byte(0x5E);
  emit_operand(dst, src);
}

void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_F2);
  emit_byte(0x5E);
  emit_byte(0xC0 | encode);
}

void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  vex_prefix(dst, nds, src, VEX_SIMD_F3);
  emit_byte(0x5E);
  emit_operand(dst, src);
}

void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_F3);
  emit_byte(0x5E);
  emit_byte(0xC0 | encode);
}

void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  vex_prefix(dst, nds, src, VEX_SIMD_F2);
  emit_byte(0x59);
  emit_operand(dst, src);
}

void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_F2);
  emit_byte(0x59);
  emit_byte(0xC0 | encode);
}

void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  vex_prefix(dst, nds, src, VEX_SIMD_F3);
  emit_byte(0x59);
  emit_operand(dst, src);
}

void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_F3);
  emit_byte(0x59);
  emit_byte(0xC0 | encode);
}

void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  vex_prefix(dst, nds, src, VEX_SIMD_F2);
  emit_byte(0x5C);
  emit_operand(dst, src);
}

void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_F2);
  emit_byte(0x5C);
  emit_byte(0xC0 | encode);
}

void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  vex_prefix(dst, nds, src, VEX_SIMD_F3);
  emit_byte(0x5C);
  emit_operand(dst, src);
}

void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_F3);
  emit_byte(0x5C);
  emit_byte(0xC0 | encode);
}

void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  vex_prefix(dst, nds, src, VEX_SIMD_66); // 128-bit vector
  emit_byte(0x57);
  emit_operand(dst, src);
}

void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  vex_prefix(dst, nds, src, VEX_SIMD_NONE); // 128-bit vector
  emit_byte(0x57);
  emit_operand(dst, src);
}
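For comparison (a hypothetical illustration, not part of the changeset), the point of the three-operand encoding is that neither source is clobbered, so the extra copy the legacy SSE form needs in order to preserve its input disappears:

//   SSE (destructive destination):
//     movsd(xmm0, xmm1);        // copy first so xmm1 survives
//     addsd(xmm0, xmm2);        // xmm0 = xmm1 + xmm2
//   AVX (non destructive source, one instruction):
//     vaddsd(xmm0, xmm1, xmm2); // xmm0 = xmm1 + xmm2, xmm1 and xmm2 untouched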

#ifndef _LP64
// 32bit only pieces of the assembler

void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT

[...]

    lea(rscratch1, src);
    Assembler::subss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::ucomisd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::ucomisd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::ucomiss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::ucomiss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
  // Used in sign-bit flipping with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::xorpd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::xorpd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
  // Used in sign-bit flipping with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::xorps(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::xorps(dst, Address(rscratch1, 0));
  }
}
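As a worked example of the sign-bit flipping mentioned in the comments above (my sketch, with a hypothetical mask constant): XORing a double with 0x8000000000000000 negates it. The legacy SSE encoding of xorpd faults on an unaligned memory operand, which is what the 16-byte alignment assert guards; when UseAVX > 0 the VEX encoding is used instead and carries no such alignment requirement.

// static const jlong double_signflip[] = { (jlong)0x8000000000000000ULL,
//                                          (jlong)0x8000000000000000ULL };
// masm->xorpd(xmm0, ExternalAddress((address)double_signflip));  // xmm0 = -xmm0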

// AVX 3-operands instructions

void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vaddsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vaddsd(dst, nds, Address(rscratch1, 0));
  }
}
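The two branches in these wrappers handle 64-bit reachability (my paraphrase of the pattern, not new behaviour): reachable(src) asks whether the literal can be addressed rip-relative from the code being emitted; if not, the address is first materialized into the scratch register (rscratch1 is r10 on x86_64) and the operand goes through it. Roughly:

//   reachable:      vaddsd xmm0, xmm1, [rip + disp32]   ; direct memory operand
//   not reachable:  mov    r10, <address of constant>   ; lea(rscratch1, src)
//                   vaddsd xmm0, xmm1, [r10]            ; Address(rscratch1, 0)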

void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vaddss(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vaddss(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vandpd(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vandpd(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vandps(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vandps(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vdivsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vdivsd(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vdivss(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vdivss(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vmulsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vmulsd(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vmulss(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vmulss(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vsubsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vsubsd(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vsubss(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vsubss(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vxorpd(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vxorpd(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vxorps(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vxorps(dst, nds, Address(rscratch1, 0));
  }
}

//////////////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC

void MacroAssembler::g1_write_barrier_pre(Register obj,
                                           Register pre_val,

[...]

#undef IS_A_TEMP

  bind(L_fallthrough);
}

[The old copies of MacroAssembler::ucomisd, ucomiss, xorpd and xorps that previously sat at this later point in the file are removed by this changeset; identical definitions now appear earlier, grouped with the other AddressLiteral helpers above.]

void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
  if (VM_Version::supports_cmov()) {
    cmovl(cc, dst, src);
  } else {
