src/cpu/x86/vm/assembler_x86.cpp

author:      twisti
date:        Thu, 06 Dec 2012 09:57:41 -0800
changeset:   4323:f0c2369fda5a
parent:      4318:cd3d6a6b95d9
child:       4360:c4bd2eccea46
permissions: -rw-r--r--

8003250: SPARC: move MacroAssembler into separate file
Reviewed-by: jrose, kvn

duke@435 1 /*
iveresov@3399 2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
twisti@4318 26 #include "asm/assembler.hpp"
twisti@4318 27 #include "asm/assembler.inline.hpp"
stefank@2314 28 #include "gc_interface/collectedHeap.inline.hpp"
stefank@2314 29 #include "interpreter/interpreter.hpp"
stefank@2314 30 #include "memory/cardTableModRefBS.hpp"
stefank@2314 31 #include "memory/resourceArea.hpp"
stefank@2314 32 #include "prims/methodHandles.hpp"
stefank@2314 33 #include "runtime/biasedLocking.hpp"
stefank@2314 34 #include "runtime/interfaceSupport.hpp"
stefank@2314 35 #include "runtime/objectMonitor.hpp"
stefank@2314 36 #include "runtime/os.hpp"
stefank@2314 37 #include "runtime/sharedRuntime.hpp"
stefank@2314 38 #include "runtime/stubRoutines.hpp"
stefank@2314 39 #ifndef SERIALGC
stefank@2314 40 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 41 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
stefank@2314 42 #include "gc_implementation/g1/heapRegion.hpp"
stefank@2314 43 #endif
duke@435 44
twisti@3969 45 #ifdef PRODUCT
twisti@3969 46 #define BLOCK_COMMENT(str) /* nothing */
twisti@3969 47 #define STOP(error) stop(error)
twisti@3969 48 #else
twisti@3969 49 #define BLOCK_COMMENT(str) block_comment(str)
twisti@3969 50 #define STOP(error) block_comment(error); stop(error)
twisti@3969 51 #endif
twisti@3969 52
twisti@3969 53 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
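
// Illustrative usage note: given a local Label L_done, BIND(L_done) binds the
// label at the current pc and, in debug builds, also records the string
// "L_done:" via block_comment(); in PRODUCT builds BLOCK_COMMENT expands to
// nothing, so only the bind() remains.
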
duke@435 54 // Implementation of AddressLiteral
duke@435 55
duke@435 56 AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
duke@435 57 _is_lval = false;
duke@435 58 _target = target;
duke@435 59 switch (rtype) {
duke@435 60 case relocInfo::oop_type:
coleenp@4037 61 case relocInfo::metadata_type:
duke@435 62 // Oops are a special case. Normally they would be their own section
duke@435 63 // but in cases like icBuffer they are literals in the code stream that
duke@435 64 // we don't have a section for. We use none so that we get a literal address
duke@435 65 // which is always patchable.
duke@435 66 break;
duke@435 67 case relocInfo::external_word_type:
duke@435 68 _rspec = external_word_Relocation::spec(target);
duke@435 69 break;
duke@435 70 case relocInfo::internal_word_type:
duke@435 71 _rspec = internal_word_Relocation::spec(target);
duke@435 72 break;
duke@435 73 case relocInfo::opt_virtual_call_type:
duke@435 74 _rspec = opt_virtual_call_Relocation::spec();
duke@435 75 break;
duke@435 76 case relocInfo::static_call_type:
duke@435 77 _rspec = static_call_Relocation::spec();
duke@435 78 break;
duke@435 79 case relocInfo::runtime_call_type:
duke@435 80 _rspec = runtime_call_Relocation::spec();
duke@435 81 break;
duke@435 82 case relocInfo::poll_type:
duke@435 83 case relocInfo::poll_return_type:
duke@435 84 _rspec = Relocation::spec_simple(rtype);
duke@435 85 break;
duke@435 86 case relocInfo::none:
duke@435 87 break;
duke@435 88 default:
duke@435 89 ShouldNotReachHere();
duke@435 90 break;
duke@435 91 }
duke@435 92 }
duke@435 93
duke@435 94 // Implementation of Address
duke@435 95
never@739 96 #ifdef _LP64
never@739 97
duke@435 98 Address Address::make_array(ArrayAddress adr) {
duke@435 99 // Not implementable on 64bit machines
duke@435 100 // Should have been handled higher up the call chain.
duke@435 101 ShouldNotReachHere();
never@739 102 return Address();
never@739 103 }
never@739 104
never@739 105 // exceedingly dangerous constructor
never@739 106 Address::Address(int disp, address loc, relocInfo::relocType rtype) {
never@739 107 _base = noreg;
never@739 108 _index = noreg;
never@739 109 _scale = no_scale;
never@739 110 _disp = disp;
never@739 111 switch (rtype) {
never@739 112 case relocInfo::external_word_type:
never@739 113 _rspec = external_word_Relocation::spec(loc);
never@739 114 break;
never@739 115 case relocInfo::internal_word_type:
never@739 116 _rspec = internal_word_Relocation::spec(loc);
never@739 117 break;
never@739 118 case relocInfo::runtime_call_type:
never@739 119 // HMM
never@739 120 _rspec = runtime_call_Relocation::spec();
never@739 121 break;
never@739 122 case relocInfo::poll_type:
never@739 123 case relocInfo::poll_return_type:
never@739 124 _rspec = Relocation::spec_simple(rtype);
never@739 125 break;
never@739 126 case relocInfo::none:
never@739 127 break;
never@739 128 default:
never@739 129 ShouldNotReachHere();
never@739 130 }
never@739 131 }
never@739 132 #else // LP64
never@739 133
never@739 134 Address Address::make_array(ArrayAddress adr) {
duke@435 135 AddressLiteral base = adr.base();
duke@435 136 Address index = adr.index();
duke@435 137 assert(index._disp == 0, "must not have disp"); // maybe it can?
duke@435 138 Address array(index._base, index._index, index._scale, (intptr_t) base.target());
duke@435 139 array._rspec = base._rspec;
duke@435 140 return array;
never@739 141 }
duke@435 142
duke@435 143 // exceedingly dangerous constructor
duke@435 144 Address::Address(address loc, RelocationHolder spec) {
duke@435 145 _base = noreg;
duke@435 146 _index = noreg;
duke@435 147 _scale = no_scale;
duke@435 148 _disp = (intptr_t) loc;
duke@435 149 _rspec = spec;
duke@435 150 }
never@739 151
duke@435 152 #endif // _LP64
duke@435 153
never@739 154
never@739 155
duke@435 156 // Convert the raw encoding form into the form expected by the constructor for
duke@435 157 // Address. An index of 4 (rsp) corresponds to having no index, so convert
duke@435 158 // that to noreg for the Address constructor.
coleenp@4037 159 Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
twisti@1059 160 RelocationHolder rspec;
coleenp@4037 161 if (disp_reloc != relocInfo::none) {
coleenp@4037 162 rspec = Relocation::spec_simple(disp_reloc);
twisti@1059 163 }
duke@435 164 bool valid_index = index != rsp->encoding();
duke@435 165 if (valid_index) {
duke@435 166 Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
twisti@1059 167 madr._rspec = rspec;
duke@435 168 return madr;
duke@435 169 } else {
duke@435 170 Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
twisti@1059 171 madr._rspec = rspec;
duke@435 172 return madr;
duke@435 173 }
duke@435 174 }
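
// For example (illustrative), make_raw(rbp->encoding(), rsp->encoding(), 0, 8,
// relocInfo::none) takes the second branch above, because an index encoding of
// 4 (rsp) means "no index"; it therefore yields the operand [rbp + 8] with
// noreg as its index and no relocation attached.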
duke@435 175
duke@435 176 // Implementation of Assembler
duke@435 177
duke@435 178 int AbstractAssembler::code_fill_byte() {
duke@435 179 return (u_char)'\xF4'; // hlt
duke@435 180 }
duke@435 181
duke@435 182 // make this go away someday
duke@435 183 void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
duke@435 184 if (rtype == relocInfo::none)
duke@435 185 emit_long(data);
duke@435 186 else emit_data(data, Relocation::spec_simple(rtype), format);
duke@435 187 }
duke@435 188
duke@435 189 void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
never@739 190 assert(imm_operand == 0, "default format must be immediate in this file");
duke@435 191 assert(inst_mark() != NULL, "must be inside InstructionMark");
duke@435 192 if (rspec.type() != relocInfo::none) {
duke@435 193 #ifdef ASSERT
duke@435 194 check_relocation(rspec, format);
duke@435 195 #endif
duke@435 196 // Do not use AbstractAssembler::relocate, which is not intended for
duke@435 197 // embedded words. Instead, relocate to the enclosing instruction.
duke@435 198
duke@435 199 // hack. call32 is too wide for mask so use disp32
duke@435 200 if (format == call32_operand)
duke@435 201 code_section()->relocate(inst_mark(), rspec, disp32_operand);
duke@435 202 else
duke@435 203 code_section()->relocate(inst_mark(), rspec, format);
duke@435 204 }
duke@435 205 emit_long(data);
duke@435 206 }
duke@435 207
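// The encode() helpers below keep only the low three bits of a register
// encoding for use in ModRM/SIB fields; for the extended registers (r8-r15,
// xmm8-xmm15, encodings 8 and above) the high bit is supplied by a REX or VEX
// prefix instead.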
never@739 208 static int encode(Register r) {
never@739 209 int enc = r->encoding();
never@739 210 if (enc >= 8) {
never@739 211 enc -= 8;
never@739 212 }
never@739 213 return enc;
never@739 214 }
never@739 215
never@739 216 static int encode(XMMRegister r) {
never@739 217 int enc = r->encoding();
never@739 218 if (enc >= 8) {
never@739 219 enc -= 8;
never@739 220 }
never@739 221 return enc;
never@739 222 }
duke@435 223
duke@435 224 void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
duke@435 225 assert(dst->has_byte_register(), "must have byte register");
duke@435 226 assert(isByte(op1) && isByte(op2), "wrong opcode");
duke@435 227 assert(isByte(imm8), "not a byte");
duke@435 228 assert((op1 & 0x01) == 0, "should be 8bit operation");
duke@435 229 emit_byte(op1);
never@739 230 emit_byte(op2 | encode(dst));
duke@435 231 emit_byte(imm8);
duke@435 232 }
duke@435 233
duke@435 234
never@739 235 void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
duke@435 236 assert(isByte(op1) && isByte(op2), "wrong opcode");
duke@435 237 assert((op1 & 0x01) == 1, "should be 32bit operation");
duke@435 238 assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
duke@435 239 if (is8bit(imm32)) {
duke@435 240 emit_byte(op1 | 0x02); // set sign bit
never@739 241 emit_byte(op2 | encode(dst));
duke@435 242 emit_byte(imm32 & 0xFF);
duke@435 243 } else {
duke@435 244 emit_byte(op1);
never@739 245 emit_byte(op2 | encode(dst));
duke@435 246 emit_long(imm32);
duke@435 247 }
duke@435 248 }
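
// A worked example of the encoding above (illustrative): addl(rbx, 5) arrives
// here as emit_arith(0x81, 0xC0, rbx, 5); since 5 fits in 8 bits this emits
// 0x83 0xC3 0x05 (add with sign-extended imm8), while addl(rbx, 0x12345)
// emits 0x81 0xC3 followed by the 4-byte immediate.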
duke@435 249
kvn@3574 250 // Force generation of a 4-byte immediate value even if it fits into 8 bits
kvn@3574 251 void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
kvn@3574 252 assert(isByte(op1) && isByte(op2), "wrong opcode");
kvn@3574 253 assert((op1 & 0x01) == 1, "should be 32bit operation");
kvn@3574 254 assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
kvn@3574 255 emit_byte(op1);
kvn@3574 256 emit_byte(op2 | encode(dst));
kvn@3574 257 emit_long(imm32);
kvn@3574 258 }
kvn@3574 259
duke@435 260 // immediate-to-memory forms
never@739 261 void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
duke@435 262 assert((op1 & 0x01) == 1, "should be 32bit operation");
duke@435 263 assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
duke@435 264 if (is8bit(imm32)) {
duke@435 265 emit_byte(op1 | 0x02); // set sign bit
never@739 266 emit_operand(rm, adr, 1);
duke@435 267 emit_byte(imm32 & 0xFF);
duke@435 268 } else {
duke@435 269 emit_byte(op1);
never@739 270 emit_operand(rm, adr, 4);
duke@435 271 emit_long(imm32);
duke@435 272 }
duke@435 273 }
duke@435 274
duke@435 275
duke@435 276 void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
duke@435 277 assert(isByte(op1) && isByte(op2), "wrong opcode");
duke@435 278 emit_byte(op1);
never@739 279 emit_byte(op2 | encode(dst) << 3 | encode(src));
never@739 280 }
never@739 281
never@739 282
never@739 283 void Assembler::emit_operand(Register reg, Register base, Register index,
never@739 284 Address::ScaleFactor scale, int disp,
never@739 285 RelocationHolder const& rspec,
never@739 286 int rip_relative_correction) {
duke@435 287 relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();
never@739 288
never@739 289 // Encode the registers as needed in the fields they are used in
never@739 290
never@739 291 int regenc = encode(reg) << 3;
never@739 292 int indexenc = index->is_valid() ? encode(index) << 3 : 0;
never@739 293 int baseenc = base->is_valid() ? encode(base) : 0;
never@739 294
duke@435 295 if (base->is_valid()) {
duke@435 296 if (index->is_valid()) {
duke@435 297 assert(scale != Address::no_scale, "inconsistent address");
duke@435 298 // [base + index*scale + disp]
never@739 299 if (disp == 0 && rtype == relocInfo::none &&
never@739 300 base != rbp LP64_ONLY(&& base != r13)) {
duke@435 301 // [base + index*scale]
duke@435 302 // [00 reg 100][ss index base]
duke@435 303 assert(index != rsp, "illegal addressing mode");
never@739 304 emit_byte(0x04 | regenc);
never@739 305 emit_byte(scale << 6 | indexenc | baseenc);
duke@435 306 } else if (is8bit(disp) && rtype == relocInfo::none) {
duke@435 307 // [base + index*scale + imm8]
duke@435 308 // [01 reg 100][ss index base] imm8
duke@435 309 assert(index != rsp, "illegal addressing mode");
never@739 310 emit_byte(0x44 | regenc);
never@739 311 emit_byte(scale << 6 | indexenc | baseenc);
duke@435 312 emit_byte(disp & 0xFF);
duke@435 313 } else {
never@739 314 // [base + index*scale + disp32]
never@739 315 // [10 reg 100][ss index base] disp32
duke@435 316 assert(index != rsp, "illegal addressing mode");
never@739 317 emit_byte(0x84 | regenc);
never@739 318 emit_byte(scale << 6 | indexenc | baseenc);
duke@435 319 emit_data(disp, rspec, disp32_operand);
duke@435 320 }
never@739 321 } else if (base == rsp LP64_ONLY(|| base == r12)) {
never@739 322 // [rsp + disp]
duke@435 323 if (disp == 0 && rtype == relocInfo::none) {
never@739 324 // [rsp]
duke@435 325 // [00 reg 100][00 100 100]
never@739 326 emit_byte(0x04 | regenc);
duke@435 327 emit_byte(0x24);
duke@435 328 } else if (is8bit(disp) && rtype == relocInfo::none) {
never@739 329 // [rsp + imm8]
never@739 330 // [01 reg 100][00 100 100] disp8
never@739 331 emit_byte(0x44 | regenc);
duke@435 332 emit_byte(0x24);
duke@435 333 emit_byte(disp & 0xFF);
duke@435 334 } else {
never@739 335 // [rsp + imm32]
never@739 336 // [10 reg 100][00 100 100] disp32
never@739 337 emit_byte(0x84 | regenc);
duke@435 338 emit_byte(0x24);
duke@435 339 emit_data(disp, rspec, disp32_operand);
duke@435 340 }
duke@435 341 } else {
duke@435 342 // [base + disp]
never@739 343 assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode");
never@739 344 if (disp == 0 && rtype == relocInfo::none &&
never@739 345 base != rbp LP64_ONLY(&& base != r13)) {
duke@435 346 // [base]
duke@435 347 // [00 reg base]
never@739 348 emit_byte(0x00 | regenc | baseenc);
duke@435 349 } else if (is8bit(disp) && rtype == relocInfo::none) {
never@739 350 // [base + disp8]
never@739 351 // [01 reg base] disp8
never@739 352 emit_byte(0x40 | regenc | baseenc);
duke@435 353 emit_byte(disp & 0xFF);
duke@435 354 } else {
never@739 355 // [base + disp32]
never@739 356 // [10 reg base] disp32
never@739 357 emit_byte(0x80 | regenc | baseenc);
duke@435 358 emit_data(disp, rspec, disp32_operand);
duke@435 359 }
duke@435 360 }
duke@435 361 } else {
duke@435 362 if (index->is_valid()) {
duke@435 363 assert(scale != Address::no_scale, "inconsistent address");
duke@435 364 // [index*scale + disp]
never@739 365 // [00 reg 100][ss index 101] disp32
duke@435 366 assert(index != rsp, "illegal addressing mode");
never@739 367 emit_byte(0x04 | regenc);
never@739 368 emit_byte(scale << 6 | indexenc | 0x05);
duke@435 369 emit_data(disp, rspec, disp32_operand);
never@739 370 } else if (rtype != relocInfo::none ) {
never@739 371 // [disp] (64bit) RIP-RELATIVE (32bit) abs
never@739 372 // [00 000 101] disp32
never@739 373
never@739 374 emit_byte(0x05 | regenc);
never@739 375 // Note that the RIP-rel. correction applies to the generated
never@739 376 // disp field, but _not_ to the target address in the rspec.
never@739 377
never@739 378 // disp was created by converting the target address minus the pc
never@739 379 // at the start of the instruction. That needs more correction here.
never@739 380 // intptr_t disp = target - next_ip;
never@739 381 assert(inst_mark() != NULL, "must be inside InstructionMark");
never@739 382 address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
never@739 383 int64_t adjusted = disp;
never@739 384 // Do rip-rel adjustment for 64bit
never@739 385 LP64_ONLY(adjusted -= (next_ip - inst_mark()));
never@739 386 assert(is_simm32(adjusted),
never@739 387 "must be 32bit offset (RIP relative address)");
never@739 388 emit_data((int32_t) adjusted, rspec, disp32_operand);
never@739 389
duke@435 390 } else {
never@739 391 // 32-bit never did this; it did everything via the rip-rel/disp code above
never@739 392 // [disp] ABSOLUTE
never@739 393 // [00 reg 100][00 100 101] disp32
never@739 394 emit_byte(0x04 | regenc);
never@739 395 emit_byte(0x25);
duke@435 396 emit_data(disp, rspec, disp32_operand);
duke@435 397 }
duke@435 398 }
duke@435 399 }
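
// Illustrative example of the ModRM/SIB selection above: for reg = rax and the
// operand [rcx + 8] (valid base, no index, 8-bit displacement, no relocation)
// the [base + disp8] case applies, emitting 0x41 ([01 000 001]) followed by
// the displacement byte 0x08.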
duke@435 400
never@739 401 void Assembler::emit_operand(XMMRegister reg, Register base, Register index,
never@739 402 Address::ScaleFactor scale, int disp,
never@739 403 RelocationHolder const& rspec) {
never@739 404 emit_operand((Register)reg, base, index, scale, disp, rspec);
never@739 405 }
never@739 406
duke@435 407 // Secret local extension to Assembler::WhichOperand:
duke@435 408 #define end_pc_operand (_WhichOperand_limit)
duke@435 409
duke@435 410 address Assembler::locate_operand(address inst, WhichOperand which) {
duke@435 411 // Decode the given instruction, and return the address of
duke@435 412 // an embedded 32-bit operand word.
duke@435 413
duke@435 414 // If "which" is disp32_operand, selects the displacement portion
duke@435 415 // of an effective address specifier.
never@739 416 // If "which" is imm64_operand, selects the trailing immediate constant.
duke@435 417 // If "which" is call32_operand, selects the displacement of a call or jump.
duke@435 418 // Caller is responsible for ensuring that there is such an operand,
never@739 419 // and that it is 32/64 bits wide.
duke@435 420
duke@435 421 // If "which" is end_pc_operand, find the end of the instruction.
duke@435 422
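// Worked example (illustrative): for the three-byte sequence 0x8B 0x41 0x08
// (movl 8(%rcx), %eax) the 0x8B case below sets has_disp32, the ModRM byte
// 0x41 selects the [01 reg base] disp8 form, and locate_operand(inst,
// end_pc_operand) returns inst + 3, the byte just past the disp8.
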
duke@435 423 address ip = inst;
never@739 424 bool is_64bit = false;
never@739 425
never@739 426 debug_only(bool has_disp32 = false);
never@739 427 int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn
never@739 428
never@739 429 again_after_prefix:
duke@435 430 switch (0xFF & *ip++) {
duke@435 431
duke@435 432 // These convenience macros generate groups of "case" labels for the switch.
never@739 433 #define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
never@739 434 #define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
duke@435 435 case (x)+4: case (x)+5: case (x)+6: case (x)+7
never@739 436 #define REP16(x) REP8((x)+0): \
duke@435 437 case REP8((x)+8)
duke@435 438
duke@435 439 case CS_segment:
duke@435 440 case SS_segment:
duke@435 441 case DS_segment:
duke@435 442 case ES_segment:
duke@435 443 case FS_segment:
duke@435 444 case GS_segment:
never@739 445 // Seems dubious
never@739 446 LP64_ONLY(assert(false, "shouldn't have that prefix"));
duke@435 447 assert(ip == inst+1, "only one prefix allowed");
duke@435 448 goto again_after_prefix;
duke@435 449
never@739 450 case 0x67:
never@739 451 case REX:
never@739 452 case REX_B:
never@739 453 case REX_X:
never@739 454 case REX_XB:
never@739 455 case REX_R:
never@739 456 case REX_RB:
never@739 457 case REX_RX:
never@739 458 case REX_RXB:
never@739 459 NOT_LP64(assert(false, "64bit prefixes"));
never@739 460 goto again_after_prefix;
never@739 461
never@739 462 case REX_W:
never@739 463 case REX_WB:
never@739 464 case REX_WX:
never@739 465 case REX_WXB:
never@739 466 case REX_WR:
never@739 467 case REX_WRB:
never@739 468 case REX_WRX:
never@739 469 case REX_WRXB:
never@739 470 NOT_LP64(assert(false, "64bit prefixes"));
never@739 471 is_64bit = true;
never@739 472 goto again_after_prefix;
never@739 473
never@739 474 case 0xFF: // pushq a; decl a; incl a; call a; jmp a
duke@435 475 case 0x88: // movb a, r
duke@435 476 case 0x89: // movl a, r
duke@435 477 case 0x8A: // movb r, a
duke@435 478 case 0x8B: // movl r, a
duke@435 479 case 0x8F: // popl a
never@739 480 debug_only(has_disp32 = true);
duke@435 481 break;
duke@435 482
never@739 483 case 0x68: // pushq #32
never@739 484 if (which == end_pc_operand) {
never@739 485 return ip + 4;
never@739 486 }
never@739 487 assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
duke@435 488 return ip; // not produced by emit_operand
duke@435 489
duke@435 490 case 0x66: // movw ... (size prefix)
never@739 491 again_after_size_prefix2:
duke@435 492 switch (0xFF & *ip++) {
never@739 493 case REX:
never@739 494 case REX_B:
never@739 495 case REX_X:
never@739 496 case REX_XB:
never@739 497 case REX_R:
never@739 498 case REX_RB:
never@739 499 case REX_RX:
never@739 500 case REX_RXB:
never@739 501 case REX_W:
never@739 502 case REX_WB:
never@739 503 case REX_WX:
never@739 504 case REX_WXB:
never@739 505 case REX_WR:
never@739 506 case REX_WRB:
never@739 507 case REX_WRX:
never@739 508 case REX_WRXB:
never@739 509 NOT_LP64(assert(false, "64bit prefix found"));
never@739 510 goto again_after_size_prefix2;
duke@435 511 case 0x8B: // movw r, a
duke@435 512 case 0x89: // movw a, r
never@739 513 debug_only(has_disp32 = true);
duke@435 514 break;
duke@435 515 case 0xC7: // movw a, #16
never@739 516 debug_only(has_disp32 = true);
duke@435 517 tail_size = 2; // the imm16
duke@435 518 break;
duke@435 519 case 0x0F: // several SSE/SSE2 variants
duke@435 520 ip--; // reparse the 0x0F
duke@435 521 goto again_after_prefix;
duke@435 522 default:
duke@435 523 ShouldNotReachHere();
duke@435 524 }
duke@435 525 break;
duke@435 526
never@739 527 case REP8(0xB8): // movl/q r, #32/#64(oop?)
never@739 528 if (which == end_pc_operand) return ip + (is_64bit ? 8 : 4);
never@739 529 // these asserts are somewhat nonsensical
never@739 530 #ifndef _LP64
never@3687 531 assert(which == imm_operand || which == disp32_operand,
never@3687 532 err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, ip));
never@739 533 #else
never@739 534 assert((which == call32_operand || which == imm_operand) && is_64bit ||
never@3687 535 which == narrow_oop_operand && !is_64bit,
never@3687 536 err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, ip));
never@739 537 #endif // _LP64
duke@435 538 return ip;
duke@435 539
duke@435 540 case 0x69: // imul r, a, #32
duke@435 541 case 0xC7: // movl a, #32(oop?)
duke@435 542 tail_size = 4;
never@739 543 debug_only(has_disp32 = true); // has both kinds of operands!
duke@435 544 break;
duke@435 545
duke@435 546 case 0x0F: // movx..., etc.
duke@435 547 switch (0xFF & *ip++) {
kvn@3388 548 case 0x3A: // pcmpestri
kvn@3388 549 tail_size = 1;
kvn@3388 550 case 0x38: // ptest, pmovzxbw
kvn@3388 551 ip++; // skip opcode
kvn@3388 552 debug_only(has_disp32 = true); // has both kinds of operands!
kvn@3388 553 break;
kvn@3388 554
kvn@3388 555 case 0x70: // pshufd r, r/a, #8
kvn@3388 556 debug_only(has_disp32 = true); // has both kinds of operands!
kvn@3388 557 case 0x73: // psrldq r, #8
kvn@3388 558 tail_size = 1;
kvn@3388 559 break;
kvn@3388 560
duke@435 561 case 0x12: // movlps
duke@435 562 case 0x28: // movaps
duke@435 563 case 0x2E: // ucomiss
duke@435 564 case 0x2F: // comiss
duke@435 565 case 0x54: // andps
duke@435 566 case 0x55: // andnps
duke@435 567 case 0x56: // orps
duke@435 568 case 0x57: // xorps
duke@435 569 case 0x6E: // movd
duke@435 570 case 0x7E: // movd
kvn@3388 571 case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
never@739 572 debug_only(has_disp32 = true);
duke@435 573 break;
duke@435 574
duke@435 575 case 0xAD: // shrd r, a, %cl
duke@435 576 case 0xAF: // imul r, a
never@739 577 case 0xBE: // movsbl r, a (movsxb)
never@739 578 case 0xBF: // movswl r, a (movsxw)
never@739 579 case 0xB6: // movzbl r, a (movzxb)
never@739 580 case 0xB7: // movzwl r, a (movzxw)
duke@435 581 case REP16(0x40): // cmovl cc, r, a
duke@435 582 case 0xB0: // cmpxchgb
duke@435 583 case 0xB1: // cmpxchg
duke@435 584 case 0xC1: // xaddl
duke@435 585 case 0xC7: // cmpxchg8
duke@435 586 case REP16(0x90): // setcc a
never@739 587 debug_only(has_disp32 = true);
duke@435 588 // fall out of the switch to decode the address
duke@435 589 break;
never@739 590
kvn@3388 591 case 0xC4: // pinsrw r, a, #8
kvn@3388 592 debug_only(has_disp32 = true);
kvn@3388 593 case 0xC5: // pextrw r, r, #8
kvn@3388 594 tail_size = 1; // the imm8
kvn@3388 595 break;
kvn@3388 596
duke@435 597 case 0xAC: // shrd r, a, #8
never@739 598 debug_only(has_disp32 = true);
duke@435 599 tail_size = 1; // the imm8
duke@435 600 break;
never@739 601
duke@435 602 case REP16(0x80): // jcc rdisp32
duke@435 603 if (which == end_pc_operand) return ip + 4;
never@739 604 assert(which == call32_operand, "jcc has no disp32 or imm");
duke@435 605 return ip;
duke@435 606 default:
duke@435 607 ShouldNotReachHere();
duke@435 608 }
duke@435 609 break;
duke@435 610
duke@435 611 case 0x81: // addl a, #32; addl r, #32
duke@435 612 // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
never@739 613 // on 32bit in the case of cmpl, the imm might be an oop
duke@435 614 tail_size = 4;
never@739 615 debug_only(has_disp32 = true); // has both kinds of operands!
duke@435 616 break;
duke@435 617
duke@435 618 case 0x83: // addl a, #8; addl r, #8
duke@435 619 // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
never@739 620 debug_only(has_disp32 = true); // has both kinds of operands!
duke@435 621 tail_size = 1;
duke@435 622 break;
duke@435 623
duke@435 624 case 0x9B:
duke@435 625 switch (0xFF & *ip++) {
duke@435 626 case 0xD9: // fnstcw a
never@739 627 debug_only(has_disp32 = true);
duke@435 628 break;
duke@435 629 default:
duke@435 630 ShouldNotReachHere();
duke@435 631 }
duke@435 632 break;
duke@435 633
duke@435 634 case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
duke@435 635 case REP4(0x10): // adc...
duke@435 636 case REP4(0x20): // and...
duke@435 637 case REP4(0x30): // xor...
duke@435 638 case REP4(0x08): // or...
duke@435 639 case REP4(0x18): // sbb...
duke@435 640 case REP4(0x28): // sub...
never@739 641 case 0xF7: // mull a
never@739 642 case 0x8D: // lea r, a
never@739 643 case 0x87: // xchg r, a
duke@435 644 case REP4(0x38): // cmp...
never@739 645 case 0x85: // test r, a
never@739 646 debug_only(has_disp32 = true); // has both kinds of operands!
duke@435 647 break;
duke@435 648
duke@435 649 case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
duke@435 650 case 0xC6: // movb a, #8
duke@435 651 case 0x80: // cmpb a, #8
duke@435 652 case 0x6B: // imul r, a, #8
never@739 653 debug_only(has_disp32 = true); // has both kinds of operands!
duke@435 654 tail_size = 1; // the imm8
duke@435 655 break;
duke@435 656
kvn@3388 657 case 0xC4: // VEX_3bytes
kvn@3388 658 case 0xC5: // VEX_2bytes
kvn@3388 659 assert((UseAVX > 0), "shouldn't have VEX prefix");
kvn@3388 660 assert(ip == inst+1, "no prefixes allowed");
kvn@3388 661 // C4 and C5 are also used as opcodes for PINSRW and PEXTRW instructions
kvn@3388 662 // but those have the 0x0F prefix and are processed by the 0x0F case above.
kvn@3388 663 //
kvn@3388 664 // In 32-bit mode the VEX first bytes C4 and C5 alias onto the LDS and LES
kvn@3388 665 // instructions (which are not supported in 64-bit mode).
kvn@3388 666 // To distinguish them, bits [7:6] are set in the VEX second byte, since a
kvn@3388 667 // ModRM byte cannot be of the form 11xxxxxx in 32-bit mode. To set
kvn@3388 668 // those VEX bits, the REX and vvvv bits are stored inverted.
kvn@3388 669 //
kvn@3388 670 // Fortunately C2 doesn't generate these instructions, so we don't need
kvn@3388 671 // to check for them in the product version.
kvn@3388 672
kvn@3388 673 // Check second byte
kvn@3388 674 NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions"));
kvn@3388 675
kvn@3388 676 // First byte
kvn@3388 677 if ((0xFF & *inst) == VEX_3bytes) {
kvn@3388 678 ip++; // third byte
kvn@3388 679 is_64bit = ((VEX_W & *ip) == VEX_W);
kvn@3388 680 }
kvn@3388 681 ip++; // opcode
kvn@3388 682 // To find the end of instruction (which == end_pc_operand).
kvn@3388 683 switch (0xFF & *ip) {
kvn@3388 684 case 0x61: // pcmpestri r, r/a, #8
kvn@3388 685 case 0x70: // pshufd r, r/a, #8
kvn@3388 686 case 0x73: // psrldq r, #8
kvn@3388 687 tail_size = 1; // the imm8
kvn@3388 688 break;
kvn@3388 689 default:
kvn@3388 690 break;
kvn@3388 691 }
kvn@3388 692 ip++; // skip opcode
kvn@3388 693 debug_only(has_disp32 = true); // has both kinds of operands!
kvn@3388 694 break;
duke@435 695
duke@435 696 case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
duke@435 697 case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
duke@435 698 case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
duke@435 699 case 0xDD: // fld_d a; fst_d a; fstp_d a
duke@435 700 case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
duke@435 701 case 0xDF: // fild_d a; fistp_d a
duke@435 702 case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
duke@435 703 case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
duke@435 704 case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
never@739 705 debug_only(has_disp32 = true);
duke@435 706 break;
duke@435 707
kvn@3388 708 case 0xE8: // call rdisp32
kvn@3388 709 case 0xE9: // jmp rdisp32
kvn@3388 710 if (which == end_pc_operand) return ip + 4;
kvn@3388 711 assert(which == call32_operand, "call has no disp32 or imm");
kvn@3388 712 return ip;
kvn@3388 713
kvn@855 714 case 0xF0: // Lock
kvn@855 715 assert(os::is_MP(), "only on MP");
kvn@855 716 goto again_after_prefix;
kvn@855 717
duke@435 718 case 0xF3: // For SSE
duke@435 719 case 0xF2: // For SSE2
never@739 720 switch (0xFF & *ip++) {
never@739 721 case REX:
never@739 722 case REX_B:
never@739 723 case REX_X:
never@739 724 case REX_XB:
never@739 725 case REX_R:
never@739 726 case REX_RB:
never@739 727 case REX_RX:
never@739 728 case REX_RXB:
never@739 729 case REX_W:
never@739 730 case REX_WB:
never@739 731 case REX_WX:
never@739 732 case REX_WXB:
never@739 733 case REX_WR:
never@739 734 case REX_WRB:
never@739 735 case REX_WRX:
never@739 736 case REX_WRXB:
never@739 737 NOT_LP64(assert(false, "found 64bit prefix"));
never@739 738 ip++;
never@739 739 default:
never@739 740 ip++;
never@739 741 }
never@739 742 debug_only(has_disp32 = true); // has both kinds of operands!
duke@435 743 break;
duke@435 744
duke@435 745 default:
duke@435 746 ShouldNotReachHere();
duke@435 747
never@739 748 #undef REP8
never@739 749 #undef REP16
duke@435 750 }
duke@435 751
duke@435 752 assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
never@739 753 #ifdef _LP64
never@739 754 assert(which != imm_operand, "instruction is not a movq reg, imm64");
never@739 755 #else
never@739 756 // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
never@739 757 assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
never@739 758 #endif // LP64
never@739 759 assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");
duke@435 760
duke@435 761 // parse the output of emit_operand
duke@435 762 int op2 = 0xFF & *ip++;
duke@435 763 int base = op2 & 0x07;
duke@435 764 int op3 = -1;
duke@435 765 const int b100 = 4;
duke@435 766 const int b101 = 5;
duke@435 767 if (base == b100 && (op2 >> 6) != 3) {
duke@435 768 op3 = 0xFF & *ip++;
duke@435 769 base = op3 & 0x07; // refetch the base
duke@435 770 }
duke@435 771 // now ip points at the disp (if any)
duke@435 772
duke@435 773 switch (op2 >> 6) {
duke@435 774 case 0:
duke@435 775 // [00 reg 100][ss index base]
never@739 776 // [00 reg 100][00 100 esp]
duke@435 777 // [00 reg base]
duke@435 778 // [00 reg 100][ss index 101][disp32]
duke@435 779 // [00 reg 101] [disp32]
duke@435 780
duke@435 781 if (base == b101) {
duke@435 782 if (which == disp32_operand)
duke@435 783 return ip; // caller wants the disp32
duke@435 784 ip += 4; // skip the disp32
duke@435 785 }
duke@435 786 break;
duke@435 787
duke@435 788 case 1:
duke@435 789 // [01 reg 100][ss index base][disp8]
never@739 790 // [01 reg 100][00 100 esp][disp8]
duke@435 791 // [01 reg base] [disp8]
duke@435 792 ip += 1; // skip the disp8
duke@435 793 break;
duke@435 794
duke@435 795 case 2:
duke@435 796 // [10 reg 100][ss index base][disp32]
never@739 797 // [10 reg 100][00 100 esp][disp32]
duke@435 798 // [10 reg base] [disp32]
duke@435 799 if (which == disp32_operand)
duke@435 800 return ip; // caller wants the disp32
duke@435 801 ip += 4; // skip the disp32
duke@435 802 break;
duke@435 803
duke@435 804 case 3:
duke@435 805 // [11 reg base] (not a memory addressing mode)
duke@435 806 break;
duke@435 807 }
duke@435 808
duke@435 809 if (which == end_pc_operand) {
duke@435 810 return ip + tail_size;
duke@435 811 }
duke@435 812
never@739 813 #ifdef _LP64
kvn@1077 814 assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
never@739 815 #else
never@739 816 assert(which == imm_operand, "instruction has only an imm field");
never@739 817 #endif // LP64
duke@435 818 return ip;
duke@435 819 }
duke@435 820
duke@435 821 address Assembler::locate_next_instruction(address inst) {
duke@435 822 // Secretly share code with locate_operand:
duke@435 823 return locate_operand(inst, end_pc_operand);
duke@435 824 }
duke@435 825
duke@435 826
duke@435 827 #ifdef ASSERT
duke@435 828 void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
duke@435 829 address inst = inst_mark();
duke@435 830 assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
duke@435 831 address opnd;
duke@435 832
duke@435 833 Relocation* r = rspec.reloc();
duke@435 834 if (r->type() == relocInfo::none) {
duke@435 835 return;
duke@435 836 } else if (r->is_call() || format == call32_operand) {
duke@435 837 // assert(format == imm32_operand, "cannot specify a nonzero format");
duke@435 838 opnd = locate_operand(inst, call32_operand);
duke@435 839 } else if (r->is_data()) {
never@739 840 assert(format == imm_operand || format == disp32_operand
never@739 841 LP64_ONLY(|| format == narrow_oop_operand), "format ok");
duke@435 842 opnd = locate_operand(inst, (WhichOperand)format);
duke@435 843 } else {
never@739 844 assert(format == imm_operand, "cannot specify a format");
duke@435 845 return;
duke@435 846 }
duke@435 847 assert(opnd == pc(), "must put operand where relocs can find it");
duke@435 848 }
never@739 849 #endif // ASSERT
never@739 850
never@739 851 void Assembler::emit_operand32(Register reg, Address adr) {
never@739 852 assert(reg->encoding() < 8, "no extended registers");
never@739 853 assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
never@739 854 emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
never@739 855 adr._rspec);
never@739 856 }
never@739 857
never@739 858 void Assembler::emit_operand(Register reg, Address adr,
never@739 859 int rip_relative_correction) {
never@739 860 emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
never@739 861 adr._rspec,
never@739 862 rip_relative_correction);
never@739 863 }
never@739 864
never@739 865 void Assembler::emit_operand(XMMRegister reg, Address adr) {
never@739 866 emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
never@739 867 adr._rspec);
never@739 868 }
never@739 869
never@739 870 // MMX operations
never@739 871 void Assembler::emit_operand(MMXRegister reg, Address adr) {
never@739 872 assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
never@739 873 emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
never@739 874 }
never@739 875
never@739 876 // work around gcc (3.2.1-7a) bug
never@739 877 void Assembler::emit_operand(Address adr, MMXRegister reg) {
never@739 878 assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
never@739 879 emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
duke@435 880 }
duke@435 881
duke@435 882
duke@435 883 void Assembler::emit_farith(int b1, int b2, int i) {
duke@435 884 assert(isByte(b1) && isByte(b2), "wrong opcode");
duke@435 885 assert(0 <= i && i < 8, "illegal stack offset");
duke@435 886 emit_byte(b1);
duke@435 887 emit_byte(b2 + i);
duke@435 888 }
duke@435 889
duke@435 890
phh@2423 891 // Now the Assembler instructions (identical for 32/64 bits)
phh@2423 892
phh@2423 893 void Assembler::adcl(Address dst, int32_t imm32) {
phh@2423 894 InstructionMark im(this);
phh@2423 895 prefix(dst);
phh@2423 896 emit_arith_operand(0x81, rdx, dst, imm32);
phh@2423 897 }
phh@2423 898
phh@2423 899 void Assembler::adcl(Address dst, Register src) {
phh@2423 900 InstructionMark im(this);
phh@2423 901 prefix(dst, src);
phh@2423 902 emit_byte(0x11);
phh@2423 903 emit_operand(src, dst);
phh@2423 904 }
never@739 905
never@739 906 void Assembler::adcl(Register dst, int32_t imm32) {
never@739 907 prefix(dst);
duke@435 908 emit_arith(0x81, 0xD0, dst, imm32);
duke@435 909 }
duke@435 910
duke@435 911 void Assembler::adcl(Register dst, Address src) {
duke@435 912 InstructionMark im(this);
never@739 913 prefix(src, dst);
duke@435 914 emit_byte(0x13);
duke@435 915 emit_operand(dst, src);
duke@435 916 }
duke@435 917
duke@435 918 void Assembler::adcl(Register dst, Register src) {
never@739 919 (void) prefix_and_encode(dst->encoding(), src->encoding());
duke@435 920 emit_arith(0x13, 0xC0, dst, src);
duke@435 921 }
duke@435 922
never@739 923 void Assembler::addl(Address dst, int32_t imm32) {
never@739 924 InstructionMark im(this);
never@739 925 prefix(dst);
never@739 926 emit_arith_operand(0x81, rax, dst, imm32);
never@739 927 }
duke@435 928
duke@435 929 void Assembler::addl(Address dst, Register src) {
duke@435 930 InstructionMark im(this);
never@739 931 prefix(dst, src);
duke@435 932 emit_byte(0x01);
duke@435 933 emit_operand(src, dst);
duke@435 934 }
duke@435 935
never@739 936 void Assembler::addl(Register dst, int32_t imm32) {
never@739 937 prefix(dst);
duke@435 938 emit_arith(0x81, 0xC0, dst, imm32);
duke@435 939 }
duke@435 940
duke@435 941 void Assembler::addl(Register dst, Address src) {
duke@435 942 InstructionMark im(this);
never@739 943 prefix(src, dst);
duke@435 944 emit_byte(0x03);
duke@435 945 emit_operand(dst, src);
duke@435 946 }
duke@435 947
duke@435 948 void Assembler::addl(Register dst, Register src) {
never@739 949 (void) prefix_and_encode(dst->encoding(), src->encoding());
duke@435 950 emit_arith(0x03, 0xC0, dst, src);
duke@435 951 }
duke@435 952
duke@435 953 void Assembler::addr_nop_4() {
kvn@3574 954 assert(UseAddressNop, "no CPU support");
duke@435 955 // 4 bytes: NOP DWORD PTR [EAX+0]
duke@435 956 emit_byte(0x0F);
duke@435 957 emit_byte(0x1F);
duke@435 958 emit_byte(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
duke@435 959 emit_byte(0); // 8-bits offset (1 byte)
duke@435 960 }
duke@435 961
duke@435 962 void Assembler::addr_nop_5() {
kvn@3574 963 assert(UseAddressNop, "no CPU support");
duke@435 964 // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
duke@435 965 emit_byte(0x0F);
duke@435 966 emit_byte(0x1F);
duke@435 967 emit_byte(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
duke@435 968 emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
duke@435 969 emit_byte(0); // 8-bits offset (1 byte)
duke@435 970 }
duke@435 971
duke@435 972 void Assembler::addr_nop_7() {
kvn@3574 973 assert(UseAddressNop, "no CPU support");
duke@435 974 // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
duke@435 975 emit_byte(0x0F);
duke@435 976 emit_byte(0x1F);
duke@435 977 emit_byte(0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
duke@435 978 emit_long(0); // 32-bits offset (4 bytes)
duke@435 979 }
duke@435 980
duke@435 981 void Assembler::addr_nop_8() {
kvn@3574 982 assert(UseAddressNop, "no CPU support");
duke@435 983 // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
duke@435 984 emit_byte(0x0F);
duke@435 985 emit_byte(0x1F);
duke@435 986 emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
duke@435 987 emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
duke@435 988 emit_long(0); // 32-bits offset (4 bytes)
duke@435 989 }
duke@435 990
never@739 991 void Assembler::addsd(XMMRegister dst, XMMRegister src) {
never@739 992 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 993 emit_simd_arith(0x58, dst, src, VEX_SIMD_F2);
never@739 994 }
never@739 995
never@739 996 void Assembler::addsd(XMMRegister dst, Address src) {
never@739 997 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 998 emit_simd_arith(0x58, dst, src, VEX_SIMD_F2);
never@739 999 }
never@739 1000
never@739 1001 void Assembler::addss(XMMRegister dst, XMMRegister src) {
never@739 1002 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 1003 emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
never@739 1004 }
never@739 1005
never@739 1006 void Assembler::addss(XMMRegister dst, Address src) {
never@739 1007 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 1008 emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
never@739 1009 }
never@739 1010
kvn@4205 1011 void Assembler::aesdec(XMMRegister dst, Address src) {
kvn@4205 1012 assert(VM_Version::supports_aes(), "");
kvn@4205 1013 InstructionMark im(this);
kvn@4205 1014 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
kvn@4205 1015 emit_byte(0xde);
kvn@4205 1016 emit_operand(dst, src);
kvn@4205 1017 }
kvn@4205 1018
kvn@4205 1019 void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
kvn@4205 1020 assert(VM_Version::supports_aes(), "");
kvn@4205 1021 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
kvn@4205 1022 emit_byte(0xde);
kvn@4205 1023 emit_byte(0xC0 | encode);
kvn@4205 1024 }
kvn@4205 1025
kvn@4205 1026 void Assembler::aesdeclast(XMMRegister dst, Address src) {
kvn@4205 1027 assert(VM_Version::supports_aes(), "");
kvn@4205 1028 InstructionMark im(this);
kvn@4205 1029 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
kvn@4205 1030 emit_byte(0xdf);
kvn@4205 1031 emit_operand(dst, src);
kvn@4205 1032 }
kvn@4205 1033
kvn@4205 1034 void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
kvn@4205 1035 assert(VM_Version::supports_aes(), "");
kvn@4205 1036 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
kvn@4205 1037 emit_byte(0xdf);
kvn@4205 1038 emit_byte(0xC0 | encode);
kvn@4205 1039 }
kvn@4205 1040
kvn@4205 1041 void Assembler::aesenc(XMMRegister dst, Address src) {
kvn@4205 1042 assert(VM_Version::supports_aes(), "");
kvn@4205 1043 InstructionMark im(this);
kvn@4205 1044 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
kvn@4205 1045 emit_byte(0xdc);
kvn@4205 1046 emit_operand(dst, src);
kvn@4205 1047 }
kvn@4205 1048
kvn@4205 1049 void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
kvn@4205 1050 assert(VM_Version::supports_aes(), "");
kvn@4205 1051 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
kvn@4205 1052 emit_byte(0xdc);
kvn@4205 1053 emit_byte(0xC0 | encode);
kvn@4205 1054 }
kvn@4205 1055
kvn@4205 1056 void Assembler::aesenclast(XMMRegister dst, Address src) {
kvn@4205 1057 assert(VM_Version::supports_aes(), "");
kvn@4205 1058 InstructionMark im(this);
kvn@4205 1059 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
kvn@4205 1060 emit_byte(0xdd);
kvn@4205 1061 emit_operand(dst, src);
kvn@4205 1062 }
kvn@4205 1063
kvn@4205 1064 void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
kvn@4205 1065 assert(VM_Version::supports_aes(), "");
kvn@4205 1066 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
kvn@4205 1067 emit_byte(0xdd);
kvn@4205 1068 emit_byte(0xC0 | encode);
kvn@4205 1069 }
kvn@4205 1070
kvn@4205 1071
kvn@3388 1072 void Assembler::andl(Address dst, int32_t imm32) {
kvn@3388 1073 InstructionMark im(this);
kvn@3388 1074 prefix(dst);
kvn@3388 1075 emit_byte(0x81);
kvn@3388 1076 emit_operand(rsp, dst, 4);
kvn@3388 1077 emit_long(imm32);
kvn@3388 1078 }
kvn@3388 1079
never@739 1080 void Assembler::andl(Register dst, int32_t imm32) {
never@739 1081 prefix(dst);
never@739 1082 emit_arith(0x81, 0xE0, dst, imm32);
never@739 1083 }
never@739 1084
never@739 1085 void Assembler::andl(Register dst, Address src) {
never@739 1086 InstructionMark im(this);
never@739 1087 prefix(src, dst);
never@739 1088 emit_byte(0x23);
never@739 1089 emit_operand(dst, src);
never@739 1090 }
never@739 1091
never@739 1092 void Assembler::andl(Register dst, Register src) {
never@739 1093 (void) prefix_and_encode(dst->encoding(), src->encoding());
never@739 1094 emit_arith(0x23, 0xC0, dst, src);
never@739 1095 }
never@739 1096
twisti@1210 1097 void Assembler::bsfl(Register dst, Register src) {
twisti@1210 1098 int encode = prefix_and_encode(dst->encoding(), src->encoding());
twisti@1210 1099 emit_byte(0x0F);
twisti@1210 1100 emit_byte(0xBC);
twisti@1210 1101 emit_byte(0xC0 | encode);
twisti@1210 1102 }
twisti@1210 1103
twisti@1210 1104 void Assembler::bsrl(Register dst, Register src) {
twisti@1210 1105 assert(!VM_Version::supports_lzcnt(), "encoding is treated as LZCNT");
twisti@1210 1106 int encode = prefix_and_encode(dst->encoding(), src->encoding());
twisti@1210 1107 emit_byte(0x0F);
twisti@1210 1108 emit_byte(0xBD);
twisti@1210 1109 emit_byte(0xC0 | encode);
twisti@1210 1110 }
twisti@1210 1111
never@739 1112 void Assembler::bswapl(Register reg) { // bswap
never@739 1113 int encode = prefix_and_encode(reg->encoding());
never@739 1114 emit_byte(0x0F);
never@739 1115 emit_byte(0xC8 | encode);
never@739 1116 }
never@739 1117
never@739 1118 void Assembler::call(Label& L, relocInfo::relocType rtype) {
never@739 1119 // suspect disp32 is always good
never@739 1120 int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);
never@739 1121
never@739 1122 if (L.is_bound()) {
never@739 1123 const int long_size = 5;
never@739 1124 int offs = (int)( target(L) - pc() );
never@739 1125 assert(offs <= 0, "assembler error");
never@739 1126 InstructionMark im(this);
never@739 1127 // 1110 1000 #32-bit disp
never@739 1128 emit_byte(0xE8);
never@739 1129 emit_data(offs - long_size, rtype, operand);
never@739 1130 } else {
never@739 1131 InstructionMark im(this);
never@739 1132 // 1110 1000 #32-bit disp
never@739 1133 L.add_patch_at(code(), locator());
never@739 1134
never@739 1135 emit_byte(0xE8);
never@739 1136 emit_data(int(0), rtype, operand);
never@739 1137 }
never@739 1138 }
never@739 1139
never@739 1140 void Assembler::call(Register dst) {
kvn@3388 1141 int encode = prefix_and_encode(dst->encoding());
never@739 1142 emit_byte(0xFF);
never@739 1143 emit_byte(0xD0 | encode);
never@739 1144 }
never@739 1145
never@739 1146
never@739 1147 void Assembler::call(Address adr) {
never@739 1148 InstructionMark im(this);
never@739 1149 prefix(adr);
never@739 1150 emit_byte(0xFF);
never@739 1151 emit_operand(rdx, adr);
never@739 1152 }
never@739 1153
never@739 1154 void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
never@739 1155 assert(entry != NULL, "call most probably wrong");
never@739 1156 InstructionMark im(this);
never@739 1157 emit_byte(0xE8);
twisti@4317 1158 intptr_t disp = entry - (pc() + sizeof(int32_t));
never@739 1159 assert(is_simm32(disp), "must be 32bit offset (call2)");
never@739 1160 // Technically, should use call32_operand, but this format is
never@739 1161 // implied by the fact that we're emitting a call instruction.
never@739 1162
never@739 1163 int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
never@739 1164 emit_data((int) disp, rspec, operand);
never@739 1165 }
never@739 1166
never@739 1167 void Assembler::cdql() {
never@739 1168 emit_byte(0x99);
never@739 1169 }
never@739 1170
twisti@4318 1171 void Assembler::cld() {
twisti@4318 1172 emit_byte(0xfc);
twisti@4318 1173 }
twisti@4318 1174
never@739 1175 void Assembler::cmovl(Condition cc, Register dst, Register src) {
never@739 1176 NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
never@739 1177 int encode = prefix_and_encode(dst->encoding(), src->encoding());
never@739 1178 emit_byte(0x0F);
never@739 1179 emit_byte(0x40 | cc);
never@739 1180 emit_byte(0xC0 | encode);
never@739 1181 }
never@739 1182
never@739 1183
never@739 1184 void Assembler::cmovl(Condition cc, Register dst, Address src) {
never@739 1185 NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
never@739 1186 prefix(src, dst);
never@739 1187 emit_byte(0x0F);
never@739 1188 emit_byte(0x40 | cc);
never@739 1189 emit_operand(dst, src);
never@739 1190 }
never@739 1191
never@739 1192 void Assembler::cmpb(Address dst, int imm8) {
never@739 1193 InstructionMark im(this);
never@739 1194 prefix(dst);
never@739 1195 emit_byte(0x80);
never@739 1196 emit_operand(rdi, dst, 1);
never@739 1197 emit_byte(imm8);
never@739 1198 }
never@739 1199
never@739 1200 void Assembler::cmpl(Address dst, int32_t imm32) {
never@739 1201 InstructionMark im(this);
never@739 1202 prefix(dst);
never@739 1203 emit_byte(0x81);
never@739 1204 emit_operand(rdi, dst, 4);
never@739 1205 emit_long(imm32);
never@739 1206 }
never@739 1207
never@739 1208 void Assembler::cmpl(Register dst, int32_t imm32) {
never@739 1209 prefix(dst);
never@739 1210 emit_arith(0x81, 0xF8, dst, imm32);
never@739 1211 }
never@739 1212
never@739 1213 void Assembler::cmpl(Register dst, Register src) {
never@739 1214 (void) prefix_and_encode(dst->encoding(), src->encoding());
never@739 1215 emit_arith(0x3B, 0xC0, dst, src);
never@739 1216 }
never@739 1217
never@739 1218
never@739 1219 void Assembler::cmpl(Register dst, Address src) {
never@739 1220 InstructionMark im(this);
never@739 1221 prefix(src, dst);
never@739 1222 emit_byte(0x3B);
never@739 1223 emit_operand(dst, src);
never@739 1224 }
never@739 1225
never@739 1226 void Assembler::cmpw(Address dst, int imm16) {
never@739 1227 InstructionMark im(this);
never@739 1228 assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers");
never@739 1229 emit_byte(0x66);
never@739 1230 emit_byte(0x81);
never@739 1231 emit_operand(rdi, dst, 2);
never@739 1232 emit_word(imm16);
never@739 1233 }
never@739 1234
never@739 1235 // The 32-bit cmpxchg compares the value at adr with the contents of rax
never@739 1236 // and stores reg into adr if they are equal; otherwise, the value at adr is loaded into rax.
never@739 1237 // The ZF is set if the compared values were equal, and cleared otherwise.
never@739 1238 void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
coleenp@4145 1239 InstructionMark im(this);
coleenp@4145 1240 prefix(adr, reg);
coleenp@4145 1241 emit_byte(0x0F);
coleenp@4145 1242 emit_byte(0xB1);
coleenp@4145 1243 emit_operand(reg, adr);
never@739 1244 }
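
// Note: this emits only the 0x0F 0xB1 cmpxchg itself; when the update must be
// atomic on MP systems, a LOCK prefix (0xF0, recognized above in
// locate_operand) has to be emitted separately in front of it.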
never@739 1245
never@739 1246 void Assembler::comisd(XMMRegister dst, Address src) {
never@739 1247 // NOTE: dbx seems to decode this as comiss even though the
never@739 1248 // 0x66 is there. Strangely, ucomisd comes out correct
never@739 1249 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1250 emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66);
kvn@3388 1251 }
kvn@3388 1252
kvn@3388 1253 void Assembler::comisd(XMMRegister dst, XMMRegister src) {
kvn@3388 1254 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1255 emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66);
never@739 1256 }
never@739 1257
never@739 1258 void Assembler::comiss(XMMRegister dst, Address src) {
never@739 1259 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 1260 emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE);
never@739 1261 }
never@739 1262
kvn@3388 1263 void Assembler::comiss(XMMRegister dst, XMMRegister src) {
kvn@3388 1264 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 1265 emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE);
kvn@3388 1266 }
kvn@3388 1267
twisti@4318 1268 void Assembler::cpuid() {
twisti@4318 1269 emit_byte(0x0F);
twisti@4318 1270 emit_byte(0xA2);
twisti@4318 1271 }
twisti@4318 1272
never@739 1273 void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
never@739 1274 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1275 emit_simd_arith_nonds(0xE6, dst, src, VEX_SIMD_F3);
never@739 1276 }
never@739 1277
never@739 1278 void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
never@739 1279 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1280 emit_simd_arith_nonds(0x5B, dst, src, VEX_SIMD_NONE);
never@739 1281 }
never@739 1282
never@739 1283 void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
never@739 1284 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1285 emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2);
never@739 1286 }
never@739 1287
kvn@3388 1288 void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
kvn@3388 1289 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1290 emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2);
kvn@3388 1291 }
kvn@3388 1292
never@739 1293 void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
never@739 1294 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@3388 1295 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2);
never@739 1296 emit_byte(0x2A);
never@739 1297 emit_byte(0xC0 | encode);
never@739 1298 }
never@739 1299
kvn@3388 1300 void Assembler::cvtsi2sdl(XMMRegister dst, Address src) {
kvn@3388 1301 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1302 emit_simd_arith(0x2A, dst, src, VEX_SIMD_F2);
kvn@3388 1303 }
kvn@3388 1304
never@739 1305 void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
never@739 1306 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@3388 1307 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3);
never@739 1308 emit_byte(0x2A);
never@739 1309 emit_byte(0xC0 | encode);
never@739 1310 }
never@739 1311
kvn@3388 1312 void Assembler::cvtsi2ssl(XMMRegister dst, Address src) {
kvn@3388 1313 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 1314 emit_simd_arith(0x2A, dst, src, VEX_SIMD_F3);
kvn@3388 1315 }
kvn@3388 1316
never@739 1317 void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
never@739 1318 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1319 emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3);
never@739 1320 }
never@739 1321
kvn@3388 1322 void Assembler::cvtss2sd(XMMRegister dst, Address src) {
kvn@3388 1323 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1324 emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3);
kvn@3388 1325 }
kvn@3388 1326
kvn@3388 1327
never@739 1328 void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
never@739 1329 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@3388 1330 int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F2);
never@739 1331 emit_byte(0x2C);
never@739 1332 emit_byte(0xC0 | encode);
never@739 1333 }
never@739 1334
never@739 1335 void Assembler::cvttss2sil(Register dst, XMMRegister src) {
never@739 1336 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@3388 1337 int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F3);
never@739 1338 emit_byte(0x2C);
never@739 1339 emit_byte(0xC0 | encode);
never@739 1340 }
never@739 1341
never@739 1342 void Assembler::decl(Address dst) {
never@739 1343 // Don't use it directly. Use MacroAssembler::decrement() instead.
never@739 1344 InstructionMark im(this);
never@739 1345 prefix(dst);
never@739 1346 emit_byte(0xFF);
never@739 1347 emit_operand(rcx, dst);
never@739 1348 }
never@739 1349
never@739 1350 void Assembler::divsd(XMMRegister dst, Address src) {
never@739 1351 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1352 emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2);
never@739 1353 }
never@739 1354
never@739 1355 void Assembler::divsd(XMMRegister dst, XMMRegister src) {
never@739 1356 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1357 emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2);
never@739 1358 }
never@739 1359
never@739 1360 void Assembler::divss(XMMRegister dst, Address src) {
never@739 1361 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 1362 emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3);
never@739 1363 }
never@739 1364
never@739 1365 void Assembler::divss(XMMRegister dst, XMMRegister src) {
never@739 1366 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 1367 emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3);
never@739 1368 }
never@739 1369
never@739 1370 void Assembler::emms() {
never@739 1371 NOT_LP64(assert(VM_Version::supports_mmx(), ""));
never@739 1372 emit_byte(0x0F);
never@739 1373 emit_byte(0x77);
never@739 1374 }
never@739 1375
never@739 1376 void Assembler::hlt() {
never@739 1377 emit_byte(0xF4);
never@739 1378 }
never@739 1379
never@739 1380 void Assembler::idivl(Register src) {
never@739 1381 int encode = prefix_and_encode(src->encoding());
never@739 1382 emit_byte(0xF7);
never@739 1383 emit_byte(0xF8 | encode);
never@739 1384 }
never@739 1385
kvn@2275 1386 void Assembler::divl(Register src) { // Unsigned
kvn@2275 1387 int encode = prefix_and_encode(src->encoding());
kvn@2275 1388 emit_byte(0xF7);
kvn@2275 1389 emit_byte(0xF0 | encode);
kvn@2275 1390 }
kvn@2275 1391
never@739 1392 void Assembler::imull(Register dst, Register src) {
never@739 1393 int encode = prefix_and_encode(dst->encoding(), src->encoding());
never@739 1394 emit_byte(0x0F);
never@739 1395 emit_byte(0xAF);
never@739 1396 emit_byte(0xC0 | encode);
never@739 1397 }
never@739 1398
never@739 1399
never@739 1400 void Assembler::imull(Register dst, Register src, int value) {
never@739 1401 int encode = prefix_and_encode(dst->encoding(), src->encoding());
never@739 1402 if (is8bit(value)) {
never@739 1403 emit_byte(0x6B);
never@739 1404 emit_byte(0xC0 | encode);
kvn@2269 1405 emit_byte(value & 0xFF);
never@739 1406 } else {
never@739 1407 emit_byte(0x69);
never@739 1408 emit_byte(0xC0 | encode);
never@739 1409 emit_long(value);
never@739 1410 }
never@739 1411 }
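// The 8-bit immediate form above (0x6B) saves three bytes over the 32-bit form
// (0x69). A minimal standalone sketch of that size-based selection, in plain
// C++ rather than HotSpot's Assembler types (helper names are illustrative):
#if 0  // illustrative sketch only
#include <stdint.h>
#include <vector>

static bool fits_in_byte(int32_t v) { return -128 <= v && v < 128; }

static void sketch_imul_rr_imm(std::vector<uint8_t>& buf,
                               int dst_enc, int src_enc, int32_t value) {
  uint8_t modrm = 0xC0 | (dst_enc << 3) | src_enc;   // register-direct ModRM
  if (fits_in_byte(value)) {
    buf.push_back(0x6B); buf.push_back(modrm);       // IMUL r32, r/m32, imm8
    buf.push_back((uint8_t)value);
  } else {
    buf.push_back(0x69); buf.push_back(modrm);       // IMUL r32, r/m32, imm32
    for (int i = 0; i < 4; i++) {
      buf.push_back((uint8_t)(value >> (8 * i)));    // little-endian imm32
    }
  }
}
#endif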
never@739 1412
never@739 1413 void Assembler::incl(Address dst) {
never@739 1414 // Don't use it directly. Use MacroAssembler::increment() instead.
never@739 1415 InstructionMark im(this);
never@739 1416 prefix(dst);
never@739 1417 emit_byte(0xFF);
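  // rax likewise just encodes the /0 opcode extension: 0xFF /0 is INC r/m32.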
never@739 1418 emit_operand(rax, dst);
never@739 1419 }
never@739 1420
kvn@3049 1421 void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
kvn@3049 1422 InstructionMark im(this);
never@739 1423 assert((0 <= cc) && (cc < 16), "illegal cc");
never@739 1424 if (L.is_bound()) {
never@739 1425 address dst = target(L);
never@739 1426 assert(dst != NULL, "jcc most probably wrong");
never@739 1427
never@739 1428 const int short_size = 2;
never@739 1429 const int long_size = 6;
twisti@4317 1430 intptr_t offs = (intptr_t)dst - (intptr_t)pc();
kvn@3049 1431 if (maybe_short && is8bit(offs - short_size)) {
never@739 1432 // 0111 tttn #8-bit disp
never@739 1433 emit_byte(0x70 | cc);
never@739 1434 emit_byte((offs - short_size) & 0xFF);
never@739 1435 } else {
never@739 1436 // 0000 1111 1000 tttn #32-bit disp
never@739 1437 assert(is_simm32(offs - long_size),
never@739 1438 "must be 32bit offset (call4)");
never@739 1439 emit_byte(0x0F);
never@739 1440 emit_byte(0x80 | cc);
never@739 1441 emit_long(offs - long_size);
never@739 1442 }
never@739 1443 } else {
never@739 1444 // Note: we could eliminate conditional jumps to this jump if the
never@739 1445 // condition is the same; however, that seems to be a rather unlikely case.
never@739 1446 // Note: use jccb() if the label to be bound is very close, to get
never@739 1447 // an 8-bit displacement.
never@739 1448 L.add_patch_at(code(), locator());
never@739 1449 emit_byte(0x0F);
never@739 1450 emit_byte(0x80 | cc);
never@739 1451 emit_long(0);
never@739 1452 }
never@739 1453 }
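// For a bound label the displacement is measured from the end of the chosen
// encoding, hence the "offs - short_size" test before falling back to the
// 6-byte 0x0F 0x8x form. A standalone sketch of that choice in plain C++
// (function names are illustrative):
#if 0  // illustrative sketch only
#include <stdint.h>

static bool fits_in_byte(int64_t x) { return -128 <= x && x < 128; }

// Returns the encoded size of a jcc whose target is `target_minus_pc` bytes
// away from the start of the branch instruction.
static int jcc_encoded_size(int64_t target_minus_pc) {
  const int short_size = 2;   // 0x70+cc, disp8
  const int long_size  = 6;   // 0x0F, 0x80+cc, disp32
  return fits_in_byte(target_minus_pc - short_size) ? short_size : long_size;
}
#endif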
never@739 1454
never@739 1455 void Assembler::jccb(Condition cc, Label& L) {
never@739 1456 if (L.is_bound()) {
never@739 1457 const int short_size = 2;
never@739 1458 address entry = target(L);
kvn@3395 1459 #ifdef ASSERT
twisti@4317 1460 intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
kvn@3395 1461 intptr_t delta = short_branch_delta();
kvn@3395 1462 if (delta != 0) {
kvn@3395 1463 dist += (dist < 0 ? (-delta) :delta);
kvn@3395 1464 }
kvn@3395 1465 assert(is8bit(dist), "Displacement too large for a short jmp");
kvn@3395 1466 #endif
twisti@4317 1467 intptr_t offs = (intptr_t)entry - (intptr_t)pc();
never@739 1468 // 0111 tttn #8-bit disp
never@739 1469 emit_byte(0x70 | cc);
never@739 1470 emit_byte((offs - short_size) & 0xFF);
never@739 1471 } else {
never@739 1472 InstructionMark im(this);
never@739 1473 L.add_patch_at(code(), locator());
never@739 1474 emit_byte(0x70 | cc);
never@739 1475 emit_byte(0);
never@739 1476 }
never@739 1477 }
never@739 1478
never@739 1479 void Assembler::jmp(Address adr) {
never@739 1480 InstructionMark im(this);
never@739 1481 prefix(adr);
never@739 1482 emit_byte(0xFF);
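  // rsp supplies the /4 opcode extension: 0xFF /4 is the indirect JMP r/m form.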
never@739 1483 emit_operand(rsp, adr);
never@739 1484 }
never@739 1485
kvn@3049 1486 void Assembler::jmp(Label& L, bool maybe_short) {
never@739 1487 if (L.is_bound()) {
never@739 1488 address entry = target(L);
never@739 1489 assert(entry != NULL, "jmp most probably wrong");
never@739 1490 InstructionMark im(this);
never@739 1491 const int short_size = 2;
never@739 1492 const int long_size = 5;
twisti@4317 1493 intptr_t offs = entry - pc();
kvn@3049 1494 if (maybe_short && is8bit(offs - short_size)) {
never@739 1495 emit_byte(0xEB);
never@739 1496 emit_byte((offs - short_size) & 0xFF);
never@739 1497 } else {
never@739 1498 emit_byte(0xE9);
never@739 1499 emit_long(offs - long_size);
never@739 1500 }
never@739 1501 } else {
never@739 1502 // By default, forward jumps are always 32-bit displacements, since
never@739 1503 // we can't yet know where the label will be bound. If you're sure that
never@739 1504 // the forward jump will not run beyond 256 bytes, use jmpb to
never@739 1505 // force an 8-bit displacement.
never@739 1506 InstructionMark im(this);
never@739 1507 L.add_patch_at(code(), locator());
never@739 1508 emit_byte(0xE9);
never@739 1509 emit_long(0);
never@739 1510 }
never@739 1511 }
never@739 1512
never@739 1513 void Assembler::jmp(Register entry) {
never@739 1514 int encode = prefix_and_encode(entry->encoding());
never@739 1515 emit_byte(0xFF);
never@739 1516 emit_byte(0xE0 | encode);
never@739 1517 }
never@739 1518
never@739 1519 void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
never@739 1520 InstructionMark im(this);
never@739 1521 emit_byte(0xE9);
never@739 1522 assert(dest != NULL, "must have a target");
twisti@4317 1523 intptr_t disp = dest - (pc() + sizeof(int32_t));
never@739 1524 assert(is_simm32(disp), "must be 32bit offset (jmp)");
never@739 1525 emit_data(disp, rspec.reloc(), call32_operand);
never@739 1526 }
never@739 1527
never@739 1528 void Assembler::jmpb(Label& L) {
never@739 1529 if (L.is_bound()) {
never@739 1530 const int short_size = 2;
never@739 1531 address entry = target(L);
never@739 1532 assert(entry != NULL, "jmp most probably wrong");
kvn@3395 1533 #ifdef ASSERT
twisti@4317 1534 intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
kvn@3395 1535 intptr_t delta = short_branch_delta();
kvn@3395 1536 if (delta != 0) {
kvn@3395 1537 dist += (dist < 0 ? (-delta) :delta);
kvn@3395 1538 }
kvn@3395 1539 assert(is8bit(dist), "Displacement too large for a short jmp");
kvn@3395 1540 #endif
twisti@4317 1541 intptr_t offs = entry - pc();
never@739 1542 emit_byte(0xEB);
never@739 1543 emit_byte((offs - short_size) & 0xFF);
never@739 1544 } else {
never@739 1545 InstructionMark im(this);
never@739 1546 L.add_patch_at(code(), locator());
never@739 1547 emit_byte(0xEB);
never@739 1548 emit_byte(0);
never@739 1549 }
never@739 1550 }
never@739 1551
never@739 1552 void Assembler::ldmxcsr( Address src) {
never@739 1553 NOT_LP64(assert(VM_Version::supports_sse(), ""));
never@739 1554 InstructionMark im(this);
never@739 1555 prefix(src);
never@739 1556 emit_byte(0x0F);
never@739 1557 emit_byte(0xAE);
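  // as_Register(2) encodes the /2 extension: 0x0F 0xAE /2 is LDMXCSR.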
never@739 1558 emit_operand(as_Register(2), src);
never@739 1559 }
never@739 1560
never@739 1561 void Assembler::leal(Register dst, Address src) {
never@739 1562 InstructionMark im(this);
never@739 1563 #ifdef _LP64
never@739 1564 emit_byte(0x67); // addr32
never@739 1565 prefix(src, dst);
never@739 1566 #endif // LP64
never@739 1567 emit_byte(0x8D);
never@739 1568 emit_operand(dst, src);
never@739 1569 }
never@739 1570
twisti@4318 1571 void Assembler::lfence() {
twisti@4318 1572 emit_byte(0x0F);
twisti@4318 1573 emit_byte(0xAE);
twisti@4318 1574 emit_byte(0xE8);
twisti@4318 1575 }
twisti@4318 1576
never@739 1577 void Assembler::lock() {
coleenp@4145 1578 emit_byte(0xF0);
never@739 1579 }
never@739 1580
twisti@1210 1581 void Assembler::lzcntl(Register dst, Register src) {
twisti@1210 1582 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
twisti@1210 1583 emit_byte(0xF3);
twisti@1210 1584 int encode = prefix_and_encode(dst->encoding(), src->encoding());
twisti@1210 1585 emit_byte(0x0F);
twisti@1210 1586 emit_byte(0xBD);
twisti@1210 1587 emit_byte(0xC0 | encode);
twisti@1210 1588 }
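// The assert above documents the fallback: on CPUs without LZCNT the 0xF3
// prefix is ignored and the bytes decode as BSR, which returns the index of
// the highest set bit rather than the leading-zero count. A plain C++ sketch
// of the relationship (illustrative only):
#if 0  // illustrative sketch only
#include <stdint.h>

static int bsr32(uint32_t x) {      // undefined for x == 0, as in hardware
  int i = 31;
  while (((x >> i) & 1) == 0) i--;
  return i;
}

static int lzcnt32(uint32_t x) {    // lzcnt(x) == 31 - bsr(x) for x != 0
  return (x == 0) ? 32 : 31 - bsr32(x);
}
#endif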
twisti@1210 1589
never@1106 1590 // Emit mfence instruction
never@739 1591 void Assembler::mfence() {
never@1106 1592 NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
never@1106 1593 emit_byte( 0x0F );
never@1106 1594 emit_byte( 0xAE );
never@1106 1595 emit_byte( 0xF0 );
never@739 1596 }
never@739 1597
never@739 1598 void Assembler::mov(Register dst, Register src) {
never@739 1599 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
never@739 1600 }
never@739 1601
never@739 1602 void Assembler::movapd(XMMRegister dst, XMMRegister src) {
never@739 1603 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1604 emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_66);
never@739 1605 }
never@739 1606
never@739 1607 void Assembler::movaps(XMMRegister dst, XMMRegister src) {
never@739 1608 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 1609 emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_NONE);
never@739 1610 }
never@739 1611
kvn@3882 1612 void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
kvn@3882 1613 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@3882 1614 int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE);
kvn@3882 1615 emit_byte(0x16);
kvn@3882 1616 emit_byte(0xC0 | encode);
kvn@3882 1617 }
kvn@3882 1618
never@739 1619 void Assembler::movb(Register dst, Address src) {
never@739 1620 NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
never@739 1621 InstructionMark im(this);
never@739 1622 prefix(src, dst, true);
never@739 1623 emit_byte(0x8A);
never@739 1624 emit_operand(dst, src);
never@739 1625 }
never@739 1626
never@739 1627
never@739 1628 void Assembler::movb(Address dst, int imm8) {
never@739 1629 InstructionMark im(this);
never@739 1630 prefix(dst);
never@739 1631 emit_byte(0xC6);
never@739 1632 emit_operand(rax, dst, 1);
never@739 1633 emit_byte(imm8);
never@739 1634 }
never@739 1635
never@739 1636
never@739 1637 void Assembler::movb(Address dst, Register src) {
never@739 1638 assert(src->has_byte_register(), "must have byte register");
never@739 1639 InstructionMark im(this);
never@739 1640 prefix(dst, src, true);
never@739 1641 emit_byte(0x88);
never@739 1642 emit_operand(src, dst);
never@739 1643 }
never@739 1644
never@739 1645 void Assembler::movdl(XMMRegister dst, Register src) {
never@739 1646 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@3388 1647 int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_66);
never@739 1648 emit_byte(0x6E);
never@739 1649 emit_byte(0xC0 | encode);
never@739 1650 }
never@739 1651
never@739 1652 void Assembler::movdl(Register dst, XMMRegister src) {
never@739 1653 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
never@739 1654 // swap src/dst to get correct prefix
kvn@3388 1655 int encode = simd_prefix_and_encode(src, dst, VEX_SIMD_66);
never@739 1656 emit_byte(0x7E);
never@739 1657 emit_byte(0xC0 | encode);
never@739 1658 }
never@739 1659
kvn@2602 1660 void Assembler::movdl(XMMRegister dst, Address src) {
kvn@2602 1661 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@2602 1662 InstructionMark im(this);
kvn@3388 1663 simd_prefix(dst, src, VEX_SIMD_66);
kvn@2602 1664 emit_byte(0x6E);
kvn@2602 1665 emit_operand(dst, src);
kvn@2602 1666 }
kvn@2602 1667
kvn@3882 1668 void Assembler::movdl(Address dst, XMMRegister src) {
kvn@3882 1669 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@3882 1670 InstructionMark im(this);
kvn@3882 1671 simd_prefix(dst, src, VEX_SIMD_66);
kvn@3882 1672 emit_byte(0x7E);
kvn@3882 1673 emit_operand(src, dst);
kvn@3882 1674 }
kvn@3882 1675
kvn@3388 1676 void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
never@739 1677 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1678 emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66);
kvn@3388 1679 }
kvn@3388 1680
kvn@3388 1681 void Assembler::movdqu(XMMRegister dst, Address src) {
kvn@3388 1682 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1683 emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
never@739 1684 }
never@739 1685
kvn@3388 1686 void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
never@739 1687 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1688 emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
never@739 1689 }
never@739 1690
kvn@840 1691 void Assembler::movdqu(Address dst, XMMRegister src) {
kvn@840 1692 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@840 1693 InstructionMark im(this);
kvn@3388 1694 simd_prefix(dst, src, VEX_SIMD_F3);
kvn@840 1695 emit_byte(0x7F);
kvn@840 1696 emit_operand(src, dst);
kvn@840 1697 }
kvn@840 1698
kvn@3882 1699 // Move Unaligned 256-bit Vector
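// (vector256 == true sets VEX.L, selecting the full 256-bit ymm width)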
kvn@3882 1700 void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
kvn@3882 1701 assert(UseAVX, "");
kvn@3882 1702 bool vector256 = true;
kvn@3882 1703 int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, vector256);
kvn@3882 1704 emit_byte(0x6F);
kvn@3882 1705 emit_byte(0xC0 | encode);
kvn@3882 1706 }
kvn@3882 1707
kvn@3882 1708 void Assembler::vmovdqu(XMMRegister dst, Address src) {
kvn@3882 1709 assert(UseAVX, "");
kvn@3882 1710 InstructionMark im(this);
kvn@3882 1711 bool vector256 = true;
kvn@3882 1712 vex_prefix(dst, xnoreg, src, VEX_SIMD_F3, vector256);
kvn@3882 1713 emit_byte(0x6F);
kvn@3882 1714 emit_operand(dst, src);
kvn@3882 1715 }
kvn@3882 1716
kvn@3882 1717 void Assembler::vmovdqu(Address dst, XMMRegister src) {
kvn@3882 1718 assert(UseAVX, "");
kvn@3882 1719 InstructionMark im(this);
kvn@3882 1720 bool vector256 = true;
kvn@3882 1721 // swap src<->dst for encoding
kvn@3882 1722 assert(src != xnoreg, "sanity");
kvn@3882 1723 vex_prefix(src, xnoreg, dst, VEX_SIMD_F3, vector256);
kvn@3882 1724 emit_byte(0x7F);
kvn@3882 1725 emit_operand(src, dst);
kvn@3882 1726 }
kvn@3882 1727
never@739 1728 // Uses zero extension on 64bit
never@739 1729
never@739 1730 void Assembler::movl(Register dst, int32_t imm32) {
never@739 1731 int encode = prefix_and_encode(dst->encoding());
never@739 1732 emit_byte(0xB8 | encode);
never@739 1733 emit_long(imm32);
never@739 1734 }
never@739 1735
never@739 1736 void Assembler::movl(Register dst, Register src) {
never@739 1737 int encode = prefix_and_encode(dst->encoding(), src->encoding());
never@739 1738 emit_byte(0x8B);
never@739 1739 emit_byte(0xC0 | encode);
never@739 1740 }
never@739 1741
never@739 1742 void Assembler::movl(Register dst, Address src) {
never@739 1743 InstructionMark im(this);
never@739 1744 prefix(src, dst);
never@739 1745 emit_byte(0x8B);
never@739 1746 emit_operand(dst, src);
never@739 1747 }
never@739 1748
never@739 1749 void Assembler::movl(Address dst, int32_t imm32) {
never@739 1750 InstructionMark im(this);
never@739 1751 prefix(dst);
never@739 1752 emit_byte(0xC7);
never@739 1753 emit_operand(rax, dst, 4);
never@739 1754 emit_long(imm32);
never@739 1755 }
never@739 1756
never@739 1757 void Assembler::movl(Address dst, Register src) {
never@739 1758 InstructionMark im(this);
never@739 1759 prefix(dst, src);
never@739 1760 emit_byte(0x89);
never@739 1761 emit_operand(src, dst);
never@739 1762 }
never@739 1763
never@739 1764 // Newer CPUs require the use of movsd and movss to avoid a partial register stall
never@739 1765 // when loading from memory. But for the old Opteron, use movlpd instead of movsd.
never@739 1766 // The selection is done in MacroAssembler::movdbl() and movflt().
never@739 1767 void Assembler::movlpd(XMMRegister dst, Address src) {
never@739 1768 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1769 emit_simd_arith(0x12, dst, src, VEX_SIMD_66);
never@739 1770 }
never@739 1771
never@739 1772 void Assembler::movq( MMXRegister dst, Address src ) {
never@739 1773 assert( VM_Version::supports_mmx(), "" );
never@739 1774 emit_byte(0x0F);
never@739 1775 emit_byte(0x6F);
never@739 1776 emit_operand(dst, src);
never@739 1777 }
never@739 1778
never@739 1779 void Assembler::movq( Address dst, MMXRegister src ) {
never@739 1780 assert( VM_Version::supports_mmx(), "" );
never@739 1781 emit_byte(0x0F);
never@739 1782 emit_byte(0x7F);
never@739 1783 // Workaround for a gcc (3.2.1-7a) bug.
never@739 1784 // In that version of gcc, with only an emit_operand(MMX, Address)
never@739 1785 // available, gcc will tail-jump and try to reverse the parameters,
never@739 1786 // completely obliterating dst in the process. By having a version
never@739 1787 // available that doesn't need to swap the args at the tail jump,
never@739 1788 // the bug is avoided.
never@739 1789 emit_operand(dst, src);
never@739 1790 }
never@739 1791
never@739 1792 void Assembler::movq(XMMRegister dst, Address src) {
never@739 1793 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
never@739 1794 InstructionMark im(this);
kvn@3388 1795 simd_prefix(dst, src, VEX_SIMD_F3);
never@739 1796 emit_byte(0x7E);
never@739 1797 emit_operand(dst, src);
never@739 1798 }
never@739 1799
never@739 1800 void Assembler::movq(Address dst, XMMRegister src) {
never@739 1801 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
never@739 1802 InstructionMark im(this);
kvn@3388 1803 simd_prefix(dst, src, VEX_SIMD_66);
never@739 1804 emit_byte(0xD6);
never@739 1805 emit_operand(src, dst);
never@739 1806 }
never@739 1807
never@739 1808 void Assembler::movsbl(Register dst, Address src) { // movsxb
never@739 1809 InstructionMark im(this);
never@739 1810 prefix(src, dst);
never@739 1811 emit_byte(0x0F);
never@739 1812 emit_byte(0xBE);
never@739 1813 emit_operand(dst, src);
never@739 1814 }
never@739 1815
never@739 1816 void Assembler::movsbl(Register dst, Register src) { // movsxb
never@739 1817 NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
never@739 1818 int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
never@739 1819 emit_byte(0x0F);
never@739 1820 emit_byte(0xBE);
never@739 1821 emit_byte(0xC0 | encode);
never@739 1822 }
never@739 1823
never@739 1824 void Assembler::movsd(XMMRegister dst, XMMRegister src) {
never@739 1825 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1826 emit_simd_arith(0x10, dst, src, VEX_SIMD_F2);
never@739 1827 }
never@739 1828
never@739 1829 void Assembler::movsd(XMMRegister dst, Address src) {
never@739 1830 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1831 emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F2);
never@739 1832 }
never@739 1833
never@739 1834 void Assembler::movsd(Address dst, XMMRegister src) {
never@739 1835 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
never@739 1836 InstructionMark im(this);
kvn@3388 1837 simd_prefix(dst, src, VEX_SIMD_F2);
never@739 1838 emit_byte(0x11);
never@739 1839 emit_operand(src, dst);
never@739 1840 }
never@739 1841
never@739 1842 void Assembler::movss(XMMRegister dst, XMMRegister src) {
never@739 1843 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 1844 emit_simd_arith(0x10, dst, src, VEX_SIMD_F3);
never@739 1845 }
never@739 1846
never@739 1847 void Assembler::movss(XMMRegister dst, Address src) {
never@739 1848 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 1849 emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F3);
never@739 1850 }
never@739 1851
never@739 1852 void Assembler::movss(Address dst, XMMRegister src) {
never@739 1853 NOT_LP64(assert(VM_Version::supports_sse(), ""));
never@739 1854 InstructionMark im(this);
kvn@3388 1855 simd_prefix(dst, src, VEX_SIMD_F3);
never@739 1856 emit_byte(0x11);
never@739 1857 emit_operand(src, dst);
never@739 1858 }
never@739 1859
never@739 1860 void Assembler::movswl(Register dst, Address src) { // movsxw
never@739 1861 InstructionMark im(this);
never@739 1862 prefix(src, dst);
never@739 1863 emit_byte(0x0F);
never@739 1864 emit_byte(0xBF);
never@739 1865 emit_operand(dst, src);
never@739 1866 }
never@739 1867
never@739 1868 void Assembler::movswl(Register dst, Register src) { // movsxw
never@739 1869 int encode = prefix_and_encode(dst->encoding(), src->encoding());
never@739 1870 emit_byte(0x0F);
never@739 1871 emit_byte(0xBF);
never@739 1872 emit_byte(0xC0 | encode);
never@739 1873 }
never@739 1874
never@739 1875 void Assembler::movw(Address dst, int imm16) {
never@739 1876 InstructionMark im(this);
never@739 1877
never@739 1878 emit_byte(0x66); // switch to 16-bit mode
never@739 1879 prefix(dst);
never@739 1880 emit_byte(0xC7);
never@739 1881 emit_operand(rax, dst, 2);
never@739 1882 emit_word(imm16);
never@739 1883 }
never@739 1884
never@739 1885 void Assembler::movw(Register dst, Address src) {
never@739 1886 InstructionMark im(this);
never@739 1887 emit_byte(0x66);
never@739 1888 prefix(src, dst);
never@739 1889 emit_byte(0x8B);
never@739 1890 emit_operand(dst, src);
never@739 1891 }
never@739 1892
never@739 1893 void Assembler::movw(Address dst, Register src) {
never@739 1894 InstructionMark im(this);
never@739 1895 emit_byte(0x66);
never@739 1896 prefix(dst, src);
never@739 1897 emit_byte(0x89);
never@739 1898 emit_operand(src, dst);
never@739 1899 }
never@739 1900
never@739 1901 void Assembler::movzbl(Register dst, Address src) { // movzxb
never@739 1902 InstructionMark im(this);
never@739 1903 prefix(src, dst);
never@739 1904 emit_byte(0x0F);
never@739 1905 emit_byte(0xB6);
never@739 1906 emit_operand(dst, src);
never@739 1907 }
never@739 1908
never@739 1909 void Assembler::movzbl(Register dst, Register src) { // movzxb
never@739 1910 NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
never@739 1911 int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
never@739 1912 emit_byte(0x0F);
never@739 1913 emit_byte(0xB6);
never@739 1914 emit_byte(0xC0 | encode);
never@739 1915 }
never@739 1916
never@739 1917 void Assembler::movzwl(Register dst, Address src) { // movzxw
never@739 1918 InstructionMark im(this);
never@739 1919 prefix(src, dst);
never@739 1920 emit_byte(0x0F);
never@739 1921 emit_byte(0xB7);
never@739 1922 emit_operand(dst, src);
never@739 1923 }
never@739 1924
never@739 1925 void Assembler::movzwl(Register dst, Register src) { // movzxw
never@739 1926 int encode = prefix_and_encode(dst->encoding(), src->encoding());
never@739 1927 emit_byte(0x0F);
never@739 1928 emit_byte(0xB7);
never@739 1929 emit_byte(0xC0 | encode);
never@739 1930 }
never@739 1931
never@739 1932 void Assembler::mull(Address src) {
never@739 1933 InstructionMark im(this);
never@739 1934 prefix(src);
never@739 1935 emit_byte(0xF7);
never@739 1936 emit_operand(rsp, src);
never@739 1937 }
never@739 1938
never@739 1939 void Assembler::mull(Register src) {
never@739 1940 int encode = prefix_and_encode(src->encoding());
never@739 1941 emit_byte(0xF7);
never@739 1942 emit_byte(0xE0 | encode);
never@739 1943 }
never@739 1944
never@739 1945 void Assembler::mulsd(XMMRegister dst, Address src) {
never@739 1946 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1947 emit_simd_arith(0x59, dst, src, VEX_SIMD_F2);
never@739 1948 }
never@739 1949
never@739 1950 void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
never@739 1951 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 1952 emit_simd_arith(0x59, dst, src, VEX_SIMD_F2);
never@739 1953 }
never@739 1954
never@739 1955 void Assembler::mulss(XMMRegister dst, Address src) {
never@739 1956 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 1957 emit_simd_arith(0x59, dst, src, VEX_SIMD_F3);
never@739 1958 }
never@739 1959
never@739 1960 void Assembler::mulss(XMMRegister dst, XMMRegister src) {
never@739 1961 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 1962 emit_simd_arith(0x59, dst, src, VEX_SIMD_F3);
never@739 1963 }
never@739 1964
never@739 1965 void Assembler::negl(Register dst) {
never@739 1966 int encode = prefix_and_encode(dst->encoding());
never@739 1967 emit_byte(0xF7);
never@739 1968 emit_byte(0xD8 | encode);
never@739 1969 }
never@739 1970
duke@435 1971 void Assembler::nop(int i) {
never@739 1972 #ifdef ASSERT
duke@435 1973 assert(i > 0, " ");
never@739 1974 // The fancy nops aren't currently recognized by debuggers, making it a
never@739 1975 // pain to disassemble code while debugging. If asserts are on, speed is
never@739 1976 // clearly not an issue, so simply use the traditional single-byte nop
never@739 1977 // to do alignment.
never@739 1978
never@739 1979 for (; i > 0 ; i--) emit_byte(0x90);
never@739 1980 return;
never@739 1981
never@739 1982 #endif // ASSERT
never@739 1983
duke@435 1984 if (UseAddressNop && VM_Version::is_intel()) {
duke@435 1985 //
duke@435 1986 // Using multi-bytes nops "0x0F 0x1F [address]" for Intel
duke@435 1987 // 1: 0x90
duke@435 1988 // 2: 0x66 0x90
duke@435 1989 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
duke@435 1990 // 4: 0x0F 0x1F 0x40 0x00
duke@435 1991 // 5: 0x0F 0x1F 0x44 0x00 0x00
duke@435 1992 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
duke@435 1993 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
duke@435 1994 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
duke@435 1995 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
duke@435 1996 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
duke@435 1997 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
duke@435 1998
duke@435 1999 // The rest coding is Intel specific - don't use consecutive address nops
duke@435 2000
duke@435 2001 // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
duke@435 2002 // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
duke@435 2003 // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
duke@435 2004 // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
duke@435 2005
duke@435 2006 while(i >= 15) {
duke@435 2007 // For Intel don't generate consecutive address nops (mix with regular nops)
duke@435 2008 i -= 15;
duke@435 2009 emit_byte(0x66); // size prefix
duke@435 2010 emit_byte(0x66); // size prefix
duke@435 2011 emit_byte(0x66); // size prefix
duke@435 2012 addr_nop_8();
duke@435 2013 emit_byte(0x66); // size prefix
duke@435 2014 emit_byte(0x66); // size prefix
duke@435 2015 emit_byte(0x66); // size prefix
duke@435 2016 emit_byte(0x90); // nop
duke@435 2017 }
duke@435 2018 switch (i) {
duke@435 2019 case 14:
duke@435 2020 emit_byte(0x66); // size prefix
duke@435 2021 case 13:
duke@435 2022 emit_byte(0x66); // size prefix
duke@435 2023 case 12:
duke@435 2024 addr_nop_8();
duke@435 2025 emit_byte(0x66); // size prefix
duke@435 2026 emit_byte(0x66); // size prefix
duke@435 2027 emit_byte(0x66); // size prefix
duke@435 2028 emit_byte(0x90); // nop
duke@435 2029 break;
duke@435 2030 case 11:
duke@435 2031 emit_byte(0x66); // size prefix
duke@435 2032 case 10:
duke@435 2033 emit_byte(0x66); // size prefix
duke@435 2034 case 9:
duke@435 2035 emit_byte(0x66); // size prefix
duke@435 2036 case 8:
duke@435 2037 addr_nop_8();
duke@435 2038 break;
duke@435 2039 case 7:
duke@435 2040 addr_nop_7();
duke@435 2041 break;
duke@435 2042 case 6:
duke@435 2043 emit_byte(0x66); // size prefix
duke@435 2044 case 5:
duke@435 2045 addr_nop_5();
duke@435 2046 break;
duke@435 2047 case 4:
duke@435 2048 addr_nop_4();
duke@435 2049 break;
duke@435 2050 case 3:
duke@435 2051 // Don't use "0x0F 0x1F 0x00" - need patching safe padding
duke@435 2052 emit_byte(0x66); // size prefix
duke@435 2053 case 2:
duke@435 2054 emit_byte(0x66); // size prefix
duke@435 2055 case 1:
duke@435 2056 emit_byte(0x90); // nop
duke@435 2057 break;
duke@435 2058 default:
duke@435 2059 assert(i == 0, " ");
duke@435 2060 }
duke@435 2061 return;
duke@435 2062 }
duke@435 2063 if (UseAddressNop && VM_Version::is_amd()) {
duke@435 2064 //
duke@435 2065 // Using multi-bytes nops "0x0F 0x1F [address]" for AMD.
duke@435 2066 // 1: 0x90
duke@435 2067 // 2: 0x66 0x90
duke@435 2068 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
duke@435 2069 // 4: 0x0F 0x1F 0x40 0x00
duke@435 2070 // 5: 0x0F 0x1F 0x44 0x00 0x00
duke@435 2071 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
duke@435 2072 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
duke@435 2073 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
duke@435 2074 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
duke@435 2075 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
duke@435 2076 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
duke@435 2077
duke@435 2078 // The rest coding is AMD specific - use consecutive address nops
duke@435 2079
duke@435 2080 // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
duke@435 2081 // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
duke@435 2082 // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
duke@435 2083 // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
duke@435 2084 // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
duke@435 2085 // Size prefixes (0x66) are added for larger sizes
duke@435 2086
duke@435 2087 while(i >= 22) {
duke@435 2088 i -= 11;
duke@435 2089 emit_byte(0x66); // size prefix
duke@435 2090 emit_byte(0x66); // size prefix
duke@435 2091 emit_byte(0x66); // size prefix
duke@435 2092 addr_nop_8();
duke@435 2093 }
duke@435 2094 // Generate first nop for size between 21-12
duke@435 2095 switch (i) {
duke@435 2096 case 21:
duke@435 2097 i -= 1;
duke@435 2098 emit_byte(0x66); // size prefix
duke@435 2099 case 20:
duke@435 2100 case 19:
duke@435 2101 i -= 1;
duke@435 2102 emit_byte(0x66); // size prefix
duke@435 2103 case 18:
duke@435 2104 case 17:
duke@435 2105 i -= 1;
duke@435 2106 emit_byte(0x66); // size prefix
duke@435 2107 case 16:
duke@435 2108 case 15:
duke@435 2109 i -= 8;
duke@435 2110 addr_nop_8();
duke@435 2111 break;
duke@435 2112 case 14:
duke@435 2113 case 13:
duke@435 2114 i -= 7;
duke@435 2115 addr_nop_7();
duke@435 2116 break;
duke@435 2117 case 12:
duke@435 2118 i -= 6;
duke@435 2119 emit_byte(0x66); // size prefix
duke@435 2120 addr_nop_5();
duke@435 2121 break;
duke@435 2122 default:
duke@435 2123 assert(i < 12, " ");
duke@435 2124 }
duke@435 2125
duke@435 2126 // Generate second nop for size between 11-1
duke@435 2127 switch (i) {
duke@435 2128 case 11:
duke@435 2129 emit_byte(0x66); // size prefix
duke@435 2130 case 10:
duke@435 2131 emit_byte(0x66); // size prefix
duke@435 2132 case 9:
duke@435 2133 emit_byte(0x66); // size prefix
duke@435 2134 case 8:
duke@435 2135 addr_nop_8();
duke@435 2136 break;
duke@435 2137 case 7:
duke@435 2138 addr_nop_7();
duke@435 2139 break;
duke@435 2140 case 6:
duke@435 2141 emit_byte(0x66); // size prefix
duke@435 2142 case 5:
duke@435 2143 addr_nop_5();
duke@435 2144 break;
duke@435 2145 case 4:
duke@435 2146 addr_nop_4();
duke@435 2147 break;
duke@435 2148 case 3:
duke@435 2149 // Don't use "0x0F 0x1F 0x00" - need patching safe padding
duke@435 2150 emit_byte(0x66); // size prefix
duke@435 2151 case 2:
duke@435 2152 emit_byte(0x66); // size prefix
duke@435 2153 case 1:
duke@435 2154 emit_byte(0x90); // nop
duke@435 2155 break;
duke@435 2156 default:
duke@435 2157 assert(i == 0, " ");
duke@435 2158 }
duke@435 2159 return;
duke@435 2160 }
duke@435 2161
duke@435 2162 // Using nops with size prefixes "0x66 0x90".
duke@435 2163 // From AMD Optimization Guide:
duke@435 2164 // 1: 0x90
duke@435 2165 // 2: 0x66 0x90
duke@435 2166 // 3: 0x66 0x66 0x90
duke@435 2167 // 4: 0x66 0x66 0x66 0x90
duke@435 2168 // 5: 0x66 0x66 0x90 0x66 0x90
duke@435 2169 // 6: 0x66 0x66 0x90 0x66 0x66 0x90
duke@435 2170 // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
duke@435 2171 // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
duke@435 2172 // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
duke@435 2173 // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
duke@435 2174 //
duke@435 2175 while(i > 12) {
duke@435 2176 i -= 4;
duke@435 2177 emit_byte(0x66); // size prefix
duke@435 2178 emit_byte(0x66);
duke@435 2179 emit_byte(0x66);
duke@435 2180 emit_byte(0x90); // nop
duke@435 2181 }
duke@435 2182 // 1 - 12 nops
duke@435 2183 if(i > 8) {
duke@435 2184 if(i > 9) {
duke@435 2185 i -= 1;
duke@435 2186 emit_byte(0x66);
duke@435 2187 }
duke@435 2188 i -= 3;
duke@435 2189 emit_byte(0x66);
duke@435 2190 emit_byte(0x66);
duke@435 2191 emit_byte(0x90);
duke@435 2192 }
duke@435 2193 // 1 - 8 nops
duke@435 2194 if(i > 4) {
duke@435 2195 if(i > 6) {
duke@435 2196 i -= 1;
duke@435 2197 emit_byte(0x66);
duke@435 2198 }
duke@435 2199 i -= 3;
duke@435 2200 emit_byte(0x66);
duke@435 2201 emit_byte(0x66);
duke@435 2202 emit_byte(0x90);
duke@435 2203 }
duke@435 2204 switch (i) {
duke@435 2205 case 4:
duke@435 2206 emit_byte(0x66);
duke@435 2207 case 3:
duke@435 2208 emit_byte(0x66);
duke@435 2209 case 2:
duke@435 2210 emit_byte(0x66);
duke@435 2211 case 1:
duke@435 2212 emit_byte(0x90);
duke@435 2213 break;
duke@435 2214 default:
duke@435 2215 assert(i == 0, " ");
duke@435 2216 }
duke@435 2217 }
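// For reference, the address-form NOPs emitted by addr_nop_4/5/7/8 above
// correspond to the byte sequences listed in the comment tables; a few of them
// spelled out as plain C arrays (illustrative, mirroring the lists above):
#if 0  // illustrative sketch only
static const unsigned char addr_nop_4_bytes[] = { 0x0F, 0x1F, 0x40, 0x00 };
static const unsigned char addr_nop_5_bytes[] = { 0x0F, 0x1F, 0x44, 0x00, 0x00 };
static const unsigned char addr_nop_7_bytes[] = { 0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00 };
static const unsigned char addr_nop_8_bytes[] = { 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 };
#endif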
duke@435 2218
never@739 2219 void Assembler::notl(Register dst) {
never@739 2220 int encode = prefix_and_encode(dst->encoding());
never@739 2221 emit_byte(0xF7);
never@739 2222 emit_byte(0xD0 | encode );
never@739 2223 }
never@739 2224
never@739 2225 void Assembler::orl(Address dst, int32_t imm32) {
never@739 2226 InstructionMark im(this);
never@739 2227 prefix(dst);
phh@2423 2228 emit_arith_operand(0x81, rcx, dst, imm32);
never@739 2229 }
never@739 2230
never@739 2231 void Assembler::orl(Register dst, int32_t imm32) {
never@739 2232 prefix(dst);
never@739 2233 emit_arith(0x81, 0xC8, dst, imm32);
never@739 2234 }
never@739 2235
never@739 2236 void Assembler::orl(Register dst, Address src) {
never@739 2237 InstructionMark im(this);
never@739 2238 prefix(src, dst);
never@739 2239 emit_byte(0x0B);
never@739 2240 emit_operand(dst, src);
never@739 2241 }
never@739 2242
never@739 2243 void Assembler::orl(Register dst, Register src) {
never@739 2244 (void) prefix_and_encode(dst->encoding(), src->encoding());
never@739 2245 emit_arith(0x0B, 0xC0, dst, src);
never@739 2246 }
never@739 2247
kvn@3388 2248 void Assembler::packuswb(XMMRegister dst, Address src) {
kvn@3388 2249 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@3388 2250 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
kvn@4001 2251 emit_simd_arith(0x67, dst, src, VEX_SIMD_66);
kvn@3388 2252 }
kvn@3388 2253
kvn@3388 2254 void Assembler::packuswb(XMMRegister dst, XMMRegister src) {
kvn@3388 2255 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 2256 emit_simd_arith(0x67, dst, src, VEX_SIMD_66);
kvn@3388 2257 }
kvn@3388 2258
cfang@1116 2259 void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
cfang@1116 2260 assert(VM_Version::supports_sse4_2(), "");
kvn@3388 2261 InstructionMark im(this);
kvn@3388 2262 simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
cfang@1116 2263 emit_byte(0x61);
cfang@1116 2264 emit_operand(dst, src);
cfang@1116 2265 emit_byte(imm8);
cfang@1116 2266 }
cfang@1116 2267
cfang@1116 2268 void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
cfang@1116 2269 assert(VM_Version::supports_sse4_2(), "");
kvn@4001 2270 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
cfang@1116 2271 emit_byte(0x61);
cfang@1116 2272 emit_byte(0xC0 | encode);
cfang@1116 2273 emit_byte(imm8);
cfang@1116 2274 }
cfang@1116 2275
kvn@3388 2276 void Assembler::pmovzxbw(XMMRegister dst, Address src) {
kvn@3388 2277 assert(VM_Version::supports_sse4_1(), "");
kvn@3388 2278 InstructionMark im(this);
kvn@3388 2279 simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
kvn@3388 2280 emit_byte(0x30);
kvn@3388 2281 emit_operand(dst, src);
kvn@3388 2282 }
kvn@3388 2283
kvn@3388 2284 void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
kvn@3388 2285 assert(VM_Version::supports_sse4_1(), "");
kvn@4001 2286 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
kvn@3388 2287 emit_byte(0x30);
kvn@3388 2288 emit_byte(0xC0 | encode);
kvn@3388 2289 }
kvn@3388 2290
never@739 2291 // generic
never@739 2292 void Assembler::pop(Register dst) {
never@739 2293 int encode = prefix_and_encode(dst->encoding());
never@739 2294 emit_byte(0x58 | encode);
never@739 2295 }
never@739 2296
twisti@1078 2297 void Assembler::popcntl(Register dst, Address src) {
twisti@1078 2298 assert(VM_Version::supports_popcnt(), "must support");
twisti@1078 2299 InstructionMark im(this);
twisti@1078 2300 emit_byte(0xF3);
twisti@1078 2301 prefix(src, dst);
twisti@1078 2302 emit_byte(0x0F);
twisti@1078 2303 emit_byte(0xB8);
twisti@1078 2304 emit_operand(dst, src);
twisti@1078 2305 }
twisti@1078 2306
twisti@1078 2307 void Assembler::popcntl(Register dst, Register src) {
twisti@1078 2308 assert(VM_Version::supports_popcnt(), "must support");
twisti@1078 2309 emit_byte(0xF3);
twisti@1078 2310 int encode = prefix_and_encode(dst->encoding(), src->encoding());
twisti@1078 2311 emit_byte(0x0F);
twisti@1078 2312 emit_byte(0xB8);
twisti@1078 2313 emit_byte(0xC0 | encode);
twisti@1078 2314 }
twisti@1078 2315
never@739 2316 void Assembler::popf() {
never@739 2317 emit_byte(0x9D);
never@739 2318 }
never@739 2319
roland@1495 2320 #ifndef _LP64 // no 32bit push/pop on amd64
never@739 2321 void Assembler::popl(Address dst) {
never@739 2322 // NOTE: this will adjust the stack by 8 bytes on 64-bit
never@739 2323 InstructionMark im(this);
never@739 2324 prefix(dst);
never@739 2325 emit_byte(0x8F);
never@739 2326 emit_operand(rax, dst);
never@739 2327 }
roland@1495 2328 #endif
never@739 2329
never@739 2330 void Assembler::prefetch_prefix(Address src) {
never@739 2331 prefix(src);
never@739 2332 emit_byte(0x0F);
never@739 2333 }
never@739 2334
never@739 2335 void Assembler::prefetchnta(Address src) {
kvn@3071 2336 NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
never@739 2337 InstructionMark im(this);
never@739 2338 prefetch_prefix(src);
never@739 2339 emit_byte(0x18);
never@739 2340 emit_operand(rax, src); // 0, src
never@739 2341 }
never@739 2342
never@739 2343 void Assembler::prefetchr(Address src) {
kvn@3052 2344 assert(VM_Version::supports_3dnow_prefetch(), "must support");
never@739 2345 InstructionMark im(this);
never@739 2346 prefetch_prefix(src);
never@739 2347 emit_byte(0x0D);
never@739 2348 emit_operand(rax, src); // 0, src
never@739 2349 }
never@739 2350
never@739 2351 void Assembler::prefetcht0(Address src) {
never@739 2352 NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
never@739 2353 InstructionMark im(this);
never@739 2354 prefetch_prefix(src);
never@739 2355 emit_byte(0x18);
never@739 2356 emit_operand(rcx, src); // 1, src
never@739 2357 }
never@739 2358
never@739 2359 void Assembler::prefetcht1(Address src) {
never@739 2360 NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
never@739 2361 InstructionMark im(this);
never@739 2362 prefetch_prefix(src);
never@739 2363 emit_byte(0x18);
never@739 2364 emit_operand(rdx, src); // 2, src
never@739 2365 }
never@739 2366
never@739 2367 void Assembler::prefetcht2(Address src) {
never@739 2368 NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
never@739 2369 InstructionMark im(this);
never@739 2370 prefetch_prefix(src);
never@739 2371 emit_byte(0x18);
never@739 2372 emit_operand(rbx, src); // 3, src
never@739 2373 }
never@739 2374
never@739 2375 void Assembler::prefetchw(Address src) {
kvn@3052 2376 assert(VM_Version::supports_3dnow_prefetch(), "must support");
never@739 2377 InstructionMark im(this);
never@739 2378 prefetch_prefix(src);
never@739 2379 emit_byte(0x0D);
never@739 2380 emit_operand(rcx, src); // 1, src
never@739 2381 }
never@739 2382
never@739 2383 void Assembler::prefix(Prefix p) {
never@739 2384 a_byte(p);
never@739 2385 }
never@739 2386
kvn@4205 2387 void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
kvn@4205 2388 assert(VM_Version::supports_ssse3(), "");
kvn@4205 2389 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
kvn@4205 2390 emit_byte(0x00);
kvn@4205 2391 emit_byte(0xC0 | encode);
kvn@4205 2392 }
kvn@4205 2393
kvn@4205 2394 void Assembler::pshufb(XMMRegister dst, Address src) {
kvn@4205 2395 assert(VM_Version::supports_ssse3(), "");
kvn@4205 2396 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
kvn@4205 2397 InstructionMark im(this);
kvn@4205 2398 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
kvn@4205 2399 emit_byte(0x00);
kvn@4205 2400 emit_operand(dst, src);
kvn@4205 2401 }
kvn@4205 2402
never@739 2403 void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
never@739 2404 assert(isByte(mode), "invalid value");
never@739 2405 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 2406 emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_66);
never@739 2407 emit_byte(mode & 0xFF);
never@739 2408
never@739 2409 }
never@739 2410
never@739 2411 void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
never@739 2412 assert(isByte(mode), "invalid value");
never@739 2413 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@3388 2414 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
kvn@3388 2415 InstructionMark im(this);
kvn@3388 2416 simd_prefix(dst, src, VEX_SIMD_66);
never@739 2417 emit_byte(0x70);
never@739 2418 emit_operand(dst, src);
never@739 2419 emit_byte(mode & 0xFF);
never@739 2420 }
never@739 2421
never@739 2422 void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
never@739 2423 assert(isByte(mode), "invalid value");
never@739 2424 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 2425 emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_F2);
never@739 2426 emit_byte(mode & 0xFF);
never@739 2427 }
never@739 2428
never@739 2429 void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
never@739 2430 assert(isByte(mode), "invalid value");
never@739 2431 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@3388 2432 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
kvn@3388 2433 InstructionMark im(this);
kvn@3388 2434 simd_prefix(dst, src, VEX_SIMD_F2);
never@739 2435 emit_byte(0x70);
never@739 2436 emit_operand(dst, src);
never@739 2437 emit_byte(mode & 0xFF);
never@739 2438 }
never@739 2439
kvn@2602 2440 void Assembler::psrldq(XMMRegister dst, int shift) {
kvn@2602 2441 // Shift 128 bit value in xmm register by number of bytes.
kvn@2602 2442 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
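  // xmm3 is not a source operand here: its encoding (3) supplies the /3 opcode
  // extension of 0x66 0x0F 0x73 /3 (PSRLDQ).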
kvn@3388 2443 int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66);
kvn@2602 2444 emit_byte(0x73);
kvn@2602 2445 emit_byte(0xC0 | encode);
kvn@2602 2446 emit_byte(shift);
kvn@2602 2447 }
kvn@2602 2448
cfang@1116 2449 void Assembler::ptest(XMMRegister dst, Address src) {
cfang@1116 2450 assert(VM_Version::supports_sse4_1(), "");
kvn@3388 2451 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
kvn@3388 2452 InstructionMark im(this);
kvn@3388 2453 simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
cfang@1116 2454 emit_byte(0x17);
cfang@1116 2455 emit_operand(dst, src);
cfang@1116 2456 }
cfang@1116 2457
cfang@1116 2458 void Assembler::ptest(XMMRegister dst, XMMRegister src) {
cfang@1116 2459 assert(VM_Version::supports_sse4_1(), "");
kvn@4001 2460 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
cfang@1116 2461 emit_byte(0x17);
cfang@1116 2462 emit_byte(0xC0 | encode);
cfang@1116 2463 }
cfang@1116 2464
kvn@3388 2465 void Assembler::punpcklbw(XMMRegister dst, Address src) {
kvn@3388 2466 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@3388 2467 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
kvn@4001 2468 emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
kvn@3388 2469 }
kvn@3388 2470
never@739 2471 void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
never@739 2472 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 2473 emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
never@739 2474 }
never@739 2475
kvn@3388 2476 void Assembler::punpckldq(XMMRegister dst, Address src) {
kvn@3388 2477 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@3388 2478 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
kvn@4001 2479 emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
kvn@3388 2480 }
kvn@3388 2481
kvn@3388 2482 void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
kvn@3388 2483 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 2484 emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
kvn@3388 2485 }
kvn@3388 2486
kvn@3929 2487 void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
kvn@3929 2488 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 2489 emit_simd_arith(0x6C, dst, src, VEX_SIMD_66);
kvn@3929 2490 }
kvn@3929 2491
never@739 2492 void Assembler::push(int32_t imm32) {
never@739 2493 // in 64bits we push 64bits onto the stack but only
never@739 2494 // take a 32bit immediate
never@739 2495 emit_byte(0x68);
never@739 2496 emit_long(imm32);
never@739 2497 }
never@739 2498
never@739 2499 void Assembler::push(Register src) {
never@739 2500 int encode = prefix_and_encode(src->encoding());
never@739 2501
never@739 2502 emit_byte(0x50 | encode);
never@739 2503 }
never@739 2504
never@739 2505 void Assembler::pushf() {
never@739 2506 emit_byte(0x9C);
never@739 2507 }
never@739 2508
roland@1495 2509 #ifndef _LP64 // no 32bit push/pop on amd64
never@739 2510 void Assembler::pushl(Address src) {
never@739 2511 // Note: this will push 64 bits on 64-bit
never@739 2512 InstructionMark im(this);
never@739 2513 prefix(src);
never@739 2514 emit_byte(0xFF);
never@739 2515 emit_operand(rsi, src);
never@739 2516 }
roland@1495 2517 #endif
never@739 2518
never@739 2519 void Assembler::rcll(Register dst, int imm8) {
never@739 2520 assert(isShiftCount(imm8), "illegal shift count");
never@739 2521 int encode = prefix_and_encode(dst->encoding());
never@739 2522 if (imm8 == 1) {
never@739 2523 emit_byte(0xD1);
never@739 2524 emit_byte(0xD0 | encode);
never@739 2525 } else {
never@739 2526 emit_byte(0xC1);
never@739 2527 emit_byte(0xD0 | encode);
never@739 2528 emit_byte(imm8);
never@739 2529 }
never@739 2530 }
never@739 2531
never@739 2532 // copies data from [esi] to [edi] using rcx pointer-sized words
never@739 2533 // generic
never@739 2534 void Assembler::rep_mov() {
never@739 2535 emit_byte(0xF3);
never@739 2536 // MOVSQ
never@739 2537 LP64_ONLY(prefix(REX_W));
never@739 2538 emit_byte(0xA5);
never@739 2539 }
never@739 2540
never@739 2541 // sets rcx pointer-sized words at [edi] to the value in rax
never@739 2542 // generic
never@739 2543 void Assembler::rep_set() { // rep_set
never@739 2544 emit_byte(0xF3);
never@739 2545 // STOSQ
never@739 2546 LP64_ONLY(prefix(REX_W));
never@739 2547 emit_byte(0xAB);
never@739 2548 }
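// Rough C++ model (illustrative only) of what the emitted "rep stosq" does on
// 64-bit when the direction flag is clear: fill rcx pointer-sized words at
// [rdi] with the value in rax.
#if 0  // illustrative sketch only
#include <stdint.h>

static void rep_stosq_model(uint64_t* rdi, uint64_t rax, uint64_t rcx) {
  while (rcx-- != 0) {
    *rdi++ = rax;
  }
}
#endif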
never@739 2549
never@739 2550 // scans rcx pointer-sized words at [edi] for an occurrence of rax
never@739 2551 // generic
never@739 2552 void Assembler::repne_scan() { // repne_scan
never@739 2553 emit_byte(0xF2);
never@739 2554 // SCASQ
never@739 2555 LP64_ONLY(prefix(REX_W));
never@739 2556 emit_byte(0xAF);
never@739 2557 }
never@739 2558
never@739 2559 #ifdef _LP64
never@739 2560 // scans rcx 4-byte words at [edi] for an occurrence of rax
never@739 2561 // generic
never@739 2562 void Assembler::repne_scanl() { // repne_scan
never@739 2563 emit_byte(0xF2);
never@739 2564 // SCASL
never@739 2565 emit_byte(0xAF);
never@739 2566 }
never@739 2567 #endif
never@739 2568
duke@435 2569 void Assembler::ret(int imm16) {
duke@435 2570 if (imm16 == 0) {
duke@435 2571 emit_byte(0xC3);
duke@435 2572 } else {
duke@435 2573 emit_byte(0xC2);
duke@435 2574 emit_word(imm16);
duke@435 2575 }
duke@435 2576 }
duke@435 2577
never@739 2578 void Assembler::sahf() {
never@739 2579 #ifdef _LP64
never@739 2580 // Not supported in 64bit mode
never@739 2581 ShouldNotReachHere();
never@739 2582 #endif
never@739 2583 emit_byte(0x9E);
never@739 2584 }
never@739 2585
never@739 2586 void Assembler::sarl(Register dst, int imm8) {
never@739 2587 int encode = prefix_and_encode(dst->encoding());
never@739 2588 assert(isShiftCount(imm8), "illegal shift count");
never@739 2589 if (imm8 == 1) {
never@739 2590 emit_byte(0xD1);
never@739 2591 emit_byte(0xF8 | encode);
never@739 2592 } else {
never@739 2593 emit_byte(0xC1);
never@739 2594 emit_byte(0xF8 | encode);
never@739 2595 emit_byte(imm8);
never@739 2596 }
never@739 2597 }
never@739 2598
never@739 2599 void Assembler::sarl(Register dst) {
never@739 2600 int encode = prefix_and_encode(dst->encoding());
never@739 2601 emit_byte(0xD3);
never@739 2602 emit_byte(0xF8 | encode);
never@739 2603 }
never@739 2604
never@739 2605 void Assembler::sbbl(Address dst, int32_t imm32) {
never@739 2606 InstructionMark im(this);
never@739 2607 prefix(dst);
never@739 2608 emit_arith_operand(0x81, rbx, dst, imm32);
never@739 2609 }
never@739 2610
never@739 2611 void Assembler::sbbl(Register dst, int32_t imm32) {
never@739 2612 prefix(dst);
never@739 2613 emit_arith(0x81, 0xD8, dst, imm32);
never@739 2614 }
never@739 2615
never@739 2616
never@739 2617 void Assembler::sbbl(Register dst, Address src) {
never@739 2618 InstructionMark im(this);
never@739 2619 prefix(src, dst);
never@739 2620 emit_byte(0x1B);
never@739 2621 emit_operand(dst, src);
never@739 2622 }
never@739 2623
never@739 2624 void Assembler::sbbl(Register dst, Register src) {
never@739 2625 (void) prefix_and_encode(dst->encoding(), src->encoding());
never@739 2626 emit_arith(0x1B, 0xC0, dst, src);
never@739 2627 }
never@739 2628
never@739 2629 void Assembler::setb(Condition cc, Register dst) {
never@739 2630 assert(0 <= cc && cc < 16, "illegal cc");
never@739 2631 int encode = prefix_and_encode(dst->encoding(), true);
duke@435 2632 emit_byte(0x0F);
never@739 2633 emit_byte(0x90 | cc);
never@739 2634 emit_byte(0xC0 | encode);
never@739 2635 }
never@739 2636
never@739 2637 void Assembler::shll(Register dst, int imm8) {
never@739 2638 assert(isShiftCount(imm8), "illegal shift count");
never@739 2639 int encode = prefix_and_encode(dst->encoding());
never@739 2640 if (imm8 == 1 ) {
never@739 2641 emit_byte(0xD1);
never@739 2642 emit_byte(0xE0 | encode);
never@739 2643 } else {
never@739 2644 emit_byte(0xC1);
never@739 2645 emit_byte(0xE0 | encode);
never@739 2646 emit_byte(imm8);
never@739 2647 }
never@739 2648 }
never@739 2649
never@739 2650 void Assembler::shll(Register dst) {
never@739 2651 int encode = prefix_and_encode(dst->encoding());
never@739 2652 emit_byte(0xD3);
never@739 2653 emit_byte(0xE0 | encode);
never@739 2654 }
never@739 2655
never@739 2656 void Assembler::shrl(Register dst, int imm8) {
never@739 2657 assert(isShiftCount(imm8), "illegal shift count");
never@739 2658 int encode = prefix_and_encode(dst->encoding());
never@739 2659 emit_byte(0xC1);
never@739 2660 emit_byte(0xE8 | encode);
never@739 2661 emit_byte(imm8);
never@739 2662 }
never@739 2663
never@739 2664 void Assembler::shrl(Register dst) {
never@739 2665 int encode = prefix_and_encode(dst->encoding());
never@739 2666 emit_byte(0xD3);
never@739 2667 emit_byte(0xE8 | encode);
never@739 2668 }
duke@435 2669
duke@435 2670 // copies a single word from [esi] to [edi]
duke@435 2671 void Assembler::smovl() {
duke@435 2672 emit_byte(0xA5);
duke@435 2673 }
duke@435 2674
never@739 2675 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
never@739 2676 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 2677 emit_simd_arith(0x51, dst, src, VEX_SIMD_F2);
never@739 2678 }
never@739 2679
twisti@2350 2680 void Assembler::sqrtsd(XMMRegister dst, Address src) {
twisti@2350 2681 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 2682 emit_simd_arith(0x51, dst, src, VEX_SIMD_F2);
twisti@2350 2683 }
twisti@2350 2684
twisti@2350 2685 void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
kvn@3388 2686 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 2687 emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
twisti@2350 2688 }
twisti@2350 2689
twisti@4318 2690 void Assembler::std() {
twisti@4318 2691 emit_byte(0xfd);
twisti@4318 2692 }
twisti@4318 2693
twisti@2350 2694 void Assembler::sqrtss(XMMRegister dst, Address src) {
kvn@3388 2695 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 2696 emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
twisti@2350 2697 }
twisti@2350 2698
never@739 2699 void Assembler::stmxcsr( Address dst) {
never@739 2700 NOT_LP64(assert(VM_Version::supports_sse(), ""));
never@739 2701 InstructionMark im(this);
never@739 2702 prefix(dst);
never@739 2703 emit_byte(0x0F);
never@739 2704 emit_byte(0xAE);
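  // as_Register(3) encodes the /3 extension: 0x0F 0xAE /3 is STMXCSR.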
never@739 2705 emit_operand(as_Register(3), dst);
never@739 2706 }
never@739 2707
never@739 2708 void Assembler::subl(Address dst, int32_t imm32) {
never@739 2709 InstructionMark im(this);
never@739 2710 prefix(dst);
phh@2423 2711 emit_arith_operand(0x81, rbp, dst, imm32);
phh@2423 2712 }
phh@2423 2713
phh@2423 2714 void Assembler::subl(Address dst, Register src) {
phh@2423 2715 InstructionMark im(this);
phh@2423 2716 prefix(dst, src);
phh@2423 2717 emit_byte(0x29);
phh@2423 2718 emit_operand(src, dst);
never@739 2719 }
never@739 2720
never@739 2721 void Assembler::subl(Register dst, int32_t imm32) {
never@739 2722 prefix(dst);
never@739 2723 emit_arith(0x81, 0xE8, dst, imm32);
never@739 2724 }
never@739 2725
kvn@3574 2726 // Force generation of a 4-byte immediate value even if it fits into 8 bits
kvn@3574 2727 void Assembler::subl_imm32(Register dst, int32_t imm32) {
kvn@3574 2728 prefix(dst);
kvn@3574 2729 emit_arith_imm32(0x81, 0xE8, dst, imm32);
kvn@3574 2730 }
kvn@3574 2731
never@739 2732 void Assembler::subl(Register dst, Address src) {
never@739 2733 InstructionMark im(this);
never@739 2734 prefix(src, dst);
never@739 2735 emit_byte(0x2B);
never@739 2736 emit_operand(dst, src);
never@739 2737 }
never@739 2738
never@739 2739 void Assembler::subl(Register dst, Register src) {
never@739 2740 (void) prefix_and_encode(dst->encoding(), src->encoding());
never@739 2741 emit_arith(0x2B, 0xC0, dst, src);
never@739 2742 }
never@739 2743
never@739 2744 void Assembler::subsd(XMMRegister dst, XMMRegister src) {
never@739 2745 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 2746 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2);
never@739 2747 }
never@739 2748
never@739 2749 void Assembler::subsd(XMMRegister dst, Address src) {
never@739 2750 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 2751 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2);
never@739 2752 }
never@739 2753
never@739 2754 void Assembler::subss(XMMRegister dst, XMMRegister src) {
never@739 2755 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 2756 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3);
never@739 2757 }
never@739 2758
never@739 2759 void Assembler::subss(XMMRegister dst, Address src) {
never@739 2760 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 2761 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3);
never@739 2762 }
never@739 2763
never@739 2764 void Assembler::testb(Register dst, int imm8) {
never@739 2765 NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
never@739 2766 (void) prefix_and_encode(dst->encoding(), true);
never@739 2767 emit_arith_b(0xF6, 0xC0, dst, imm8);
never@739 2768 }
never@739 2769
never@739 2770 void Assembler::testl(Register dst, int32_t imm32) {
never@739 2771 // not using emit_arith because test
never@739 2772 // doesn't support sign-extension of
never@739 2773 // 8bit operands
never@739 2774 int encode = dst->encoding();
never@739 2775 if (encode == 0) {
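    // encoding 0 is rax/eax, which has the one-byte short form 0xA9 (TEST EAX, imm32)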
never@739 2776 emit_byte(0xA9);
duke@435 2777 } else {
never@739 2778 encode = prefix_and_encode(encode);
never@739 2779 emit_byte(0xF7);
never@739 2780 emit_byte(0xC0 | encode);
never@739 2781 }
never@739 2782 emit_long(imm32);
never@739 2783 }
never@739 2784
never@739 2785 void Assembler::testl(Register dst, Register src) {
never@739 2786 (void) prefix_and_encode(dst->encoding(), src->encoding());
never@739 2787 emit_arith(0x85, 0xC0, dst, src);
never@739 2788 }
never@739 2789
never@739 2790 void Assembler::testl(Register dst, Address src) {
never@739 2791 InstructionMark im(this);
never@739 2792 prefix(src, dst);
never@739 2793 emit_byte(0x85);
never@739 2794 emit_operand(dst, src);
never@739 2795 }
never@739 2796
never@739 2797 void Assembler::ucomisd(XMMRegister dst, Address src) {
never@739 2798 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 2799 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
never@739 2800 }
never@739 2801
never@739 2802 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
never@739 2803 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 2804 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
never@739 2805 }
never@739 2806
never@739 2807 void Assembler::ucomiss(XMMRegister dst, Address src) {
never@739 2808 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 2809 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE);
never@739 2810 }
never@739 2811
never@739 2812 void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
never@739 2813 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 2814 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE);
never@739 2815 }
never@739 2816
never@739 2817
never@739 2818 void Assembler::xaddl(Address dst, Register src) {
never@739 2819 InstructionMark im(this);
never@739 2820 prefix(dst, src);
never@739 2821 emit_byte(0x0F);
never@739 2822 emit_byte(0xC1);
never@739 2823 emit_operand(src, dst);
never@739 2824 }
never@739 2825
never@739 2826 void Assembler::xchgl(Register dst, Address src) { // xchg
never@739 2827 InstructionMark im(this);
never@739 2828 prefix(src, dst);
never@739 2829 emit_byte(0x87);
never@739 2830 emit_operand(dst, src);
never@739 2831 }
never@739 2832
never@739 2833 void Assembler::xchgl(Register dst, Register src) {
never@739 2834 int encode = prefix_and_encode(dst->encoding(), src->encoding());
never@739 2835 emit_byte(0x87);
never@739 2836   emit_byte(0xC0 | encode);
never@739 2837 }
never@739 2838
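// xgetbv (0F 01 D0) reads the extended control register selected by ECX
// (XCR0 when ECX == 0) into EDX:EAX; it is typically used together with
// cpuid's OSXSAVE bit to find out which AVX state the OS actually saves.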
twisti@4318 2839 void Assembler::xgetbv() {
twisti@4318 2840 emit_byte(0x0F);
twisti@4318 2841 emit_byte(0x01);
twisti@4318 2842 emit_byte(0xD0);
twisti@4318 2843 }
twisti@4318 2844
never@739 2845 void Assembler::xorl(Register dst, int32_t imm32) {
never@739 2846 prefix(dst);
never@739 2847 emit_arith(0x81, 0xF0, dst, imm32);
never@739 2848 }
never@739 2849
never@739 2850 void Assembler::xorl(Register dst, Address src) {
never@739 2851 InstructionMark im(this);
never@739 2852 prefix(src, dst);
never@739 2853 emit_byte(0x33);
never@739 2854 emit_operand(dst, src);
never@739 2855 }
never@739 2856
never@739 2857 void Assembler::xorl(Register dst, Register src) {
never@739 2858 (void) prefix_and_encode(dst->encoding(), src->encoding());
never@739 2859 emit_arith(0x33, 0xC0, dst, src);
never@739 2860 }
never@739 2861
kvn@4001 2862
kvn@4001 2863 // AVX 3-operand scalar floating-point arithmetic instructions
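// These use the VEX encoding and are non-destructive: roughly dst = nds OP src,
// where nds (the first source) is left unmodified; for the scalar forms the
// upper bits of dst are copied from nds.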
kvn@4001 2864
kvn@4001 2865 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
kvn@4001 2866 assert(VM_Version::supports_avx(), "");
kvn@4001 2867 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
kvn@4001 2868 }
kvn@4001 2869
kvn@4001 2870 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
kvn@4001 2871 assert(VM_Version::supports_avx(), "");
kvn@4001 2872 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
kvn@4001 2873 }
kvn@4001 2874
kvn@4001 2875 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
kvn@4001 2876 assert(VM_Version::supports_avx(), "");
kvn@4001 2877 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
kvn@4001 2878 }
kvn@4001 2879
kvn@4001 2880 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
kvn@4001 2881 assert(VM_Version::supports_avx(), "");
kvn@4001 2882 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
kvn@4001 2883 }
kvn@4001 2884
kvn@4001 2885 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
kvn@4001 2886 assert(VM_Version::supports_avx(), "");
kvn@4001 2887 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
kvn@4001 2888 }
kvn@4001 2889
kvn@4001 2890 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
kvn@4001 2891 assert(VM_Version::supports_avx(), "");
kvn@4001 2892 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
kvn@4001 2893 }
kvn@4001 2894
kvn@4001 2895 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
kvn@4001 2896 assert(VM_Version::supports_avx(), "");
kvn@4001 2897 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
kvn@4001 2898 }
kvn@4001 2899
kvn@4001 2900 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
kvn@4001 2901 assert(VM_Version::supports_avx(), "");
kvn@4001 2902 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
kvn@4001 2903 }
kvn@4001 2904
kvn@4001 2905 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
kvn@4001 2906 assert(VM_Version::supports_avx(), "");
kvn@4001 2907 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
kvn@4001 2908 }
kvn@4001 2909
kvn@4001 2910 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
kvn@4001 2911 assert(VM_Version::supports_avx(), "");
kvn@4001 2912 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
kvn@4001 2913 }
kvn@4001 2914
kvn@4001 2915 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
kvn@4001 2916 assert(VM_Version::supports_avx(), "");
kvn@4001 2917 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
kvn@4001 2918 }
kvn@4001 2919
kvn@4001 2920 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
kvn@4001 2921 assert(VM_Version::supports_avx(), "");
kvn@4001 2922 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
kvn@4001 2923 }
kvn@4001 2924
kvn@4001 2925 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
kvn@4001 2926 assert(VM_Version::supports_avx(), "");
kvn@4001 2927 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
kvn@4001 2928 }
kvn@4001 2929
kvn@4001 2930 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
kvn@4001 2931 assert(VM_Version::supports_avx(), "");
kvn@4001 2932 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
kvn@4001 2933 }
kvn@4001 2934
kvn@4001 2935 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
kvn@4001 2936 assert(VM_Version::supports_avx(), "");
kvn@4001 2937 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
kvn@4001 2938 }
kvn@4001 2939
kvn@4001 2940 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
kvn@4001 2941 assert(VM_Version::supports_avx(), "");
kvn@4001 2942 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
kvn@4001 2943 }
kvn@4001 2944
kvn@4001 2945 //====================VECTOR ARITHMETIC=====================================
kvn@4001 2946
kvn@4001 2947 // Floating-point vector arithmetic
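// The opcode byte selects the operation and the SIMD prefix selects the type:
// 0x58 add, 0x5C sub, 0x59 mul, 0x5E div, 0x54 and, 0x57 xor, with prefix
// 66 = pd, none = ps, F2 = sd, F3 = ss.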
kvn@4001 2948
kvn@4001 2949 void Assembler::addpd(XMMRegister dst, XMMRegister src) {
kvn@4001 2950 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 2951 emit_simd_arith(0x58, dst, src, VEX_SIMD_66);
kvn@4001 2952 }
kvn@4001 2953
kvn@4001 2954 void Assembler::addps(XMMRegister dst, XMMRegister src) {
kvn@4001 2955 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 2956 emit_simd_arith(0x58, dst, src, VEX_SIMD_NONE);
kvn@4001 2957 }
kvn@4001 2958
kvn@4001 2959 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 2960 assert(VM_Version::supports_avx(), "");
kvn@4001 2961 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 2962 }
kvn@4001 2963
kvn@4001 2964 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 2965 assert(VM_Version::supports_avx(), "");
kvn@4001 2966 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector256);
kvn@4001 2967 }
kvn@4001 2968
kvn@4001 2969 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 2970 assert(VM_Version::supports_avx(), "");
kvn@4001 2971 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 2972 }
kvn@4001 2973
kvn@4001 2974 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 2975 assert(VM_Version::supports_avx(), "");
kvn@4001 2976 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector256);
kvn@4001 2977 }
kvn@4001 2978
kvn@4001 2979 void Assembler::subpd(XMMRegister dst, XMMRegister src) {
kvn@4001 2980 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 2981 emit_simd_arith(0x5C, dst, src, VEX_SIMD_66);
kvn@4001 2982 }
kvn@4001 2983
kvn@4001 2984 void Assembler::subps(XMMRegister dst, XMMRegister src) {
kvn@4001 2985 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 2986 emit_simd_arith(0x5C, dst, src, VEX_SIMD_NONE);
kvn@4001 2987 }
kvn@4001 2988
kvn@4001 2989 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 2990 assert(VM_Version::supports_avx(), "");
kvn@4001 2991 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 2992 }
kvn@4001 2993
kvn@4001 2994 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 2995 assert(VM_Version::supports_avx(), "");
kvn@4001 2996 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector256);
kvn@4001 2997 }
kvn@4001 2998
kvn@4001 2999 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3000 assert(VM_Version::supports_avx(), "");
kvn@4001 3001 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3002 }
kvn@4001 3003
kvn@4001 3004 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3005 assert(VM_Version::supports_avx(), "");
kvn@4001 3006 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector256);
kvn@4001 3007 }
kvn@4001 3008
kvn@4001 3009 void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
kvn@4001 3010 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3011 emit_simd_arith(0x59, dst, src, VEX_SIMD_66);
kvn@4001 3012 }
kvn@4001 3013
kvn@4001 3014 void Assembler::mulps(XMMRegister dst, XMMRegister src) {
kvn@4001 3015 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3016 emit_simd_arith(0x59, dst, src, VEX_SIMD_NONE);
kvn@4001 3017 }
kvn@4001 3018
kvn@4001 3019 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 3020 assert(VM_Version::supports_avx(), "");
kvn@4001 3021 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3022 }
kvn@4001 3023
kvn@4001 3024 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 3025 assert(VM_Version::supports_avx(), "");
kvn@4001 3026 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector256);
kvn@4001 3027 }
kvn@4001 3028
kvn@4001 3029 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3030 assert(VM_Version::supports_avx(), "");
kvn@4001 3031 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3032 }
kvn@4001 3033
kvn@4001 3034 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3035 assert(VM_Version::supports_avx(), "");
kvn@4001 3036 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector256);
kvn@4001 3037 }
kvn@4001 3038
kvn@4001 3039 void Assembler::divpd(XMMRegister dst, XMMRegister src) {
kvn@4001 3040 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3041 emit_simd_arith(0x5E, dst, src, VEX_SIMD_66);
kvn@4001 3042 }
kvn@4001 3043
kvn@4001 3044 void Assembler::divps(XMMRegister dst, XMMRegister src) {
kvn@4001 3045 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3046 emit_simd_arith(0x5E, dst, src, VEX_SIMD_NONE);
kvn@4001 3047 }
kvn@4001 3048
kvn@4001 3049 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 3050 assert(VM_Version::supports_avx(), "");
kvn@4001 3051 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3052 }
kvn@4001 3053
kvn@4001 3054 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 3055 assert(VM_Version::supports_avx(), "");
kvn@4001 3056 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector256);
kvn@4001 3057 }
kvn@4001 3058
kvn@4001 3059 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3060 assert(VM_Version::supports_avx(), "");
kvn@4001 3061 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3062 }
kvn@4001 3063
kvn@4001 3064 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3065 assert(VM_Version::supports_avx(), "");
kvn@4001 3066 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector256);
kvn@4001 3067 }
kvn@4001 3068
kvn@4001 3069 void Assembler::andpd(XMMRegister dst, XMMRegister src) {
kvn@4001 3070 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3071 emit_simd_arith(0x54, dst, src, VEX_SIMD_66);
kvn@4001 3072 }
kvn@4001 3073
kvn@4001 3074 void Assembler::andps(XMMRegister dst, XMMRegister src) {
kvn@4001 3075 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 3076 emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE);
kvn@4001 3077 }
kvn@4001 3078
kvn@4001 3079 void Assembler::andps(XMMRegister dst, Address src) {
kvn@4001 3080 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 3081 emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE);
kvn@4001 3082 }
kvn@4001 3083
kvn@4001 3084 void Assembler::andpd(XMMRegister dst, Address src) {
kvn@4001 3085 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3086 emit_simd_arith(0x54, dst, src, VEX_SIMD_66);
kvn@4001 3087 }
kvn@4001 3088
kvn@4001 3089 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 3090 assert(VM_Version::supports_avx(), "");
kvn@4001 3091 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3092 }
kvn@4001 3093
kvn@4001 3094 void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 3095 assert(VM_Version::supports_avx(), "");
kvn@4001 3096 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector256);
kvn@4001 3097 }
kvn@4001 3098
kvn@4001 3099 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3100 assert(VM_Version::supports_avx(), "");
kvn@4001 3101 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3102 }
kvn@4001 3103
kvn@4001 3104 void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3105 assert(VM_Version::supports_avx(), "");
kvn@4001 3106 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector256);
kvn@4001 3107 }
kvn@4001 3108
never@739 3109 void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
never@739 3110 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3111 emit_simd_arith(0x57, dst, src, VEX_SIMD_66);
kvn@4001 3112 }
never@739 3113
never@739 3114 void Assembler::xorps(XMMRegister dst, XMMRegister src) {
never@739 3115 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 3116 emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE);
kvn@4001 3117 }
kvn@4001 3118
kvn@4001 3119 void Assembler::xorpd(XMMRegister dst, Address src) {
kvn@4001 3120 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3121 emit_simd_arith(0x57, dst, src, VEX_SIMD_66);
never@739 3122 }
never@739 3123
never@739 3124 void Assembler::xorps(XMMRegister dst, Address src) {
never@739 3125 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@4001 3126 emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE);
kvn@3390 3127 }
kvn@3390 3128
kvn@3882 3129 void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@3882 3130 assert(VM_Version::supports_avx(), "");
kvn@4001 3131 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector256);
kvn@3390 3132 }
kvn@3390 3133
kvn@3882 3134 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@3882 3135 assert(VM_Version::supports_avx(), "");
kvn@4001 3136 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector256);
kvn@4001 3137 }
kvn@4001 3138
kvn@4001 3139 void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3140 assert(VM_Version::supports_avx(), "");
kvn@4001 3141 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3142 }
kvn@4001 3143
kvn@4001 3144 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3145 assert(VM_Version::supports_avx(), "");
kvn@4001 3146 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector256);
kvn@4001 3147 }
kvn@4001 3148
kvn@4001 3149
kvn@4001 3150 // Integer vector arithmetic
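// The SSE forms below need SSE2; for the VEX-encoded forms the 128-bit variant
// needs AVX while the 256-bit (vector256 == true) variant additionally needs
// AVX2, which is what the asserts check.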
kvn@4001 3151 void Assembler::paddb(XMMRegister dst, XMMRegister src) {
kvn@4001 3152 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3153 emit_simd_arith(0xFC, dst, src, VEX_SIMD_66);
kvn@4001 3154 }
kvn@4001 3155
kvn@4001 3156 void Assembler::paddw(XMMRegister dst, XMMRegister src) {
kvn@4001 3157 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3158 emit_simd_arith(0xFD, dst, src, VEX_SIMD_66);
kvn@4001 3159 }
kvn@4001 3160
kvn@4001 3161 void Assembler::paddd(XMMRegister dst, XMMRegister src) {
kvn@4001 3162 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3163 emit_simd_arith(0xFE, dst, src, VEX_SIMD_66);
kvn@4001 3164 }
kvn@4001 3165
kvn@4001 3166 void Assembler::paddq(XMMRegister dst, XMMRegister src) {
kvn@4001 3167 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3168 emit_simd_arith(0xD4, dst, src, VEX_SIMD_66);
kvn@4001 3169 }
kvn@4001 3170
kvn@4001 3171 void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 3172 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3173 emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3174 }
kvn@4001 3175
kvn@4001 3176 void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 3177 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3178 emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3179 }
kvn@4001 3180
kvn@4001 3181 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 3182 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3183 emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3184 }
kvn@4001 3185
kvn@4001 3186 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 3187 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3188 emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3189 }
kvn@4001 3190
kvn@4001 3191 void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3192 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3193 emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3194 }
kvn@4001 3195
kvn@4001 3196 void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3197 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3198 emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3199 }
kvn@4001 3200
kvn@4001 3201 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3202 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3203 emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3204 }
kvn@4001 3205
kvn@4001 3206 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3207 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3208 emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3209 }
kvn@4001 3210
kvn@4001 3211 void Assembler::psubb(XMMRegister dst, XMMRegister src) {
kvn@4001 3212 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3213 emit_simd_arith(0xF8, dst, src, VEX_SIMD_66);
kvn@4001 3214 }
kvn@4001 3215
kvn@4001 3216 void Assembler::psubw(XMMRegister dst, XMMRegister src) {
kvn@4001 3217 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3218 emit_simd_arith(0xF9, dst, src, VEX_SIMD_66);
kvn@4001 3219 }
kvn@4001 3220
kvn@4001 3221 void Assembler::psubd(XMMRegister dst, XMMRegister src) {
kvn@4001 3222 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3223 emit_simd_arith(0xFA, dst, src, VEX_SIMD_66);
kvn@4001 3224 }
kvn@4001 3225
kvn@4001 3226 void Assembler::psubq(XMMRegister dst, XMMRegister src) {
kvn@4001 3227 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3228 emit_simd_arith(0xFB, dst, src, VEX_SIMD_66);
kvn@4001 3229 }
kvn@4001 3230
kvn@4001 3231 void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 3232 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3233 emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3234 }
kvn@4001 3235
kvn@4001 3236 void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 3237 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3238 emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3239 }
kvn@4001 3240
kvn@4001 3241 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 3242 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3243 emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3244 }
kvn@4001 3245
kvn@4001 3246 void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 3247 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3248 emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3249 }
kvn@4001 3250
kvn@4001 3251 void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3252 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3253 emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3254 }
kvn@4001 3255
kvn@4001 3256 void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3257 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3258 emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3259 }
kvn@4001 3260
kvn@4001 3261 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3262 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3263 emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3264 }
kvn@4001 3265
kvn@4001 3266 void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3267 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3268 emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3269 }
kvn@4001 3270
kvn@4001 3271 void Assembler::pmullw(XMMRegister dst, XMMRegister src) {
kvn@4001 3272 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3273 emit_simd_arith(0xD5, dst, src, VEX_SIMD_66);
kvn@4001 3274 }
kvn@4001 3275
kvn@4001 3276 void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
kvn@4001 3277 assert(VM_Version::supports_sse4_1(), "");
kvn@4001 3278 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
kvn@4001 3279 emit_byte(0x40);
kvn@3882 3280 emit_byte(0xC0 | encode);
kvn@3882 3281 }
kvn@3882 3282
kvn@4001 3283 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 3284 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3285 emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3286 }
kvn@4001 3287
kvn@4001 3288 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 3289 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3290 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
kvn@4001 3291 emit_byte(0x40);
kvn@4001 3292 emit_byte(0xC0 | encode);
kvn@4001 3293 }
kvn@4001 3294
kvn@4001 3295 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3296 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3297 emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3298 }
kvn@4001 3299
kvn@4001 3300 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3301 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3302 InstructionMark im(this);
kvn@4001 3303 int dst_enc = dst->encoding();
kvn@4001 3304 int nds_enc = nds->is_valid() ? nds->encoding() : 0;
kvn@4001 3305 vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector256);
kvn@4001 3306 emit_byte(0x40);
kvn@4001 3307 emit_operand(dst, src);
kvn@4001 3308 }
kvn@4001 3309
kvn@4001 3310 // Shift packed integers left by specified number of bits.
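// For the shift-by-immediate forms the opcode extension (/6 here, /2 and /4
// further below) is carried in the ModRM reg field, so a dummy xmm register
// with the matching encoding (xmm6, xmm2, xmm4) is passed as the first operand.
// For reference, psllw xmm1, 3 assembles to 66 0F 71 F1 03.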
kvn@4001 3311 void Assembler::psllw(XMMRegister dst, int shift) {
kvn@4001 3312 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3313 // XMM6 is for /6 encoding: 66 0F 71 /6 ib
kvn@4001 3314 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
kvn@4001 3315 emit_byte(0x71);
kvn@4001 3316 emit_byte(0xC0 | encode);
kvn@4001 3317 emit_byte(shift & 0xFF);
kvn@4001 3318 }
kvn@4001 3319
kvn@4001 3320 void Assembler::pslld(XMMRegister dst, int shift) {
kvn@4001 3321 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3322 // XMM6 is for /6 encoding: 66 0F 72 /6 ib
kvn@4001 3323 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
kvn@4001 3324 emit_byte(0x72);
kvn@4001 3325 emit_byte(0xC0 | encode);
kvn@4001 3326 emit_byte(shift & 0xFF);
kvn@4001 3327 }
kvn@4001 3328
kvn@4001 3329 void Assembler::psllq(XMMRegister dst, int shift) {
kvn@4001 3330 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3331 // XMM6 is for /6 encoding: 66 0F 73 /6 ib
kvn@4001 3332 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
kvn@4001 3333 emit_byte(0x73);
kvn@4001 3334 emit_byte(0xC0 | encode);
kvn@4001 3335 emit_byte(shift & 0xFF);
kvn@4001 3336 }
kvn@4001 3337
kvn@4001 3338 void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
kvn@4001 3339 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3340 emit_simd_arith(0xF1, dst, shift, VEX_SIMD_66);
kvn@4001 3341 }
kvn@4001 3342
kvn@4001 3343 void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
kvn@4001 3344 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3345 emit_simd_arith(0xF2, dst, shift, VEX_SIMD_66);
kvn@4001 3346 }
kvn@4001 3347
kvn@4001 3348 void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
kvn@4001 3349 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3350 emit_simd_arith(0xF3, dst, shift, VEX_SIMD_66);
kvn@4001 3351 }
kvn@4001 3352
kvn@4001 3353 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
kvn@4001 3354 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3355 // XMM6 is for /6 encoding: 66 0F 71 /6 ib
kvn@4001 3356 emit_vex_arith(0x71, xmm6, dst, src, VEX_SIMD_66, vector256);
kvn@4001 3357 emit_byte(shift & 0xFF);
kvn@4001 3358 }
kvn@4001 3359
kvn@4001 3360 void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
kvn@4001 3361 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3362 // XMM6 is for /6 encoding: 66 0F 72 /6 ib
kvn@4001 3363 emit_vex_arith(0x72, xmm6, dst, src, VEX_SIMD_66, vector256);
kvn@4001 3364 emit_byte(shift & 0xFF);
kvn@4001 3365 }
kvn@4001 3366
kvn@4001 3367 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
kvn@4001 3368 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3369 // XMM6 is for /6 encoding: 66 0F 73 /6 ib
kvn@4001 3370 emit_vex_arith(0x73, xmm6, dst, src, VEX_SIMD_66, vector256);
kvn@4001 3371 emit_byte(shift & 0xFF);
kvn@4001 3372 }
kvn@4001 3373
kvn@4001 3374 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
kvn@4001 3375 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3376 emit_vex_arith(0xF1, dst, src, shift, VEX_SIMD_66, vector256);
kvn@4001 3377 }
kvn@4001 3378
kvn@4001 3379 void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
kvn@4001 3380 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3381 emit_vex_arith(0xF2, dst, src, shift, VEX_SIMD_66, vector256);
kvn@4001 3382 }
kvn@4001 3383
kvn@4001 3384 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
kvn@4001 3385 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3386 emit_vex_arith(0xF3, dst, src, shift, VEX_SIMD_66, vector256);
kvn@4001 3387 }
kvn@4001 3388
kvn@4001 3389 // Shift packed integers logically right by specified number of bits.
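// Same dummy-register trick as for the left shifts, with /2 in the reg field;
// for reference, psrlw xmm1, 3 assembles to 66 0F 71 D1 03.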
kvn@4001 3390 void Assembler::psrlw(XMMRegister dst, int shift) {
kvn@4001 3391 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3392 // XMM2 is for /2 encoding: 66 0F 71 /2 ib
kvn@4001 3393 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
kvn@4001 3394 emit_byte(0x71);
kvn@4001 3395 emit_byte(0xC0 | encode);
kvn@4001 3396 emit_byte(shift & 0xFF);
kvn@4001 3397 }
kvn@4001 3398
kvn@4001 3399 void Assembler::psrld(XMMRegister dst, int shift) {
kvn@4001 3400 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3401 // XMM2 is for /2 encoding: 66 0F 72 /2 ib
kvn@4001 3402 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
kvn@4001 3403 emit_byte(0x72);
kvn@4001 3404 emit_byte(0xC0 | encode);
kvn@4001 3405 emit_byte(shift & 0xFF);
kvn@4001 3406 }
kvn@4001 3407
kvn@4001 3408 void Assembler::psrlq(XMMRegister dst, int shift) {
kvn@4001 3409   // Do not confuse it with the psrldq SSE2 instruction, which
kvn@4001 3410   // shifts the 128-bit value in an xmm register right by a number of bytes.
kvn@4001 3411 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3412 // XMM2 is for /2 encoding: 66 0F 73 /2 ib
kvn@4001 3413 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
kvn@4001 3414 emit_byte(0x73);
kvn@4001 3415 emit_byte(0xC0 | encode);
kvn@4001 3416 emit_byte(shift & 0xFF);
kvn@4001 3417 }
kvn@4001 3418
kvn@4001 3419 void Assembler::psrlw(XMMRegister dst, XMMRegister shift) {
kvn@4001 3420 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3421 emit_simd_arith(0xD1, dst, shift, VEX_SIMD_66);
kvn@4001 3422 }
kvn@4001 3423
kvn@4001 3424 void Assembler::psrld(XMMRegister dst, XMMRegister shift) {
kvn@4001 3425 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3426 emit_simd_arith(0xD2, dst, shift, VEX_SIMD_66);
kvn@4001 3427 }
kvn@4001 3428
kvn@4001 3429 void Assembler::psrlq(XMMRegister dst, XMMRegister shift) {
kvn@4001 3430 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3431 emit_simd_arith(0xD3, dst, shift, VEX_SIMD_66);
kvn@4001 3432 }
kvn@4001 3433
kvn@4001 3434 void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
kvn@4001 3435 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3436   // XMM2 is for /2 encoding: 66 0F 71 /2 ib
kvn@4001 3437 emit_vex_arith(0x71, xmm2, dst, src, VEX_SIMD_66, vector256);
kvn@4001 3438 emit_byte(shift & 0xFF);
kvn@4001 3439 }
kvn@4001 3440
kvn@4001 3441 void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
kvn@4001 3442 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3443   // XMM2 is for /2 encoding: 66 0F 72 /2 ib
kvn@4001 3444 emit_vex_arith(0x72, xmm2, dst, src, VEX_SIMD_66, vector256);
kvn@4001 3445 emit_byte(shift & 0xFF);
kvn@4001 3446 }
kvn@4001 3447
kvn@4001 3448 void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
kvn@4001 3449 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3450 // XMM2 is for /2 encoding: 66 0F 73 /2 ib
kvn@4001 3451 emit_vex_arith(0x73, xmm2, dst, src, VEX_SIMD_66, vector256);
kvn@4001 3452 emit_byte(shift & 0xFF);
kvn@4001 3453 }
kvn@4001 3454
kvn@4001 3455 void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
kvn@4001 3456 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3457 emit_vex_arith(0xD1, dst, src, shift, VEX_SIMD_66, vector256);
kvn@4001 3458 }
kvn@4001 3459
kvn@4001 3460 void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
kvn@4001 3461 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3462 emit_vex_arith(0xD2, dst, src, shift, VEX_SIMD_66, vector256);
kvn@4001 3463 }
kvn@4001 3464
kvn@4001 3465 void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
kvn@4001 3466 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3467 emit_vex_arith(0xD3, dst, src, shift, VEX_SIMD_66, vector256);
kvn@4001 3468 }
kvn@4001 3469
kvn@4001 3470 // Shift packed integers arithmetically right by specified number of bits.
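// Same scheme with /4 in the reg field; for reference, psraw xmm1, 3 assembles
// to 66 0F 71 E1 03. Note there is no psraq: a packed 64-bit arithmetic right
// shift does not exist in SSE/AVX/AVX2.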
kvn@4001 3471 void Assembler::psraw(XMMRegister dst, int shift) {
kvn@4001 3472 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3473 // XMM4 is for /4 encoding: 66 0F 71 /4 ib
kvn@4001 3474 int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66);
kvn@4001 3475 emit_byte(0x71);
kvn@4001 3476 emit_byte(0xC0 | encode);
kvn@4001 3477 emit_byte(shift & 0xFF);
kvn@4001 3478 }
kvn@4001 3479
kvn@4001 3480 void Assembler::psrad(XMMRegister dst, int shift) {
kvn@4001 3481 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3482 // XMM4 is for /4 encoding: 66 0F 72 /4 ib
kvn@4001 3483 int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66);
kvn@4001 3484 emit_byte(0x72);
kvn@4001 3485 emit_byte(0xC0 | encode);
kvn@4001 3486 emit_byte(shift & 0xFF);
kvn@4001 3487 }
kvn@4001 3488
kvn@4001 3489 void Assembler::psraw(XMMRegister dst, XMMRegister shift) {
kvn@4001 3490 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3491 emit_simd_arith(0xE1, dst, shift, VEX_SIMD_66);
kvn@4001 3492 }
kvn@4001 3493
kvn@4001 3494 void Assembler::psrad(XMMRegister dst, XMMRegister shift) {
kvn@4001 3495 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3496 emit_simd_arith(0xE2, dst, shift, VEX_SIMD_66);
kvn@4001 3497 }
kvn@4001 3498
kvn@4001 3499 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
kvn@4001 3500 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3501 // XMM4 is for /4 encoding: 66 0F 71 /4 ib
kvn@4001 3502 emit_vex_arith(0x71, xmm4, dst, src, VEX_SIMD_66, vector256);
kvn@4001 3503 emit_byte(shift & 0xFF);
kvn@4001 3504 }
kvn@4001 3505
kvn@4001 3506 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
kvn@4001 3507 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3508   // XMM4 is for /4 encoding: 66 0F 72 /4 ib
kvn@4001 3509 emit_vex_arith(0x72, xmm4, dst, src, VEX_SIMD_66, vector256);
kvn@4001 3510 emit_byte(shift & 0xFF);
kvn@4001 3511 }
kvn@4001 3512
kvn@4001 3513 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
kvn@4001 3514 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3515 emit_vex_arith(0xE1, dst, src, shift, VEX_SIMD_66, vector256);
kvn@4001 3516 }
kvn@4001 3517
kvn@4001 3518 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
kvn@4001 3519 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3520 emit_vex_arith(0xE2, dst, src, shift, VEX_SIMD_66, vector256);
kvn@4001 3521 }
kvn@4001 3522
kvn@4001 3523
kvn@4001 3524 // AND packed integers
kvn@4001 3525 void Assembler::pand(XMMRegister dst, XMMRegister src) {
kvn@4001 3526 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3527 emit_simd_arith(0xDB, dst, src, VEX_SIMD_66);
kvn@4001 3528 }
kvn@4001 3529
kvn@4001 3530 void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 3531 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3532 emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3533 }
kvn@4001 3534
kvn@4001 3535 void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3536 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3537 emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3538 }
kvn@4001 3539
kvn@4001 3540 void Assembler::por(XMMRegister dst, XMMRegister src) {
kvn@4001 3541 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3542 emit_simd_arith(0xEB, dst, src, VEX_SIMD_66);
kvn@4001 3543 }
kvn@4001 3544
kvn@4001 3545 void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 3546 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3547 emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3548 }
kvn@4001 3549
kvn@4001 3550 void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3551 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3552 emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3553 }
kvn@4001 3554
kvn@4001 3555 void Assembler::pxor(XMMRegister dst, XMMRegister src) {
kvn@4001 3556 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@4001 3557 emit_simd_arith(0xEF, dst, src, VEX_SIMD_66);
kvn@4001 3558 }
kvn@4001 3559
kvn@3929 3560 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
kvn@4001 3561 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3562 emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3563 }
kvn@4001 3564
kvn@4001 3565 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
kvn@4001 3566 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
kvn@4001 3567 emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector256);
kvn@4001 3568 }
kvn@4001 3569
kvn@3929 3570
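// The *128h helpers below always address the upper 128-bit half of a 256-bit
// register: every form emits imm8 == 0x01 (insert to write that half, extract
// to read it).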
kvn@3882 3571 void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
kvn@3882 3572 assert(VM_Version::supports_avx(), "");
kvn@3882 3573 bool vector256 = true;
kvn@3882 3574 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
kvn@3882 3575 emit_byte(0x18);
kvn@3882 3576 emit_byte(0xC0 | encode);
kvn@3882 3577 // 0x00 - insert into lower 128 bits
kvn@3882 3578 // 0x01 - insert into upper 128 bits
kvn@3882 3579 emit_byte(0x01);
kvn@3882 3580 }
kvn@3882 3581
kvn@4103 3582 void Assembler::vinsertf128h(XMMRegister dst, Address src) {
kvn@4103 3583 assert(VM_Version::supports_avx(), "");
kvn@4103 3584 InstructionMark im(this);
kvn@4103 3585 bool vector256 = true;
kvn@4103 3586 assert(dst != xnoreg, "sanity");
kvn@4103 3587 int dst_enc = dst->encoding();
kvn@4103 3588 // swap src<->dst for encoding
kvn@4103 3589 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
kvn@4103 3590 emit_byte(0x18);
kvn@4103 3591 emit_operand(dst, src);
kvn@4103 3592 // 0x01 - insert into upper 128 bits
kvn@4103 3593 emit_byte(0x01);
kvn@4103 3594 }
kvn@4103 3595
kvn@4103 3596 void Assembler::vextractf128h(Address dst, XMMRegister src) {
kvn@4103 3597 assert(VM_Version::supports_avx(), "");
kvn@4103 3598 InstructionMark im(this);
kvn@4103 3599 bool vector256 = true;
kvn@4103 3600 assert(src != xnoreg, "sanity");
kvn@4103 3601 int src_enc = src->encoding();
kvn@4103 3602 vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
kvn@4103 3603 emit_byte(0x19);
kvn@4103 3604 emit_operand(src, dst);
kvn@4103 3605 // 0x01 - extract from upper 128 bits
kvn@4103 3606 emit_byte(0x01);
kvn@4103 3607 }
kvn@4103 3608
kvn@3929 3609 void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
kvn@3929 3610 assert(VM_Version::supports_avx2(), "");
kvn@3929 3611 bool vector256 = true;
kvn@3929 3612 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
kvn@3929 3613 emit_byte(0x38);
kvn@3929 3614 emit_byte(0xC0 | encode);
kvn@3929 3615 // 0x00 - insert into lower 128 bits
kvn@3929 3616 // 0x01 - insert into upper 128 bits
kvn@3929 3617 emit_byte(0x01);
kvn@3929 3618 }
kvn@3929 3619
kvn@4103 3620 void Assembler::vinserti128h(XMMRegister dst, Address src) {
kvn@4103 3621 assert(VM_Version::supports_avx2(), "");
kvn@4103 3622 InstructionMark im(this);
kvn@4103 3623 bool vector256 = true;
kvn@4103 3624 assert(dst != xnoreg, "sanity");
kvn@4103 3625 int dst_enc = dst->encoding();
kvn@4103 3626 // swap src<->dst for encoding
kvn@4103 3627 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
kvn@4103 3628 emit_byte(0x38);
kvn@4103 3629 emit_operand(dst, src);
kvn@4103 3630 // 0x01 - insert into upper 128 bits
kvn@4103 3631 emit_byte(0x01);
kvn@4103 3632 }
kvn@4103 3633
kvn@4103 3634 void Assembler::vextracti128h(Address dst, XMMRegister src) {
kvn@4103 3635 assert(VM_Version::supports_avx2(), "");
kvn@4103 3636 InstructionMark im(this);
kvn@4103 3637 bool vector256 = true;
kvn@4103 3638 assert(src != xnoreg, "sanity");
kvn@4103 3639 int src_enc = src->encoding();
kvn@4103 3640 vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
kvn@4103 3641 emit_byte(0x39);
kvn@4103 3642 emit_operand(src, dst);
kvn@4103 3643 // 0x01 - extract from upper 128 bits
kvn@4103 3644 emit_byte(0x01);
kvn@4103 3645 }
kvn@4103 3646
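// vzeroupper (VEX 0F 77) clears the upper 128 bits of all ymm registers; it is
// typically emitted before calling legacy SSE code to avoid the AVX-to-SSE
// transition penalty.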
kvn@3882 3647 void Assembler::vzeroupper() {
kvn@3882 3648 assert(VM_Version::supports_avx(), "");
kvn@3882 3649 (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
kvn@3882 3650 emit_byte(0x77);
kvn@3882 3651 }
kvn@3882 3652
kvn@3390 3653
never@739 3654 #ifndef _LP64
never@739 3655 // 32-bit-only pieces of the assembler
never@739 3656
never@739 3657 void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
never@739 3658 // NO PREFIX AS NEVER 64BIT
never@739 3659 InstructionMark im(this);
never@739 3660 emit_byte(0x81);
never@739 3661 emit_byte(0xF8 | src1->encoding());
never@739 3662 emit_data(imm32, rspec, 0);
never@739 3663 }
never@739 3664
never@739 3665 void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
never@739 3666 // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs)
never@739 3667 InstructionMark im(this);
never@739 3668 emit_byte(0x81);
never@739 3669 emit_operand(rdi, src1);
never@739 3670 emit_data(imm32, rspec, 0);
never@739 3671 }
never@739 3672
never@739 3673 // The 64-bit cmpxchg (on a 32-bit platform) compares the value at adr with the contents of rdx:rax;
never@739 3674 // if they are equal, rcx:rbx is stored into adr, otherwise the value at adr is loaded
never@739 3675 // into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise.
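// Typical use (a sketch, not taken from this file): load the expected value
// into edx:eax and the new value into ecx:ebx, emit lock(); cmpxchg8(adr);
// then branch on ZF to see whether the exchange happened.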
never@739 3676 void Assembler::cmpxchg8(Address adr) {
never@739 3677 InstructionMark im(this);
never@739 3678 emit_byte(0x0F);
never@739 3679   emit_byte(0xC7);
never@739 3680 emit_operand(rcx, adr);
never@739 3681 }
never@739 3682
never@739 3683 void Assembler::decl(Register dst) {
never@739 3684 // Don't use it directly. Use MacroAssembler::decrementl() instead.
never@739 3685 emit_byte(0x48 | dst->encoding());
never@739 3686 }
never@739 3687
never@739 3688 #endif // _LP64
never@739 3689
never@739 3690 // 64-bit code typically doesn't use the x87, but still needs it for the trig functions
never@739 3691
never@739 3692 void Assembler::fabs() {
never@739 3693 emit_byte(0xD9);
never@739 3694 emit_byte(0xE1);
never@739 3695 }
never@739 3696
never@739 3697 void Assembler::fadd(int i) {
never@739 3698 emit_farith(0xD8, 0xC0, i);
never@739 3699 }
never@739 3700
never@739 3701 void Assembler::fadd_d(Address src) {
never@739 3702 InstructionMark im(this);
never@739 3703 emit_byte(0xDC);
never@739 3704 emit_operand32(rax, src);
never@739 3705 }
never@739 3706
never@739 3707 void Assembler::fadd_s(Address src) {
never@739 3708 InstructionMark im(this);
never@739 3709 emit_byte(0xD8);
never@739 3710 emit_operand32(rax, src);
never@739 3711 }
never@739 3712
never@739 3713 void Assembler::fadda(int i) {
never@739 3714 emit_farith(0xDC, 0xC0, i);
never@739 3715 }
never@739 3716
never@739 3717 void Assembler::faddp(int i) {
never@739 3718 emit_farith(0xDE, 0xC0, i);
never@739 3719 }
never@739 3720
never@739 3721 void Assembler::fchs() {
never@739 3722 emit_byte(0xD9);
never@739 3723 emit_byte(0xE0);
never@739 3724 }
never@739 3725
never@739 3726 void Assembler::fcom(int i) {
never@739 3727 emit_farith(0xD8, 0xD0, i);
never@739 3728 }
never@739 3729
never@739 3730 void Assembler::fcomp(int i) {
never@739 3731 emit_farith(0xD8, 0xD8, i);
never@739 3732 }
never@739 3733
never@739 3734 void Assembler::fcomp_d(Address src) {
never@739 3735 InstructionMark im(this);
never@739 3736 emit_byte(0xDC);
never@739 3737 emit_operand32(rbx, src);
never@739 3738 }
never@739 3739
never@739 3740 void Assembler::fcomp_s(Address src) {
never@739 3741 InstructionMark im(this);
never@739 3742 emit_byte(0xD8);
never@739 3743 emit_operand32(rbx, src);
never@739 3744 }
never@739 3745
never@739 3746 void Assembler::fcompp() {
never@739 3747 emit_byte(0xDE);
never@739 3748 emit_byte(0xD9);
never@739 3749 }
never@739 3750
never@739 3751 void Assembler::fcos() {
never@739 3752 emit_byte(0xD9);
duke@435 3753 emit_byte(0xFF);
never@739 3754 }
never@739 3755
never@739 3756 void Assembler::fdecstp() {
never@739 3757 emit_byte(0xD9);
never@739 3758 emit_byte(0xF6);
never@739 3759 }
never@739 3760
never@739 3761 void Assembler::fdiv(int i) {
never@739 3762 emit_farith(0xD8, 0xF0, i);
never@739 3763 }
never@739 3764
never@739 3765 void Assembler::fdiv_d(Address src) {
never@739 3766 InstructionMark im(this);
never@739 3767 emit_byte(0xDC);
never@739 3768 emit_operand32(rsi, src);
never@739 3769 }
never@739 3770
never@739 3771 void Assembler::fdiv_s(Address src) {
never@739 3772 InstructionMark im(this);
never@739 3773 emit_byte(0xD8);
never@739 3774 emit_operand32(rsi, src);
never@739 3775 }
never@739 3776
never@739 3777 void Assembler::fdiva(int i) {
never@739 3778 emit_farith(0xDC, 0xF8, i);
never@739 3779 }
never@739 3780
never@739 3781 // Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
never@739 3782 // is erroneous for some of the floating-point instructions below.
never@739 3783
never@739 3784 void Assembler::fdivp(int i) {
never@739 3785 emit_farith(0xDE, 0xF8, i); // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
never@739 3786 }
never@739 3787
never@739 3788 void Assembler::fdivr(int i) {
never@739 3789 emit_farith(0xD8, 0xF8, i);
never@739 3790 }
never@739 3791
never@739 3792 void Assembler::fdivr_d(Address src) {
never@739 3793 InstructionMark im(this);
never@739 3794 emit_byte(0xDC);
never@739 3795 emit_operand32(rdi, src);
never@739 3796 }
never@739 3797
never@739 3798 void Assembler::fdivr_s(Address src) {
never@739 3799 InstructionMark im(this);
never@739 3800 emit_byte(0xD8);
never@739 3801 emit_operand32(rdi, src);
never@739 3802 }
never@739 3803
never@739 3804 void Assembler::fdivra(int i) {
never@739 3805 emit_farith(0xDC, 0xF0, i);
never@739 3806 }
never@739 3807
never@739 3808 void Assembler::fdivrp(int i) {
never@739 3809 emit_farith(0xDE, 0xF0, i); // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
never@739 3810 }
never@739 3811
never@739 3812 void Assembler::ffree(int i) {
never@739 3813 emit_farith(0xDD, 0xC0, i);
never@739 3814 }
never@739 3815
never@739 3816 void Assembler::fild_d(Address adr) {
never@739 3817 InstructionMark im(this);
never@739 3818 emit_byte(0xDF);
never@739 3819 emit_operand32(rbp, adr);
never@739 3820 }
never@739 3821
never@739 3822 void Assembler::fild_s(Address adr) {
never@739 3823 InstructionMark im(this);
never@739 3824 emit_byte(0xDB);
never@739 3825 emit_operand32(rax, adr);
never@739 3826 }
never@739 3827
never@739 3828 void Assembler::fincstp() {
never@739 3829 emit_byte(0xD9);
never@739 3830 emit_byte(0xF7);
never@739 3831 }
never@739 3832
never@739 3833 void Assembler::finit() {
never@739 3834 emit_byte(0x9B);
never@739 3835 emit_byte(0xDB);
never@739 3836 emit_byte(0xE3);
never@739 3837 }
never@739 3838
never@739 3839 void Assembler::fist_s(Address adr) {
never@739 3840 InstructionMark im(this);
never@739 3841 emit_byte(0xDB);
never@739 3842 emit_operand32(rdx, adr);
never@739 3843 }
never@739 3844
never@739 3845 void Assembler::fistp_d(Address adr) {
never@739 3846 InstructionMark im(this);
never@739 3847 emit_byte(0xDF);
never@739 3848 emit_operand32(rdi, adr);
never@739 3849 }
never@739 3850
never@739 3851 void Assembler::fistp_s(Address adr) {
never@739 3852 InstructionMark im(this);
never@739 3853 emit_byte(0xDB);
never@739 3854 emit_operand32(rbx, adr);
never@739 3855 }
duke@435 3856
duke@435 3857 void Assembler::fld1() {
duke@435 3858 emit_byte(0xD9);
duke@435 3859 emit_byte(0xE8);
duke@435 3860 }
duke@435 3861
never@739 3862 void Assembler::fld_d(Address adr) {
never@739 3863 InstructionMark im(this);
never@739 3864 emit_byte(0xDD);
never@739 3865 emit_operand32(rax, adr);
never@739 3866 }
never@739 3867
never@739 3868 void Assembler::fld_s(Address adr) {
never@739 3869 InstructionMark im(this);
never@739 3870 emit_byte(0xD9);
never@739 3871 emit_operand32(rax, adr);
never@739 3872 }
never@739 3873
never@739 3874
never@739 3875 void Assembler::fld_s(int index) {
never@739 3876 emit_farith(0xD9, 0xC0, index);
never@739 3877 }
never@739 3878
never@739 3879 void Assembler::fld_x(Address adr) {
never@739 3880 InstructionMark im(this);
never@739 3881 emit_byte(0xDB);
never@739 3882 emit_operand32(rbp, adr);
never@739 3883 }
never@739 3884
never@739 3885 void Assembler::fldcw(Address src) {
never@739 3886 InstructionMark im(this);
never@739 3887   emit_byte(0xD9);
never@739 3888 emit_operand32(rbp, src);
never@739 3889 }
never@739 3890
never@739 3891 void Assembler::fldenv(Address src) {
never@739 3892 InstructionMark im(this);
never@739 3893 emit_byte(0xD9);
never@739 3894 emit_operand32(rsp, src);
never@739 3895 }
never@739 3896
never@739 3897 void Assembler::fldlg2() {
never@739 3898 emit_byte(0xD9);
never@739 3899 emit_byte(0xEC);
never@739 3900 }
never@739 3901
never@739 3902 void Assembler::fldln2() {
never@739 3903 emit_byte(0xD9);
never@739 3904 emit_byte(0xED);
never@739 3905 }
duke@435 3906
duke@435 3907 void Assembler::fldz() {
duke@435 3908 emit_byte(0xD9);
duke@435 3909 emit_byte(0xEE);
duke@435 3910 }
duke@435 3911
duke@435 3912 void Assembler::flog() {
duke@435 3913 fldln2();
duke@435 3914 fxch();
duke@435 3915 fyl2x();
duke@435 3916 }
duke@435 3917
duke@435 3918 void Assembler::flog10() {
duke@435 3919 fldlg2();
duke@435 3920 fxch();
duke@435 3921 fyl2x();
duke@435 3922 }
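// Both of the above rely on fyl2x, which computes ST(1) * log2(ST(0)) and pops:
// with ln(2) (resp. log10(2)) loaded first, the result is ln(2) * log2(x) = ln(x)
// and log10(2) * log2(x) = log10(x).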
duke@435 3923
never@739 3924 void Assembler::fmul(int i) {
never@739 3925 emit_farith(0xD8, 0xC8, i);
never@739 3926 }
never@739 3927
never@739 3928 void Assembler::fmul_d(Address src) {
never@739 3929 InstructionMark im(this);
never@739 3930 emit_byte(0xDC);
never@739 3931 emit_operand32(rcx, src);
never@739 3932 }
never@739 3933
never@739 3934 void Assembler::fmul_s(Address src) {
never@739 3935 InstructionMark im(this);
never@739 3936 emit_byte(0xD8);
never@739 3937 emit_operand32(rcx, src);
never@739 3938 }
never@739 3939
never@739 3940 void Assembler::fmula(int i) {
never@739 3941 emit_farith(0xDC, 0xC8, i);
never@739 3942 }
never@739 3943
never@739 3944 void Assembler::fmulp(int i) {
never@739 3945 emit_farith(0xDE, 0xC8, i);
never@739 3946 }
never@739 3947
never@739 3948 void Assembler::fnsave(Address dst) {
never@739 3949 InstructionMark im(this);
never@739 3950 emit_byte(0xDD);
never@739 3951 emit_operand32(rsi, dst);
never@739 3952 }
never@739 3953
never@739 3954 void Assembler::fnstcw(Address src) {
never@739 3955 InstructionMark im(this);
never@739 3956 emit_byte(0x9B);
never@739 3957 emit_byte(0xD9);
never@739 3958 emit_operand32(rdi, src);
never@739 3959 }
never@739 3960
never@739 3961 void Assembler::fnstsw_ax() {
never@739 3962   emit_byte(0xDF);
never@739 3963 emit_byte(0xE0);
never@739 3964 }
never@739 3965
never@739 3966 void Assembler::fprem() {
never@739 3967 emit_byte(0xD9);
never@739 3968 emit_byte(0xF8);
never@739 3969 }
never@739 3970
never@739 3971 void Assembler::fprem1() {
never@739 3972 emit_byte(0xD9);
never@739 3973 emit_byte(0xF5);
never@739 3974 }
never@739 3975
never@739 3976 void Assembler::frstor(Address src) {
never@739 3977 InstructionMark im(this);
never@739 3978 emit_byte(0xDD);
never@739 3979 emit_operand32(rsp, src);
never@739 3980 }
duke@435 3981
duke@435 3982 void Assembler::fsin() {
duke@435 3983 emit_byte(0xD9);
duke@435 3984 emit_byte(0xFE);
duke@435 3985 }
duke@435 3986
never@739 3987 void Assembler::fsqrt() {
duke@435 3988 emit_byte(0xD9);
never@739 3989 emit_byte(0xFA);
never@739 3990 }
never@739 3991
never@739 3992 void Assembler::fst_d(Address adr) {
never@739 3993 InstructionMark im(this);
never@739 3994 emit_byte(0xDD);
never@739 3995 emit_operand32(rdx, adr);
never@739 3996 }
never@739 3997
never@739 3998 void Assembler::fst_s(Address adr) {
never@739 3999 InstructionMark im(this);
never@739 4000 emit_byte(0xD9);
never@739 4001 emit_operand32(rdx, adr);
never@739 4002 }
never@739 4003
never@739 4004 void Assembler::fstp_d(Address adr) {
never@739 4005 InstructionMark im(this);
never@739 4006 emit_byte(0xDD);
never@739 4007 emit_operand32(rbx, adr);
never@739 4008 }
never@739 4009
never@739 4010 void Assembler::fstp_d(int index) {
never@739 4011 emit_farith(0xDD, 0xD8, index);
never@739 4012 }
never@739 4013
never@739 4014 void Assembler::fstp_s(Address adr) {
never@739 4015 InstructionMark im(this);
never@739 4016 emit_byte(0xD9);
never@739 4017 emit_operand32(rbx, adr);
never@739 4018 }
never@739 4019
never@739 4020 void Assembler::fstp_x(Address adr) {
never@739 4021 InstructionMark im(this);
never@739 4022 emit_byte(0xDB);
never@739 4023 emit_operand32(rdi, adr);
never@739 4024 }
never@739 4025
never@739 4026 void Assembler::fsub(int i) {
never@739 4027 emit_farith(0xD8, 0xE0, i);
never@739 4028 }
never@739 4029
never@739 4030 void Assembler::fsub_d(Address src) {
never@739 4031 InstructionMark im(this);
never@739 4032 emit_byte(0xDC);
never@739 4033 emit_operand32(rsp, src);
never@739 4034 }
never@739 4035
never@739 4036 void Assembler::fsub_s(Address src) {
never@739 4037 InstructionMark im(this);
never@739 4038 emit_byte(0xD8);
never@739 4039 emit_operand32(rsp, src);
never@739 4040 }
never@739 4041
never@739 4042 void Assembler::fsuba(int i) {
never@739 4043 emit_farith(0xDC, 0xE8, i);
never@739 4044 }
never@739 4045
never@739 4046 void Assembler::fsubp(int i) {
never@739 4047 emit_farith(0xDE, 0xE8, i); // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
never@739 4048 }
never@739 4049
never@739 4050 void Assembler::fsubr(int i) {
never@739 4051 emit_farith(0xD8, 0xE8, i);
never@739 4052 }
never@739 4053
never@739 4054 void Assembler::fsubr_d(Address src) {
never@739 4055 InstructionMark im(this);
never@739 4056 emit_byte(0xDC);
never@739 4057 emit_operand32(rbp, src);
never@739 4058 }
never@739 4059
never@739 4060 void Assembler::fsubr_s(Address src) {
never@739 4061 InstructionMark im(this);
never@739 4062 emit_byte(0xD8);
never@739 4063 emit_operand32(rbp, src);
never@739 4064 }
never@739 4065
never@739 4066 void Assembler::fsubra(int i) {
never@739 4067 emit_farith(0xDC, 0xE0, i);
never@739 4068 }
never@739 4069
never@739 4070 void Assembler::fsubrp(int i) {
never@739 4071 emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
duke@435 4072 }
duke@435 4073
duke@435 4074 void Assembler::ftan() {
duke@435 4075 emit_byte(0xD9);
duke@435 4076 emit_byte(0xF2);
duke@435 4077 emit_byte(0xDD);
duke@435 4078 emit_byte(0xD8);
duke@435 4079 }
duke@435 4080
never@739 4081 void Assembler::ftst() {
duke@435 4082 emit_byte(0xD9);
never@739 4083 emit_byte(0xE4);
never@739 4084 }
duke@435 4085
duke@435 4086 void Assembler::fucomi(int i) {
duke@435 4087 // make sure the instruction is supported (introduced for P6, together with cmov)
duke@435 4088 guarantee(VM_Version::supports_cmov(), "illegal instruction");
duke@435 4089 emit_farith(0xDB, 0xE8, i);
duke@435 4090 }
duke@435 4091
duke@435 4092 void Assembler::fucomip(int i) {
duke@435 4093 // make sure the instruction is supported (introduced for P6, together with cmov)
duke@435 4094 guarantee(VM_Version::supports_cmov(), "illegal instruction");
duke@435 4095 emit_farith(0xDF, 0xE8, i);
duke@435 4096 }
duke@435 4097
duke@435 4098 void Assembler::fwait() {
duke@435 4099 emit_byte(0x9B);
duke@435 4100 }
duke@435 4101
never@739 4102 void Assembler::fxch(int i) {
never@739 4103 emit_farith(0xD9, 0xC8, i);
never@739 4104 }
never@739 4105
never@739 4106 void Assembler::fyl2x() {
duke@435 4107 emit_byte(0xD9);
never@739 4108 emit_byte(0xF1);
never@739 4109 }
never@739 4110
roland@3787 4111 void Assembler::frndint() {
roland@3787 4112 emit_byte(0xD9);
roland@3787 4113 emit_byte(0xFC);
roland@3787 4114 }
roland@3787 4115
roland@3787 4116 void Assembler::f2xm1() {
roland@3787 4117 emit_byte(0xD9);
roland@3787 4118 emit_byte(0xF0);
roland@3787 4119 }
roland@3787 4120
roland@3787 4121 void Assembler::fldl2e() {
roland@3787 4122 emit_byte(0xD9);
roland@3787 4123 emit_byte(0xEA);
roland@3787 4124 }
roland@3787 4125
kvn@3388 4126 // SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding.
kvn@3388 4127 static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 };
kvn@3388 4128 // SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding.
kvn@3388 4129 static int simd_opc[4] = { 0, 0, 0x38, 0x3A };
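// Note (added for clarity): these tables map the VEX-style enums used throughout
// this file back to legacy SSE bytes. For example, an instruction described as
// (pre = VEX_SIMD_66, opc = VEX_OPCODE_0F_38) -- such as pshufb -- has the helpers
// below emit 66 [REX] 0F 38, after which the caller appends the instruction's own
// opcode byte.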
kvn@3388 4130
kvn@3388 4131 // Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding.
kvn@3388 4132 void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
kvn@3388 4133 if (pre > 0) {
kvn@3388 4134 emit_byte(simd_pre[pre]);
kvn@3388 4135 }
kvn@3388 4136 if (rex_w) {
kvn@3388 4137 prefixq(adr, xreg);
kvn@3388 4138 } else {
kvn@3388 4139 prefix(adr, xreg);
kvn@3388 4140 }
kvn@3388 4141 if (opc > 0) {
kvn@3388 4142 emit_byte(0x0F);
kvn@3388 4143 int opc2 = simd_opc[opc];
kvn@3388 4144 if (opc2 > 0) {
kvn@3388 4145 emit_byte(opc2);
kvn@3388 4146 }
kvn@3388 4147 }
kvn@3388 4148 }
kvn@3388 4149
kvn@3388 4150 int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
kvn@3388 4151 if (pre > 0) {
kvn@3388 4152 emit_byte(simd_pre[pre]);
kvn@3388 4153 }
kvn@3388 4154 int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) :
kvn@3388 4155 prefix_and_encode(dst_enc, src_enc);
kvn@3388 4156 if (opc > 0) {
kvn@3388 4157 emit_byte(0x0F);
kvn@3388 4158 int opc2 = simd_opc[opc];
kvn@3388 4159 if (opc2 > 0) {
kvn@3388 4160 emit_byte(opc2);
kvn@3388 4161 }
kvn@3388 4162 }
kvn@3388 4163 return encode;
kvn@3388 4164 }
kvn@3388 4165
kvn@3388 4166
kvn@3388 4167 void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, int nds_enc, VexSimdPrefix pre, VexOpcode opc, bool vector256) {
kvn@3388 4168 if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) {
kvn@3388 4169 prefix(VEX_3bytes);
kvn@3388 4170
kvn@3388 4171 int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0);
kvn@3388 4172 byte1 = (~byte1) & 0xE0;
kvn@3388 4173 byte1 |= opc;
kvn@3388 4174 a_byte(byte1);
kvn@3388 4175
kvn@3388 4176 int byte2 = ((~nds_enc) & 0xf) << 3;
kvn@3388 4177 byte2 |= (vex_w ? VEX_W : 0) | (vector256 ? 4 : 0) | pre;
kvn@3388 4178 emit_byte(byte2);
kvn@3388 4179 } else {
kvn@3388 4180 prefix(VEX_2bytes);
kvn@3388 4181
kvn@3388 4182 int byte1 = vex_r ? VEX_R : 0;
kvn@3388 4183 byte1 = (~byte1) & 0x80;
kvn@3388 4184 byte1 |= ((~nds_enc) & 0xf) << 3;
kvn@3388 4185 byte1 |= (vector256 ? 4 : 0) | pre;
kvn@3388 4186 emit_byte(byte1);
kvn@3388 4187 }
kvn@3388 4188 }
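// Note (added for clarity): the bytes built above follow the AVX prefix layout.
// 3-byte VEX: C4, then [~R ~X ~B | m-mmmm] (m-mmmm selects the 0F / 0F 38 / 0F 3A
// opcode map), then [W | ~vvvv | L | pp]; 2-byte VEX: C5, then [~R | ~vvvv | L | pp].
// R/X/B and vvvv are stored inverted, L = 1 selects 256-bit vectors (vector256),
// and pp encodes the implied SIMD prefix (none/66/F3/F2) -- hence the complement
// of byte1/nds_enc and the OR of (vector256 ? 4 : 0) | pre.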
kvn@3388 4189
kvn@3388 4190 void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, bool vex_w, bool vector256){
kvn@3388 4191 bool vex_r = (xreg_enc >= 8);
kvn@3388 4192 bool vex_b = adr.base_needs_rex();
kvn@3388 4193 bool vex_x = adr.index_needs_rex();
kvn@3388 4194 vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector256);
kvn@3388 4195 }
kvn@3388 4196
kvn@3388 4197 int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool vex_w, bool vector256) {
kvn@3388 4198 bool vex_r = (dst_enc >= 8);
kvn@3388 4199 bool vex_b = (src_enc >= 8);
kvn@3388 4200 bool vex_x = false;
kvn@3388 4201 vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector256);
kvn@3388 4202 return (((dst_enc & 7) << 3) | (src_enc & 7));
kvn@3388 4203 }
kvn@3388 4204
kvn@3388 4205
kvn@3388 4206 void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre, VexOpcode opc, bool rex_w, bool vector256) {
kvn@3388 4207 if (UseAVX > 0) {
kvn@3388 4208 int xreg_enc = xreg->encoding();
kvn@3388 4209 int nds_enc = nds->is_valid() ? nds->encoding() : 0;
kvn@3388 4210 vex_prefix(adr, nds_enc, xreg_enc, pre, opc, rex_w, vector256);
kvn@3388 4211 } else {
kvn@3388 4212 assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding");
kvn@3388 4213 rex_prefix(adr, xreg, pre, opc, rex_w);
kvn@3388 4214 }
kvn@3388 4215 }
kvn@3388 4216
kvn@3388 4217 int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre, VexOpcode opc, bool rex_w, bool vector256) {
kvn@3388 4218 int dst_enc = dst->encoding();
kvn@3388 4219 int src_enc = src->encoding();
kvn@3388 4220 if (UseAVX > 0) {
kvn@3388 4221 int nds_enc = nds->is_valid() ? nds->encoding() : 0;
kvn@3388 4222 return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, rex_w, vector256);
kvn@3388 4223 } else {
kvn@3388 4224 assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding");
kvn@3388 4225 return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, rex_w);
kvn@3388 4226 }
kvn@3388 4227 }
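// Note (added for clarity): these two helpers pick the encoding at runtime. With
// UseAVX > 0 they emit a VEX prefix, so nds can be a genuinely distinct
// non-destructive source; otherwise nds must alias dst/src (or be xnoreg) and a
// legacy REX-prefixed SSE encoding is produced, which is what the asserts check.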
never@739 4228
kvn@4001 4229 void Assembler::emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre) {
kvn@4001 4230 InstructionMark im(this);
kvn@4001 4231 simd_prefix(dst, dst, src, pre);
kvn@4001 4232 emit_byte(opcode);
kvn@4001 4233 emit_operand(dst, src);
kvn@4001 4234 }
kvn@4001 4235
kvn@4001 4236 void Assembler::emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre) {
kvn@4001 4237 int encode = simd_prefix_and_encode(dst, dst, src, pre);
kvn@4001 4238 emit_byte(opcode);
kvn@4001 4239 emit_byte(0xC0 | encode);
kvn@4001 4240 }
kvn@4001 4241
kvn@4001 4242 // Versions with no second source register (non-destructive source).
kvn@4001 4243 void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre) {
kvn@4001 4244 InstructionMark im(this);
kvn@4001 4245 simd_prefix(dst, xnoreg, src, pre);
kvn@4001 4246 emit_byte(opcode);
kvn@4001 4247 emit_operand(dst, src);
kvn@4001 4248 }
kvn@4001 4249
kvn@4001 4250 void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre) {
kvn@4001 4251 int encode = simd_prefix_and_encode(dst, xnoreg, src, pre);
kvn@4001 4252 emit_byte(opcode);
kvn@4001 4253 emit_byte(0xC0 | encode);
kvn@4001 4254 }
kvn@4001 4255
kvn@4001 4256 // 3-operands AVX instructions
kvn@4001 4257 void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
kvn@4001 4258 Address src, VexSimdPrefix pre, bool vector256) {
kvn@4001 4259 InstructionMark im(this);
kvn@4001 4260 vex_prefix(dst, nds, src, pre, vector256);
kvn@4001 4261 emit_byte(opcode);
kvn@4001 4262 emit_operand(dst, src);
kvn@4001 4263 }
kvn@4001 4264
kvn@4001 4265 void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
kvn@4001 4266 XMMRegister src, VexSimdPrefix pre, bool vector256) {
kvn@4001 4267 int encode = vex_prefix_and_encode(dst, nds, src, pre, vector256);
kvn@4001 4268 emit_byte(opcode);
kvn@4001 4269 emit_byte(0xC0 | encode);
kvn@4001 4270 }
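// Note (added for clarity): the typical caller pattern, assuming the usual SSE
// opcode assignments, is e.g. emit_simd_arith(0x58, dst, src, VEX_SIMD_F2) for a
// scalar double add (addsd), which becomes F2 [REX] 0F 58 /r on pre-AVX hardware
// and the equivalent VEX.NDS form when UseAVX > 0.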
kvn@4001 4271
never@739 4272 #ifndef _LP64
never@739 4273
never@739 4274 void Assembler::incl(Register dst) {
never@739 4275 // Don't use it directly. Use MacroAssembler::incrementl() instead.
kvn@3388 4276 emit_byte(0x40 | dst->encoding());
never@739 4277 }
never@739 4278
never@739 4279 void Assembler::lea(Register dst, Address src) {
never@739 4280 leal(dst, src);
never@739 4281 }
never@739 4282
never@739 4283 void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
never@739 4284 InstructionMark im(this);
never@739 4285 emit_byte(0xC7);
never@739 4286 emit_operand(rax, dst);
never@739 4287 emit_data((int)imm32, rspec, 0);
never@739 4288 }
never@739 4289
kvn@1077 4290 void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
kvn@1077 4291 InstructionMark im(this);
kvn@1077 4292 int encode = prefix_and_encode(dst->encoding());
kvn@1077 4293 emit_byte(0xB8 | encode);
kvn@1077 4294 emit_data((int)imm32, rspec, 0);
kvn@1077 4295 }
never@739 4296
never@739 4297 void Assembler::popa() { // 32bit
never@739 4298 emit_byte(0x61);
never@739 4299 }
never@739 4300
never@739 4301 void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
never@739 4302 InstructionMark im(this);
never@739 4303 emit_byte(0x68);
never@739 4304 emit_data(imm32, rspec, 0);
never@739 4305 }
never@739 4306
never@739 4307 void Assembler::pusha() { // 32bit
never@739 4308 emit_byte(0x60);
never@739 4309 }
never@739 4310
never@739 4311 void Assembler::set_byte_if_not_zero(Register dst) {
duke@435 4312 emit_byte(0x0F);
never@739 4313 emit_byte(0x95);
never@739 4314 emit_byte(0xE0 | dst->encoding());
never@739 4315 }
never@739 4316
never@739 4317 void Assembler::shldl(Register dst, Register src) {
duke@435 4318 emit_byte(0x0F);
never@739 4319 emit_byte(0xA5);
never@739 4320 emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
never@739 4321 }
never@739 4322
never@739 4323 void Assembler::shrdl(Register dst, Register src) {
duke@435 4324 emit_byte(0x0F);
never@739 4325 emit_byte(0xAD);
never@739 4326 emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
never@739 4327 }
never@739 4328
never@739 4329 #else // LP64
never@739 4330
iveresov@1804 4331 void Assembler::set_byte_if_not_zero(Register dst) {
iveresov@1804 4332 int enc = prefix_and_encode(dst->encoding(), true);
iveresov@1804 4333 emit_byte(0x0F);
iveresov@1804 4334 emit_byte(0x95);
iveresov@1804 4335 emit_byte(0xE0 | enc);
iveresov@1804 4336 }
iveresov@1804 4337
never@739 4338 // 64bit only pieces of the assembler
never@739 4339 // This should only be used by 64bit instructions that can use rip-relative
never@739 4340 // it cannot be used by instructions that want an immediate value.
never@739 4341
never@739 4342 bool Assembler::reachable(AddressLiteral adr) {
never@739 4343 int64_t disp;
never@739 4344 // A reloc of none will force a 64bit literal to the code stream. Likely a placeholder
never@739 4345 // for something that will be patched later and we need to be certain it will
never@739 4346 // always be reachable.
never@739 4347 if (adr.reloc() == relocInfo::none) {
never@739 4348 return false;
never@739 4349 }
never@739 4350 if (adr.reloc() == relocInfo::internal_word_type) {
never@739 4351 // This should be rip relative and easily reachable.
never@739 4352 return true;
never@739 4353 }
never@739 4354 if (adr.reloc() == relocInfo::virtual_call_type ||
never@739 4355 adr.reloc() == relocInfo::opt_virtual_call_type ||
never@739 4356 adr.reloc() == relocInfo::static_call_type ||
never@739 4357 adr.reloc() == relocInfo::static_stub_type ) {
never@739 4358 // This should be rip relative within the code cache and easily
never@739 4359 // reachable until we get huge code caches. (At which point
never@739 4360 // ic code is going to have issues).
never@739 4361 return true;
never@739 4362 }
never@739 4363 if (adr.reloc() != relocInfo::external_word_type &&
never@739 4364 adr.reloc() != relocInfo::poll_return_type && // these are really external_word but need special
never@739 4365 adr.reloc() != relocInfo::poll_type && // relocs to identify them
never@739 4366 adr.reloc() != relocInfo::runtime_call_type ) {
never@739 4367 return false;
never@739 4368 }
never@739 4369
never@739 4370 // Stress the correction code
never@739 4371 if (ForceUnreachable) {
never@739 4372 // Must be runtimecall reloc, see if it is in the codecache
never@739 4373 // Flipping stuff in the codecache to be unreachable causes issues
never@739 4374 // with things like inline caches where the additional instructions
never@739 4375 // are not handled.
never@739 4376 if (CodeCache::find_blob(adr._target) == NULL) {
never@739 4377 return false;
never@739 4378 }
never@739 4379 }
never@739 4380 // For external_word_type/runtime_call_type if it is reachable from where we
never@739 4381 // are now (possibly a temp buffer) and where we might end up
never@739 4382 // anywhere in the codeCache then we are always reachable.
never@739 4383 // This would have to become more pessimistic if we ever
never@739 4384 // save/restore shared code.
never@739 4385 disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
never@739 4386 if (!is_simm32(disp)) return false;
never@739 4387 disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
never@739 4388 if (!is_simm32(disp)) return false;
never@739 4389
twisti@4317 4390 disp = (int64_t)adr._target - ((int64_t)pc() + sizeof(int));
never@739 4391
never@739 4392 // Because rip relative is a disp + address_of_next_instruction and we
never@739 4393 // don't know the value of address_of_next_instruction we apply a fudge factor
never@739 4394 // to make sure we will be ok no matter the size of the instruction we get placed into.
never@739 4395 // We don't have to fudge the checks above here because they are already worst case.
never@739 4396
never@739 4397 // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp , 4-byte literal
never@739 4398 // + 4 because better safe than sorry.
never@739 4399 const int fudge = 12 + 4;
never@739 4400 if (disp < 0) {
never@739 4401 disp -= fudge;
never@739 4402 } else {
never@739 4403 disp += fudge;
never@739 4404 }
never@739 4405 return is_simm32(disp);
never@739 4406 }
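// Note (added for clarity): callers in MacroAssembler typically use this as
// if (reachable(adr)) { /* rip-relative form */ } else { lea(scratch, adr); ... },
// i.e. the answer only decides whether a 64-bit address must first be materialized
// in a scratch register.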
never@739 4407
iveresov@2686 4408 // Check if the polling page is not reachable from the code cache using rip-relative
iveresov@2686 4409 // addressing.
iveresov@2686 4410 bool Assembler::is_polling_page_far() {
iveresov@2686 4411 intptr_t addr = (intptr_t)os::get_polling_page();
never@3314 4412 return ForceUnreachable ||
never@3314 4413 !is_simm32(addr - (intptr_t)CodeCache::low_bound()) ||
iveresov@2686 4414 !is_simm32(addr - (intptr_t)CodeCache::high_bound());
iveresov@2686 4415 }
iveresov@2686 4416
never@739 4417 void Assembler::emit_data64(jlong data,
never@739 4418 relocInfo::relocType rtype,
never@739 4419 int format) {
never@739 4420 if (rtype == relocInfo::none) {
twisti@4317 4421 emit_int64(data);
never@739 4422 } else {
never@739 4423 emit_data64(data, Relocation::spec_simple(rtype), format);
never@739 4424 }
never@739 4425 }
never@739 4426
never@739 4427 void Assembler::emit_data64(jlong data,
never@739 4428 RelocationHolder const& rspec,
never@739 4429 int format) {
never@739 4430 assert(imm_operand == 0, "default format must be immediate in this file");
never@739 4431 assert(imm_operand == format, "must be immediate");
never@739 4432 assert(inst_mark() != NULL, "must be inside InstructionMark");
never@739 4433 // Do not use AbstractAssembler::relocate, which is not intended for
never@739 4434 // embedded words. Instead, relocate to the enclosing instruction.
never@739 4435 code_section()->relocate(inst_mark(), rspec, format);
never@739 4436 #ifdef ASSERT
never@739 4437 check_relocation(rspec, format);
never@739 4438 #endif
twisti@4317 4439 emit_int64(data);
never@739 4440 }
never@739 4441
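// Note (added for clarity): the helpers below build REX prefixes. A REX byte is
// 0100WRXB: W selects 64-bit operand size, while R/X/B extend the ModRM reg field,
// the SIB index and the ModRM rm / SIB base respectively, which is what makes
// registers r8-r15 (encodings 8-15) addressable; hence the encoding >= 8 checks
// and the -= 8 adjustments.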
never@739 4442 int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
never@739 4443 if (reg_enc >= 8) {
never@739 4444 prefix(REX_B);
never@739 4445 reg_enc -= 8;
never@739 4446 } else if (byteinst && reg_enc >= 4) {
never@739 4447 prefix(REX);
never@739 4448 }
never@739 4449 return reg_enc;
never@739 4450 }
never@739 4451
never@739 4452 int Assembler::prefixq_and_encode(int reg_enc) {
never@739 4453 if (reg_enc < 8) {
never@739 4454 prefix(REX_W);
never@739 4455 } else {
never@739 4456 prefix(REX_WB);
never@739 4457 reg_enc -= 8;
never@739 4458 }
never@739 4459 return reg_enc;
never@739 4460 }
never@739 4461
never@739 4462 int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) {
never@739 4463 if (dst_enc < 8) {
never@739 4464 if (src_enc >= 8) {
never@739 4465 prefix(REX_B);
never@739 4466 src_enc -= 8;
never@739 4467 } else if (byteinst && src_enc >= 4) {
never@739 4468 prefix(REX);
never@739 4469 }
never@739 4470 } else {
never@739 4471 if (src_enc < 8) {
never@739 4472 prefix(REX_R);
never@739 4473 } else {
never@739 4474 prefix(REX_RB);
never@739 4475 src_enc -= 8;
never@739 4476 }
never@739 4477 dst_enc -= 8;
never@739 4478 }
never@739 4479 return dst_enc << 3 | src_enc;
never@739 4480 }
never@739 4481
never@739 4482 int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
never@739 4483 if (dst_enc < 8) {
never@739 4484 if (src_enc < 8) {
never@739 4485 prefix(REX_W);
never@739 4486 } else {
never@739 4487 prefix(REX_WB);
never@739 4488 src_enc -= 8;
never@739 4489 }
never@739 4490 } else {
never@739 4491 if (src_enc < 8) {
never@739 4492 prefix(REX_WR);
never@739 4493 } else {
never@739 4494 prefix(REX_WRB);
never@739 4495 src_enc -= 8;
never@739 4496 }
never@739 4497 dst_enc -= 8;
never@739 4498 }
never@739 4499 return dst_enc << 3 | src_enc;
never@739 4500 }
never@739 4501
never@739 4502 void Assembler::prefix(Register reg) {
never@739 4503 if (reg->encoding() >= 8) {
never@739 4504 prefix(REX_B);
never@739 4505 }
never@739 4506 }
never@739 4507
never@739 4508 void Assembler::prefix(Address adr) {
never@739 4509 if (adr.base_needs_rex()) {
never@739 4510 if (adr.index_needs_rex()) {
never@739 4511 prefix(REX_XB);
never@739 4512 } else {
never@739 4513 prefix(REX_B);
never@739 4514 }
never@739 4515 } else {
never@739 4516 if (adr.index_needs_rex()) {
never@739 4517 prefix(REX_X);
never@739 4518 }
never@739 4519 }
never@739 4520 }
never@739 4521
never@739 4522 void Assembler::prefixq(Address adr) {
never@739 4523 if (adr.base_needs_rex()) {
never@739 4524 if (adr.index_needs_rex()) {
never@739 4525 prefix(REX_WXB);
never@739 4526 } else {
never@739 4527 prefix(REX_WB);
never@739 4528 }
never@739 4529 } else {
never@739 4530 if (adr.index_needs_rex()) {
never@739 4531 prefix(REX_WX);
never@739 4532 } else {
never@739 4533 prefix(REX_W);
never@739 4534 }
never@739 4535 }
never@739 4536 }
never@739 4537
never@739 4538
never@739 4539 void Assembler::prefix(Address adr, Register reg, bool byteinst) {
never@739 4540 if (reg->encoding() < 8) {
never@739 4541 if (adr.base_needs_rex()) {
never@739 4542 if (adr.index_needs_rex()) {
never@739 4543 prefix(REX_XB);
never@739 4544 } else {
never@739 4545 prefix(REX_B);
never@739 4546 }
never@739 4547 } else {
never@739 4548 if (adr.index_needs_rex()) {
never@739 4549 prefix(REX_X);
twisti@3053 4550 } else if (byteinst && reg->encoding() >= 4 ) {
never@739 4551 prefix(REX);
never@739 4552 }
never@739 4553 }
never@739 4554 } else {
never@739 4555 if (adr.base_needs_rex()) {
never@739 4556 if (adr.index_needs_rex()) {
never@739 4557 prefix(REX_RXB);
never@739 4558 } else {
never@739 4559 prefix(REX_RB);
never@739 4560 }
never@739 4561 } else {
never@739 4562 if (adr.index_needs_rex()) {
never@739 4563 prefix(REX_RX);
never@739 4564 } else {
never@739 4565 prefix(REX_R);
never@739 4566 }
never@739 4567 }
never@739 4568 }
never@739 4569 }
never@739 4570
never@739 4571 void Assembler::prefixq(Address adr, Register src) {
never@739 4572 if (src->encoding() < 8) {
never@739 4573 if (adr.base_needs_rex()) {
never@739 4574 if (adr.index_needs_rex()) {
never@739 4575 prefix(REX_WXB);
never@739 4576 } else {
never@739 4577 prefix(REX_WB);
never@739 4578 }
never@739 4579 } else {
never@739 4580 if (adr.index_needs_rex()) {
never@739 4581 prefix(REX_WX);
never@739 4582 } else {
never@739 4583 prefix(REX_W);
never@739 4584 }
never@739 4585 }
never@739 4586 } else {
never@739 4587 if (adr.base_needs_rex()) {
never@739 4588 if (adr.index_needs_rex()) {
never@739 4589 prefix(REX_WRXB);
never@739 4590 } else {
never@739 4591 prefix(REX_WRB);
never@739 4592 }
never@739 4593 } else {
never@739 4594 if (adr.index_needs_rex()) {
never@739 4595 prefix(REX_WRX);
never@739 4596 } else {
never@739 4597 prefix(REX_WR);
never@739 4598 }
never@739 4599 }
never@739 4600 }
never@739 4601 }
never@739 4602
never@739 4603 void Assembler::prefix(Address adr, XMMRegister reg) {
never@739 4604 if (reg->encoding() < 8) {
never@739 4605 if (adr.base_needs_rex()) {
never@739 4606 if (adr.index_needs_rex()) {
never@739 4607 prefix(REX_XB);
never@739 4608 } else {
never@739 4609 prefix(REX_B);
never@739 4610 }
never@739 4611 } else {
never@739 4612 if (adr.index_needs_rex()) {
never@739 4613 prefix(REX_X);
never@739 4614 }
never@739 4615 }
never@739 4616 } else {
never@739 4617 if (adr.base_needs_rex()) {
never@739 4618 if (adr.index_needs_rex()) {
never@739 4619 prefix(REX_RXB);
never@739 4620 } else {
never@739 4621 prefix(REX_RB);
never@739 4622 }
never@739 4623 } else {
never@739 4624 if (adr.index_needs_rex()) {
never@739 4625 prefix(REX_RX);
never@739 4626 } else {
never@739 4627 prefix(REX_R);
never@739 4628 }
never@739 4629 }
never@739 4630 }
never@739 4631 }
never@739 4632
kvn@3388 4633 void Assembler::prefixq(Address adr, XMMRegister src) {
kvn@3388 4634 if (src->encoding() < 8) {
kvn@3388 4635 if (adr.base_needs_rex()) {
kvn@3388 4636 if (adr.index_needs_rex()) {
kvn@3388 4637 prefix(REX_WXB);
kvn@3388 4638 } else {
kvn@3388 4639 prefix(REX_WB);
kvn@3388 4640 }
kvn@3388 4641 } else {
kvn@3388 4642 if (adr.index_needs_rex()) {
kvn@3388 4643 prefix(REX_WX);
kvn@3388 4644 } else {
kvn@3388 4645 prefix(REX_W);
kvn@3388 4646 }
kvn@3388 4647 }
kvn@3388 4648 } else {
kvn@3388 4649 if (adr.base_needs_rex()) {
kvn@3388 4650 if (adr.index_needs_rex()) {
kvn@3388 4651 prefix(REX_WRXB);
kvn@3388 4652 } else {
kvn@3388 4653 prefix(REX_WRB);
kvn@3388 4654 }
kvn@3388 4655 } else {
kvn@3388 4656 if (adr.index_needs_rex()) {
kvn@3388 4657 prefix(REX_WRX);
kvn@3388 4658 } else {
kvn@3388 4659 prefix(REX_WR);
kvn@3388 4660 }
kvn@3388 4661 }
kvn@3388 4662 }
kvn@3388 4663 }
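// Note (added for clarity): this REX.W variant for XMM operands exists for the few
// SSE instructions where W widens the integer operand, e.g. the cvtsi2sdq /
// cvttsd2siq family further down; it does not affect the XMM register itself.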
kvn@3388 4664
never@739 4665 void Assembler::adcq(Register dst, int32_t imm32) {
never@739 4666 (void) prefixq_and_encode(dst->encoding());
never@739 4667 emit_arith(0x81, 0xD0, dst, imm32);
never@739 4668 }
never@739 4669
never@739 4670 void Assembler::adcq(Register dst, Address src) {
never@739 4671 InstructionMark im(this);
never@739 4672 prefixq(src, dst);
never@739 4673 emit_byte(0x13);
never@739 4674 emit_operand(dst, src);
never@739 4675 }
never@739 4676
never@739 4677 void Assembler::adcq(Register dst, Register src) {
never@739 4678 (void) prefixq_and_encode(dst->encoding(), src->encoding());
never@739 4679 emit_arith(0x13, 0xC0, dst, src);
never@739 4680 }
never@739 4681
never@739 4682 void Assembler::addq(Address dst, int32_t imm32) {
never@739 4683 InstructionMark im(this);
never@739 4684 prefixq(dst);
never@739 4685 emit_arith_operand(0x81, rax, dst,imm32);
never@739 4686 }
never@739 4687
never@739 4688 void Assembler::addq(Address dst, Register src) {
never@739 4689 InstructionMark im(this);
never@739 4690 prefixq(dst, src);
never@739 4691 emit_byte(0x01);
never@739 4692 emit_operand(src, dst);
never@739 4693 }
never@739 4694
never@739 4695 void Assembler::addq(Register dst, int32_t imm32) {
never@739 4696 (void) prefixq_and_encode(dst->encoding());
never@739 4697 emit_arith(0x81, 0xC0, dst, imm32);
never@739 4698 }
never@739 4699
never@739 4700 void Assembler::addq(Register dst, Address src) {
never@739 4701 InstructionMark im(this);
never@739 4702 prefixq(src, dst);
never@739 4703 emit_byte(0x03);
never@739 4704 emit_operand(dst, src);
never@739 4705 }
never@739 4706
never@739 4707 void Assembler::addq(Register dst, Register src) {
never@739 4708 (void) prefixq_and_encode(dst->encoding(), src->encoding());
never@739 4709 emit_arith(0x03, 0xC0, dst, src);
never@739 4710 }
never@739 4711
never@2980 4712 void Assembler::andq(Address dst, int32_t imm32) {
never@2980 4713 InstructionMark im(this);
never@2980 4714 prefixq(dst);
never@2980 4715 emit_byte(0x81);
never@2980 4716 emit_operand(rsp, dst, 4);
never@2980 4717 emit_long(imm32);
never@2980 4718 }
never@2980 4719
never@739 4720 void Assembler::andq(Register dst, int32_t imm32) {
never@739 4721 (void) prefixq_and_encode(dst->encoding());
never@739 4722 emit_arith(0x81, 0xE0, dst, imm32);
never@739 4723 }
never@739 4724
never@739 4725 void Assembler::andq(Register dst, Address src) {
never@739 4726 InstructionMark im(this);
never@739 4727 prefixq(src, dst);
never@739 4728 emit_byte(0x23);
never@739 4729 emit_operand(dst, src);
never@739 4730 }
never@739 4731
never@739 4732 void Assembler::andq(Register dst, Register src) {
never@739 4733 (void) prefixq_and_encode(dst->encoding(), src->encoding());
never@739 4734 emit_arith(0x23, 0xC0, dst, src);
never@739 4735 }
never@739 4736
twisti@1210 4737 void Assembler::bsfq(Register dst, Register src) {
twisti@1210 4738 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
twisti@1210 4739 emit_byte(0x0F);
twisti@1210 4740 emit_byte(0xBC);
twisti@1210 4741 emit_byte(0xC0 | encode);
twisti@1210 4742 }
twisti@1210 4743
twisti@1210 4744 void Assembler::bsrq(Register dst, Register src) {
twisti@1210 4745 assert(!VM_Version::supports_lzcnt(), "encoding is treated as LZCNT");
twisti@1210 4746 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
twisti@1210 4747 emit_byte(0x0F);
twisti@1210 4748 emit_byte(0xBD);
twisti@1210 4749 emit_byte(0xC0 | encode);
twisti@1210 4750 }
twisti@1210 4751
never@739 4752 void Assembler::bswapq(Register reg) {
never@739 4753 int encode = prefixq_and_encode(reg->encoding());
never@739 4754 emit_byte(0x0F);
never@739 4755 emit_byte(0xC8 | encode);
never@739 4756 }
never@739 4757
never@739 4758 void Assembler::cdqq() {
never@739 4759 prefix(REX_W);
never@739 4760 emit_byte(0x99);
never@739 4761 }
never@739 4762
never@739 4763 void Assembler::clflush(Address adr) {
never@739 4764 prefix(adr);
never@739 4765 emit_byte(0x0F);
never@739 4766 emit_byte(0xAE);
never@739 4767 emit_operand(rdi, adr);
never@739 4768 }
never@739 4769
never@739 4770 void Assembler::cmovq(Condition cc, Register dst, Register src) {
never@739 4771 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
never@739 4772 emit_byte(0x0F);
never@739 4773 emit_byte(0x40 | cc);
never@739 4774 emit_byte(0xC0 | encode);
never@739 4775 }
never@739 4776
never@739 4777 void Assembler::cmovq(Condition cc, Register dst, Address src) {
never@739 4778 InstructionMark im(this);
never@739 4779 prefixq(src, dst);
never@739 4780 emit_byte(0x0F);
never@739 4781 emit_byte(0x40 | cc);
never@739 4782 emit_operand(dst, src);
never@739 4783 }
never@739 4784
never@739 4785 void Assembler::cmpq(Address dst, int32_t imm32) {
never@739 4786 InstructionMark im(this);
never@739 4787 prefixq(dst);
never@739 4788 emit_byte(0x81);
never@739 4789 emit_operand(rdi, dst, 4);
never@739 4790 emit_long(imm32);
never@739 4791 }
never@739 4792
never@739 4793 void Assembler::cmpq(Register dst, int32_t imm32) {
never@739 4794 (void) prefixq_and_encode(dst->encoding());
never@739 4795 emit_arith(0x81, 0xF8, dst, imm32);
never@739 4796 }
never@739 4797
never@739 4798 void Assembler::cmpq(Address dst, Register src) {
never@739 4799 InstructionMark im(this);
never@739 4800 prefixq(dst, src);
never@739 4801 emit_byte(0x3B);
never@739 4802 emit_operand(src, dst);
never@739 4803 }
never@739 4804
never@739 4805 void Assembler::cmpq(Register dst, Register src) {
never@739 4806 (void) prefixq_and_encode(dst->encoding(), src->encoding());
never@739 4807 emit_arith(0x3B, 0xC0, dst, src);
never@739 4808 }
never@739 4809
never@739 4810 void Assembler::cmpq(Register dst, Address src) {
never@739 4811 InstructionMark im(this);
never@739 4812 prefixq(src, dst);
never@739 4813 emit_byte(0x3B);
never@739 4814 emit_operand(dst, src);
never@739 4815 }
never@739 4816
never@739 4817 void Assembler::cmpxchgq(Register reg, Address adr) {
never@739 4818 InstructionMark im(this);
never@739 4819 prefixq(adr, reg);
never@739 4820 emit_byte(0x0F);
never@739 4821 emit_byte(0xB1);
never@739 4822 emit_operand(reg, adr);
never@739 4823 }
never@739 4824
never@739 4825 void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
never@739 4826 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@3388 4827 int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F2);
never@739 4828 emit_byte(0x2A);
never@739 4829 emit_byte(0xC0 | encode);
never@739 4830 }
never@739 4831
kvn@3388 4832 void Assembler::cvtsi2sdq(XMMRegister dst, Address src) {
kvn@3388 4833 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@3388 4834 InstructionMark im(this);
kvn@3388 4835 simd_prefix_q(dst, dst, src, VEX_SIMD_F2);
kvn@3388 4836 emit_byte(0x2A);
kvn@3388 4837 emit_operand(dst, src);
kvn@3388 4838 }
kvn@3388 4839
never@739 4840 void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
never@739 4841 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@3388 4842 int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F3);
never@739 4843 emit_byte(0x2A);
never@739 4844 emit_byte(0xC0 | encode);
never@739 4845 }
never@739 4846
kvn@3388 4847 void Assembler::cvtsi2ssq(XMMRegister dst, Address src) {
kvn@3388 4848 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@3388 4849 InstructionMark im(this);
kvn@3388 4850 simd_prefix_q(dst, dst, src, VEX_SIMD_F3);
kvn@3388 4851 emit_byte(0x2A);
kvn@3388 4852 emit_operand(dst, src);
kvn@3388 4853 }
kvn@3388 4854
never@739 4855 void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
never@739 4856 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@3388 4857 int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F2);
never@739 4858 emit_byte(0x2C);
never@739 4859 emit_byte(0xC0 | encode);
never@739 4860 }
never@739 4861
never@739 4862 void Assembler::cvttss2siq(Register dst, XMMRegister src) {
never@739 4863 NOT_LP64(assert(VM_Version::supports_sse(), ""));
kvn@3388 4864 int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F3);
never@739 4865 emit_byte(0x2C);
never@739 4866 emit_byte(0xC0 | encode);
never@739 4867 }
never@739 4868
never@739 4869 void Assembler::decl(Register dst) {
never@739 4870 // Don't use it directly. Use MacroAssembler::decrementl() instead.
never@739 4871 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
never@739 4872 int encode = prefix_and_encode(dst->encoding());
never@739 4873 emit_byte(0xFF);
never@739 4874 emit_byte(0xC8 | encode);
never@739 4875 }
never@739 4876
never@739 4877 void Assembler::decq(Register dst) {
never@739 4878 // Don't use it directly. Use MacroAssembler::decrementq() instead.
never@739 4879 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
never@739 4880 int encode = prefixq_and_encode(dst->encoding());
never@739 4881 emit_byte(0xFF);
never@739 4882 emit_byte(0xC8 | encode);
never@739 4883 }
never@739 4884
never@739 4885 void Assembler::decq(Address dst) {
never@739 4886 // Don't use it directly. Use MacroAssembler::decrementq() instead.
never@739 4887 InstructionMark im(this);
never@739 4888 prefixq(dst);
never@739 4889 emit_byte(0xFF);
never@739 4890 emit_operand(rcx, dst);
never@739 4891 }
never@739 4892
never@739 4893 void Assembler::fxrstor(Address src) {
never@739 4894 prefixq(src);
never@739 4895 emit_byte(0x0F);
never@739 4896 emit_byte(0xAE);
never@739 4897 emit_operand(as_Register(1), src);
never@739 4898 }
never@739 4899
never@739 4900 void Assembler::fxsave(Address dst) {
never@739 4901 prefixq(dst);
never@739 4902 emit_byte(0x0F);
never@739 4903 emit_byte(0xAE);
never@739 4904 emit_operand(as_Register(0), dst);
never@739 4905 }
never@739 4906
never@739 4907 void Assembler::idivq(Register src) {
never@739 4908 int encode = prefixq_and_encode(src->encoding());
never@739 4909 emit_byte(0xF7);
never@739 4910 emit_byte(0xF8 | encode);
never@739 4911 }
never@739 4912
never@739 4913 void Assembler::imulq(Register dst, Register src) {
never@739 4914 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
never@739 4915 emit_byte(0x0F);
never@739 4916 emit_byte(0xAF);
never@739 4917 emit_byte(0xC0 | encode);
never@739 4918 }
never@739 4919
never@739 4920 void Assembler::imulq(Register dst, Register src, int value) {
never@739 4921 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
never@739 4922 if (is8bit(value)) {
never@739 4923 emit_byte(0x6B);
never@739 4924 emit_byte(0xC0 | encode);
kvn@2269 4925 emit_byte(value & 0xFF);
never@739 4926 } else {
never@739 4927 emit_byte(0x69);
never@739 4928 emit_byte(0xC0 | encode);
never@739 4929 emit_long(value);
never@739 4930 }
never@739 4931 }
never@739 4932
never@739 4933 void Assembler::incl(Register dst) {
never@739 4934 // Don't use it directly. Use MacroAssembler::incrementl() instead.
never@739 4935 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
never@739 4936 int encode = prefix_and_encode(dst->encoding());
never@739 4937 emit_byte(0xFF);
never@739 4938 emit_byte(0xC0 | encode);
never@739 4939 }
never@739 4940
never@739 4941 void Assembler::incq(Register dst) {
never@739 4942 // Don't use it directly. Use MacroAssembler::incrementq() instead.
never@739 4943 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
never@739 4944 int encode = prefixq_and_encode(dst->encoding());
never@739 4945 emit_byte(0xFF);
never@739 4946 emit_byte(0xC0 | encode);
never@739 4947 }
never@739 4948
never@739 4949 void Assembler::incq(Address dst) {
never@739 4950 // Don't use it directly. Use MacroAssembler::incrementq() instead.
never@739 4951 InstructionMark im(this);
never@739 4952 prefixq(dst);
never@739 4953 emit_byte(0xFF);
never@739 4954 emit_operand(rax, dst);
never@739 4955 }
never@739 4956
never@739 4957 void Assembler::lea(Register dst, Address src) {
never@739 4958 leaq(dst, src);
never@739 4959 }
never@739 4960
never@739 4961 void Assembler::leaq(Register dst, Address src) {
never@739 4962 InstructionMark im(this);
never@739 4963 prefixq(src, dst);
never@739 4964 emit_byte(0x8D);
never@739 4965 emit_operand(dst, src);
never@739 4966 }
never@739 4967
never@739 4968 void Assembler::mov64(Register dst, int64_t imm64) {
never@739 4969 InstructionMark im(this);
never@739 4970 int encode = prefixq_and_encode(dst->encoding());
never@739 4971 emit_byte(0xB8 | encode);
twisti@4317 4972 emit_int64(imm64);
never@739 4973 }
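// Note (added for clarity): REX.W + B8+rd is the one form that carries a full
// 64-bit immediate (movabs in AT&T syntax); mov_literal64 below uses the same
// encoding but routes the payload through emit_data64 so it can be relocated.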
never@739 4974
never@739 4975 void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
never@739 4976 InstructionMark im(this);
never@739 4977 int encode = prefixq_and_encode(dst->encoding());
never@739 4978 emit_byte(0xB8 | encode);
never@739 4979 emit_data64(imm64, rspec);
never@739 4980 }
never@739 4981
kvn@1077 4982 void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
kvn@1077 4983 InstructionMark im(this);
kvn@1077 4984 int encode = prefix_and_encode(dst->encoding());
kvn@1077 4985 emit_byte(0xB8 | encode);
kvn@1077 4986 emit_data((int)imm32, rspec, narrow_oop_operand);
kvn@1077 4987 }
kvn@1077 4988
kvn@1077 4989 void Assembler::mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec) {
kvn@1077 4990 InstructionMark im(this);
kvn@1077 4991 prefix(dst);
kvn@1077 4992 emit_byte(0xC7);
kvn@1077 4993 emit_operand(rax, dst, 4);
kvn@1077 4994 emit_data((int)imm32, rspec, narrow_oop_operand);
kvn@1077 4995 }
kvn@1077 4996
kvn@1077 4997 void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) {
kvn@1077 4998 InstructionMark im(this);
kvn@1077 4999 int encode = prefix_and_encode(src1->encoding());
kvn@1077 5000 emit_byte(0x81);
kvn@1077 5001 emit_byte(0xF8 | encode);
kvn@1077 5002 emit_data((int)imm32, rspec, narrow_oop_operand);
kvn@1077 5003 }
kvn@1077 5004
kvn@1077 5005 void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) {
kvn@1077 5006 InstructionMark im(this);
kvn@1077 5007 prefix(src1);
kvn@1077 5008 emit_byte(0x81);
kvn@1077 5009 emit_operand(rax, src1, 4);
kvn@1077 5010 emit_data((int)imm32, rspec, narrow_oop_operand);
kvn@1077 5011 }
kvn@1077 5012
twisti@1210 5013 void Assembler::lzcntq(Register dst, Register src) {
twisti@1210 5014 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
twisti@1210 5015 emit_byte(0xF3);
twisti@1210 5016 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
twisti@1210 5017 emit_byte(0x0F);
twisti@1210 5018 emit_byte(0xBD);
twisti@1210 5019 emit_byte(0xC0 | encode);
twisti@1210 5020 }
twisti@1210 5021
never@739 5022 void Assembler::movdq(XMMRegister dst, Register src) {
never@739 5023 // table D-1 says MMX/SSE2
kvn@3388 5024 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
kvn@3388 5025 int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_66);
never@739 5026 emit_byte(0x6E);
never@739 5027 emit_byte(0xC0 | encode);
never@739 5028 }
never@739 5029
never@739 5030 void Assembler::movdq(Register dst, XMMRegister src) {
never@739 5031 // table D-1 says MMX/SSE2
kvn@3388 5032 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
never@739 5033 // swap src/dst to get correct prefix
kvn@3388 5034 int encode = simd_prefix_and_encode_q(src, dst, VEX_SIMD_66);
duke@435 5035 emit_byte(0x7E);
never@739 5036 emit_byte(0xC0 | encode);
never@739 5037 }
never@739 5038
never@739 5039 void Assembler::movq(Register dst, Register src) {
never@739 5040 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
never@739 5041 emit_byte(0x8B);
never@739 5042 emit_byte(0xC0 | encode);
never@739 5043 }
never@739 5044
never@739 5045 void Assembler::movq(Register dst, Address src) {
never@739 5046 InstructionMark im(this);
never@739 5047 prefixq(src, dst);
never@739 5048 emit_byte(0x8B);
never@739 5049 emit_operand(dst, src);
never@739 5050 }
never@739 5051
never@739 5052 void Assembler::movq(Address dst, Register src) {
never@739 5053 InstructionMark im(this);
never@739 5054 prefixq(dst, src);
never@739 5055 emit_byte(0x89);
never@739 5056 emit_operand(src, dst);
never@739 5057 }
never@739 5058
twisti@1059 5059 void Assembler::movsbq(Register dst, Address src) {
twisti@1059 5060 InstructionMark im(this);
twisti@1059 5061 prefixq(src, dst);
twisti@1059 5062 emit_byte(0x0F);
twisti@1059 5063 emit_byte(0xBE);
twisti@1059 5064 emit_operand(dst, src);
twisti@1059 5065 }
twisti@1059 5066
twisti@1059 5067 void Assembler::movsbq(Register dst, Register src) {
twisti@1059 5068 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
twisti@1059 5069 emit_byte(0x0F);
twisti@1059 5070 emit_byte(0xBE);
twisti@1059 5071 emit_byte(0xC0 | encode);
twisti@1059 5072 }
twisti@1059 5073
never@739 5074 void Assembler::movslq(Register dst, int32_t imm32) {
never@739 5075 // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx)
never@739 5076 // and movslq(r8, 3); as movl $0x0000000048000000,(%rbx)
never@739 5077 // as a result we shouldn't use this until it has been tested at runtime...
never@739 5078 ShouldNotReachHere();
never@739 5079 InstructionMark im(this);
never@739 5080 int encode = prefixq_and_encode(dst->encoding());
never@739 5081 emit_byte(0xC7 | encode);
never@739 5082 emit_long(imm32);
never@739 5083 }
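// Note (added for clarity): the guard above appears to exist because the encoding
// looks suspect -- 0xC7 needs a separate ModRM byte (0xC7 followed by 0xC0 | encode)
// rather than being OR'ed with the register encoding -- which would explain the odd
// dbx disassembly quoted in the comment.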
never@739 5084
never@739 5085 void Assembler::movslq(Address dst, int32_t imm32) {
never@739 5086 assert(is_simm32(imm32), "lost bits");
never@739 5087 InstructionMark im(this);
never@739 5088 prefixq(dst);
never@739 5089 emit_byte(0xC7);
never@739 5090 emit_operand(rax, dst, 4);
never@739 5091 emit_long(imm32);
never@739 5092 }
never@739 5093
never@739 5094 void Assembler::movslq(Register dst, Address src) {
never@739 5095 InstructionMark im(this);
never@739 5096 prefixq(src, dst);
never@739 5097 emit_byte(0x63);
never@739 5098 emit_operand(dst, src);
never@739 5099 }
never@739 5100
never@739 5101 void Assembler::movslq(Register dst, Register src) {
never@739 5102 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
never@739 5103 emit_byte(0x63);
never@739 5104 emit_byte(0xC0 | encode);
never@739 5105 }
never@739 5106
twisti@1059 5107 void Assembler::movswq(Register dst, Address src) {
twisti@1059 5108 InstructionMark im(this);
twisti@1059 5109 prefixq(src, dst);
twisti@1059 5110 emit_byte(0x0F);
twisti@1059 5111 emit_byte(0xBF);
twisti@1059 5112 emit_operand(dst, src);
twisti@1059 5113 }
twisti@1059 5114
twisti@1059 5115 void Assembler::movswq(Register dst, Register src) {
twisti@1059 5116 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
twisti@1059 5117 emit_byte(0x0F);
twisti@1059 5118 emit_byte(0xBF);
twisti@1059 5119 emit_byte(0xC0 | encode);
twisti@1059 5120 }
twisti@1059 5121
twisti@1059 5122 void Assembler::movzbq(Register dst, Address src) {
twisti@1059 5123 InstructionMark im(this);
twisti@1059 5124 prefixq(src, dst);
twisti@1059 5125 emit_byte(0x0F);
twisti@1059 5126 emit_byte(0xB6);
twisti@1059 5127 emit_operand(dst, src);
twisti@1059 5128 }
twisti@1059 5129
twisti@1059 5130 void Assembler::movzbq(Register dst, Register src) {
twisti@1059 5131 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
twisti@1059 5132 emit_byte(0x0F);
twisti@1059 5133 emit_byte(0xB6);
twisti@1059 5134 emit_byte(0xC0 | encode);
twisti@1059 5135 }
twisti@1059 5136
twisti@1059 5137 void Assembler::movzwq(Register dst, Address src) {
twisti@1059 5138 InstructionMark im(this);
twisti@1059 5139 prefixq(src, dst);
twisti@1059 5140 emit_byte(0x0F);
twisti@1059 5141 emit_byte(0xB7);
twisti@1059 5142 emit_operand(dst, src);
twisti@1059 5143 }
twisti@1059 5144
twisti@1059 5145 void Assembler::movzwq(Register dst, Register src) {
twisti@1059 5146 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
twisti@1059 5147 emit_byte(0x0F);
twisti@1059 5148 emit_byte(0xB7);
twisti@1059 5149 emit_byte(0xC0 | encode);
twisti@1059 5150 }
twisti@1059 5151
never@739 5152 void Assembler::negq(Register dst) {
never@739 5153 int encode = prefixq_and_encode(dst->encoding());
never@739 5154 emit_byte(0xF7);
never@739 5155 emit_byte(0xD8 | encode);
never@739 5156 }
never@739 5157
never@739 5158 void Assembler::notq(Register dst) {
never@739 5159 int encode = prefixq_and_encode(dst->encoding());
never@739 5160 emit_byte(0xF7);
never@739 5161 emit_byte(0xD0 | encode);
never@739 5162 }
never@739 5163
never@739 5164 void Assembler::orq(Address dst, int32_t imm32) {
never@739 5165 InstructionMark im(this);
never@739 5166 prefixq(dst);
never@739 5167 emit_byte(0x81);
never@739 5168 emit_operand(rcx, dst, 4);
never@739 5169 emit_long(imm32);
never@739 5170 }
never@739 5171
never@739 5172 void Assembler::orq(Register dst, int32_t imm32) {
never@739 5173 (void) prefixq_and_encode(dst->encoding());
never@739 5174 emit_arith(0x81, 0xC8, dst, imm32);
never@739 5175 }
never@739 5176
never@739 5177 void Assembler::orq(Register dst, Address src) {
never@739 5178 InstructionMark im(this);
never@739 5179 prefixq(src, dst);
never@739 5180 emit_byte(0x0B);
never@739 5181 emit_operand(dst, src);
never@739 5182 }
never@739 5183
never@739 5184 void Assembler::orq(Register dst, Register src) {
never@739 5185 (void) prefixq_and_encode(dst->encoding(), src->encoding());
never@739 5186 emit_arith(0x0B, 0xC0, dst, src);
never@739 5187 }
never@739 5188
never@739 5189 void Assembler::popa() { // 64bit
never@739 5190 movq(r15, Address(rsp, 0));
never@739 5191 movq(r14, Address(rsp, wordSize));
never@739 5192 movq(r13, Address(rsp, 2 * wordSize));
never@739 5193 movq(r12, Address(rsp, 3 * wordSize));
never@739 5194 movq(r11, Address(rsp, 4 * wordSize));
never@739 5195 movq(r10, Address(rsp, 5 * wordSize));
never@739 5196 movq(r9, Address(rsp, 6 * wordSize));
never@739 5197 movq(r8, Address(rsp, 7 * wordSize));
never@739 5198 movq(rdi, Address(rsp, 8 * wordSize));
never@739 5199 movq(rsi, Address(rsp, 9 * wordSize));
never@739 5200 movq(rbp, Address(rsp, 10 * wordSize));
never@739 5201 // skip rsp
never@739 5202 movq(rbx, Address(rsp, 12 * wordSize));
never@739 5203 movq(rdx, Address(rsp, 13 * wordSize));
never@739 5204 movq(rcx, Address(rsp, 14 * wordSize));
never@739 5205 movq(rax, Address(rsp, 15 * wordSize));
never@739 5206
never@739 5207 addq(rsp, 16 * wordSize);
never@739 5208 }
never@739 5209
twisti@1078 5210 void Assembler::popcntq(Register dst, Address src) {
twisti@1078 5211 assert(VM_Version::supports_popcnt(), "must support");
twisti@1078 5212 InstructionMark im(this);
twisti@1078 5213 emit_byte(0xF3);
twisti@1078 5214 prefixq(src, dst);
twisti@1078 5215 emit_byte(0x0F);
twisti@1078 5216 emit_byte(0xB8);
twisti@1078 5217 emit_operand(dst, src);
twisti@1078 5218 }
twisti@1078 5219
twisti@1078 5220 void Assembler::popcntq(Register dst, Register src) {
twisti@1078 5221 assert(VM_Version::supports_popcnt(), "must support");
twisti@1078 5222 emit_byte(0xF3);
twisti@1078 5223 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
twisti@1078 5224 emit_byte(0x0F);
twisti@1078 5225 emit_byte(0xB8);
twisti@1078 5226 emit_byte(0xC0 | encode);
twisti@1078 5227 }
twisti@1078 5228
never@739 5229 void Assembler::popq(Address dst) {
never@739 5230 InstructionMark im(this);
never@739 5231 prefixq(dst);
never@739 5232 emit_byte(0x8F);
never@739 5233 emit_operand(rax, dst);
never@739 5234 }
never@739 5235
never@739 5236 void Assembler::pusha() { // 64bit
never@739 5237 // we have to store original rsp. ABI says that 128 bytes
never@739 5238 // below rsp are local scratch.
never@739 5239 movq(Address(rsp, -5 * wordSize), rsp);
never@739 5240
never@739 5241 subq(rsp, 16 * wordSize);
never@739 5242
never@739 5243 movq(Address(rsp, 15 * wordSize), rax);
never@739 5244 movq(Address(rsp, 14 * wordSize), rcx);
never@739 5245 movq(Address(rsp, 13 * wordSize), rdx);
never@739 5246 movq(Address(rsp, 12 * wordSize), rbx);
never@739 5247 // skip rsp
never@739 5248 movq(Address(rsp, 10 * wordSize), rbp);
never@739 5249 movq(Address(rsp, 9 * wordSize), rsi);
never@739 5250 movq(Address(rsp, 8 * wordSize), rdi);
never@739 5251 movq(Address(rsp, 7 * wordSize), r8);
never@739 5252 movq(Address(rsp, 6 * wordSize), r9);
never@739 5253 movq(Address(rsp, 5 * wordSize), r10);
never@739 5254 movq(Address(rsp, 4 * wordSize), r11);
never@739 5255 movq(Address(rsp, 3 * wordSize), r12);
never@739 5256 movq(Address(rsp, 2 * wordSize), r13);
never@739 5257 movq(Address(rsp, wordSize), r14);
never@739 5258 movq(Address(rsp, 0), r15);
never@739 5259 }
never@739 5260
never@739 5261 void Assembler::pushq(Address src) {
never@739 5262 InstructionMark im(this);
never@739 5263 prefixq(src);
never@739 5264 emit_byte(0xFF);
never@739 5265 emit_operand(rsi, src);
never@739 5266 }
never@739 5267
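// Note (added for clarity): the 64-bit shifts/rotates below assert
// isShiftCount(imm8 >> 1) -- halving the count -- presumably because isShiftCount()
// validates the 32-bit range 0..31 while the 64-bit forms accept counts up to 63.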
never@739 5268 void Assembler::rclq(Register dst, int imm8) {
never@739 5269 assert(isShiftCount(imm8 >> 1), "illegal shift count");
never@739 5270 int encode = prefixq_and_encode(dst->encoding());
never@739 5271 if (imm8 == 1) {
never@739 5272 emit_byte(0xD1);
never@739 5273 emit_byte(0xD0 | encode);
never@739 5274 } else {
never@739 5275 emit_byte(0xC1);
never@739 5276 emit_byte(0xD0 | encode);
never@739 5277 emit_byte(imm8);
never@739 5278 }
never@739 5279 }
never@739 5280 void Assembler::sarq(Register dst, int imm8) {
never@739 5281 assert(isShiftCount(imm8 >> 1), "illegal shift count");
never@739 5282 int encode = prefixq_and_encode(dst->encoding());
never@739 5283 if (imm8 == 1) {
never@739 5284 emit_byte(0xD1);
never@739 5285 emit_byte(0xF8 | encode);
never@739 5286 } else {
never@739 5287 emit_byte(0xC1);
never@739 5288 emit_byte(0xF8 | encode);
never@739 5289 emit_byte(imm8);
never@739 5290 }
never@739 5291 }
never@739 5292
never@739 5293 void Assembler::sarq(Register dst) {
never@739 5294 int encode = prefixq_and_encode(dst->encoding());
never@739 5295 emit_byte(0xD3);
never@739 5296 emit_byte(0xF8 | encode);
never@739 5297 }
phh@2423 5298
never@739 5299 void Assembler::sbbq(Address dst, int32_t imm32) {
never@739 5300 InstructionMark im(this);
never@739 5301 prefixq(dst);
never@739 5302 emit_arith_operand(0x81, rbx, dst, imm32);
never@739 5303 }
never@739 5304
never@739 5305 void Assembler::sbbq(Register dst, int32_t imm32) {
never@739 5306 (void) prefixq_and_encode(dst->encoding());
never@739 5307 emit_arith(0x81, 0xD8, dst, imm32);
never@739 5308 }
never@739 5309
never@739 5310 void Assembler::sbbq(Register dst, Address src) {
never@739 5311 InstructionMark im(this);
never@739 5312 prefixq(src, dst);
never@739 5313 emit_byte(0x1B);
never@739 5314 emit_operand(dst, src);
never@739 5315 }
never@739 5316
never@739 5317 void Assembler::sbbq(Register dst, Register src) {
never@739 5318 (void) prefixq_and_encode(dst->encoding(), src->encoding());
never@739 5319 emit_arith(0x1B, 0xC0, dst, src);
never@739 5320 }
never@739 5321
never@739 5322 void Assembler::shlq(Register dst, int imm8) {
never@739 5323 assert(isShiftCount(imm8 >> 1), "illegal shift count");
never@739 5324 int encode = prefixq_and_encode(dst->encoding());
never@739 5325 if (imm8 == 1) {
never@739 5326 emit_byte(0xD1);
never@739 5327 emit_byte(0xE0 | encode);
never@739 5328 } else {
never@739 5329 emit_byte(0xC1);
never@739 5330 emit_byte(0xE0 | encode);
never@739 5331 emit_byte(imm8);
never@739 5332 }
never@739 5333 }
never@739 5334
never@739 5335 void Assembler::shlq(Register dst) {
never@739 5336 int encode = prefixq_and_encode(dst->encoding());
never@739 5337 emit_byte(0xD3);
never@739 5338 emit_byte(0xE0 | encode);
never@739 5339 }
never@739 5340
never@739 5341 void Assembler::shrq(Register dst, int imm8) {
never@739 5342 assert(isShiftCount(imm8 >> 1), "illegal shift count");
never@739 5343 int encode = prefixq_and_encode(dst->encoding());
never@739 5344 emit_byte(0xC1);
never@739 5345 emit_byte(0xE8 | encode);
never@739 5346 emit_byte(imm8);
never@739 5347 }
never@739 5348
never@739 5349 void Assembler::shrq(Register dst) {
never@739 5350 int encode = prefixq_and_encode(dst->encoding());
never@739 5351 emit_byte(0xD3);
never@739 5352 emit_byte(0xE8 | encode);
never@739 5353 }
never@739 5354
never@739 5355 void Assembler::subq(Address dst, int32_t imm32) {
never@739 5356 InstructionMark im(this);
never@739 5357 prefixq(dst);
phh@2423 5358 emit_arith_operand(0x81, rbp, dst, imm32);
phh@2423 5359 }
phh@2423 5360
phh@2423 5361 void Assembler::subq(Address dst, Register src) {
phh@2423 5362 InstructionMark im(this);
phh@2423 5363 prefixq(dst, src);
phh@2423 5364 emit_byte(0x29);
phh@2423 5365 emit_operand(src, dst);
never@739 5366 }
never@739 5367
never@739 5368 void Assembler::subq(Register dst, int32_t imm32) {
never@739 5369 (void) prefixq_and_encode(dst->encoding());
never@739 5370 emit_arith(0x81, 0xE8, dst, imm32);
never@739 5371 }
never@739 5372
kvn@3574 5373 // Force generation of a 4 byte immediate value even if it fits into 8bit
kvn@3574 5374 void Assembler::subq_imm32(Register dst, int32_t imm32) {
kvn@3574 5375 (void) prefixq_and_encode(dst->encoding());
kvn@3574 5376 emit_arith_imm32(0x81, 0xE8, dst, imm32);
kvn@3574 5377 }
kvn@3574 5378
never@739 5379 void Assembler::subq(Register dst, Address src) {
never@739 5380 InstructionMark im(this);
never@739 5381 prefixq(src, dst);
never@739 5382 emit_byte(0x2B);
never@739 5383 emit_operand(dst, src);
never@739 5384 }
never@739 5385
never@739 5386 void Assembler::subq(Register dst, Register src) {
never@739 5387 (void) prefixq_and_encode(dst->encoding(), src->encoding());
never@739 5388 emit_arith(0x2B, 0xC0, dst, src);
never@739 5389 }
never@739 5390
never@739 5391 void Assembler::testq(Register dst, int32_t imm32) {
never@739 5392 // not using emit_arith because test
never@739 5393 // doesn't support sign-extension of
never@739 5394 // 8bit operands
never@739 5395 int encode = dst->encoding();
never@739 5396 if (encode == 0) {
never@739 5397 prefix(REX_W);
never@739 5398 emit_byte(0xA9);
never@739 5399 } else {
never@739 5400 encode = prefixq_and_encode(encode);
never@739 5401 emit_byte(0xF7);
never@739 5402 emit_byte(0xC0 | encode);
never@739 5403 }
never@739 5404 emit_long(imm32);
never@739 5405 }
never@739 5406
never@739 5407 void Assembler::testq(Register dst, Register src) {
never@739 5408 (void) prefixq_and_encode(dst->encoding(), src->encoding());
never@739 5409 emit_arith(0x85, 0xC0, dst, src);
never@739 5410 }
never@739 5411
never@739 5412 void Assembler::xaddq(Address dst, Register src) {
never@739 5413 InstructionMark im(this);
never@739 5414 prefixq(dst, src);
duke@435 5415 emit_byte(0x0F);
never@739 5416 emit_byte(0xC1);
never@739 5417 emit_operand(src, dst);
never@739 5418 }
never@739 5419
never@739 5420 void Assembler::xchgq(Register dst, Address src) {
never@739 5421 InstructionMark im(this);
never@739 5422 prefixq(src, dst);
never@739 5423 emit_byte(0x87);
never@739 5424 emit_operand(dst, src);
never@739 5425 }
never@739 5426
never@739 5427 void Assembler::xchgq(Register dst, Register src) {
never@739 5428 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
never@739 5429 emit_byte(0x87);
never@739 5430 emit_byte(0xC0 | encode);
never@739 5431 }
never@739 5432
never@739 5433 void Assembler::xorq(Register dst, Register src) {
never@739 5434 (void) prefixq_and_encode(dst->encoding(), src->encoding());
never@739 5435 emit_arith(0x33, 0xC0, dst, src);
never@739 5436 }
never@739 5437
never@739 5438 void Assembler::xorq(Register dst, Address src) {
never@739 5439 InstructionMark im(this);
never@739 5440 prefixq(src, dst);
never@739 5441 emit_byte(0x33);
never@739 5442 emit_operand(dst, src);
never@739 5443 }
never@739 5444
never@739 5445 #endif // !LP64
