src/cpu/x86/vm/assembler_x86.cpp

Tue, 02 Jul 2013 20:42:12 -0400

author:      drchase
date:        Tue, 02 Jul 2013 20:42:12 -0400
changeset:   5353 (b800986664f4)
parent:      4889 (cc32ccaaf47f)
child:       5797 (f2512d89ad0c)
permissions: -rw-r--r--

7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
Summary: add intrinsics using new instruction to interpreter, C1, C2, for suitable x86; add test
Reviewed-by: kvn, twisti

     1 /*
     2  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "asm/assembler.hpp"
    27 #include "asm/assembler.inline.hpp"
    28 #include "gc_interface/collectedHeap.inline.hpp"
    29 #include "interpreter/interpreter.hpp"
    30 #include "memory/cardTableModRefBS.hpp"
    31 #include "memory/resourceArea.hpp"
    32 #include "prims/methodHandles.hpp"
    33 #include "runtime/biasedLocking.hpp"
    34 #include "runtime/interfaceSupport.hpp"
    35 #include "runtime/objectMonitor.hpp"
    36 #include "runtime/os.hpp"
    37 #include "runtime/sharedRuntime.hpp"
    38 #include "runtime/stubRoutines.hpp"
    39 #include "utilities/macros.hpp"
    40 #if INCLUDE_ALL_GCS
    41 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    42 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
    43 #include "gc_implementation/g1/heapRegion.hpp"
    44 #endif // INCLUDE_ALL_GCS
    46 #ifdef PRODUCT
    47 #define BLOCK_COMMENT(str) /* nothing */
    48 #define STOP(error) stop(error)
    49 #else
    50 #define BLOCK_COMMENT(str) block_comment(str)
    51 #define STOP(error) block_comment(error); stop(error)
    52 #endif
    54 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
    55 // Implementation of AddressLiteral
    57 AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
    58   _is_lval = false;
    59   _target = target;
    60   switch (rtype) {
    61   case relocInfo::oop_type:
    62   case relocInfo::metadata_type:
    63     // Oops are a special case. Normally they would be their own section
    64     // but in cases like icBuffer they are literals in the code stream that
    65     // we don't have a section for. We use none so that we get a literal address
    66     // which is always patchable.
    67     break;
    68   case relocInfo::external_word_type:
    69     _rspec = external_word_Relocation::spec(target);
    70     break;
    71   case relocInfo::internal_word_type:
    72     _rspec = internal_word_Relocation::spec(target);
    73     break;
    74   case relocInfo::opt_virtual_call_type:
    75     _rspec = opt_virtual_call_Relocation::spec();
    76     break;
    77   case relocInfo::static_call_type:
    78     _rspec = static_call_Relocation::spec();
    79     break;
    80   case relocInfo::runtime_call_type:
    81     _rspec = runtime_call_Relocation::spec();
    82     break;
    83   case relocInfo::poll_type:
    84   case relocInfo::poll_return_type:
    85     _rspec = Relocation::spec_simple(rtype);
    86     break;
    87   case relocInfo::none:
    88     break;
    89   default:
    90     ShouldNotReachHere();
    91     break;
    92   }
    93 }
    95 // Implementation of Address
    97 #ifdef _LP64
    99 Address Address::make_array(ArrayAddress adr) {
   100   // Not implementable on 64bit machines
   101   // Should have been handled higher up the call chain.
   102   ShouldNotReachHere();
   103   return Address();
   104 }
   106 // exceedingly dangerous constructor
   107 Address::Address(int disp, address loc, relocInfo::relocType rtype) {
   108   _base  = noreg;
   109   _index = noreg;
   110   _scale = no_scale;
   111   _disp  = disp;
   112   switch (rtype) {
   113     case relocInfo::external_word_type:
   114       _rspec = external_word_Relocation::spec(loc);
   115       break;
   116     case relocInfo::internal_word_type:
   117       _rspec = internal_word_Relocation::spec(loc);
   118       break;
   119     case relocInfo::runtime_call_type:
   120       // HMM
   121       _rspec = runtime_call_Relocation::spec();
   122       break;
   123     case relocInfo::poll_type:
   124     case relocInfo::poll_return_type:
   125       _rspec = Relocation::spec_simple(rtype);
   126       break;
   127     case relocInfo::none:
   128       break;
   129     default:
   130       ShouldNotReachHere();
   131   }
   132 }
   133 #else // LP64
   135 Address Address::make_array(ArrayAddress adr) {
   136   AddressLiteral base = adr.base();
   137   Address index = adr.index();
   138   assert(index._disp == 0, "must not have disp"); // maybe it can?
   139   Address array(index._base, index._index, index._scale, (intptr_t) base.target());
   140   array._rspec = base._rspec;
   141   return array;
   142 }
   144 // exceedingly dangerous constructor
   145 Address::Address(address loc, RelocationHolder spec) {
   146   _base  = noreg;
   147   _index = noreg;
   148   _scale = no_scale;
   149   _disp  = (intptr_t) loc;
   150   _rspec = spec;
   151 }
   153 #endif // _LP64
   157 // Convert the raw encoding form into the form expected by the constructor for
   158 // Address.  An index of 4 (rsp) corresponds to having no index, so convert
   159 // that to noreg for the Address constructor.
   160 Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
   161   RelocationHolder rspec;
   162   if (disp_reloc != relocInfo::none) {
   163     rspec = Relocation::spec_simple(disp_reloc);
   164   }
   165   bool valid_index = index != rsp->encoding();
   166   if (valid_index) {
   167     Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
   168     madr._rspec = rspec;
   169     return madr;
   170   } else {
   171     Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
   172     madr._rspec = rspec;
   173     return madr;
   174   }
   175 }
   177 // Implementation of Assembler
   179 int AbstractAssembler::code_fill_byte() {
   180   return (u_char)'\xF4'; // hlt
   181 }
   183 // make this go away someday
   184 void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
   185   if (rtype == relocInfo::none)
   186         emit_int32(data);
   187   else  emit_data(data, Relocation::spec_simple(rtype), format);
   188 }
   190 void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
   191   assert(imm_operand == 0, "default format must be immediate in this file");
   192   assert(inst_mark() != NULL, "must be inside InstructionMark");
   193   if (rspec.type() !=  relocInfo::none) {
   194     #ifdef ASSERT
   195       check_relocation(rspec, format);
   196     #endif
   197     // Do not use AbstractAssembler::relocate, which is not intended for
   198     // embedded words.  Instead, relocate to the enclosing instruction.
   200     // hack. call32 is too wide for mask so use disp32
   201     if (format == call32_operand)
   202       code_section()->relocate(inst_mark(), rspec, disp32_operand);
   203     else
   204       code_section()->relocate(inst_mark(), rspec, format);
   205   }
   206   emit_int32(data);
   207 }
   209 static int encode(Register r) {
   210   int enc = r->encoding();
   211   if (enc >= 8) {
   212     enc -= 8;
   213   }
   214   return enc;
   215 }
   217 void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
   218   assert(dst->has_byte_register(), "must have byte register");
   219   assert(isByte(op1) && isByte(op2), "wrong opcode");
   220   assert(isByte(imm8), "not a byte");
   221   assert((op1 & 0x01) == 0, "should be 8bit operation");
   222   emit_int8(op1);
   223   emit_int8(op2 | encode(dst));
   224   emit_int8(imm8);
   225 }
   228 void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
   229   assert(isByte(op1) && isByte(op2), "wrong opcode");
   230   assert((op1 & 0x01) == 1, "should be 32bit operation");
   231   assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
   232   if (is8bit(imm32)) {
   233     emit_int8(op1 | 0x02); // set sign bit
   234     emit_int8(op2 | encode(dst));
   235     emit_int8(imm32 & 0xFF);
   236   } else {
   237     emit_int8(op1);
   238     emit_int8(op2 | encode(dst));
   239     emit_int32(imm32);
   240   }
   241 }
   243 // Force generation of a 4 byte immediate value even if it fits into 8bit
   244 void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
   245   assert(isByte(op1) && isByte(op2), "wrong opcode");
   246   assert((op1 & 0x01) == 1, "should be 32bit operation");
   247   assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
   248   emit_int8(op1);
   249   emit_int8(op2 | encode(dst));
   250   emit_int32(imm32);
   251 }
   253 // immediate-to-memory forms
   254 void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
   255   assert((op1 & 0x01) == 1, "should be 32bit operation");
   256   assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
   257   if (is8bit(imm32)) {
   258     emit_int8(op1 | 0x02); // set sign bit
   259     emit_operand(rm, adr, 1);
   260     emit_int8(imm32 & 0xFF);
   261   } else {
   262     emit_int8(op1);
   263     emit_operand(rm, adr, 4);
   264     emit_int32(imm32);
   265   }
   266 }
   269 void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
   270   assert(isByte(op1) && isByte(op2), "wrong opcode");
   271   emit_int8(op1);
   272   emit_int8(op2 | encode(dst) << 3 | encode(src));
   273 }
   276 void Assembler::emit_operand(Register reg, Register base, Register index,
   277                              Address::ScaleFactor scale, int disp,
   278                              RelocationHolder const& rspec,
   279                              int rip_relative_correction) {
   280   relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();
   282   // Encode the registers as needed in the fields they are used in
   284   int regenc = encode(reg) << 3;
   285   int indexenc = index->is_valid() ? encode(index) << 3 : 0;
   286   int baseenc = base->is_valid() ? encode(base) : 0;
   288   if (base->is_valid()) {
   289     if (index->is_valid()) {
   290       assert(scale != Address::no_scale, "inconsistent address");
   291       // [base + index*scale + disp]
   292       if (disp == 0 && rtype == relocInfo::none  &&
   293           base != rbp LP64_ONLY(&& base != r13)) {
   294         // [base + index*scale]
   295         // [00 reg 100][ss index base]
   296         assert(index != rsp, "illegal addressing mode");
   297         emit_int8(0x04 | regenc);
   298         emit_int8(scale << 6 | indexenc | baseenc);
   299       } else if (is8bit(disp) && rtype == relocInfo::none) {
   300         // [base + index*scale + imm8]
   301         // [01 reg 100][ss index base] imm8
   302         assert(index != rsp, "illegal addressing mode");
   303         emit_int8(0x44 | regenc);
   304         emit_int8(scale << 6 | indexenc | baseenc);
   305         emit_int8(disp & 0xFF);
   306       } else {
   307         // [base + index*scale + disp32]
   308         // [10 reg 100][ss index base] disp32
   309         assert(index != rsp, "illegal addressing mode");
   310         emit_int8(0x84 | regenc);
   311         emit_int8(scale << 6 | indexenc | baseenc);
   312         emit_data(disp, rspec, disp32_operand);
   313       }
   314     } else if (base == rsp LP64_ONLY(|| base == r12)) {
   315       // [rsp + disp]
   316       if (disp == 0 && rtype == relocInfo::none) {
   317         // [rsp]
   318         // [00 reg 100][00 100 100]
   319         emit_int8(0x04 | regenc);
   320         emit_int8(0x24);
   321       } else if (is8bit(disp) && rtype == relocInfo::none) {
   322         // [rsp + imm8]
   323         // [01 reg 100][00 100 100] disp8
   324         emit_int8(0x44 | regenc);
   325         emit_int8(0x24);
   326         emit_int8(disp & 0xFF);
   327       } else {
   328         // [rsp + imm32]
   329         // [10 reg 100][00 100 100] disp32
   330         emit_int8(0x84 | regenc);
   331         emit_int8(0x24);
   332         emit_data(disp, rspec, disp32_operand);
   333       }
   334     } else {
   335       // [base + disp]
   336       assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode");
   337       if (disp == 0 && rtype == relocInfo::none &&
   338           base != rbp LP64_ONLY(&& base != r13)) {
   339         // [base]
   340         // [00 reg base]
   341         emit_int8(0x00 | regenc | baseenc);
   342       } else if (is8bit(disp) && rtype == relocInfo::none) {
   343         // [base + disp8]
   344         // [01 reg base] disp8
   345         emit_int8(0x40 | regenc | baseenc);
   346         emit_int8(disp & 0xFF);
   347       } else {
   348         // [base + disp32]
   349         // [10 reg base] disp32
   350         emit_int8(0x80 | regenc | baseenc);
   351         emit_data(disp, rspec, disp32_operand);
   352       }
   353     }
   354   } else {
   355     if (index->is_valid()) {
   356       assert(scale != Address::no_scale, "inconsistent address");
   357       // [index*scale + disp]
   358       // [00 reg 100][ss index 101] disp32
   359       assert(index != rsp, "illegal addressing mode");
   360       emit_int8(0x04 | regenc);
   361       emit_int8(scale << 6 | indexenc | 0x05);
   362       emit_data(disp, rspec, disp32_operand);
   363     } else if (rtype != relocInfo::none ) {
   364       // [disp] (64bit) RIP-RELATIVE (32bit) abs
   365       // [00 000 101] disp32
   367       emit_int8(0x05 | regenc);
   368       // Note that the RIP-rel. correction applies to the generated
   369       // disp field, but _not_ to the target address in the rspec.
   371       // disp was created by converting the target address minus the pc
   372       // at the start of the instruction. That needs more correction here.
   373       // intptr_t disp = target - next_ip;
   374       assert(inst_mark() != NULL, "must be inside InstructionMark");
   375       address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
   376       int64_t adjusted = disp;
   377       // Do rip-rel adjustment for 64bit
   378       LP64_ONLY(adjusted -=  (next_ip - inst_mark()));
   379       assert(is_simm32(adjusted),
   380              "must be 32bit offset (RIP relative address)");
   381       emit_data((int32_t) adjusted, rspec, disp32_operand);
   383     } else {
   384       // 32bit never did this, did everything as the rip-rel/disp code above
   385       // [disp] ABSOLUTE
   386       // [00 reg 100][00 100 101] disp32
   387       emit_int8(0x04 | regenc);
   388       emit_int8(0x25);
   389       emit_data(disp, rspec, disp32_operand);
   390     }
   391   }
   392 }
   394 void Assembler::emit_operand(XMMRegister reg, Register base, Register index,
   395                              Address::ScaleFactor scale, int disp,
   396                              RelocationHolder const& rspec) {
   397   emit_operand((Register)reg, base, index, scale, disp, rspec);
   398 }
   400 // Secret local extension to Assembler::WhichOperand:
   401 #define end_pc_operand (_WhichOperand_limit)
   403 address Assembler::locate_operand(address inst, WhichOperand which) {
   404   // Decode the given instruction, and return the address of
   405   // an embedded 32-bit operand word.
   407   // If "which" is disp32_operand, selects the displacement portion
   408   // of an effective address specifier.
   409   // If "which" is imm64_operand, selects the trailing immediate constant.
   410   // If "which" is call32_operand, selects the displacement of a call or jump.
   411   // Caller is responsible for ensuring that there is such an operand,
   412   // and that it is 32/64 bits wide.
   414   // If "which" is end_pc_operand, find the end of the instruction.
   416   address ip = inst;
   417   bool is_64bit = false;
   419   debug_only(bool has_disp32 = false);
   420   int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn
   422   again_after_prefix:
   423   switch (0xFF & *ip++) {
   425   // These convenience macros generate groups of "case" labels for the switch.
   426 #define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
   427 #define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
   428              case (x)+4: case (x)+5: case (x)+6: case (x)+7
   429 #define REP16(x) REP8((x)+0): \
   430               case REP8((x)+8)
   432   case CS_segment:
   433   case SS_segment:
   434   case DS_segment:
   435   case ES_segment:
   436   case FS_segment:
   437   case GS_segment:
   438     // Seems dubious
   439     LP64_ONLY(assert(false, "shouldn't have that prefix"));
   440     assert(ip == inst+1, "only one prefix allowed");
   441     goto again_after_prefix;
   443   case 0x67:
   444   case REX:
   445   case REX_B:
   446   case REX_X:
   447   case REX_XB:
   448   case REX_R:
   449   case REX_RB:
   450   case REX_RX:
   451   case REX_RXB:
   452     NOT_LP64(assert(false, "64bit prefixes"));
   453     goto again_after_prefix;
   455   case REX_W:
   456   case REX_WB:
   457   case REX_WX:
   458   case REX_WXB:
   459   case REX_WR:
   460   case REX_WRB:
   461   case REX_WRX:
   462   case REX_WRXB:
   463     NOT_LP64(assert(false, "64bit prefixes"));
   464     is_64bit = true;
   465     goto again_after_prefix;
   467   case 0xFF: // pushq a; decl a; incl a; call a; jmp a
   468   case 0x88: // movb a, r
   469   case 0x89: // movl a, r
   470   case 0x8A: // movb r, a
   471   case 0x8B: // movl r, a
   472   case 0x8F: // popl a
   473     debug_only(has_disp32 = true);
   474     break;
   476   case 0x68: // pushq #32
   477     if (which == end_pc_operand) {
   478       return ip + 4;
   479     }
   480     assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
   481     return ip;                  // not produced by emit_operand
   483   case 0x66: // movw ... (size prefix)
   484     again_after_size_prefix2:
   485     switch (0xFF & *ip++) {
   486     case REX:
   487     case REX_B:
   488     case REX_X:
   489     case REX_XB:
   490     case REX_R:
   491     case REX_RB:
   492     case REX_RX:
   493     case REX_RXB:
   494     case REX_W:
   495     case REX_WB:
   496     case REX_WX:
   497     case REX_WXB:
   498     case REX_WR:
   499     case REX_WRB:
   500     case REX_WRX:
   501     case REX_WRXB:
   502       NOT_LP64(assert(false, "64bit prefix found"));
   503       goto again_after_size_prefix2;
   504     case 0x8B: // movw r, a
   505     case 0x89: // movw a, r
   506       debug_only(has_disp32 = true);
   507       break;
   508     case 0xC7: // movw a, #16
   509       debug_only(has_disp32 = true);
   510       tail_size = 2;  // the imm16
   511       break;
   512     case 0x0F: // several SSE/SSE2 variants
   513       ip--;    // reparse the 0x0F
   514       goto again_after_prefix;
   515     default:
   516       ShouldNotReachHere();
   517     }
   518     break;
   520   case REP8(0xB8): // movl/q r, #32/#64(oop?)
   521     if (which == end_pc_operand)  return ip + (is_64bit ? 8 : 4);
   522     // these asserts are somewhat nonsensical
   523 #ifndef _LP64
   524     assert(which == imm_operand || which == disp32_operand,
   525            err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, ip));
   526 #else
   527     assert((which == call32_operand || which == imm_operand) && is_64bit ||
   528            which == narrow_oop_operand && !is_64bit,
   529            err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, ip));
   530 #endif // _LP64
   531     return ip;
   533   case 0x69: // imul r, a, #32
   534   case 0xC7: // movl a, #32(oop?)
   535     tail_size = 4;
   536     debug_only(has_disp32 = true); // has both kinds of operands!
   537     break;
   539   case 0x0F: // movx..., etc.
   540     switch (0xFF & *ip++) {
   541     case 0x3A: // pcmpestri
   542       tail_size = 1;
   543     case 0x38: // ptest, pmovzxbw
   544       ip++; // skip opcode
   545       debug_only(has_disp32 = true); // has both kinds of operands!
   546       break;
   548     case 0x70: // pshufd r, r/a, #8
   549       debug_only(has_disp32 = true); // has both kinds of operands!
   550     case 0x73: // psrldq r, #8
   551       tail_size = 1;
   552       break;
   554     case 0x12: // movlps
   555     case 0x28: // movaps
   556     case 0x2E: // ucomiss
   557     case 0x2F: // comiss
   558     case 0x54: // andps
   559     case 0x55: // andnps
   560     case 0x56: // orps
   561     case 0x57: // xorps
   562     case 0x6E: // movd
   563     case 0x7E: // movd
   564     case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
   565       debug_only(has_disp32 = true);
   566       break;
   568     case 0xAD: // shrd r, a, %cl
   569     case 0xAF: // imul r, a
   570     case 0xBE: // movsbl r, a (movsxb)
   571     case 0xBF: // movswl r, a (movsxw)
   572     case 0xB6: // movzbl r, a (movzxb)
   573     case 0xB7: // movzwl r, a (movzxw)
   574     case REP16(0x40): // cmovl cc, r, a
   575     case 0xB0: // cmpxchgb
   576     case 0xB1: // cmpxchg
   577     case 0xC1: // xaddl
   578     case 0xC7: // cmpxchg8
   579     case REP16(0x90): // setcc a
   580       debug_only(has_disp32 = true);
   581       // fall out of the switch to decode the address
   582       break;
   584     case 0xC4: // pinsrw r, a, #8
   585       debug_only(has_disp32 = true);
   586     case 0xC5: // pextrw r, r, #8
   587       tail_size = 1;  // the imm8
   588       break;
   590     case 0xAC: // shrd r, a, #8
   591       debug_only(has_disp32 = true);
   592       tail_size = 1;  // the imm8
   593       break;
   595     case REP16(0x80): // jcc rdisp32
   596       if (which == end_pc_operand)  return ip + 4;
   597       assert(which == call32_operand, "jcc has no disp32 or imm");
   598       return ip;
   599     default:
   600       ShouldNotReachHere();
   601     }
   602     break;
   604   case 0x81: // addl a, #32; addl r, #32
   605     // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
   606     // on 32bit in the case of cmpl, the imm might be an oop
   607     tail_size = 4;
   608     debug_only(has_disp32 = true); // has both kinds of operands!
   609     break;
   611   case 0x83: // addl a, #8; addl r, #8
   612     // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
   613     debug_only(has_disp32 = true); // has both kinds of operands!
   614     tail_size = 1;
   615     break;
   617   case 0x9B:
   618     switch (0xFF & *ip++) {
   619     case 0xD9: // fnstcw a
   620       debug_only(has_disp32 = true);
   621       break;
   622     default:
   623       ShouldNotReachHere();
   624     }
   625     break;
   627   case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
   628   case REP4(0x10): // adc...
   629   case REP4(0x20): // and...
   630   case REP4(0x30): // xor...
   631   case REP4(0x08): // or...
   632   case REP4(0x18): // sbb...
   633   case REP4(0x28): // sub...
   634   case 0xF7: // mull a
   635   case 0x8D: // lea r, a
   636   case 0x87: // xchg r, a
   637   case REP4(0x38): // cmp...
   638   case 0x85: // test r, a
   639     debug_only(has_disp32 = true); // has both kinds of operands!
   640     break;
   642   case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
   643   case 0xC6: // movb a, #8
   644   case 0x80: // cmpb a, #8
   645   case 0x6B: // imul r, a, #8
   646     debug_only(has_disp32 = true); // has both kinds of operands!
   647     tail_size = 1; // the imm8
   648     break;
   650   case 0xC4: // VEX_3bytes
   651   case 0xC5: // VEX_2bytes
   652     assert((UseAVX > 0), "shouldn't have VEX prefix");
   653     assert(ip == inst+1, "no prefixes allowed");
   654     // C4 and C5 are also used as opcodes for PINSRW and PEXTRW instructions
   655     // but they have prefix 0x0F and processed when 0x0F processed above.
   656     //
   657     // In 32-bit mode the VEX first byte C4 and C5 alias onto LDS and LES
   658     // instructions (these instructions are not supported in 64-bit mode).
   659     // To distinguish them bits [7:6] are set in the VEX second byte since
   660     // ModRM byte can not be of the form 11xxxxxx in 32-bit mode. To set
   661     // those VEX bits REX and vvvv bits are inverted.
   662     //
   663     // Fortunately C2 doesn't generate these instructions so we don't need
   664     // to check for them in product version.
   666     // Check second byte
   667     NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions"));
   669     // First byte
   670     if ((0xFF & *inst) == VEX_3bytes) {
   671       ip++; // third byte
   672       is_64bit = ((VEX_W & *ip) == VEX_W);
   673     }
   674     ip++; // opcode
   675     // To find the end of instruction (which == end_pc_operand).
   676     switch (0xFF & *ip) {
   677     case 0x61: // pcmpestri r, r/a, #8
   678     case 0x70: // pshufd r, r/a, #8
   679     case 0x73: // psrldq r, #8
   680       tail_size = 1;  // the imm8
   681       break;
   682     default:
   683       break;
   684     }
   685     ip++; // skip opcode
   686     debug_only(has_disp32 = true); // has both kinds of operands!
   687     break;
   689   case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
   690   case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
   691   case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
   692   case 0xDD: // fld_d a; fst_d a; fstp_d a
   693   case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
   694   case 0xDF: // fild_d a; fistp_d a
   695   case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
   696   case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
   697   case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
   698     debug_only(has_disp32 = true);
   699     break;
   701   case 0xE8: // call rdisp32
   702   case 0xE9: // jmp  rdisp32
   703     if (which == end_pc_operand)  return ip + 4;
   704     assert(which == call32_operand, "call has no disp32 or imm");
   705     return ip;
   707   case 0xF0:                    // Lock
   708     assert(os::is_MP(), "only on MP");
   709     goto again_after_prefix;
   711   case 0xF3:                    // For SSE
   712   case 0xF2:                    // For SSE2
   713     switch (0xFF & *ip++) {
   714     case REX:
   715     case REX_B:
   716     case REX_X:
   717     case REX_XB:
   718     case REX_R:
   719     case REX_RB:
   720     case REX_RX:
   721     case REX_RXB:
   722     case REX_W:
   723     case REX_WB:
   724     case REX_WX:
   725     case REX_WXB:
   726     case REX_WR:
   727     case REX_WRB:
   728     case REX_WRX:
   729     case REX_WRXB:
   730       NOT_LP64(assert(false, "found 64bit prefix"));
   731       ip++;
   732     default:
   733       ip++;
   734     }
   735     debug_only(has_disp32 = true); // has both kinds of operands!
   736     break;
   738   default:
   739     ShouldNotReachHere();
   741 #undef REP8
   742 #undef REP16
   743   }
   745   assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
   746 #ifdef _LP64
   747   assert(which != imm_operand, "instruction is not a movq reg, imm64");
   748 #else
   749   // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
   750   assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
   751 #endif // LP64
   752   assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");
   754   // parse the output of emit_operand
   755   int op2 = 0xFF & *ip++;
   756   int base = op2 & 0x07;
   757   int op3 = -1;
   758   const int b100 = 4;
   759   const int b101 = 5;
   760   if (base == b100 && (op2 >> 6) != 3) {
   761     op3 = 0xFF & *ip++;
   762     base = op3 & 0x07;   // refetch the base
   763   }
   764   // now ip points at the disp (if any)
   766   switch (op2 >> 6) {
   767   case 0:
   768     // [00 reg  100][ss index base]
   769     // [00 reg  100][00   100  esp]
   770     // [00 reg base]
   771     // [00 reg  100][ss index  101][disp32]
   772     // [00 reg  101]               [disp32]
   774     if (base == b101) {
   775       if (which == disp32_operand)
   776         return ip;              // caller wants the disp32
   777       ip += 4;                  // skip the disp32
   778     }
   779     break;
   781   case 1:
   782     // [01 reg  100][ss index base][disp8]
   783     // [01 reg  100][00   100  esp][disp8]
   784     // [01 reg base]               [disp8]
   785     ip += 1;                    // skip the disp8
   786     break;
   788   case 2:
   789     // [10 reg  100][ss index base][disp32]
   790     // [10 reg  100][00   100  esp][disp32]
   791     // [10 reg base]               [disp32]
   792     if (which == disp32_operand)
   793       return ip;                // caller wants the disp32
   794     ip += 4;                    // skip the disp32
   795     break;
   797   case 3:
   798     // [11 reg base]  (not a memory addressing mode)
   799     break;
   800   }
   802   if (which == end_pc_operand) {
   803     return ip + tail_size;
   804   }
   806 #ifdef _LP64
   807   assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
   808 #else
   809   assert(which == imm_operand, "instruction has only an imm field");
   810 #endif // LP64
   811   return ip;
   812 }
   814 address Assembler::locate_next_instruction(address inst) {
   815   // Secretly share code with locate_operand:
   816   return locate_operand(inst, end_pc_operand);
   817 }
#ifdef ASSERT
// Debug-only sanity check: verifies that a relocation recorded for the
// instruction at inst_mark() points at the operand bytes that were just
// emitted, i.e. that the operand the relocation describes ends exactly at
// the current pc().
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
  address opnd;
  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    // Nothing to check for a null relocation.
    return;
  } else if (r->is_call() || format == call32_operand) {
    // A call instruction's operand is always its 32-bit displacement.
    // assert(format == imm32_operand, "cannot specify a nonzero format");
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    // Data relocations must name which operand field they refer to.
    assert(format == imm_operand || format == disp32_operand
           LP64_ONLY(|| format == narrow_oop_operand), "format ok");
    opnd = locate_operand(inst, (WhichOperand)format);
  } else {
    assert(format == imm_operand, "cannot specify a format");
    return;
  }
  // The operand must end where the relocation machinery expects it: pc().
  assert(opnd == pc(), "must put operand where relocs can find it");
}
#endif // ASSERT
// Emit a ModRM/SIB operand restricted to the eight classic registers
// (no REX-extended registers allowed; asserts enforce this).
void Assembler::emit_operand32(Register reg, Address adr) {
  assert(reg->encoding() < 8, "no extended registers");
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}
// Emit a ModRM/SIB operand for a general register; rip_relative_correction
// adjusts the displacement for bytes emitted after the operand (64-bit
// RIP-relative addressing).
void Assembler::emit_operand(Register reg, Address adr,
                             int rip_relative_correction) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               rip_relative_correction);
}
// Emit a ModRM/SIB operand where the reg field names an XMM register.
void Assembler::emit_operand(XMMRegister reg, Address adr) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}
// MMX operations
void Assembler::emit_operand(MMXRegister reg, Address adr) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}
// work around gcc (3.2.1-7a) bug
void Assembler::emit_operand(Address adr, MMXRegister reg) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}
// Emit a two-byte x87 floating-point instruction: opcode byte b1 followed
// by b2 with the FP stack slot index i (0..7) folded into its low bits.
void Assembler::emit_farith(int b1, int b2, int i) {
  assert(isByte(b1) && isByte(b2), "wrong opcode");
  assert(0 <= i &&  i < 8, "illegal stack offset");
  emit_int8(b1);
  emit_int8(b2 + i);
}
// Now the Assembler instructions (identical for 32/64 bits)

// ADC (add with carry), 32-bit forms.
// Memory destination, immediate source (0x81 /2, with /2 encoded via rdx).
void Assembler::adcl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rdx, dst, imm32);
}
// Memory destination, register source (opcode 0x11).
void Assembler::adcl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x11);
  emit_operand(src, dst);
}
// Register destination, immediate source (0x81 /2: ModRM base 0xD0).
void Assembler::adcl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32);
}
// Register destination, memory source (opcode 0x13).
void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src);
}
// Register-register form (opcode 0x13, ModRM mod=11).
void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}
// ADD, 32-bit forms.
// Memory destination, immediate source (0x81 /0, with /0 encoded via rax).
void Assembler::addl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}
// Memory destination, register source (opcode 0x01).
void Assembler::addl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst);
}
// Register destination, immediate source (0x81 /0: ModRM base 0xC0).
void Assembler::addl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC0, dst, imm32);
}
// Register destination, memory source (opcode 0x03).
void Assembler::addl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x03);
  emit_operand(dst, src);
}
// Register-register form (opcode 0x03, ModRM mod=11).
void Assembler::addl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}
// Multi-byte NOPs encoded as 0F 1F /0 with a dummy memory operand.
// Used to pad code to a desired alignment in a single instruction.
void Assembler::addr_nop_4() {
  assert(UseAddressNop, "no CPU support");
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
  emit_int8(0);    // 8-bits offset (1 byte)
}
void Assembler::addr_nop_5() {
  assert(UseAddressNop, "no CPU support");
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
  emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int8(0);    // 8-bits offset (1 byte)
}
void Assembler::addr_nop_7() {
  assert(UseAddressNop, "no CPU support");
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8((unsigned char)0x80);
                   // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  emit_int32(0);   // 32-bits offset (4 bytes)
}
void Assembler::addr_nop_8() {
  assert(UseAddressNop, "no CPU support");
  // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8((unsigned char)0x84);
                   // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
  emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int32(0);   // 32-bits offset (4 bytes)
}
// ADDSD — scalar double-precision add (opcode 0x58, F2 prefix).
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F2);
}
void Assembler::addsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F2);
}
// ADDSS — scalar single-precision add (opcode 0x58, F3 prefix).
void Assembler::addss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
}
  1001 void Assembler::addss(XMMRegister dst, Address src) {
  1002   NOT_LP64(assert(VM_Version::supports_sse(), ""));
  1003   emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
  1006 void Assembler::aesdec(XMMRegister dst, Address src) {
  1007   assert(VM_Version::supports_aes(), "");
  1008   InstructionMark im(this);
  1009   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  1010   emit_int8((unsigned char)0xDE);
  1011   emit_operand(dst, src);
  1014 void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
  1015   assert(VM_Version::supports_aes(), "");
  1016   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  1017   emit_int8((unsigned char)0xDE);
  1018   emit_int8(0xC0 | encode);
  1021 void Assembler::aesdeclast(XMMRegister dst, Address src) {
  1022   assert(VM_Version::supports_aes(), "");
  1023   InstructionMark im(this);
  1024   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  1025   emit_int8((unsigned char)0xDF);
  1026   emit_operand(dst, src);
  1029 void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
  1030   assert(VM_Version::supports_aes(), "");
  1031   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  1032   emit_int8((unsigned char)0xDF);
  1033   emit_int8((unsigned char)(0xC0 | encode));
  1036 void Assembler::aesenc(XMMRegister dst, Address src) {
  1037   assert(VM_Version::supports_aes(), "");
  1038   InstructionMark im(this);
  1039   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  1040   emit_int8((unsigned char)0xDC);
  1041   emit_operand(dst, src);
  1044 void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
  1045   assert(VM_Version::supports_aes(), "");
  1046   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  1047   emit_int8((unsigned char)0xDC);
  1048   emit_int8(0xC0 | encode);
  1051 void Assembler::aesenclast(XMMRegister dst, Address src) {
  1052   assert(VM_Version::supports_aes(), "");
  1053   InstructionMark im(this);
  1054   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  1055   emit_int8((unsigned char)0xDD);
  1056   emit_operand(dst, src);
  1059 void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
  1060   assert(VM_Version::supports_aes(), "");
  1061   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  1062   emit_int8((unsigned char)0xDD);
  1063   emit_int8((unsigned char)(0xC0 | encode));
  1067 void Assembler::andl(Address dst, int32_t imm32) {
  1068   InstructionMark im(this);
  1069   prefix(dst);
  1070   emit_int8((unsigned char)0x81);
  1071   emit_operand(rsp, dst, 4);
  1072   emit_int32(imm32);
  1075 void Assembler::andl(Register dst, int32_t imm32) {
  1076   prefix(dst);
  1077   emit_arith(0x81, 0xE0, dst, imm32);
  1080 void Assembler::andl(Register dst, Address src) {
  1081   InstructionMark im(this);
  1082   prefix(src, dst);
  1083   emit_int8(0x23);
  1084   emit_operand(dst, src);
  1087 void Assembler::andl(Register dst, Register src) {
  1088   (void) prefix_and_encode(dst->encoding(), src->encoding());
  1089   emit_arith(0x23, 0xC0, dst, src);
  1092 void Assembler::bsfl(Register dst, Register src) {
  1093   int encode = prefix_and_encode(dst->encoding(), src->encoding());
  1094   emit_int8(0x0F);
  1095   emit_int8((unsigned char)0xBC);
  1096   emit_int8((unsigned char)(0xC0 | encode));
  1099 void Assembler::bsrl(Register dst, Register src) {
  1100   assert(!VM_Version::supports_lzcnt(), "encoding is treated as LZCNT");
  1101   int encode = prefix_and_encode(dst->encoding(), src->encoding());
  1102   emit_int8(0x0F);
  1103   emit_int8((unsigned char)0xBD);
  1104   emit_int8((unsigned char)(0xC0 | encode));
  1107 void Assembler::bswapl(Register reg) { // bswap
  1108   int encode = prefix_and_encode(reg->encoding());
  1109   emit_int8(0x0F);
  1110   emit_int8((unsigned char)(0xC8 | encode));
  1113 void Assembler::call(Label& L, relocInfo::relocType rtype) {
  1114   // suspect disp32 is always good
  1115   int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);
  1117   if (L.is_bound()) {
  1118     const int long_size = 5;
  1119     int offs = (int)( target(L) - pc() );
  1120     assert(offs <= 0, "assembler error");
  1121     InstructionMark im(this);
  1122     // 1110 1000 #32-bit disp
  1123     emit_int8((unsigned char)0xE8);
  1124     emit_data(offs - long_size, rtype, operand);
  1125   } else {
  1126     InstructionMark im(this);
  1127     // 1110 1000 #32-bit disp
  1128     L.add_patch_at(code(), locator());
  1130     emit_int8((unsigned char)0xE8);
  1131     emit_data(int(0), rtype, operand);
  1135 void Assembler::call(Register dst) {
  1136   int encode = prefix_and_encode(dst->encoding());
  1137   emit_int8((unsigned char)0xFF);
  1138   emit_int8((unsigned char)(0xD0 | encode));
  1142 void Assembler::call(Address adr) {
  1143   InstructionMark im(this);
  1144   prefix(adr);
  1145   emit_int8((unsigned char)0xFF);
  1146   emit_operand(rdx, adr);
  1149 void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  1150   assert(entry != NULL, "call most probably wrong");
  1151   InstructionMark im(this);
  1152   emit_int8((unsigned char)0xE8);
  1153   intptr_t disp = entry - (pc() + sizeof(int32_t));
  1154   assert(is_simm32(disp), "must be 32bit offset (call2)");
  1155   // Technically, should use call32_operand, but this format is
  1156   // implied by the fact that we're emitting a call instruction.
  1158   int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
  1159   emit_data((int) disp, rspec, operand);
  1162 void Assembler::cdql() {
  1163   emit_int8((unsigned char)0x99);
  1166 void Assembler::cld() {
  1167   emit_int8((unsigned char)0xFC);
  1170 void Assembler::cmovl(Condition cc, Register dst, Register src) {
  1171   NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  1172   int encode = prefix_and_encode(dst->encoding(), src->encoding());
  1173   emit_int8(0x0F);
  1174   emit_int8(0x40 | cc);
  1175   emit_int8((unsigned char)(0xC0 | encode));
  1179 void Assembler::cmovl(Condition cc, Register dst, Address src) {
  1180   NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  1181   prefix(src, dst);
  1182   emit_int8(0x0F);
  1183   emit_int8(0x40 | cc);
  1184   emit_operand(dst, src);
  1187 void Assembler::cmpb(Address dst, int imm8) {
  1188   InstructionMark im(this);
  1189   prefix(dst);
  1190   emit_int8((unsigned char)0x80);
  1191   emit_operand(rdi, dst, 1);
  1192   emit_int8(imm8);
  1195 void Assembler::cmpl(Address dst, int32_t imm32) {
  1196   InstructionMark im(this);
  1197   prefix(dst);
  1198   emit_int8((unsigned char)0x81);
  1199   emit_operand(rdi, dst, 4);
  1200   emit_int32(imm32);
  1203 void Assembler::cmpl(Register dst, int32_t imm32) {
  1204   prefix(dst);
  1205   emit_arith(0x81, 0xF8, dst, imm32);
  1208 void Assembler::cmpl(Register dst, Register src) {
  1209   (void) prefix_and_encode(dst->encoding(), src->encoding());
  1210   emit_arith(0x3B, 0xC0, dst, src);
  1214 void Assembler::cmpl(Register dst, Address  src) {
  1215   InstructionMark im(this);
  1216   prefix(src, dst);
  1217   emit_int8((unsigned char)0x3B);
  1218   emit_operand(dst, src);
  1221 void Assembler::cmpw(Address dst, int imm16) {
  1222   InstructionMark im(this);
  1223   assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers");
  1224   emit_int8(0x66);
  1225   emit_int8((unsigned char)0x81);
  1226   emit_operand(rdi, dst, 2);
  1227   emit_int16(imm16);
  1230 // The 32-bit cmpxchg compares the value at adr with the contents of rax,
  1231 // and stores reg into adr if so; otherwise, the value at adr is loaded into rax,.
  1232 // The ZF is set if the compared values were equal, and cleared otherwise.
  1233 void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
  1234   InstructionMark im(this);
  1235   prefix(adr, reg);
  1236   emit_int8(0x0F);
  1237   emit_int8((unsigned char)0xB1);
  1238   emit_operand(reg, adr);
  1241 void Assembler::comisd(XMMRegister dst, Address src) {
  1242   // NOTE: dbx seems to decode this as comiss even though the
  1243   // 0x66 is there. Strangly ucomisd comes out correct
  1244   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1245   emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66);
  1248 void Assembler::comisd(XMMRegister dst, XMMRegister src) {
  1249   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1250   emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66);
  1253 void Assembler::comiss(XMMRegister dst, Address src) {
  1254   NOT_LP64(assert(VM_Version::supports_sse(), ""));
  1255   emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE);
  1258 void Assembler::comiss(XMMRegister dst, XMMRegister src) {
  1259   NOT_LP64(assert(VM_Version::supports_sse(), ""));
  1260   emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE);
  1263 void Assembler::cpuid() {
  1264   emit_int8(0x0F);
  1265   emit_int8((unsigned char)0xA2);
  1268 void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
  1269   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1270   emit_simd_arith_nonds(0xE6, dst, src, VEX_SIMD_F3);
  1273 void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
  1274   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1275   emit_simd_arith_nonds(0x5B, dst, src, VEX_SIMD_NONE);
  1278 void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
  1279   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1280   emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2);
  1283 void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
  1284   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1285   emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2);
  1288 void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
  1289   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1290   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2);
  1291   emit_int8(0x2A);
  1292   emit_int8((unsigned char)(0xC0 | encode));
  1295 void Assembler::cvtsi2sdl(XMMRegister dst, Address src) {
  1296   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1297   emit_simd_arith(0x2A, dst, src, VEX_SIMD_F2);
  1300 void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
  1301   NOT_LP64(assert(VM_Version::supports_sse(), ""));
  1302   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3);
  1303   emit_int8(0x2A);
  1304   emit_int8((unsigned char)(0xC0 | encode));
  1307 void Assembler::cvtsi2ssl(XMMRegister dst, Address src) {
  1308   NOT_LP64(assert(VM_Version::supports_sse(), ""));
  1309   emit_simd_arith(0x2A, dst, src, VEX_SIMD_F3);
  1312 void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
  1313   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1314   emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3);
  1317 void Assembler::cvtss2sd(XMMRegister dst, Address src) {
  1318   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1319   emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3);
  1323 void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
  1324   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1325   int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F2);
  1326   emit_int8(0x2C);
  1327   emit_int8((unsigned char)(0xC0 | encode));
  1330 void Assembler::cvttss2sil(Register dst, XMMRegister src) {
  1331   NOT_LP64(assert(VM_Version::supports_sse(), ""));
  1332   int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F3);
  1333   emit_int8(0x2C);
  1334   emit_int8((unsigned char)(0xC0 | encode));
  1337 void Assembler::decl(Address dst) {
  1338   // Don't use it directly. Use MacroAssembler::decrement() instead.
  1339   InstructionMark im(this);
  1340   prefix(dst);
  1341   emit_int8((unsigned char)0xFF);
  1342   emit_operand(rcx, dst);
  1345 void Assembler::divsd(XMMRegister dst, Address src) {
  1346   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1347   emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2);
  1350 void Assembler::divsd(XMMRegister dst, XMMRegister src) {
  1351   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1352   emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2);
  1355 void Assembler::divss(XMMRegister dst, Address src) {
  1356   NOT_LP64(assert(VM_Version::supports_sse(), ""));
  1357   emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3);
  1360 void Assembler::divss(XMMRegister dst, XMMRegister src) {
  1361   NOT_LP64(assert(VM_Version::supports_sse(), ""));
  1362   emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3);
  1365 void Assembler::emms() {
  1366   NOT_LP64(assert(VM_Version::supports_mmx(), ""));
  1367   emit_int8(0x0F);
  1368   emit_int8(0x77);
  1371 void Assembler::hlt() {
  1372   emit_int8((unsigned char)0xF4);
  1375 void Assembler::idivl(Register src) {
  1376   int encode = prefix_and_encode(src->encoding());
  1377   emit_int8((unsigned char)0xF7);
  1378   emit_int8((unsigned char)(0xF8 | encode));
  1381 void Assembler::divl(Register src) { // Unsigned
  1382   int encode = prefix_and_encode(src->encoding());
  1383   emit_int8((unsigned char)0xF7);
  1384   emit_int8((unsigned char)(0xF0 | encode));
  1387 void Assembler::imull(Register dst, Register src) {
  1388   int encode = prefix_and_encode(dst->encoding(), src->encoding());
  1389   emit_int8(0x0F);
  1390   emit_int8((unsigned char)0xAF);
  1391   emit_int8((unsigned char)(0xC0 | encode));
  1395 void Assembler::imull(Register dst, Register src, int value) {
  1396   int encode = prefix_and_encode(dst->encoding(), src->encoding());
  1397   if (is8bit(value)) {
  1398     emit_int8(0x6B);
  1399     emit_int8((unsigned char)(0xC0 | encode));
  1400     emit_int8(value & 0xFF);
  1401   } else {
  1402     emit_int8(0x69);
  1403     emit_int8((unsigned char)(0xC0 | encode));
  1404     emit_int32(value);
  1408 void Assembler::incl(Address dst) {
  1409   // Don't use it directly. Use MacroAssembler::increment() instead.
  1410   InstructionMark im(this);
  1411   prefix(dst);
  1412   emit_int8((unsigned char)0xFF);
  1413   emit_operand(rax, dst);
  1416 void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
  1417   InstructionMark im(this);
  1418   assert((0 <= cc) && (cc < 16), "illegal cc");
  1419   if (L.is_bound()) {
  1420     address dst = target(L);
  1421     assert(dst != NULL, "jcc most probably wrong");
  1423     const int short_size = 2;
  1424     const int long_size = 6;
  1425     intptr_t offs = (intptr_t)dst - (intptr_t)pc();
  1426     if (maybe_short && is8bit(offs - short_size)) {
  1427       // 0111 tttn #8-bit disp
  1428       emit_int8(0x70 | cc);
  1429       emit_int8((offs - short_size) & 0xFF);
  1430     } else {
  1431       // 0000 1111 1000 tttn #32-bit disp
  1432       assert(is_simm32(offs - long_size),
  1433              "must be 32bit offset (call4)");
  1434       emit_int8(0x0F);
  1435       emit_int8((unsigned char)(0x80 | cc));
  1436       emit_int32(offs - long_size);
  1438   } else {
  1439     // Note: could eliminate cond. jumps to this jump if condition
  1440     //       is the same however, seems to be rather unlikely case.
  1441     // Note: use jccb() if label to be bound is very close to get
  1442     //       an 8-bit displacement
  1443     L.add_patch_at(code(), locator());
  1444     emit_int8(0x0F);
  1445     emit_int8((unsigned char)(0x80 | cc));
  1446     emit_int32(0);
  1450 void Assembler::jccb(Condition cc, Label& L) {
  1451   if (L.is_bound()) {
  1452     const int short_size = 2;
  1453     address entry = target(L);
  1454 #ifdef ASSERT
  1455     intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
  1456     intptr_t delta = short_branch_delta();
  1457     if (delta != 0) {
  1458       dist += (dist < 0 ? (-delta) :delta);
  1460     assert(is8bit(dist), "Dispacement too large for a short jmp");
  1461 #endif
  1462     intptr_t offs = (intptr_t)entry - (intptr_t)pc();
  1463     // 0111 tttn #8-bit disp
  1464     emit_int8(0x70 | cc);
  1465     emit_int8((offs - short_size) & 0xFF);
  1466   } else {
  1467     InstructionMark im(this);
  1468     L.add_patch_at(code(), locator());
  1469     emit_int8(0x70 | cc);
  1470     emit_int8(0);
  1474 void Assembler::jmp(Address adr) {
  1475   InstructionMark im(this);
  1476   prefix(adr);
  1477   emit_int8((unsigned char)0xFF);
  1478   emit_operand(rsp, adr);
  1481 void Assembler::jmp(Label& L, bool maybe_short) {
  1482   if (L.is_bound()) {
  1483     address entry = target(L);
  1484     assert(entry != NULL, "jmp most probably wrong");
  1485     InstructionMark im(this);
  1486     const int short_size = 2;
  1487     const int long_size = 5;
  1488     intptr_t offs = entry - pc();
  1489     if (maybe_short && is8bit(offs - short_size)) {
  1490       emit_int8((unsigned char)0xEB);
  1491       emit_int8((offs - short_size) & 0xFF);
  1492     } else {
  1493       emit_int8((unsigned char)0xE9);
  1494       emit_int32(offs - long_size);
  1496   } else {
  1497     // By default, forward jumps are always 32-bit displacements, since
  1498     // we can't yet know where the label will be bound.  If you're sure that
  1499     // the forward jump will not run beyond 256 bytes, use jmpb to
  1500     // force an 8-bit displacement.
  1501     InstructionMark im(this);
  1502     L.add_patch_at(code(), locator());
  1503     emit_int8((unsigned char)0xE9);
  1504     emit_int32(0);
  1508 void Assembler::jmp(Register entry) {
  1509   int encode = prefix_and_encode(entry->encoding());
  1510   emit_int8((unsigned char)0xFF);
  1511   emit_int8((unsigned char)(0xE0 | encode));
  1514 void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
  1515   InstructionMark im(this);
  1516   emit_int8((unsigned char)0xE9);
  1517   assert(dest != NULL, "must have a target");
  1518   intptr_t disp = dest - (pc() + sizeof(int32_t));
  1519   assert(is_simm32(disp), "must be 32bit offset (jmp)");
  1520   emit_data(disp, rspec.reloc(), call32_operand);
  1523 void Assembler::jmpb(Label& L) {
  1524   if (L.is_bound()) {
  1525     const int short_size = 2;
  1526     address entry = target(L);
  1527     assert(entry != NULL, "jmp most probably wrong");
  1528 #ifdef ASSERT
  1529     intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
  1530     intptr_t delta = short_branch_delta();
  1531     if (delta != 0) {
  1532       dist += (dist < 0 ? (-delta) :delta);
  1534     assert(is8bit(dist), "Dispacement too large for a short jmp");
  1535 #endif
  1536     intptr_t offs = entry - pc();
  1537     emit_int8((unsigned char)0xEB);
  1538     emit_int8((offs - short_size) & 0xFF);
  1539   } else {
  1540     InstructionMark im(this);
  1541     L.add_patch_at(code(), locator());
  1542     emit_int8((unsigned char)0xEB);
  1543     emit_int8(0);
  1547 void Assembler::ldmxcsr( Address src) {
  1548   NOT_LP64(assert(VM_Version::supports_sse(), ""));
  1549   InstructionMark im(this);
  1550   prefix(src);
  1551   emit_int8(0x0F);
  1552   emit_int8((unsigned char)0xAE);
  1553   emit_operand(as_Register(2), src);
  1556 void Assembler::leal(Register dst, Address src) {
  1557   InstructionMark im(this);
  1558 #ifdef _LP64
  1559   emit_int8(0x67); // addr32
  1560   prefix(src, dst);
  1561 #endif // LP64
  1562   emit_int8((unsigned char)0x8D);
  1563   emit_operand(dst, src);
  1566 void Assembler::lfence() {
  1567   emit_int8(0x0F);
  1568   emit_int8((unsigned char)0xAE);
  1569   emit_int8((unsigned char)0xE8);
  1572 void Assembler::lock() {
  1573   emit_int8((unsigned char)0xF0);
  1576 void Assembler::lzcntl(Register dst, Register src) {
  1577   assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  1578   emit_int8((unsigned char)0xF3);
  1579   int encode = prefix_and_encode(dst->encoding(), src->encoding());
  1580   emit_int8(0x0F);
  1581   emit_int8((unsigned char)0xBD);
  1582   emit_int8((unsigned char)(0xC0 | encode));
  1585 // Emit mfence instruction
  1586 void Assembler::mfence() {
  1587   NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
  1588   emit_int8(0x0F);
  1589   emit_int8((unsigned char)0xAE);
  1590   emit_int8((unsigned char)0xF0);
  1593 void Assembler::mov(Register dst, Register src) {
  1594   LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
  1597 void Assembler::movapd(XMMRegister dst, XMMRegister src) {
  1598   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1599   emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_66);
  1602 void Assembler::movaps(XMMRegister dst, XMMRegister src) {
  1603   NOT_LP64(assert(VM_Version::supports_sse(), ""));
  1604   emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_NONE);
  1607 void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
  1608   NOT_LP64(assert(VM_Version::supports_sse(), ""));
  1609   int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE);
  1610   emit_int8(0x16);
  1611   emit_int8((unsigned char)(0xC0 | encode));
  1614 void Assembler::movb(Register dst, Address src) {
  1615   NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  1616   InstructionMark im(this);
  1617   prefix(src, dst, true);
  1618   emit_int8((unsigned char)0x8A);
  1619   emit_operand(dst, src);
  1623 void Assembler::movb(Address dst, int imm8) {
  1624   InstructionMark im(this);
  1625    prefix(dst);
  1626   emit_int8((unsigned char)0xC6);
  1627   emit_operand(rax, dst, 1);
  1628   emit_int8(imm8);
  1632 void Assembler::movb(Address dst, Register src) {
  1633   assert(src->has_byte_register(), "must have byte register");
  1634   InstructionMark im(this);
  1635   prefix(dst, src, true);
  1636   emit_int8((unsigned char)0x88);
  1637   emit_operand(src, dst);
  1640 void Assembler::movdl(XMMRegister dst, Register src) {
  1641   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1642   int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_66);
  1643   emit_int8(0x6E);
  1644   emit_int8((unsigned char)(0xC0 | encode));
  1647 void Assembler::movdl(Register dst, XMMRegister src) {
  1648   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1649   // swap src/dst to get correct prefix
  1650   int encode = simd_prefix_and_encode(src, dst, VEX_SIMD_66);
  1651   emit_int8(0x7E);
  1652   emit_int8((unsigned char)(0xC0 | encode));
  1655 void Assembler::movdl(XMMRegister dst, Address src) {
  1656   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1657   InstructionMark im(this);
  1658   simd_prefix(dst, src, VEX_SIMD_66);
  1659   emit_int8(0x6E);
  1660   emit_operand(dst, src);
  1663 void Assembler::movdl(Address dst, XMMRegister src) {
  1664   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1665   InstructionMark im(this);
  1666   simd_prefix(dst, src, VEX_SIMD_66);
  1667   emit_int8(0x7E);
  1668   emit_operand(src, dst);
  1671 void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
  1672   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1673   emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66);
  1676 void Assembler::movdqa(XMMRegister dst, Address src) {
  1677   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1678   emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66);
  1681 void Assembler::movdqu(XMMRegister dst, Address src) {
  1682   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1683   emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
  1686 void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
  1687   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1688   emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
  1691 void Assembler::movdqu(Address dst, XMMRegister src) {
  1692   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  1693   InstructionMark im(this);
  1694   simd_prefix(dst, src, VEX_SIMD_F3);
  1695   emit_int8(0x7F);
  1696   emit_operand(src, dst);
  1699 // Move Unaligned 256bit Vector
  1700 void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
  1701   assert(UseAVX, "");
  1702   bool vector256 = true;
  1703   int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, vector256);
  1704   emit_int8(0x6F);
  1705   emit_int8((unsigned char)(0xC0 | encode));
  1708 void Assembler::vmovdqu(XMMRegister dst, Address src) {
  1709   assert(UseAVX, "");
  1710   InstructionMark im(this);
  1711   bool vector256 = true;
  1712   vex_prefix(dst, xnoreg, src, VEX_SIMD_F3, vector256);
  1713   emit_int8(0x6F);
  1714   emit_operand(dst, src);
  1717 void Assembler::vmovdqu(Address dst, XMMRegister src) {
  1718   assert(UseAVX, "");
  1719   InstructionMark im(this);
  1720   bool vector256 = true;
  1721   // swap src<->dst for encoding
  1722   assert(src != xnoreg, "sanity");
  1723   vex_prefix(src, xnoreg, dst, VEX_SIMD_F3, vector256);
  1724   emit_int8(0x7F);
  1725   emit_operand(src, dst);
  1728 // Uses zero extension on 64bit
  1730 void Assembler::movl(Register dst, int32_t imm32) {
  1731   int encode = prefix_and_encode(dst->encoding());
  1732   emit_int8((unsigned char)(0xB8 | encode));
  1733   emit_int32(imm32);
  1736 void Assembler::movl(Register dst, Register src) {
  1737   int encode = prefix_and_encode(dst->encoding(), src->encoding());
  1738   emit_int8((unsigned char)0x8B);
  1739   emit_int8((unsigned char)(0xC0 | encode));
  1742 void Assembler::movl(Register dst, Address src) {
  1743   InstructionMark im(this);
  1744   prefix(src, dst);
  1745   emit_int8((unsigned char)0x8B);
  1746   emit_operand(dst, src);
  1749 void Assembler::movl(Address dst, int32_t imm32) {
  1750   InstructionMark im(this);
  1751   prefix(dst);
  1752   emit_int8((unsigned char)0xC7);
  1753   emit_operand(rax, dst, 4);
  1754   emit_int32(imm32);
  1757 void Assembler::movl(Address dst, Register src) {
  1758   InstructionMark im(this);
  1759   prefix(dst, src);
  1760   emit_int8((unsigned char)0x89);
  1761   emit_operand(src, dst);
// New cpus require to use movsd and movss to avoid partial register stall
// when loading from memory. But for old Opteron use movlpd instead of movsd.
// The selection is done in MacroAssembler::movdbl() and movflt().

// MOVLPD xmm, m64 (66 0F 12 /r).
void Assembler::movlpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x12, dst, src, VEX_SIMD_66);
}

// MOVQ mm, m64 (0F 6F /r). NOTE(review): no InstructionMark here,
// unlike the XMM forms below — presumably intentional for MMX; confirm.
void Assembler::movq( MMXRegister dst, Address src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_int8(0x0F);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

// MOVQ m64, mm (0F 7F /r).
void Assembler::movq( Address dst, MMXRegister src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_int8(0x0F);
  emit_int8(0x7F);
  // workaround gcc (3.2.1-7a) bug
  // In that version of gcc with only an emit_operand(MMX, Address)
  // gcc will tail jump and try and reverse the parameters completely
  // obliterating dst in the process. By having a version available
  // that doesn't need to swap the args at the tail jump the bug is
  // avoided.
  emit_operand(dst, src);
}

// MOVQ xmm, m64 (F3 0F 7E /r).
void Assembler::movq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F3);
  emit_int8(0x7E);
  emit_operand(dst, src);
}

// MOVQ m64, xmm (66 0F D6 /r).
void Assembler::movq(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66);
  emit_int8((unsigned char)0xD6);
  emit_operand(src, dst);
}
// MOVSX r32, m8 (0F BE /r) — sign-extend byte load.
void Assembler::movsbl(Register dst, Address src) { // movsxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_operand(dst, src);
}

// MOVSX r32, r8 (0F BE /r); byte_instr=true so REX is forced for spl..dil.
void Assembler::movsbl(Register dst, Register src) { // movsxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOVSD xmm, xmm (F2 0F 10 /r).
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x10, dst, src, VEX_SIMD_F2);
}

// MOVSD xmm, m64 (F2 0F 10 /r); non-destructive-source form.
void Assembler::movsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F2);
}

// MOVSD m64, xmm (F2 0F 11 /r).
void Assembler::movsd(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F2);
  emit_int8(0x11);
  emit_operand(src, dst);
}

// MOVSS xmm, xmm (F3 0F 10 /r).
void Assembler::movss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x10, dst, src, VEX_SIMD_F3);
}

// MOVSS xmm, m32 (F3 0F 10 /r).
void Assembler::movss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F3);
}

// MOVSS m32, xmm (F3 0F 11 /r).
void Assembler::movss(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F3);
  emit_int8(0x11);
  emit_operand(src, dst);
}

// MOVSX r32, m16 (0F BF /r) — sign-extend word load.
void Assembler::movswl(Register dst, Address src) { // movsxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_operand(dst, src);
}

// MOVSX r32, r16 (0F BF /r).
void Assembler::movswl(Register dst, Register src) { // movsxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_int8((unsigned char)(0xC0 | encode));
}
// MOV m16, imm16 (66 C7 /0 iw); 0x66 selects 16-bit operand size.
void Assembler::movw(Address dst, int imm16) {
  InstructionMark im(this);

  emit_int8(0x66); // switch to 16-bit mode
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 2);
  emit_int16(imm16);
}

// MOV r16, m16 (66 8B /r).
void Assembler::movw(Register dst, Address src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

// MOV m16, r16 (66 89 /r).
void Assembler::movw(Address dst, Register src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}
// MOVZX r32, m8 (0F B6 /r) — zero-extend byte load.
void Assembler::movzbl(Register dst, Address src) { // movzxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
  emit_operand(dst, src);
}
  1909 void Assembler::movzbl(Register dst, Register src) { // movzxb
  1910   NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  1911   int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
  1912   emit_int8(0x0F);
  1913   emit_int8((unsigned char)0xB6);
  1914   emit_int8(0xC0 | encode);
// MOVZX r32, m16 (0F B7 /r) — zero-extend word load.
void Assembler::movzwl(Register dst, Address src) { // movzxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB7);
  emit_operand(dst, src);
}
  1925 void Assembler::movzwl(Register dst, Register src) { // movzxw
  1926   int encode = prefix_and_encode(dst->encoding(), src->encoding());
  1927   emit_int8(0x0F);
  1928   emit_int8((unsigned char)0xB7);
  1929   emit_int8(0xC0 | encode);
// MUL m32 (F7 /4) — unsigned multiply EAX by memory operand; rsp selects /4.
void Assembler::mull(Address src) {
  InstructionMark im(this);
  prefix(src);
  emit_int8((unsigned char)0xF7);
  emit_operand(rsp, src);
}

// MUL r32 (F7 /4, register form: E0|reg).
void Assembler::mull(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xE0 | encode));
}

// MULSD xmm, m64 (F2 0F 59 /r).
void Assembler::mulsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F2);
}

// MULSD xmm, xmm (F2 0F 59 /r).
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F2);
}

// MULSS xmm, m32 (F3 0F 59 /r).
void Assembler::mulss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F3);
}

// MULSS xmm, xmm (F3 0F 59 /r).
void Assembler::mulss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F3);
}

// NEG r32 (F7 /3).
void Assembler::negl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD8 | encode));
}
// Emit exactly i bytes of padding using the fastest NOP encodings available:
// multi-byte "address NOPs" (0F 1F /0) on CPUs where UseAddressNop is set,
// otherwise 0x66-prefixed single-byte NOPs. Under ASSERT, plain 0x90s are
// used so debugger disassembly stays readable. The switch fall-throughs
// below are intentional — each case adds one more 0x66 size prefix.
void Assembler::nop(int i) {
#ifdef ASSERT
  assert(i > 0, " ");
  // The fancy nops aren't currently recognized by debuggers making it a
  // pain to disassemble code while debugging. If asserts are on clearly
  // speed is not an issue so simply use the single byte traditional nop
  // to do alignment.

  for (; i > 0 ; i--) emit_int8((unsigned char)0x90);
  return;

#endif // ASSERT

  if (UseAddressNop && VM_Version::is_intel()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for Intel
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The rest coding is Intel specific - don't use consecutive address nops

    // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90

    while(i >= 15) {
      // For Intel don't generate consecutive addess nops (mix with regular nops)
      i -= 15;
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      addr_nop_8();
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8((unsigned char)0x90);
                         // nop
    }
    switch (i) {
      case 14:
        emit_int8(0x66); // size prefix
      case 13:
        emit_int8(0x66); // size prefix
      case 12:
        addr_nop_8();
        emit_int8(0x66); // size prefix
        emit_int8(0x66); // size prefix
        emit_int8(0x66); // size prefix
        emit_int8((unsigned char)0x90);
                         // nop
        break;
      case 11:
        emit_int8(0x66); // size prefix
      case 10:
        emit_int8(0x66); // size prefix
      case 9:
        emit_int8(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_int8(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_int8(0x66); // size prefix
      case 2:
        emit_int8(0x66); // size prefix
      case 1:
        emit_int8((unsigned char)0x90);
                         // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }
  if (UseAddressNop && VM_Version::is_amd()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for AMD.
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The rest coding is AMD specific - use consecutive address nops

    // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //     Size prefixes (0x66) are added for larger sizes

    while(i >= 22) {
      i -= 11;
      emit_int8(0x66); // size prefix
      emit_int8(0x66); // size prefix
      emit_int8(0x66); // size prefix
      addr_nop_8();
    }
    // Generate first nop for size between 21-12
    switch (i) {
      case 21:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 20:
      case 19:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 18:
      case 17:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 16:
      case 15:
        i -= 8;
        addr_nop_8();
        break;
      case 14:
      case 13:
        i -= 7;
        addr_nop_7();
        break;
      case 12:
        i -= 6;
        emit_int8(0x66); // size prefix
        addr_nop_5();
        break;
      default:
        assert(i < 12, " ");
    }

    // Generate second nop for size between 11-1
    switch (i) {
      case 11:
        emit_int8(0x66); // size prefix
      case 10:
        emit_int8(0x66); // size prefix
      case 9:
        emit_int8(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_int8(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_int8(0x66); // size prefix
      case 2:
        emit_int8(0x66); // size prefix
      case 1:
        emit_int8((unsigned char)0x90);
                         // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }

  // Fallback for CPUs without address-NOP support.
  // Using nops with size prefixes "0x66 0x90".
  // From AMD Optimization Guide:
  //  1: 0x90
  //  2: 0x66 0x90
  //  3: 0x66 0x66 0x90
  //  4: 0x66 0x66 0x66 0x90
  //  5: 0x66 0x66 0x90 0x66 0x90
  //  6: 0x66 0x66 0x90 0x66 0x66 0x90
  //  7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
  //  8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
  //  9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  //
  while(i > 12) {
    i -= 4;
    emit_int8(0x66); // size prefix
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
                     // nop
  }
  // 1 - 12 nops
  if(i > 8) {
    if(i > 9) {
      i -= 1;
      emit_int8(0x66);
    }
    i -= 3;
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
  }
  // 1 - 8 nops
  if(i > 4) {
    if(i > 6) {
      i -= 1;
      emit_int8(0x66);
    }
    i -= 3;
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
  }
  switch (i) {
    case 4:
      emit_int8(0x66);
    case 3:
      emit_int8(0x66);
    case 2:
      emit_int8(0x66);
    case 1:
      emit_int8((unsigned char)0x90);
      break;
    default:
      assert(i == 0, " ");
  }
}
// NOT r32 (F7 /2).
void Assembler::notl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD0 | encode));
}

// OR m32, imm32 (81 /1 id); rcx selects /1.
void Assembler::orl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rcx, dst, imm32);
}

// OR r32, imm32 (81 /1 id; emit_arith may shorten to 83 /1 ib).
void Assembler::orl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC8, dst, imm32);
}

// OR r32, m32 (0B /r).
void Assembler::orl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0B);
  emit_operand(dst, src);
}

// OR r32, r32 (0B /r).
void Assembler::orl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}
// PACKUSWB xmm, m128 (66 0F 67 /r); legacy SSE form needs 16-byte alignment.
void Assembler::packuswb(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  emit_simd_arith(0x67, dst, src, VEX_SIMD_66);
}

// PACKUSWB xmm, xmm (66 0F 67 /r).
void Assembler::packuswb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x67, dst, src, VEX_SIMD_66);
}

// VPACKUSWB: 256-bit integer form requires AVX2, 128-bit only AVX.
void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0x67, dst, nds, src, VEX_SIMD_66, vector256);
}
  2269 void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, bool vector256) {
  2270   assert(VM_Version::supports_avx2(), "");
  2271   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, true, vector256);
  2272   emit_int8(0x00);
  2273   emit_int8(0xC0 | encode);
  2274   emit_int8(imm8);
// PCMPESTRI xmm, m128, imm8 (66 0F 3A 61 /r ib) — packed compare, explicit lengths.
void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
  emit_int8(0x61);
  emit_operand(dst, src);
  emit_int8(imm8);
}

// PCMPESTRI xmm, xmm, imm8 (66 0F 3A 61 /r ib).
void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
  emit_int8(0x61);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
// PEXTRD r32, xmm, imm8 (66 0F 3A 16 /r ib); GPR dst is re-encoded as XMM number.
void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, false);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

// PEXTRQ r64, xmm, imm8 — same opcode with REX.W (rex_w = true).
void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, true);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

// PINSRD xmm, r32, imm8 (66 0F 3A 22 /r ib).
void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, false);
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

// PINSRQ xmm, r64, imm8 — same opcode with REX.W.
void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, true);
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
// PMOVZXBW xmm, m64 (66 0F 38 30 /r) — zero-extend 8 bytes to 8 words.
void Assembler::pmovzxbw(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x30);
  emit_operand(dst, src);
}

// PMOVZXBW xmm, xmm (66 0F 38 30 /r).
void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x30);
  emit_int8((unsigned char)(0xC0 | encode));
}
// generic
// POP r (58+rd).
void Assembler::pop(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8(0x58 | encode);
}

// POPCNT r32, m32 (F3 0F B8 /r); F3 must precede any REX prefix.
void Assembler::popcntl(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src);
}

// POPCNT r32, r32 (F3 0F B8 /r).
void Assembler::popcntl(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB8);
  emit_int8((unsigned char)(0xC0 | encode));
}

// POPF (9D) — pop flags.
void Assembler::popf() {
  emit_int8((unsigned char)0x9D);
}
#ifndef _LP64 // no 32bit push/pop on amd64
// POP m32 (8F /0); rax selects /0.
void Assembler::popl(Address dst) {
  // NOTE: this will adjust stack by 8byte on 64bits
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x8F);
  emit_operand(rax, dst);
}
#endif
// Shared prefix+escape byte for all prefetch encodings below.
void Assembler::prefetch_prefix(Address src) {
  prefix(src);
  emit_int8(0x0F);
}

// PREFETCHNTA m8 (0F 18 /0) — non-temporal hint.
void Assembler::prefetchnta(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rax, src); // 0, src
}

// PREFETCH m8 (3DNow! 0F 0D /0).
void Assembler::prefetchr(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x0D);
  emit_operand(rax, src); // 0, src
}

// PREFETCHT0 m8 (0F 18 /1) — all cache levels.
void Assembler::prefetcht0(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rcx, src); // 1, src
}

// PREFETCHT1 m8 (0F 18 /2) — L2 and up.
void Assembler::prefetcht1(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rdx, src); // 2, src
}

// PREFETCHT2 m8 (0F 18 /3) — L3 and up.
void Assembler::prefetcht2(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rbx, src); // 3, src
}

// PREFETCHW m8 (3DNow! 0F 0D /1) — prefetch with intent to write.
void Assembler::prefetchw(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x0D);
  emit_operand(rcx, src); // 1, src
}

// Emit a raw legacy prefix byte.
void Assembler::prefix(Prefix p) {
  emit_int8(p);
}
// PSHUFB xmm, xmm (66 0F 38 00 /r) — byte shuffle.
void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x00);
  emit_int8((unsigned char)(0xC0 | encode));
}

// PSHUFB xmm, m128 (66 0F 38 00 /r).
void Assembler::pshufb(XMMRegister dst, Address src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x00);
  emit_operand(dst, src);
}

// PSHUFD xmm, xmm, imm8 (66 0F 70 /r ib) — dword shuffle.
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_66);
  emit_int8(mode & 0xFF);
}

// PSHUFD xmm, m128, imm8; legacy SSE form needs 16-byte alignment.
void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66);
  emit_int8(0x70);
  emit_operand(dst, src);
  emit_int8(mode & 0xFF);
}

// PSHUFLW xmm, xmm, imm8 (F2 0F 70 /r ib) — low-word shuffle.
void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_F2);
  emit_int8(mode & 0xFF);
}

// PSHUFLW xmm, m128, imm8.
void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F2);
  emit_int8(0x70);
  emit_operand(dst, src);
  emit_int8(mode & 0xFF);
}
// PSRLDQ xmm, imm8 (66 0F 73 /3 ib); xmm3 supplies the /3 reg-field digit.
void Assembler::psrldq(XMMRegister dst, int shift) {
  // Shift 128 bit value in xmm register by number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift);
}
// PTEST xmm, m128 (66 0F 38 17 /r) — sets ZF/CF from AND/ANDN.
void Assembler::ptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x17);
  emit_operand(dst, src);
}

// PTEST xmm, xmm (66 0F 38 17 /r).
void Assembler::ptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VPTEST ymm, m256 (VEX.256.66.0F38 17 /r).
void Assembler::vptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector256);
  emit_int8(0x17);
  emit_operand(dst, src);
}

// VPTEST ymm, ymm.
void Assembler::vptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}
// PUNPCKLBW xmm, m128 (66 0F 60 /r); legacy SSE form needs 16-byte alignment.
void Assembler::punpcklbw(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
}

// PUNPCKLBW xmm, xmm (66 0F 60 /r).
void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
}

// PUNPCKLDQ xmm, m128 (66 0F 62 /r).
void Assembler::punpckldq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
}

// PUNPCKLDQ xmm, xmm (66 0F 62 /r).
void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
}

// PUNPCKLQDQ xmm, xmm (66 0F 6C /r).
void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x6C, dst, src, VEX_SIMD_66);
}
// PUSH imm32 (68 id).
void Assembler::push(int32_t imm32) {
  // in 64bits we push 64bits onto the stack but only
  // take a 32bit immediate
  emit_int8(0x68);
  emit_int32(imm32);
}

// PUSH r (50+rd).
void Assembler::push(Register src) {
  int encode = prefix_and_encode(src->encoding());

  emit_int8(0x50 | encode);
}

// PUSHF (9C) — push flags.
void Assembler::pushf() {
  emit_int8((unsigned char)0x9C);
}

#ifndef _LP64 // no 32bit push/pop on amd64
// PUSH m32 (FF /6); rsi selects /6.
void Assembler::pushl(Address src) {
  // Note this will push 64bit on 64bit
  InstructionMark im(this);
  prefix(src);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsi, src);
}
#endif
  2588 void Assembler::rcll(Register dst, int imm8) {
  2589   assert(isShiftCount(imm8), "illegal shift count");
  2590   int encode = prefix_and_encode(dst->encoding());
  2591   if (imm8 == 1) {
  2592     emit_int8((unsigned char)0xD1);
  2593     emit_int8((unsigned char)(0xD0 | encode));
  2594   } else {
  2595     emit_int8((unsigned char)0xC1);
  2596     emit_int8((unsigned char)0xD0 | encode);
  2597     emit_int8(imm8);
// copies data from [esi] to [edi] using rcx pointer sized words
// generic
void Assembler::rep_mov() {
  emit_int8((unsigned char)0xF3);
  // MOVSQ
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xA5);
}

// sets rcx bytes with rax, value at [edi]
void Assembler::rep_stosb() {
  emit_int8((unsigned char)0xF3); // REP
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xAA); // STOSB
}

// sets rcx pointer sized words with rax, value at [edi]
// generic
void Assembler::rep_stos() {
  emit_int8((unsigned char)0xF3); // REP
  LP64_ONLY(prefix(REX_W));       // LP64:STOSQ, LP32:STOSD
  emit_int8((unsigned char)0xAB);
}

// scans rcx pointer sized words at [edi] for occurance of rax,
// generic
void Assembler::repne_scan() { // repne_scan
  emit_int8((unsigned char)0xF2);
  // SCASQ
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xAF);
}

#ifdef _LP64
// scans rcx 4 byte words at [edi] for occurance of rax,
// generic
void Assembler::repne_scanl() { // repne_scan
  emit_int8((unsigned char)0xF2);
  // SCASL
  emit_int8((unsigned char)0xAF);
}
#endif
  2644 void Assembler::ret(int imm16) {
  2645   if (imm16 == 0) {
  2646     emit_int8((unsigned char)0xC3);
  2647   } else {
  2648     emit_int8((unsigned char)0xC2);
  2649     emit_int16(imm16);
// SAHF (9E) — store AH into flags; invalid in 64-bit mode on some CPUs,
// so the LP64 build traps before emitting.
void Assembler::sahf() {
#ifdef _LP64
  // Not supported in 64bit mode
  ShouldNotReachHere();
#endif
  emit_int8((unsigned char)0x9E);
}
// SAR r32, imm8 — arithmetic right shift; D1 /7 short form for count 1,
// otherwise C1 /7 ib.
void Assembler::sarl(Register dst, int imm8) {
  int encode = prefix_and_encode(dst->encoding());
  assert(isShiftCount(imm8), "illegal shift count");
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xF8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xF8 | encode));
    emit_int8(imm8);
  }
}

// SAR r32, CL (D3 /7).
void Assembler::sarl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xF8 | encode));
}
// SBB m32, imm32 (81 /3 id); rbx selects /3.
void Assembler::sbbl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

// SBB r32, imm32 (81 /3 id; emit_arith may shorten to 83 /3 ib).
void Assembler::sbbl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD8, dst, imm32);
}

// SBB r32, m32 (1B /r).
void Assembler::sbbl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src);
}

// SBB r32, r32 (1B /r).
void Assembler::sbbl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}
  2704 void Assembler::setb(Condition cc, Register dst) {
  2705   assert(0 <= cc && cc < 16, "illegal cc");
  2706   int encode = prefix_and_encode(dst->encoding(), true);
  2707   emit_int8(0x0F);
  2708   emit_int8((unsigned char)0x90 | cc);
  2709   emit_int8((unsigned char)(0xC0 | encode));
// SHL dst, imm8: left shift of a 32-bit register by an immediate count.
// Uses the shorter D1 /4 form when the count is 1, otherwise C1 /4 ib.
void Assembler::shll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1 ) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xE0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xE0 | encode));
    emit_int8(imm8);
  }
}
// SHL dst, CL: left shift by the count in CL (D3 /4).
void Assembler::shll(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));
}
  2731 void Assembler::shrl(Register dst, int imm8) {
  2732   assert(isShiftCount(imm8), "illegal shift count");
  2733   int encode = prefix_and_encode(dst->encoding());
  2734   emit_int8((unsigned char)0xC1);
  2735   emit_int8((unsigned char)(0xE8 | encode));
  2736   emit_int8(imm8);
// SHR dst, CL: logical right shift by the count in CL (D3 /5).
void Assembler::shrl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE8 | encode));
}
// copies a single word from [esi] to [edi]
// MOVS (A5): string move, direction controlled by DF.
void Assembler::smovl() {
  emit_int8((unsigned char)0xA5);
}
// Scalar square root, double (F2 0F 51) and single (F3 0F 51) precision,
// plus STD.  On 32-bit the SSE level is asserted; 64-bit implies SSE2.

void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F2);
}

void Assembler::sqrtsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F2);
}

void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
}

// STD (FD): set the direction flag so string ops run backwards.
void Assembler::std() {
  emit_int8((unsigned char)0xFD);
}

void Assembler::sqrtss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
}
// STMXCSR [dst]: store the MXCSR control/status register
// (0F AE /3 -- the /3 extension is encoded via as_Register(3)).
void Assembler::stmxcsr( Address dst) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(3), dst);
}
// subl: 32-bit SUB in all operand combinations the VM uses.

// SUB [dst], imm32 -- rbp supplies the /5 opcode extension.
void Assembler::subl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}

// SUB [dst], src (29 /r).
void Assembler::subl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst);
}

// SUB dst, imm32 -- emit_arith may pick the short imm8 form.
void Assembler::subl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
// (used when the immediate will be patched later).
void Assembler::subl_imm32(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}

// SUB dst, [src] (2B /r).
void Assembler::subl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src);
}

// SUB dst, src (register-register form of 2B /r).
void Assembler::subl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}
// Scalar FP subtract: SUBSD (F2 0F 5C) and SUBSS (F3 0F 5C).

void Assembler::subsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2);
}

void Assembler::subsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2);
}

void Assembler::subss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3);
}

void Assembler::subss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3);
}
// TEST dst8, imm8 (F6 /0): byte form; only registers with a byte
// encoding are legal on 32-bit.
void Assembler::testb(Register dst, int imm8) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  (void) prefix_and_encode(dst->encoding(), true);
  emit_arith_b(0xF6, 0xC0, dst, imm8);
}

// TEST dst, imm32.  Uses the one-byte-shorter A9 form when dst is
// rax/eax (encoding 0), otherwise F7 /0.
void Assembler::testl(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    emit_int8((unsigned char)0xA9);
  } else {
    encode = prefix_and_encode(encode);
    emit_int8((unsigned char)0xF7);
    emit_int8((unsigned char)(0xC0 | encode));
  }
  emit_int32(imm32);
}
// TEST dst, src (85 /r, register-register).
void Assembler::testl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

// TEST dst, [src] (85 /r, register-memory).
void Assembler::testl(Register dst, Address  src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x85);
  emit_operand(dst, src);
}
// Unordered scalar FP compare setting EFLAGS:
// UCOMISD (66 0F 2E) and UCOMISS (0F 2E).

void Assembler::ucomisd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
}

void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
}

void Assembler::ucomiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE);
}

void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE);
}
// XADD [dst], src (0F C1 /r): exchange-and-add; callers add the LOCK
// prefix separately when atomicity is required.
void Assembler::xaddl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}

void Assembler::xchgl(Register dst, Address src) { // xchg
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}

// XCHG dst, src (87 /r, register-register).
void Assembler::xchgl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x87);
  emit_int8((unsigned char)(0xC0 | encode));
}

// XGETBV (0F 01 D0): read the XCR register selected by ECX into EDX:EAX.
void Assembler::xgetbv() {
  emit_int8(0x0F);
  emit_int8(0x01);
  emit_int8((unsigned char)0xD0);
}
// xorl: 32-bit XOR.

// XOR dst, imm32 -- rsi-class /6 extension is implied by the 0xF0 reg field.
void Assembler::xorl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF0, dst, imm32);
}

// XOR dst, [src] (33 /r).
void Assembler::xorl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src);
}

// XOR dst, src (register-register form of 33 /r).
void Assembler::xorl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}
// AVX 3-operands scalar float-point arithmetic instructions
//
// All of these are VEX-encoded, non-destructive (dst = nds op src)
// scalar ops: add 0x58, mul 0x59, sub 0x5C, div 0x5E, with the F2
// prefix selecting double precision and F3 single precision.  The
// scalar forms always use 128-bit vector length (vector256 = false).

void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}

void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}

void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}
//====================VECTOR ARITHMETIC=====================================

// Float-point vector arithmetic
// Packed add (0x58) and sub (0x5C): 66 prefix selects packed double,
// no prefix packed single.  The v-prefixed forms are VEX-encoded and
// take an explicit 128/256-bit vector-length flag.
// NOTE(review): addps/subps assert supports_sse2() while andps/xorps
// below assert supports_sse() -- presumably conservative; confirm
// whether SSE1 alone was intended here.

void Assembler::addpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_66);
}

void Assembler::addps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_NONE);
}

void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::subpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_66);
}

void Assembler::subps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_NONE);
}

void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector256);
}
// Packed FP multiply (0x59) and divide (0x5E); 66 prefix = packed
// double, no prefix = packed single.  VEX forms carry the 128/256-bit
// vector-length flag.

void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_66);
}

void Assembler::mulps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_NONE);
}

void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::divpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_66);
}

void Assembler::divps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_NONE);
}

void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector256);
}
// Packed FP bitwise AND (0x54) and XOR (0x57); andps/xorps only need
// SSE1 on 32-bit, the pd forms need SSE2.

void Assembler::andpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_66);
}

void Assembler::andps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE);
}

void Assembler::andps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE);
}

void Assembler::andpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_66);
}

void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_66);
}

void Assembler::xorps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE);
}

void Assembler::xorpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_66);
}

void Assembler::xorps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE);
}

void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector256);
}

void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector256);
}
// Integer vector arithmetic
// Packed integer add: byte 0xFC, word 0xFD, dword 0xFE, qword 0xD4.
// The VEX asserts parse as (supports_avx() && !vector256) ||
// supports_avx2() by && / || precedence: 128-bit needs AVX, 256-bit
// integer ops need AVX2.
void Assembler::paddb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFC, dst, src, VEX_SIMD_66);
}

void Assembler::paddw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFD, dst, src, VEX_SIMD_66);
}

void Assembler::paddd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFE, dst, src, VEX_SIMD_66);
}

void Assembler::paddq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD4, dst, src, VEX_SIMD_66);
}

void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector256);
}
// Packed integer subtract: byte 0xF8, word 0xF9, dword 0xFA, qword 0xFB.

void Assembler::psubb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF8, dst, src, VEX_SIMD_66);
}

void Assembler::psubw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF9, dst, src, VEX_SIMD_66);
}

void Assembler::psubd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFA, dst, src, VEX_SIMD_66);
}

void Assembler::psubq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFB, dst, src, VEX_SIMD_66);
}

void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector256);
}
// Packed integer multiply.  pmullw is 0xD5 in the 0F map; pmulld is the
// SSE4.1 three-byte-opcode form 66 0F 38 40, hence the explicit
// VEX_OPCODE_0F_38 prefix-and-encode instead of emit_simd_arith.

void Assembler::pmullw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD5, dst, src, VEX_SIMD_66);
}

void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x40);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
  emit_int8(0x40);
  emit_int8((unsigned char)(0xC0 | encode));
}

void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector256);
}

// Memory-operand vpmulld: hand-rolled VEX prefix because the 0F 38 map
// is not handled by emit_vex_arith.
void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  InstructionMark im(this);
  int dst_enc = dst->encoding();
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector256);
  emit_int8(0x40);
  emit_operand(dst, src);
}
// Shift packed integers left by specified number of bits.
// Immediate forms use group opcodes 71/72/73 with the /6 extension
// (encoded by passing xmm6 as the reg field); register-count forms use
// 0xF1/0xF2/0xF3 with the shift count in an XMM register.
void Assembler::psllw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM6 is for /6 encoding: 66 0F 71 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::pslld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM6 is for /6 encoding: 66 0F 72 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psllq(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM6 is for /6 encoding: 66 0F 73 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF1, dst, shift, VEX_SIMD_66);
}

void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF2, dst, shift, VEX_SIMD_66);
}

void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF3, dst, shift, VEX_SIMD_66);
}

void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM6 is for /6 encoding: 66 0F 71 /6 ib
  emit_vex_arith(0x71, xmm6, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM6 is for /6 encoding: 66 0F 72 /6 ib
  emit_vex_arith(0x72, xmm6, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM6 is for /6 encoding: 66 0F 73 /6 ib
  emit_vex_arith(0x73, xmm6, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF1, dst, src, shift, VEX_SIMD_66, vector256);
}

void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF2, dst, src, shift, VEX_SIMD_66, vector256);
}

void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF3, dst, src, shift, VEX_SIMD_66, vector256);
}
  3464 // Shift packed integers logically right by specified number of bits.
  3465 void Assembler::psrlw(XMMRegister dst, int shift) {
  3466   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  3467   // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  3468   int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
  3469   emit_int8(0x71);
  3470   emit_int8((unsigned char)(0xC0 | encode));
  3471   emit_int8(shift & 0xFF);
  3474 void Assembler::psrld(XMMRegister dst, int shift) {
  3475   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  3476   // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  3477   int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
  3478   emit_int8(0x72);
  3479   emit_int8((unsigned char)(0xC0 | encode));
  3480   emit_int8(shift & 0xFF);
  3483 void Assembler::psrlq(XMMRegister dst, int shift) {
  3484   // Do not confuse it with psrldq SSE2 instruction which
  3485   // shifts 128 bit value in xmm register by number of bytes.
  3486   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  3487   // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  3488   int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
  3489   emit_int8(0x73);
  3490   emit_int8((unsigned char)(0xC0 | encode));
  3491   emit_int8(shift & 0xFF);
// Logical right shift of packed words by the count in an XMM register (PSRLW, 66 0F D1).
void Assembler::psrlw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD1, dst, shift, VEX_SIMD_66);
}

// Logical right shift of packed dwords by the count in an XMM register (PSRLD, 66 0F D2).
void Assembler::psrld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD2, dst, shift, VEX_SIMD_66);
}

// Logical right shift of packed qwords by the count in an XMM register (PSRLQ, 66 0F D3).
void Assembler::psrlq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD3, dst, shift, VEX_SIMD_66);
}
// AVX logical right shift by immediate. As in the SSE forms, the dummy xmm2
// operand supplies the /2 opcode extension in the ModRM byte.
void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  emit_vex_arith(0x71, xmm2, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  emit_vex_arith(0x72, xmm2, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  emit_vex_arith(0x73, xmm2, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}
// AVX logical right shift of packed words by the count in an XMM register (VPSRLW, 66 0F D1).
void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD1, dst, src, shift, VEX_SIMD_66, vector256);
}

// AVX logical right shift of packed dwords (VPSRLD, 66 0F D2).
void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD2, dst, src, shift, VEX_SIMD_66, vector256);
}

// AVX logical right shift of packed qwords (VPSRLQ, 66 0F D3).
void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD3, dst, src, shift, VEX_SIMD_66, vector256);
}
// Shift packed integers arithmetically right by specified number of bits.
// The dummy xmm4 operand supplies the /4 opcode extension in the ModRM byte.
// Note: there is no arithmetic shift for 64-bit elements before AVX-512.
void Assembler::psraw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM4 is for /4 encoding: 66 0F 71 /4 ib
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}

void Assembler::psrad(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
// Arithmetic right shift of packed words by the count in an XMM register (PSRAW, 66 0F E1).
void Assembler::psraw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xE1, dst, shift, VEX_SIMD_66);
}

// Arithmetic right shift of packed dwords by the count in an XMM register (PSRAD, 66 0F E2).
void Assembler::psrad(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xE2, dst, shift, VEX_SIMD_66);
}
// AVX arithmetic right shift by immediate; dummy xmm4 carries the /4 extension.
void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM4 is for /4 encoding: 66 0F 71 /4 ib
  emit_vex_arith(0x71, xmm4, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}

void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
  emit_vex_arith(0x72, xmm4, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}
// AVX arithmetic right shift of packed words by the count in an XMM register (VPSRAW, 66 0F E1).
void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xE1, dst, src, shift, VEX_SIMD_66, vector256);
}

// AVX arithmetic right shift of packed dwords (VPSRAD, 66 0F E2).
void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xE2, dst, src, shift, VEX_SIMD_66, vector256);
}
// AND packed integers
void Assembler::pand(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xDB, dst, src, VEX_SIMD_66);
}

// Three-operand AVX AND: dst = nds & src.
void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector256);
}

// Memory-operand form: dst = nds & [src].
void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector256);
}

// OR packed integers (POR, 66 0F EB).
void Assembler::por(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xEB, dst, src, VEX_SIMD_66);
}

void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector256);
}

// XOR packed integers (PXOR, 66 0F EF).
void Assembler::pxor(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xEF, dst, src, VEX_SIMD_66);
}

void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector256);
}

void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector256);
}
// Insert a 128-bit float vector into the upper half of a 256-bit register
// (VINSERTF128, VEX.256.66.0F3A 18 /r ib, imm8 selects the half).
void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
  emit_int8(0x18);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}

// Memory-source form of the above; loads 128 bits from src into dst's upper half.
void Assembler::vinsertf128h(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x18);
  emit_operand(dst, src);
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}

// Store the upper 128 bits of src to memory (VEXTRACTF128, 0F3A 19 /r ib).
void Assembler::vextractf128h(Address dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(src != xnoreg, "sanity");
  int src_enc = src->encoding();
  vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x19);
  emit_operand(src, dst);
  // 0x01 - extract from upper 128 bits
  emit_int8(0x01);
}
// Integer (AVX2) counterparts of vinsertf128h/vextractf128h:
// VINSERTI128 (0F3A 38 /r ib) and VEXTRACTI128 (0F3A 39 /r ib).
void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
  emit_int8(0x38);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}

// Memory-source form; loads 128 bits from src into dst's upper half.
void Assembler::vinserti128h(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx2(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  // swap src<->dst for encoding
  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x38);
  emit_operand(dst, src);
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}

// Store the upper 128 bits of src to memory.
void Assembler::vextracti128h(Address dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(src != xnoreg, "sanity");
  int src_enc = src->encoding();
  vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x39);
  emit_operand(src, dst);
  // 0x01 - extract from upper 128 bits
  emit_int8(0x01);
}
// duplicate 4-bytes integer data from src into 8 locations in dest
// (VPBROADCASTD, VEX.256.66.0F38 58 /r; requires AVX2).
void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}
// Carry-Less Multiplication Quadword (VPCLMULQDQ, VEX.128.66.0F3A 44 /r ib).
// The imm8 'mask' selects which 64-bit halves of nds/src are multiplied.
// Used by the CRC32 intrinsic (7088419).
void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
  assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
  bool vector256 = false;
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
  emit_int8(0x44);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)mask);
}
// Zero the upper 128 bits of all YMM registers (VZEROUPPER, VEX.128.0F 77);
// avoids AVX<->SSE transition penalties.
void Assembler::vzeroupper() {
  assert(VM_Version::supports_avx(), "");
  (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
  emit_int8(0x77);
}
#ifndef _LP64
// 32bit only pieces of the assembler

// CMP r32, imm32 with relocation info for the immediate (81 /7 id).
void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT
  InstructionMark im(this);
  emit_int8((unsigned char)0x81);
  // 0xF8 == ModRM(mod=11, reg=/7 CMP, rm=src1)
  emit_int8((unsigned char)(0xF8 | src1->encoding()));
  emit_data(imm32, rspec, 0);
}

// CMP m32, imm32 with relocation info for the immediate.
void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs)
  InstructionMark im(this);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, src1);  // rdi encodes the /7 (CMP) opcode extension
  emit_data(imm32, rspec, 0);
}

// The 64-bit (32bit platform) cmpxchg compares the value at adr with the contents of rdx:rax,
// and stores rcx:rbx into adr if so; otherwise, the value at adr is loaded
// into rdx:rax.  The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchg8(Address adr) {
  InstructionMark im(this);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC7);
  emit_operand(rcx, adr);  // rcx encodes the /1 (CMPXCHG8B) opcode extension
}

// One-byte DEC r32 (0x48+rd), available only in 32-bit mode.
void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
 emit_int8(0x48 | dst->encoding());
}

#endif // _LP64
// 64bit typically doesn't use the x87 but needs to for the trig funcs

// x87 FABS: D9 E1.
void Assembler::fabs() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE1);
}

// FADD ST(0), ST(i).
void Assembler::fadd(int i) {
  emit_farith(0xD8, 0xC0, i);
}

// FADD m64 (DC /0; rax supplies the /0 extension).
void Assembler::fadd_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rax, src);
}

// FADD m32 (D8 /0).
void Assembler::fadd_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rax, src);
}

// FADD ST(i), ST(0).
void Assembler::fadda(int i) {
  emit_farith(0xDC, 0xC0, i);
}

// FADDP ST(i), ST(0) and pop.
void Assembler::faddp(int i) {
  emit_farith(0xDE, 0xC0, i);
}

// x87 FCHS (negate ST(0)): D9 E0.
void Assembler::fchs() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE0);
}

// FCOM ST(i).
void Assembler::fcom(int i) {
  emit_farith(0xD8, 0xD0, i);
}

// FCOMP ST(i) (compare and pop).
void Assembler::fcomp(int i) {
  emit_farith(0xD8, 0xD8, i);
}

// FCOMP m64 (DC /3; rbx supplies the /3 extension).
void Assembler::fcomp_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbx, src);
}

// FCOMP m32 (D8 /3).
void Assembler::fcomp_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbx, src);
}

// FCOMPP (compare ST(0) with ST(1), pop both): DE D9.
void Assembler::fcompp() {
  emit_int8((unsigned char)0xDE);
  emit_int8((unsigned char)0xD9);
}

// FCOS: D9 FF.
void Assembler::fcos() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFF);
}

// FDECSTP (decrement stack-top pointer): D9 F6.
void Assembler::fdecstp() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF6);
}

// FDIV ST(0), ST(i).
void Assembler::fdiv(int i) {
  emit_farith(0xD8, 0xF0, i);
}

// FDIV m64 (DC /6; rsi supplies the /6 extension).
void Assembler::fdiv_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsi, src);
}

// FDIV m32 (D8 /6).
void Assembler::fdiv_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsi, src);
}

// FDIV ST(i), ST(0).
void Assembler::fdiva(int i) {
  emit_farith(0xDC, 0xF8, i);
}

// Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
//       is erroneous for some of the floating-point instructions below.

void Assembler::fdivp(int i) {
  emit_farith(0xDE, 0xF8, i);                    // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
}

// FDIVR ST(0), ST(i) (reversed-operand divide).
void Assembler::fdivr(int i) {
  emit_farith(0xD8, 0xF8, i);
}

// FDIVR m64 (DC /7; rdi supplies the /7 extension).
void Assembler::fdivr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rdi, src);
}

// FDIVR m32 (D8 /7).
void Assembler::fdivr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rdi, src);
}

// FDIVR ST(i), ST(0).
void Assembler::fdivra(int i) {
  emit_farith(0xDC, 0xF0, i);
}

void Assembler::fdivrp(int i) {
  emit_farith(0xDE, 0xF0, i);                    // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
}

// FFREE ST(i) (mark register empty).
void Assembler::ffree(int i) {
  emit_farith(0xDD, 0xC0, i);
}

// FILD m64int (DF /5; rbp supplies the /5 extension).
void Assembler::fild_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rbp, adr);
}

// FILD m32int (DB /0).
void Assembler::fild_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rax, adr);
}

// FINCSTP (increment stack-top pointer): D9 F7.
void Assembler::fincstp() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF7);
}

// FINIT = FWAIT (9B) + FNINIT (DB E3): initialize the FPU, waiting for
// pending unmasked exceptions first.
void Assembler::finit() {
  emit_int8((unsigned char)0x9B);
  emit_int8((unsigned char)0xDB);
  emit_int8((unsigned char)0xE3);
}
// FIST m32int (DB /2; rdx supplies the /2 extension).
void Assembler::fist_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdx, adr);
}

// FISTP m64int (DF /7).
void Assembler::fistp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rdi, adr);
}

// FISTP m32int (DB /3).
void Assembler::fistp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbx, adr);
}

// FLD1 (push +1.0): D9 E8.
void Assembler::fld1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE8);
}

// FLD m64fp (DD /0).
void Assembler::fld_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rax, adr);
}

// FLD m32fp (D9 /0).
void Assembler::fld_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rax, adr);
}

// FLD ST(index) (push a copy of a stack register).
void Assembler::fld_s(int index) {
  emit_farith(0xD9, 0xC0, index);
}

// FLD m80fp (extended precision; DB /5).
void Assembler::fld_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbp, adr);
}

// FLDCW m16 (load control word; D9 /5).
void Assembler::fldcw(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbp, src);
}

// FLDENV m (load FPU environment; D9 /4; rsp supplies the /4 extension).
void Assembler::fldenv(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rsp, src);
}

// FLDLG2 (push log10(2)): D9 EC.
void Assembler::fldlg2() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEC);
}

// FLDLN2 (push ln(2)): D9 ED.
void Assembler::fldln2() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xED);
}

// FLDZ (push +0.0): D9 EE.
void Assembler::fldz() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEE);
}

// Natural log of ST(0) via the identity ln(x) = ln(2) * log2(x),
// computed with FYL2X after pushing ln(2) and swapping.
void Assembler::flog() {
  fldln2();
  fxch();
  fyl2x();
}

// Base-10 log of ST(0): log10(x) = log10(2) * log2(x).
void Assembler::flog10() {
  fldlg2();
  fxch();
  fyl2x();
}
// FMUL ST(0), ST(i).
void Assembler::fmul(int i) {
  emit_farith(0xD8, 0xC8, i);
}

// FMUL m64 (DC /1; rcx supplies the /1 extension).
void Assembler::fmul_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rcx, src);
}

// FMUL m32 (D8 /1).
void Assembler::fmul_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rcx, src);
}

// FMUL ST(i), ST(0).
void Assembler::fmula(int i) {
  emit_farith(0xDC, 0xC8, i);
}

// FMULP ST(i), ST(0) and pop.
void Assembler::fmulp(int i) {
  emit_farith(0xDE, 0xC8, i);
}

// FNSAVE m (save FPU state without waiting; DD /6).
void Assembler::fnsave(Address dst) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsi, dst);
}

// FSTCW m16 = FWAIT (9B) + FNSTCW (D9 /7): store control word.
void Assembler::fnstcw(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0x9B);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rdi, src);
}

// FNSTSW AX (store status word into AX): DF E0.
void Assembler::fnstsw_ax() {
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)0xE0);
}

// FPREM (partial remainder): D9 F8.
void Assembler::fprem() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF8);
}

// FPREM1 (IEEE partial remainder): D9 F5.
void Assembler::fprem1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF5);
}

// FRSTOR m (restore FPU state; DD /4).
void Assembler::frstor(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsp, src);
}

// FSIN: D9 FE.
void Assembler::fsin() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFE);
}

// FSQRT: D9 FA.
void Assembler::fsqrt() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFA);
}
// FST m64fp (DD /2; rdx supplies the /2 extension).
void Assembler::fst_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rdx, adr);
}

// FST m32fp (D9 /2).
void Assembler::fst_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rdx, adr);
}

// FSTP m64fp (store and pop; DD /3).
void Assembler::fstp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rbx, adr);
}

// FSTP ST(index).
void Assembler::fstp_d(int index) {
  emit_farith(0xDD, 0xD8, index);
}

// FSTP m32fp (D9 /3).
void Assembler::fstp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbx, adr);
}

// FSTP m80fp (extended precision; DB /7).
void Assembler::fstp_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdi, adr);
}

// FSUB ST(0), ST(i).
void Assembler::fsub(int i) {
  emit_farith(0xD8, 0xE0, i);
}

// FSUB m64 (DC /4; rsp supplies the /4 extension).
void Assembler::fsub_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsp, src);
}

// FSUB m32 (D8 /4).
void Assembler::fsub_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsp, src);
}

// FSUB ST(i), ST(0).
void Assembler::fsuba(int i) {
  emit_farith(0xDC, 0xE8, i);
}

void Assembler::fsubp(int i) {
  emit_farith(0xDE, 0xE8, i);                    // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
}

// FSUBR ST(0), ST(i) (reversed-operand subtract).
void Assembler::fsubr(int i) {
  emit_farith(0xD8, 0xE8, i);
}

// FSUBR m64 (DC /5).
void Assembler::fsubr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbp, src);
}

// FSUBR m32 (D8 /5).
void Assembler::fsubr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbp, src);
}

// FSUBR ST(i), ST(0).
void Assembler::fsubra(int i) {
  emit_farith(0xDC, 0xE0, i);
}

void Assembler::fsubrp(int i) {
  emit_farith(0xDE, 0xE0, i);                    // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
}
// tan(ST(0)): FPTAN (D9 F2) pushes tan and then 1.0; the trailing
// FSTP ST(0) (DD D8) pops the 1.0, leaving only the result.
void Assembler::ftan() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)0xDD);
  emit_int8((unsigned char)0xD8);
}

// FTST (compare ST(0) against 0.0): D9 E4.
void Assembler::ftst() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE4);
}

// FUCOMI ST(0), ST(i) (unordered compare, sets EFLAGS).
void Assembler::fucomi(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDB, 0xE8, i);
}

// FUCOMIP ST(0), ST(i) (as above, then pop).
void Assembler::fucomip(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDF, 0xE8, i);
}

// FWAIT: 9B.
void Assembler::fwait() {
  emit_int8((unsigned char)0x9B);
}

// FXCH ST(i) (exchange with ST(0)).
void Assembler::fxch(int i) {
  emit_farith(0xD9, 0xC8, i);
}

// FYL2X (ST(1) * log2(ST(0)), pop): D9 F1.
void Assembler::fyl2x() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF1);
}

// FRNDINT (round ST(0) to integer): D9 FC.
void Assembler::frndint() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFC);
}

// F2XM1 (2^ST(0) - 1): D9 F0.
void Assembler::f2xm1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF0);
}

// FLDL2E (push log2(e)): D9 EA.
void Assembler::fldl2e() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEA);
}
// SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding.
static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 };
// SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding.
static int simd_opc[4] = { 0,    0, 0x38, 0x3A };
// Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding.
// Emission order matters: mandatory SIMD prefix, then REX, then 0F [38|3A].
void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  if (rex_w) {
    prefixq(adr, xreg);   // REX.W form
  } else {
    prefix(adr, xreg);
  }
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);    // two-byte escape: 0F 38 or 0F 3A
    }
  }
}

// Register-register variant; returns the ModRM reg/rm encoding for the caller.
int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) :
                          prefix_and_encode(dst_enc, src_enc);
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);
    }
  }
  return encode;
}
// Emit a VEX prefix. The 3-byte form (C4) is required whenever B, X or W must
// be set, or the opcode map is 0F 38 / 0F 3A; otherwise the shorter 2-byte
// form (C5) is used. Note R/X/B and nds are stored inverted in the prefix.
void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, int nds_enc, VexSimdPrefix pre, VexOpcode opc, bool vector256) {
  if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) {
    prefix(VEX_3bytes);

    // byte1: ~R ~X ~B | opcode map selector
    int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0);
    byte1 = (~byte1) & 0xE0;
    byte1 |= opc;
    emit_int8(byte1);

    // byte2: W | ~nds (vvvv) | L (256-bit) | mandatory prefix
    int byte2 = ((~nds_enc) & 0xf) << 3;
    byte2 |= (vex_w ? VEX_W : 0) | (vector256 ? 4 : 0) | pre;
    emit_int8(byte2);
  } else {
    prefix(VEX_2bytes);

    // single payload byte: ~R | ~nds (vvvv) | L | mandatory prefix
    int byte1 = vex_r ? VEX_R : 0;
    byte1 = (~byte1) & 0x80;
    byte1 |= ((~nds_enc) & 0xf) << 3;
    byte1 |= (vector256 ? 4 : 0) | pre;
    emit_int8(byte1);
  }
}

// Memory-operand convenience wrapper: derives R/X/B from the register
// encoding and the address's base/index registers.
void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, bool vex_w, bool vector256){
  bool vex_r = (xreg_enc >= 8);
  bool vex_b = adr.base_needs_rex();
  bool vex_x = adr.index_needs_rex();
  vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector256);
}

// Register-register wrapper; returns the ModRM reg/rm encoding (low 3 bits of
// each operand — the high bits went into the VEX prefix).
int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool vex_w, bool vector256) {
  bool vex_r = (dst_enc >= 8);
  bool vex_b = (src_enc >= 8);
  bool vex_x = false;
  vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector256);
  return (((dst_enc & 7) << 3) | (src_enc & 7));
}
// Emit either a VEX prefix (when AVX is in use) or a legacy REX/SIMD prefix.
// In the legacy path there is no nds operand, so nds must alias xreg or be absent.
void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre, VexOpcode opc, bool rex_w, bool vector256) {
  if (UseAVX > 0) {
    int xreg_enc = xreg->encoding();
    int  nds_enc = nds->is_valid() ? nds->encoding() : 0;
    vex_prefix(adr, nds_enc, xreg_enc, pre, opc, rex_w, vector256);
  } else {
    assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding");
    rex_prefix(adr, xreg, pre, opc, rex_w);
  }
}

// Register-register variant; returns the ModRM reg/rm encoding.
int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre, VexOpcode opc, bool rex_w, bool vector256) {
  int dst_enc = dst->encoding();
  int src_enc = src->encoding();
  if (UseAVX > 0) {
    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
    return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, rex_w, vector256);
  } else {
    assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding");
    return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, rex_w);
  }
}
// Two-operand SSE-style arithmetic, memory source: prefix + opcode + ModRM.
// dst doubles as the nds operand (destructive-source encoding).
void Assembler::emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre) {
  InstructionMark im(this);
  simd_prefix(dst, dst, src, pre);
  emit_int8(opcode);
  emit_operand(dst, src);
}

// Two-operand SSE-style arithmetic, register source.
void Assembler::emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre) {
  int encode = simd_prefix_and_encode(dst, dst, src, pre);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Versions with no second source register (non-destructive source).
void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre) {
  InstructionMark im(this);
  simd_prefix(dst, xnoreg, src, pre);
  emit_int8(opcode);
  emit_operand(dst, src);
}

void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre) {
  int encode = simd_prefix_and_encode(dst, xnoreg, src, pre);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}

// 3-operands AVX instructions
void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
                               Address src, VexSimdPrefix pre, bool vector256) {
  InstructionMark im(this);
  vex_prefix(dst, nds, src, pre, vector256);
  emit_int8(opcode);
  emit_operand(dst, src);
}

void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
                               XMMRegister src, VexSimdPrefix pre, bool vector256) {
  int encode = vex_prefix_and_encode(dst, nds, src, pre, vector256);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}
  4366 #ifndef _LP64
  4368 void Assembler::incl(Register dst) {
  4369   // Don't use it directly. Use MacroAssembler::incrementl() instead.
  4370   emit_int8(0x40 | dst->encoding());
  4373 void Assembler::lea(Register dst, Address src) {
  4374   leal(dst, src);
  4377 void Assembler::mov_literal32(Address dst, int32_t imm32,  RelocationHolder const& rspec) {
  4378   InstructionMark im(this);
  4379   emit_int8((unsigned char)0xC7);
  4380   emit_operand(rax, dst);
  4381   emit_data((int)imm32, rspec, 0);
  4384 void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  4385   InstructionMark im(this);
  4386   int encode = prefix_and_encode(dst->encoding());
  4387   emit_int8((unsigned char)(0xB8 | encode));
  4388   emit_data((int)imm32, rspec, 0);
  4391 void Assembler::popa() { // 32bit
  4392   emit_int8(0x61);
  4395 void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
  4396   InstructionMark im(this);
  4397   emit_int8(0x68);
  4398   emit_data(imm32, rspec, 0);
  4401 void Assembler::pusha() { // 32bit
  4402   emit_int8(0x60);
  4405 void Assembler::set_byte_if_not_zero(Register dst) {
  4406   emit_int8(0x0F);
  4407   emit_int8((unsigned char)0x95);
  4408   emit_int8((unsigned char)(0xE0 | dst->encoding()));
  4411 void Assembler::shldl(Register dst, Register src) {
  4412   emit_int8(0x0F);
  4413   emit_int8((unsigned char)0xA5);
  4414   emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
  4417 void Assembler::shrdl(Register dst, Register src) {
  4418   emit_int8(0x0F);
  4419   emit_int8((unsigned char)0xAD);
  4420   emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
#else // LP64

// SETNE dst_byte (0F 95); the REX prefix (prefix_and_encode with byteinst=true)
// makes SPL/BPL/SIL/DIL addressable as byte registers on 64-bit.
void Assembler::set_byte_if_not_zero(Register dst) {
  int enc = prefix_and_encode(dst->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | enc));
}
// 64bit only pieces of the assembler
// This should only be used by 64bit instructions that can use rip-relative
// addressing; it cannot be used by instructions that want an immediate value.
// Returns true if 'adr' can always be addressed rip-relatively (i.e. with a
// signed 32-bit displacement) no matter where in the code cache this code
// finally lands; false forces callers to materialize a 64-bit literal.
bool Assembler::reachable(AddressLiteral adr) {
  int64_t disp;
  // None will force a 64bit literal to the code stream. Likely a placeholder
  // for something that will be patched later and we need to be certain it
  // will always be reachable.
  if (adr.reloc() == relocInfo::none) {
    return false;
  }
  if (adr.reloc() == relocInfo::internal_word_type) {
    // This should be rip relative and easily reachable.
    return true;
  }
  if (adr.reloc() == relocInfo::virtual_call_type ||
      adr.reloc() == relocInfo::opt_virtual_call_type ||
      adr.reloc() == relocInfo::static_call_type ||
      adr.reloc() == relocInfo::static_stub_type ) {
    // This should be rip relative within the code cache and easily
    // reachable until we get huge code caches. (At which point
    // ic code is going to have issues).
    return true;
  }
  if (adr.reloc() != relocInfo::external_word_type &&
      adr.reloc() != relocInfo::poll_return_type &&  // these are really external_word but need special
      adr.reloc() != relocInfo::poll_type &&         // relocs to identify them
      adr.reloc() != relocInfo::runtime_call_type ) {
    return false;
  }

  // Stress the correction code
  if (ForceUnreachable) {
    // Must be runtimecall reloc, see if it is in the codecache
    // Flipping stuff in the codecache to be unreachable causes issues
    // with things like inline caches where the additional instructions
    // are not handled.
    if (CodeCache::find_blob(adr._target) == NULL) {
      return false;
    }
  }
  // For external_word_type/runtime_call_type if it is reachable from where we
  // are now (possibly a temp buffer) and where we might end up
  // anywhere in the codeCache then we are always reachable.
  // This would have to change if we ever save/restore shared code
  // to be more pessimistic.
  // Check against both ends of the code cache; sizeof(int) accounts for the
  // 4-byte displacement field itself.
  disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;
  disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;

  disp = (int64_t)adr._target - ((int64_t)pc() + sizeof(int));

  // Because rip relative is a disp + address_of_next_instruction and we
  // don't know the value of address_of_next_instruction we apply a fudge factor
  // to make sure we will be ok no matter the size of the instruction we get placed into.
  // We don't have to fudge the checks above here because they are already worst case.

  // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp , 4-byte literal
  // + 4 because better safe than sorry.
  const int fudge = 12 + 4;
  if (disp < 0) {
    disp -= fudge;
  } else {
    disp += fudge;
  }
  return is_simm32(disp);
}
  4502 // Check if the polling page is not reachable from the code cache using rip-relative
  4503 // addressing.
// Returns true if the safepoint polling page cannot be reached with a signed
// 32-bit displacement from every possible location in the code cache (or if
// ForceUnreachable is set to stress the far-polling code paths).
bool Assembler::is_polling_page_far() {
  intptr_t addr = (intptr_t)os::get_polling_page();
  return ForceUnreachable ||
         !is_simm32(addr - (intptr_t)CodeCache::low_bound()) ||
         !is_simm32(addr - (intptr_t)CodeCache::high_bound());
}
// Emit a 64-bit data word, wrapping a bare relocType into a RelocationHolder
// when relocation is required.
void Assembler::emit_data64(jlong data,
                            relocInfo::relocType rtype,
                            int format) {
  if (rtype == relocInfo::none) {
    emit_int64(data);
  } else {
    emit_data64(data, Relocation::spec_simple(rtype), format);
  }
}

// Emit a 64-bit data word with relocation info.  The relocation is attached
// to the enclosing instruction (inst_mark), not to the data word itself.
void Assembler::emit_data64(jlong data,
                            RelocationHolder const& rspec,
                            int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(imm_operand == format, "must be immediate");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  // Do not use AbstractAssembler::relocate, which is not intended for
  // embedded words.  Instead, relocate to the enclosing instruction.
  code_section()->relocate(inst_mark(), rspec, format);
#ifdef ASSERT
  check_relocation(rspec, format);
#endif
  emit_int64(data);
}
// Emit a REX prefix (if needed) for a single-register operand and return the
// low 3 bits of the register encoding for use in the ModRM/opcode byte.
// byteinst: a REX is also required (with no bits set) to address the low
// byte of rsp/rbp/rsi/rdi (encodings 4-7).
int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
  if (reg_enc >= 8) {
    prefix(REX_B);
    reg_enc -= 8;
  } else if (byteinst && reg_enc >= 4) {
    prefix(REX);
  }
  return reg_enc;
}

// Same as above but always emits REX.W for a 64-bit operand size.
int Assembler::prefixq_and_encode(int reg_enc) {
  if (reg_enc < 8) {
    prefix(REX_W);
  } else {
    prefix(REX_WB);
    reg_enc -= 8;
  }
  return reg_enc;
}

// Emit a REX prefix (if needed) for a two-register operand pair and return
// the combined ModRM reg/rm bits (dst in the reg field, src in the rm field).
int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) {
  if (dst_enc < 8) {
    if (src_enc >= 8) {
      prefix(REX_B);
      src_enc -= 8;
    } else if (byteinst && src_enc >= 4) {
      prefix(REX);
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_R);
    } else {
      prefix(REX_RB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}

// Two-register variant that always emits REX.W for 64-bit operand size.
int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
  if (dst_enc < 8) {
    if (src_enc < 8) {
      prefix(REX_W);
    } else {
      prefix(REX_WB);
      src_enc -= 8;
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_WR);
    } else {
      prefix(REX_WRB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}
// The prefix()/prefixq() family below emits the REX prefix byte required by
// the operands: REX.B for an extended base/rm register, REX.X for an
// extended index, REX.R for an extended ModRM reg field, and REX.W (the 'q'
// variants) for 64-bit operand size.  No byte is emitted when no bit is set
// (except the always-REX.W prefixq forms).

// REX for a register used in the rm field.
void Assembler::prefix(Register reg) {
  if (reg->encoding() >= 8) {
    prefix(REX_B);
  }
}

// REX for a memory operand (base/index only).
void Assembler::prefix(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_XB);
    } else {
      prefix(REX_B);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_X);
    }
  }
}

// REX.W for a 64-bit memory operand; always emits at least REX_W.
void Assembler::prefixq(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_WXB);
    } else {
      prefix(REX_WB);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_WX);
    } else {
      prefix(REX_W);
    }
  }
}

// REX for a memory operand plus a register in the ModRM reg field.
// byteinst: low-byte access to encodings 4-7 also needs a (plain) REX.
void Assembler::prefix(Address adr, Register reg, bool byteinst) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      } else if (byteinst && reg->encoding() >= 4 ) {
        prefix(REX);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}

// REX.W for a 64-bit memory operand plus a register in the reg field.
void Assembler::prefixq(Address adr, Register src) {
  if (src->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WXB);
      } else {
        prefix(REX_WB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WX);
      } else {
        prefix(REX_W);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WRXB);
      } else {
        prefix(REX_WRB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WRX);
      } else {
        prefix(REX_WR);
      }
    }
  }
}

// REX for a memory operand plus an XMM register in the reg field.
void Assembler::prefix(Address adr, XMMRegister reg) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}

// REX.W for a 64-bit memory operand plus an XMM register in the reg field.
void Assembler::prefixq(Address adr, XMMRegister src) {
  if (src->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WXB);
      } else {
        prefix(REX_WB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WX);
      } else {
        prefix(REX_W);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WRXB);
      } else {
        prefix(REX_WRB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WRX);
      } else {
        prefix(REX_WR);
      }
    }
  }
}
// ADC r64, imm32 (sign-extended).  emit_arith picks the short 8-bit form
// when the immediate fits; the /2 extension comes from the 0xD0 base.
void Assembler::adcq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD0, dst, imm32);
}

// ADC r64, m64 (opcode 0x13).
void Assembler::adcq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src);
}
  4771 void Assembler::adcq(Register dst, Register src) {
  4772   (int) prefixq_and_encode(dst->encoding(), src->encoding());
  4773   emit_arith(0x13, 0xC0, dst, src);
// ADD m64, imm32.  emit_arith_operand chooses the short 0x83 form for
// 8-bit immediates; rax supplies the /0 opcode-extension digit.
void Assembler::addq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rax, dst,imm32);
}

// ADD m64, r64 (opcode 0x01).
void Assembler::addq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst);
}

// ADD r64, imm32 (sign-extended); /0 digit from the 0xC0 base.
void Assembler::addq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC0, dst, imm32);
}

// ADD r64, m64 (opcode 0x03).
void Assembler::addq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x03);
  emit_operand(dst, src);
}

// ADD r64, r64 (opcode 0x03).
void Assembler::addq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}
  4806 void Assembler::andq(Address dst, int32_t imm32) {
  4807   InstructionMark im(this);
  4808   prefixq(dst);
  4809   emit_int8((unsigned char)0x81);
  4810   emit_operand(rsp, dst, 4);
  4811   emit_int32(imm32);
// AND r64, imm32 (sign-extended); /4 digit from the 0xE0 base.
void Assembler::andq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE0, dst, imm32);
}

// AND r64, m64 (opcode 0x23).
void Assembler::andq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x23);
  emit_operand(dst, src);
}
  4826 void Assembler::andq(Register dst, Register src) {
  4827   (int) prefixq_and_encode(dst->encoding(), src->encoding());
  4828   emit_arith(0x23, 0xC0, dst, src);
// BSF dst, src -- bit-scan-forward: index of lowest set bit of src.
void Assembler::bsfq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}

// BSR dst, src -- bit-scan-reverse: index of highest set bit of src.
// On LZCNT-capable CPUs an F3-prefixed BSR decodes as LZCNT, hence the
// guard against emitting it there (lzcntq() is used instead).
void Assembler::bsrq(Register dst, Register src) {
  assert(!VM_Version::supports_lzcnt(), "encoding is treated as LZCNT");
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}

// BSWAP reg -- reverse the byte order of a 64-bit register.
void Assembler::bswapq(Register reg) {
  int encode = prefixq_and_encode(reg->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)(0xC8 | encode));
}
// CQO -- sign-extend RAX into RDX:RAX (REX.W + 0x99).
void Assembler::cdqq() {
  prefix(REX_W);
  emit_int8((unsigned char)0x99);
}

// CLFLUSH m8 -- flush the cache line containing adr (0F AE /7; the /7
// digit is supplied by rdi).
void Assembler::clflush(Address adr) {
  prefix(adr);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(rdi, adr);
}
// CMOVcc dst, src -- conditionally move src into dst (opcode 0F 40+cc).
void Assembler::cmovq(Condition cc, Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_int8((unsigned char)(0xC0 | encode));
}

// CMOVcc dst, m64.
void Assembler::cmovq(Condition cc, Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_operand(dst, src);
}
// CMP m64, imm32 (0x81 /7; the /7 digit is supplied by rdi).  Always uses
// the 4-byte-immediate form, giving a fixed instruction length.
void Assembler::cmpq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 4);
  emit_int32(imm32);
}

// CMP r64, imm32 (sign-extended); /7 digit from the 0xF8 base.
void Assembler::cmpq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xF8, dst, imm32);
}
  4892 void Assembler::cmpq(Address dst, Register src) {
  4893   InstructionMark im(this);
  4894   prefixq(dst, src);
  4895   emit_int8(0x3B);
  4896   emit_operand(src, dst);
// CMP r64, r64 (opcode 0x3B).
void Assembler::cmpq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}

// CMP r64, m64 (opcode 0x3B) -- flags from dst - [src].
void Assembler::cmpq(Register dst, Address  src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x3B);
  emit_operand(dst, src);
}
// CMPXCHG m64, r64 (0F B1): compare RAX with [adr]; if equal store reg,
// else load [adr] into RAX.  Callers add LOCK separately when needed.
void Assembler::cmpxchgq(Register reg, Address adr) {
  InstructionMark im(this);
  prefixq(adr, reg);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB1);
  emit_operand(reg, adr);
}
// CVTSI2SD dst, r64 -- convert signed 64-bit integer to double (F2 0F 2A).
void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F2);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

// CVTSI2SD dst, m64.
void Assembler::cvtsi2sdq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix_q(dst, dst, src, VEX_SIMD_F2);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

// CVTSI2SS dst, r64 -- convert signed 64-bit integer to float (F3 0F 2A).
void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F3);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

// CVTSI2SS dst, m64.
void Assembler::cvtsi2ssq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  simd_prefix_q(dst, dst, src, VEX_SIMD_F3);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

// CVTTSD2SI dst, src -- truncating double-to-int64 conversion (F2 0F 2C).
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F2);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// CVTTSS2SI dst, src -- truncating float-to-int64 conversion (F3 0F 2C).
void Assembler::cvttss2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F3);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}
// DEC r32 via the two-byte form (0xFF /1).
void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC8 | encode));
}
  4971 void Assembler::decq(Register dst) {
  4972   // Don't use it directly. Use MacroAssembler::decrementq() instead.
  4973   // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
  4974   int encode = prefixq_and_encode(dst->encoding());
  4975   emit_int8((unsigned char)0xFF);
  4976   emit_int8(0xC8 | encode);
// DEC m64 (0xFF /1; the /1 digit is supplied by rcx).
void Assembler::decq(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rcx, dst);
}
// FXRSTOR m512 -- restore x87/MMX/SSE state (0F AE /1).
void Assembler::fxrstor(Address src) {
  prefixq(src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(1), src);
}

// FXSAVE m512 -- save x87/MMX/SSE state (0F AE /0).
void Assembler::fxsave(Address dst) {
  prefixq(dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(0), dst);
}
// IDIV src -- signed divide RDX:RAX by src (0xF7 /7).
void Assembler::idivq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF8 | encode));
}

// IMUL dst, src -- two-operand signed multiply (0F AF).
void Assembler::imulq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAF);
  emit_int8((unsigned char)(0xC0 | encode));
}

// IMUL dst, src, imm -- three-operand form; uses the short 8-bit-immediate
// encoding (0x6B) when the value fits, else the 32-bit form (0x69).
void Assembler::imulq(Register dst, Register src, int value) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_int8(0x6B);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int8(value & 0xFF);
  } else {
    emit_int8(0x69);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int32(value);
  }
}
// INC r32 via the two-byte form (0xFF /0).
void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC0 | encode));
}

// INC r64 via the two-byte form (0xFF /0).
void Assembler::incq(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC0 | encode));
}

// INC m64 (0xFF /0; the /0 digit is supplied by rax).
void Assembler::incq(Address dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rax, dst);
}
// lea() on 64-bit is simply a 64-bit LEA.
void Assembler::lea(Register dst, Address src) {
  leaq(dst, src);
}

// LEA dst, m -- load effective address (opcode 0x8D).
void Assembler::leaq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src);
}
// MOV r64, imm64 -- full 64-bit immediate move (REX.W + B8+rd).
void Assembler::mov64(Register dst, int64_t imm64) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_int64(imm64);
}
  5069 void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
  5070   InstructionMark im(this);
  5071   int encode = prefixq_and_encode(dst->encoding());
  5072   emit_int8(0xB8 | encode);
  5073   emit_data64(imm64, rspec);
// MOV r32, imm32 carrying a narrow-oop relocation for the immediate.
void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

// MOV m32, imm32 carrying a narrow-oop relocation (0xC7 /0).
void Assembler::mov_narrow_oop(Address dst, int32_t imm32,  RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

// CMP r32, imm32 carrying a narrow-oop relocation (0x81 /7).
void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(src1->encoding());
  emit_int8((unsigned char)0x81);
  emit_int8((unsigned char)(0xF8 | encode));
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

// CMP m32, imm32 carrying a narrow-oop relocation (0x81 /7).
void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(src1);
  emit_int8((unsigned char)0x81);
  emit_operand(rax, src1, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}
// LZCNT dst, src -- count leading zeros (F3 0F BD).  Without LZCNT support
// the same bytes decode as BSR, hence the capability assert.
void Assembler::lzcntq(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}
// MOVQ xmm, r64 -- move GPR into low quadword of XMM (66 REX.W 0F 6E).
void Assembler::movdq(XMMRegister dst, Register src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_66);
  emit_int8(0x6E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOVQ r64, xmm -- move low quadword of XMM into GPR (66 REX.W 0F 7E).
void Assembler::movdq(Register dst, XMMRegister src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // swap src/dst to get correct prefix
  int encode = simd_prefix_and_encode_q(src, dst, VEX_SIMD_66);
  emit_int8(0x7E);
  emit_int8((unsigned char)(0xC0 | encode));
}
// MOV r64, r64 (opcode 0x8B).
void Assembler::movq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x8B);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOV r64, m64 (opcode 0x8B).
void Assembler::movq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

// MOV m64, r64 (opcode 0x89).
void Assembler::movq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}
// MOVSX r64, m8 -- sign-extend byte from memory (0F BE).
void Assembler::movsbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_operand(dst, src);
}

// MOVSX r64, r8 -- sign-extend byte register (0F BE).
void Assembler::movsbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_int8((unsigned char)(0xC0 | encode));
}
// movslq(Register, imm32) is deliberately disabled: the bytes it would emit
// are wrong (see the dbx disassembly notes below -- 0xC7 needs a separate
// ModRM byte rather than having the register OR'd into the opcode).
void Assembler::movslq(Register dst, int32_t imm32) {
  // dbx shows movslq(rcx, 3) as movq     $0x0000000049000000,(%rbx)
  // and movslq(r8, 3); as movl     $0x0000000048000000,(%rbx)
  // as a result we shouldn't use until tested at runtime...
  ShouldNotReachHere();
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xC7 | encode));
  emit_int32(imm32);
}

// MOV m64, imm32 (sign-extended store; 0xC7 /0).
void Assembler::movslq(Address dst, int32_t imm32) {
  assert(is_simm32(imm32), "lost bits");
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}

// MOVSXD r64, m32 -- sign-extend 32-bit memory operand (opcode 0x63).
void Assembler::movslq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x63);
  emit_operand(dst, src);
}

// MOVSXD r64, r32 -- sign-extend 32-bit register (opcode 0x63).
void Assembler::movslq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x63);
  emit_int8((unsigned char)(0xC0 | encode));
}
// MOVSX r64, m16 -- sign-extend 16-bit memory operand (0F BF).
void Assembler::movswq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_operand(dst, src);
}

// MOVSX r64, r16 -- sign-extend 16-bit register (0F BF).
void Assembler::movswq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xBF);
  emit_int8((unsigned char)(0xC0 | encode));
}
// MOVZX r64, m8 -- zero-extend byte from memory (0F B6).
void Assembler::movzbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB6);
  emit_operand(dst, src);
}
  5224 void Assembler::movzbq(Register dst, Register src) {
  5225   int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  5226   emit_int8(0x0F);
  5227   emit_int8((unsigned char)0xB6);
  5228   emit_int8(0xC0 | encode);
// MOVZX r64, m16 -- zero-extend 16-bit memory operand (0F B7).
void Assembler::movzwq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB7);
  emit_operand(dst, src);
}

// MOVZX r64, r16 -- zero-extend 16-bit register (0F B7).
void Assembler::movzwq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB7);
  emit_int8((unsigned char)(0xC0 | encode));
}
// NEG r64 -- two's-complement negate (0xF7 /3).
void Assembler::negq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD8 | encode));
}

// NOT r64 -- one's-complement negate (0xF7 /2).
void Assembler::notq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD0 | encode));
}
// OR m64, imm32 (0x81 /1; the /1 digit is supplied by rcx).  Always uses
// the 4-byte-immediate form (cf. emit_arith_operand used by addq/subq,
// which would pick the short 0x83 form for 8-bit immediates).
void Assembler::orq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rcx, dst, 4);
  emit_int32(imm32);
}

// OR r64, imm32 (sign-extended); /1 digit from the 0xC8 base.
void Assembler::orq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC8, dst, imm32);
}

// OR r64, m64 (opcode 0x0B).
void Assembler::orq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0B);
  emit_operand(dst, src);
}

// OR r64, r64 (opcode 0x0B).
void Assembler::orq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}
// 64-bit popa(): reload all 15 GPRs (rsp excluded) from the 16-slot frame
// laid out by pusha() below, then release the frame.  Slot order mirrors
// pusha() exactly.
void Assembler::popa() { // 64bit
  movq(r15, Address(rsp, 0));
  movq(r14, Address(rsp, wordSize));
  movq(r13, Address(rsp, 2 * wordSize));
  movq(r12, Address(rsp, 3 * wordSize));
  movq(r11, Address(rsp, 4 * wordSize));
  movq(r10, Address(rsp, 5 * wordSize));
  movq(r9,  Address(rsp, 6 * wordSize));
  movq(r8,  Address(rsp, 7 * wordSize));
  movq(rdi, Address(rsp, 8 * wordSize));
  movq(rsi, Address(rsp, 9 * wordSize));
  movq(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  movq(rbx, Address(rsp, 12 * wordSize));
  movq(rdx, Address(rsp, 13 * wordSize));
  movq(rcx, Address(rsp, 14 * wordSize));
  movq(rax, Address(rsp, 15 * wordSize));

  addq(rsp, 16 * wordSize);
}
// POPCNT dst, m64 -- population count (F3 REX.W 0F B8).  Note the F3
// prefix must precede the REX prefix.
void Assembler::popcntq(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src);
}

// POPCNT dst, r64.
void Assembler::popcntq(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB8);
  emit_int8((unsigned char)(0xC0 | encode));
}
// POP m64 (0x8F /0; the /0 digit is supplied by rax).
void Assembler::popq(Address dst) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x8F);
  emit_operand(rax, dst);
}
// 64-bit pusha(): save all 15 GPRs (rsp excluded) into a 16-slot frame.
// rsp's original value is written first, into what will become slot 11
// once the frame is allocated (-5 * wordSize == 16 - 5 == slot 11), using
// the ABI-guaranteed 128-byte red zone below rsp.
void Assembler::pusha() { // 64bit
  // we have to store original rsp.  ABI says that 128 bytes
  // below rsp are local scratch.
  movq(Address(rsp, -5 * wordSize), rsp);

  subq(rsp, 16 * wordSize);

  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  // skip rsp
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}
// PUSH m64 (0xFF /6; the /6 digit is supplied by rsi).
void Assembler::pushq(Address src) {
  InstructionMark im(this);
  prefixq(src);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsi, src);
}
// RCL r64, imm8 -- rotate left through carry (/2).  Uses the short 0xD1
// form for a shift count of 1.
void Assembler::rclq(Register dst, int imm8) {
  // imm8 >> 1 presumably because isShiftCount() checks the 32-bit range
  // while 64-bit shifts allow counts up to 63 -- TODO confirm.
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xD0 | encode));
    emit_int8(imm8);
  }
}

// SAR r64, imm8 -- arithmetic right shift (/7); short 0xD1 form for 1.
void Assembler::sarq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xF8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xF8 | encode));
    emit_int8(imm8);
  }
}

// SAR r64, CL -- arithmetic right shift by CL (0xD3 /7).
void Assembler::sarq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xF8 | encode));
}
// SBB m64, imm32; rbx supplies the /3 opcode-extension digit.
void Assembler::sbbq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

// SBB r64, imm32 (sign-extended); /3 digit from the 0xD8 base.
void Assembler::sbbq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD8, dst, imm32);
}

// SBB r64, m64 (opcode 0x1B).
void Assembler::sbbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src);
}

// SBB r64, r64 (opcode 0x1B).
void Assembler::sbbq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}
// SHL r64, imm8 -- logical left shift (/4); short 0xD1 form for 1.
void Assembler::shlq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xE0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xE0 | encode));
    emit_int8(imm8);
  }
}

// SHL r64, CL -- logical left shift by CL (0xD3 /4).
void Assembler::shlq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));
}
  5435 void Assembler::shrq(Register dst, int imm8) {
  5436   assert(isShiftCount(imm8 >> 1), "illegal shift count");
  5437   int encode = prefixq_and_encode(dst->encoding());
  5438   emit_int8((unsigned char)0xC1);
  5439   emit_int8((unsigned char)(0xE8 | encode));
  5440   emit_int8(imm8);
  5443 void Assembler::shrq(Register dst) {
  5444   int encode = prefixq_and_encode(dst->encoding());
  5445   emit_int8((unsigned char)0xD3);
  5446   emit_int8(0xE8 | encode);
// Subtract an immediate (sign-extended to 64 bits) from a 64-bit memory
// operand.  rbp is not an operand: its encoding (5) selects the SUB /5
// opcode extension in the ModRM reg field.
void Assembler::subq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}
// Subtract a register from a 64-bit memory operand: REX.W + 29 /r.
void Assembler::subq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst);
}
// Subtract an immediate from a 64-bit register.  emit_arith may choose
// the short 0x83 sign-extended-imm8 form; 0xE8 = ModRM base for SUB /5.
void Assembler::subq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());  // REX.W emitted; encoding itself unused here
  emit_arith(0x81, 0xE8, dst, imm32);
}
// Force generation of a 4 byte immediate value even if it fits into 8bit
// (needed when the instruction length must be fixed, e.g. for patching).
void Assembler::subq_imm32(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}
// Subtract a 64-bit memory operand from a register: REX.W + 2B /r.
void Assembler::subq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src);
}
// Subtract register from register (64-bit): REX.W + 2B /r.
void Assembler::subq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}
// Test (AND without result) of a 64-bit register against a 32-bit
// immediate; only flags are affected.
void Assembler::testq(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    // rax (encoding 0) has a dedicated short form: REX.W + A9 id,
    // which saves the ModRM byte.
    prefix(REX_W);
    emit_int8((unsigned char)0xA9);
  } else {
    // General form: REX.W + F7 /0 id (TEST r/m64, imm32).
    encode = prefixq_and_encode(encode);
    emit_int8((unsigned char)0xF7);
    emit_int8((unsigned char)(0xC0 | encode));
  }
  emit_int32(imm32);
}
// Test register against register (64-bit): REX.W + 85 /r; flags only.
void Assembler::testq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}
// Exchange-and-add register into a 64-bit memory operand:
// REX.W + 0F C1 /r (XADD r/m64, r64).  Callers add a LOCK prefix
// separately when atomicity is required.
void Assembler::xaddq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}
// Exchange a 64-bit register with a memory operand: REX.W + 87 /r.
// NOTE(review): XCHG with a memory operand is implicitly locked by the
// processor — presumably relied on by callers; confirm before changing.
void Assembler::xchgq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}
  5521 void Assembler::xchgq(Register dst, Register src) {
  5522   int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  5523   emit_int8((unsigned char)0x87);
  5524   emit_int8((unsigned char)(0xc0 | encode));
// Exclusive-or register into register (64-bit): REX.W + 33 /r.
void Assembler::xorq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}
// Exclusive-or a 64-bit memory operand into a register: REX.W + 33 /r.
void Assembler::xorq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src);
}
  5539 #endif // !LP64

mercurial