src/cpu/x86/vm/assembler_x86.hpp

changeset:   8604:04d83ba48607
parents:     8307:daaf806995b3, 7994:04ff2f6cd0eb
child:       9806:758c07667682
author:      aoqi
date:        Thu, 24 May 2018 17:06:56 +0800
permissions: -rw-r--r--
summary:     Merge
     1 /*
     2  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #ifndef CPU_X86_VM_ASSEMBLER_X86_HPP
    26 #define CPU_X86_VM_ASSEMBLER_X86_HPP
    28 #include "asm/register.hpp"
    30 class BiasedLockingCounters;
    32 // Contains all the definitions needed for x86 assembly code generation.
    34 // Calling convention
    35 class Argument VALUE_OBJ_CLASS_SPEC {
    36  public:
    37   enum {
    38 #ifdef _LP64
    39 #ifdef _WIN64
    40     n_int_register_parameters_c   = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    41     n_float_register_parameters_c = 4,  // xmm0 - xmm3 (c_farg0, c_farg1, ... )
    42 #else
    43     n_int_register_parameters_c   = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    44     n_float_register_parameters_c = 8,  // xmm0 - xmm7 (c_farg0, c_farg1, ... )
    45 #endif // _WIN64
    46     n_int_register_parameters_j   = 6, // j_rarg0, j_rarg1, ...
    47     n_float_register_parameters_j = 8  // j_farg0, j_farg1, ...
    48 #else
    49     n_register_parameters = 0   // 0 registers used to pass arguments
    50 #endif // _LP64
    51   };
    52 };
    55 #ifdef _LP64
    56 // Symbolically name the register arguments used by the c calling convention.
    57 // Windows is different from linux/solaris. So much for standards...
    59 #ifdef _WIN64
    61 REGISTER_DECLARATION(Register, c_rarg0, rcx);
    62 REGISTER_DECLARATION(Register, c_rarg1, rdx);
    63 REGISTER_DECLARATION(Register, c_rarg2, r8);
    64 REGISTER_DECLARATION(Register, c_rarg3, r9);
    66 REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
    67 REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
    68 REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
    69 REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);
    71 #else
    73 REGISTER_DECLARATION(Register, c_rarg0, rdi);
    74 REGISTER_DECLARATION(Register, c_rarg1, rsi);
    75 REGISTER_DECLARATION(Register, c_rarg2, rdx);
    76 REGISTER_DECLARATION(Register, c_rarg3, rcx);
    77 REGISTER_DECLARATION(Register, c_rarg4, r8);
    78 REGISTER_DECLARATION(Register, c_rarg5, r9);
    80 REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
    81 REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
    82 REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
    83 REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);
    84 REGISTER_DECLARATION(XMMRegister, c_farg4, xmm4);
    85 REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5);
    86 REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6);
    87 REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7);
    89 #endif // _WIN64
    91 // Symbolically name the register arguments used by the Java calling convention.
    92 // We have control over the convention for java so we can do what we please.
    93 // What pleases us is to offset the java calling convention so that when
     94 // we call a suitable jni method the arguments are lined up and we don't
     95 // have to do any shuffling. A suitable jni method is non-static and takes a
     96 // small number of arguments (two fewer args on windows).
    97 //
    98 //        |-------------------------------------------------------|
    99 //        | c_rarg0   c_rarg1  c_rarg2 c_rarg3 c_rarg4 c_rarg5    |
   100 //        |-------------------------------------------------------|
   101 //        | rcx       rdx      r8      r9      rdi*    rsi*       | windows (* not a c_rarg)
   102 //        | rdi       rsi      rdx     rcx     r8      r9         | solaris/linux
   103 //        |-------------------------------------------------------|
   104 //        | j_rarg5   j_rarg0  j_rarg1 j_rarg2 j_rarg3 j_rarg4    |
   105 //        |-------------------------------------------------------|
   107 REGISTER_DECLARATION(Register, j_rarg0, c_rarg1);
   108 REGISTER_DECLARATION(Register, j_rarg1, c_rarg2);
   109 REGISTER_DECLARATION(Register, j_rarg2, c_rarg3);
   110 // Windows runs out of register args here
   111 #ifdef _WIN64
   112 REGISTER_DECLARATION(Register, j_rarg3, rdi);
   113 REGISTER_DECLARATION(Register, j_rarg4, rsi);
   114 #else
   115 REGISTER_DECLARATION(Register, j_rarg3, c_rarg4);
   116 REGISTER_DECLARATION(Register, j_rarg4, c_rarg5);
   117 #endif /* _WIN64 */
   118 REGISTER_DECLARATION(Register, j_rarg5, c_rarg0);
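// Illustrative sketch (not part of the original header): because j_rarg0..j_rarg4
// alias c_rarg1..c_rarg5 on linux/solaris, a compiled-to-native transition can leave
// the receiver and Java arguments where they already are and only has to materialize
// the JNIEnv* in c_rarg0, e.g. (assumed shared-runtime code, not declared here):
//   __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));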
   120 REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0);
   121 REGISTER_DECLARATION(XMMRegister, j_farg1, xmm1);
   122 REGISTER_DECLARATION(XMMRegister, j_farg2, xmm2);
   123 REGISTER_DECLARATION(XMMRegister, j_farg3, xmm3);
   124 REGISTER_DECLARATION(XMMRegister, j_farg4, xmm4);
   125 REGISTER_DECLARATION(XMMRegister, j_farg5, xmm5);
   126 REGISTER_DECLARATION(XMMRegister, j_farg6, xmm6);
   127 REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7);
   129 REGISTER_DECLARATION(Register, rscratch1, r10);  // volatile
   130 REGISTER_DECLARATION(Register, rscratch2, r11);  // volatile
   132 REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved
   133 REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
   135 #else
    136 // rscratch1 will appear in 32bit code that is dead but of course must compile.
    137 // Using noreg ensures that if the dead code is incorrectly live and executed it
    138 // will cause an assertion failure.
   139 #define rscratch1 noreg
   140 #define rscratch2 noreg
   142 #endif // _LP64
   144 // JSR 292
   145 // On x86, the SP does not have to be saved when invoking method handle intrinsics
   146 // or compiled lambda forms. We indicate that by setting rbp_mh_SP_save to noreg.
   147 REGISTER_DECLARATION(Register, rbp_mh_SP_save, noreg);
   149 // Address is an abstraction used to represent a memory location
   150 // using any of the amd64 addressing modes with one object.
   151 //
   152 // Note: A register location is represented via a Register, not
   153 //       via an address for efficiency & simplicity reasons.
   155 class ArrayAddress;
   157 class Address VALUE_OBJ_CLASS_SPEC {
   158  public:
   159   enum ScaleFactor {
   160     no_scale = -1,
   161     times_1  =  0,
   162     times_2  =  1,
   163     times_4  =  2,
   164     times_8  =  3,
   165     times_ptr = LP64_ONLY(times_8) NOT_LP64(times_4)
   166   };
   167   static ScaleFactor times(int size) {
   168     assert(size >= 1 && size <= 8 && is_power_of_2(size), "bad scale size");
   169     if (size == 8)  return times_8;
   170     if (size == 4)  return times_4;
   171     if (size == 2)  return times_2;
   172     return times_1;
   173   }
   174   static int scale_size(ScaleFactor scale) {
   175     assert(scale != no_scale, "");
   176     assert(((1 << (int)times_1) == 1 &&
   177             (1 << (int)times_2) == 2 &&
   178             (1 << (int)times_4) == 4 &&
   179             (1 << (int)times_8) == 8), "");
   180     return (1 << (int)scale);
   181   }
   183  private:
   184   Register         _base;
   185   Register         _index;
   186   ScaleFactor      _scale;
   187   int              _disp;
   188   RelocationHolder _rspec;
   190   // Easily misused constructors make them private
   191   // %%% can we make these go away?
   192   NOT_LP64(Address(address loc, RelocationHolder spec);)
   193   Address(int disp, address loc, relocInfo::relocType rtype);
   194   Address(int disp, address loc, RelocationHolder spec);
   196  public:
    198   int disp() { return _disp; }
   199   // creation
   200   Address()
   201     : _base(noreg),
   202       _index(noreg),
   203       _scale(no_scale),
   204       _disp(0) {
   205   }
   207   // No default displacement otherwise Register can be implicitly
   208   // converted to 0(Register) which is quite a different animal.
   210   Address(Register base, int disp)
   211     : _base(base),
   212       _index(noreg),
   213       _scale(no_scale),
   214       _disp(disp) {
   215   }
   217   Address(Register base, Register index, ScaleFactor scale, int disp = 0)
   218     : _base (base),
   219       _index(index),
   220       _scale(scale),
   221       _disp (disp) {
   222     assert(!index->is_valid() == (scale == Address::no_scale),
   223            "inconsistent address");
   224   }
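  // Illustrative example (not part of the original source): an operand constructed as
  //   Address(rbx, rcx, Address::times_4, 12)
  // denotes the memory location [rbx + rcx*4 + 12]; Address::times(sizeof(jint))
  // would select times_4 when indexing a jint array element.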
   226   Address(Register base, RegisterOrConstant index, ScaleFactor scale = times_1, int disp = 0)
   227     : _base (base),
   228       _index(index.register_or_noreg()),
   229       _scale(scale),
   230       _disp (disp + (index.constant_or_zero() * scale_size(scale))) {
   231     if (!index.is_register())  scale = Address::no_scale;
   232     assert(!_index->is_valid() == (scale == Address::no_scale),
   233            "inconsistent address");
   234   }
   236   Address plus_disp(int disp) const {
   237     Address a = (*this);
   238     a._disp += disp;
   239     return a;
   240   }
   241   Address plus_disp(RegisterOrConstant disp, ScaleFactor scale = times_1) const {
   242     Address a = (*this);
   243     a._disp += disp.constant_or_zero() * scale_size(scale);
   244     if (disp.is_register()) {
   245       assert(!a.index()->is_valid(), "competing indexes");
   246       a._index = disp.as_register();
   247       a._scale = scale;
   248     }
   249     return a;
   250   }
   251   bool is_same_address(Address a) const {
   252     // disregard _rspec
   253     return _base == a._base && _disp == a._disp && _index == a._index && _scale == a._scale;
   254   }
   256   // The following two overloads are used in connection with the
   257   // ByteSize type (see sizes.hpp).  They simplify the use of
    258   // ByteSize'd arguments in assembly code. Note that their equivalents
    259   // for the optimized build are the member functions with int disp
   260   // argument since ByteSize is mapped to an int type in that case.
   261   //
   262   // Note: DO NOT introduce similar overloaded functions for WordSize
   263   // arguments as in the optimized mode, both ByteSize and WordSize
   264   // are mapped to the same type and thus the compiler cannot make a
   265   // distinction anymore (=> compiler errors).
   267 #ifdef ASSERT
   268   Address(Register base, ByteSize disp)
   269     : _base(base),
   270       _index(noreg),
   271       _scale(no_scale),
   272       _disp(in_bytes(disp)) {
   273   }
   275   Address(Register base, Register index, ScaleFactor scale, ByteSize disp)
   276     : _base(base),
   277       _index(index),
   278       _scale(scale),
   279       _disp(in_bytes(disp)) {
   280     assert(!index->is_valid() == (scale == Address::no_scale),
   281            "inconsistent address");
   282   }
   284   Address(Register base, RegisterOrConstant index, ScaleFactor scale, ByteSize disp)
   285     : _base (base),
   286       _index(index.register_or_noreg()),
   287       _scale(scale),
   288       _disp (in_bytes(disp) + (index.constant_or_zero() * scale_size(scale))) {
   289     if (!index.is_register())  scale = Address::no_scale;
   290     assert(!_index->is_valid() == (scale == Address::no_scale),
   291            "inconsistent address");
   292   }
   294 #endif // ASSERT
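  // Illustrative example (assumed call site, not from this file): with an offset helper
  // that returns a ByteSize, such as Thread::pending_exception_offset(), the debug-only
  // overloads above allow
  //   Address(r15_thread, Thread::pending_exception_offset())
  // without wrapping the offset in in_bytes() at every use.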
   296   // accessors
   297   bool        uses(Register reg) const { return _base == reg || _index == reg; }
   298   Register    base()             const { return _base;  }
   299   Register    index()            const { return _index; }
   300   ScaleFactor scale()            const { return _scale; }
   301   int         disp()             const { return _disp;  }
   303   // Convert the raw encoding form into the form expected by the constructor for
   304   // Address.  An index of 4 (rsp) corresponds to having no index, so convert
   305   // that to noreg for the Address constructor.
   306   static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc);
   308   static Address make_array(ArrayAddress);
   310  private:
   311   bool base_needs_rex() const {
   312     return _base != noreg && _base->encoding() >= 8;
   313   }
   315   bool index_needs_rex() const {
    316     return _index != noreg && _index->encoding() >= 8;
   317   }
   319   relocInfo::relocType reloc() const { return _rspec.type(); }
   321   friend class Assembler;
   322   friend class MacroAssembler;
   323   friend class LIR_Assembler; // base/index/scale/disp
   324 };
   326 //
   327 // AddressLiteral has been split out from Address because operands of this type
   328 // need to be treated specially on 32bit vs. 64bit platforms. By splitting it out
   329 // the few instructions that need to deal with address literals are unique and the
   330 // MacroAssembler does not have to implement every instruction in the Assembler
   331 // in order to search for address literals that may need special handling depending
    332 // on the instruction and the platform. This is a small step on the way to
    333 // merging the i486/amd64 directories.
   334 //
   335 class AddressLiteral VALUE_OBJ_CLASS_SPEC {
   336   friend class ArrayAddress;
   337   RelocationHolder _rspec;
    338   // Typically when we use AddressLiterals we want to use their rval.
    339   // However in some situations we want the lval (effective address) of the item.
   340   // We provide a special factory for making those lvals.
   341   bool _is_lval;
   343   // If the target is far we'll need to load the ea of this to
   344   // a register to reach it. Otherwise if near we can do rip
   345   // relative addressing.
   347   address          _target;
   349  protected:
   350   // creation
   351   AddressLiteral()
   352     : _is_lval(false),
   353       _target(NULL)
   354   {}
   356   public:
   359   AddressLiteral(address target, relocInfo::relocType rtype);
   361   AddressLiteral(address target, RelocationHolder const& rspec)
   362     : _rspec(rspec),
   363       _is_lval(false),
   364       _target(target)
   365   {}
   367   AddressLiteral addr() {
   368     AddressLiteral ret = *this;
   369     ret._is_lval = true;
   370     return ret;
   371   }
   374  private:
   376   address target() { return _target; }
   377   bool is_lval() { return _is_lval; }
   379   relocInfo::relocType reloc() const { return _rspec.type(); }
   380   const RelocationHolder& rspec() const { return _rspec; }
   382   friend class Assembler;
   383   friend class MacroAssembler;
   384   friend class Address;
   385   friend class LIR_Assembler;
   386 };
    388 // Convenience classes
   389 class RuntimeAddress: public AddressLiteral {
   391   public:
   393   RuntimeAddress(address target) : AddressLiteral(target, relocInfo::runtime_call_type) {}
   395 };
   397 class ExternalAddress: public AddressLiteral {
   398  private:
   399   static relocInfo::relocType reloc_for_target(address target) {
   400     // Sometimes ExternalAddress is used for values which aren't
   401     // exactly addresses, like the card table base.
   402     // external_word_type can't be used for values in the first page
   403     // so just skip the reloc in that case.
   404     return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
   405   }
   407  public:
   409   ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(target)) {}
   411 };
   413 class InternalAddress: public AddressLiteral {
   415   public:
   417   InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {}
   419 };
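// Illustrative usage sketch (assumed MacroAssembler call sites, not part of this
// header): the convenience subclasses are passed wherever an AddressLiteral is
// expected and pick the relocation type themselves, e.g.
//   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
//   __ lea(rscratch1, ExternalAddress((address)&some_global_counter));  // placeholder symbol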
    421 // x86 can do array addressing as a single operation since disp can be an absolute
    422 // address; amd64 can't. We create a class that expresses the concept but does extra
    423 // magic on amd64 to get the final result.
   425 class ArrayAddress VALUE_OBJ_CLASS_SPEC {
   426   private:
   428   AddressLiteral _base;
   429   Address        _index;
   431   public:
   433   ArrayAddress() {};
   434   ArrayAddress(AddressLiteral base, Address index): _base(base), _index(index) {};
   435   AddressLiteral base() { return _base; }
   436   Address index() { return _index; }
   438 };
   440 const int FPUStateSizeInWords = NOT_LP64(27) LP64_ONLY( 512 / wordSize);
   442 // The Intel x86/Amd64 Assembler: Pure assembler doing NO optimizations on the instruction
   443 // level (e.g. mov rax, 0 is not translated into xor rax, rax!); i.e., what you write
   444 // is what you get. The Assembler is generating code into a CodeBuffer.
   446 class Assembler : public AbstractAssembler  {
   447   friend class AbstractAssembler; // for the non-virtual hack
   448   friend class LIR_Assembler; // as_Address()
   449   friend class StubGenerator;
   451  public:
   452   enum Condition {                     // The x86 condition codes used for conditional jumps/moves.
   453     zero          = 0x4,
   454     notZero       = 0x5,
   455     equal         = 0x4,
   456     notEqual      = 0x5,
   457     less          = 0xc,
   458     lessEqual     = 0xe,
   459     greater       = 0xf,
   460     greaterEqual  = 0xd,
   461     below         = 0x2,
   462     belowEqual    = 0x6,
   463     above         = 0x7,
   464     aboveEqual    = 0x3,
   465     overflow      = 0x0,
   466     noOverflow    = 0x1,
   467     carrySet      = 0x2,
   468     carryClear    = 0x3,
   469     negative      = 0x8,
   470     positive      = 0x9,
   471     parity        = 0xa,
   472     noParity      = 0xb
   473   };
   475   enum Prefix {
   476     // segment overrides
   477     CS_segment = 0x2e,
   478     SS_segment = 0x36,
   479     DS_segment = 0x3e,
   480     ES_segment = 0x26,
   481     FS_segment = 0x64,
   482     GS_segment = 0x65,
   484     REX        = 0x40,
   486     REX_B      = 0x41,
   487     REX_X      = 0x42,
   488     REX_XB     = 0x43,
   489     REX_R      = 0x44,
   490     REX_RB     = 0x45,
   491     REX_RX     = 0x46,
   492     REX_RXB    = 0x47,
   494     REX_W      = 0x48,
   496     REX_WB     = 0x49,
   497     REX_WX     = 0x4A,
   498     REX_WXB    = 0x4B,
   499     REX_WR     = 0x4C,
   500     REX_WRB    = 0x4D,
   501     REX_WRX    = 0x4E,
   502     REX_WRXB   = 0x4F,
   504     VEX_3bytes = 0xC4,
   505     VEX_2bytes = 0xC5
   506   };
   508   enum VexPrefix {
   509     VEX_B = 0x20,
   510     VEX_X = 0x40,
   511     VEX_R = 0x80,
   512     VEX_W = 0x80
   513   };
   515   enum VexSimdPrefix {
   516     VEX_SIMD_NONE = 0x0,
   517     VEX_SIMD_66   = 0x1,
   518     VEX_SIMD_F3   = 0x2,
   519     VEX_SIMD_F2   = 0x3
   520   };
   522   enum VexOpcode {
   523     VEX_OPCODE_NONE  = 0x0,
   524     VEX_OPCODE_0F    = 0x1,
   525     VEX_OPCODE_0F_38 = 0x2,
   526     VEX_OPCODE_0F_3A = 0x3
   527   };
   529   enum WhichOperand {
   530     // input to locate_operand, and format code for relocations
   531     imm_operand  = 0,            // embedded 32-bit|64-bit immediate operand
   532     disp32_operand = 1,          // embedded 32-bit displacement or address
   533     call32_operand = 2,          // embedded 32-bit self-relative displacement
   534 #ifndef _LP64
   535     _WhichOperand_limit = 3
   536 #else
   537      narrow_oop_operand = 3,     // embedded 32-bit immediate narrow oop
   538     _WhichOperand_limit = 4
   539 #endif
   540   };
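  // Illustrative sketch (assumed patching use, not part of this enum): relocation and
  // patching code finds the operand bytes of an already-emitted instruction with
  //   address imm_addr = Assembler::locate_operand(inst_addr, Assembler::imm_operand);
  // (inst_addr being the start of the instruction) and then rewrites the embedded
  // immediate or displacement in place.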
    544   // NOTE: The general philosophy of the declarations here is that 64bit versions
    545   // of instructions are freely declared without the need for wrapping them in an ifdef.
    546   // (Some dangerous instructions are ifdef'd out of inappropriate jvms.)
   547   // In the .cpp file the implementations are wrapped so that they are dropped out
   548   // of the resulting jvm. This is done mostly to keep the footprint of MINIMAL
   549   // to the size it was prior to merging up the 32bit and 64bit assemblers.
   550   //
   551   // This does mean you'll get a linker/runtime error if you use a 64bit only instruction
   552   // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down.
   554 private:
   557   // 64bit prefixes
   558   int prefix_and_encode(int reg_enc, bool byteinst = false);
   559   int prefixq_and_encode(int reg_enc);
   561   int prefix_and_encode(int dst_enc, int src_enc, bool byteinst = false);
   562   int prefixq_and_encode(int dst_enc, int src_enc);
   564   void prefix(Register reg);
   565   void prefix(Address adr);
   566   void prefixq(Address adr);
   568   void prefix(Address adr, Register reg,  bool byteinst = false);
   569   void prefix(Address adr, XMMRegister reg);
   570   void prefixq(Address adr, Register reg);
   571   void prefixq(Address adr, XMMRegister reg);
   573   void prefetch_prefix(Address src);
   575   void rex_prefix(Address adr, XMMRegister xreg,
   576                   VexSimdPrefix pre, VexOpcode opc, bool rex_w);
   577   int  rex_prefix_and_encode(int dst_enc, int src_enc,
   578                              VexSimdPrefix pre, VexOpcode opc, bool rex_w);
   580   void vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w,
   581                   int nds_enc, VexSimdPrefix pre, VexOpcode opc,
   582                   bool vector256);
   584   void vex_prefix(Address adr, int nds_enc, int xreg_enc,
   585                   VexSimdPrefix pre, VexOpcode opc,
   586                   bool vex_w, bool vector256);
   588   void vex_prefix(XMMRegister dst, XMMRegister nds, Address src,
   589                   VexSimdPrefix pre, bool vector256 = false) {
   590     int dst_enc = dst->encoding();
   591     int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   592     vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, false, vector256);
   593   }
   595   void vex_prefix_0F38(Register dst, Register nds, Address src) {
   596     bool vex_w = false;
   597     bool vector256 = false;
   598     vex_prefix(src, nds->encoding(), dst->encoding(),
   599                VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
   600   }
   602   void vex_prefix_0F38_q(Register dst, Register nds, Address src) {
   603     bool vex_w = true;
   604     bool vector256 = false;
   605     vex_prefix(src, nds->encoding(), dst->encoding(),
   606                VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
   607   }
   608   int  vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
   609                              VexSimdPrefix pre, VexOpcode opc,
   610                              bool vex_w, bool vector256);
   612   int  vex_prefix_0F38_and_encode(Register dst, Register nds, Register src) {
   613     bool vex_w = false;
   614     bool vector256 = false;
   615     return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
   616                                  VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
   617   }
   618   int  vex_prefix_0F38_and_encode_q(Register dst, Register nds, Register src) {
   619     bool vex_w = true;
   620     bool vector256 = false;
   621     return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
   622                                  VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
   623   }
   624   int  vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
   625                              VexSimdPrefix pre, bool vector256 = false,
   626                              VexOpcode opc = VEX_OPCODE_0F) {
   627     int src_enc = src->encoding();
   628     int dst_enc = dst->encoding();
   629     int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   630     return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, false, vector256);
   631   }
   633   void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr,
   634                    VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
   635                    bool rex_w = false, bool vector256 = false);
   637   void simd_prefix(XMMRegister dst, Address src,
   638                    VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
   639     simd_prefix(dst, xnoreg, src, pre, opc);
   640   }
   642   void simd_prefix(Address dst, XMMRegister src, VexSimdPrefix pre) {
   643     simd_prefix(src, dst, pre);
   644   }
   645   void simd_prefix_q(XMMRegister dst, XMMRegister nds, Address src,
   646                      VexSimdPrefix pre) {
   647     bool rex_w = true;
   648     simd_prefix(dst, nds, src, pre, VEX_OPCODE_0F, rex_w);
   649   }
   651   int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
   652                              VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
   653                              bool rex_w = false, bool vector256 = false);
   655   // Move/convert 32-bit integer value.
   656   int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, Register src,
   657                              VexSimdPrefix pre) {
    658     // It is OK to cast from Register to XMMRegister to pass the argument here
    659     // since only the encoding is used in simd_prefix_and_encode() and the number
    660     // of Gen and Xmm registers is the same.
   661     return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre);
   662   }
   663   int simd_prefix_and_encode(XMMRegister dst, Register src, VexSimdPrefix pre) {
   664     return simd_prefix_and_encode(dst, xnoreg, src, pre);
   665   }
   666   int simd_prefix_and_encode(Register dst, XMMRegister src,
   667                              VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
   668     return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, opc);
   669   }
   671   // Move/convert 64-bit integer value.
   672   int simd_prefix_and_encode_q(XMMRegister dst, XMMRegister nds, Register src,
   673                                VexSimdPrefix pre) {
   674     bool rex_w = true;
   675     return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre, VEX_OPCODE_0F, rex_w);
   676   }
   677   int simd_prefix_and_encode_q(XMMRegister dst, Register src, VexSimdPrefix pre) {
   678     return simd_prefix_and_encode_q(dst, xnoreg, src, pre);
   679   }
   680   int simd_prefix_and_encode_q(Register dst, XMMRegister src,
   681                              VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
   682     bool rex_w = true;
   683     return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, opc, rex_w);
   684   }
   686   // Helper functions for groups of instructions
   687   void emit_arith_b(int op1, int op2, Register dst, int imm8);
   689   void emit_arith(int op1, int op2, Register dst, int32_t imm32);
   690   // Force generation of a 4 byte immediate value even if it fits into 8bit
   691   void emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32);
   692   void emit_arith(int op1, int op2, Register dst, Register src);
   694   void emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre);
   695   void emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre);
   696   void emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre);
   697   void emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre);
   698   void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
   699                       Address src, VexSimdPrefix pre, bool vector256);
   700   void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
   701                       XMMRegister src, VexSimdPrefix pre, bool vector256);
   703   void emit_operand(Register reg,
   704                     Register base, Register index, Address::ScaleFactor scale,
   705                     int disp,
   706                     RelocationHolder const& rspec,
   707                     int rip_relative_correction = 0);
   709   void emit_operand(Register reg, Address adr, int rip_relative_correction = 0);
   711   // operands that only take the original 32bit registers
   712   void emit_operand32(Register reg, Address adr);
   714   void emit_operand(XMMRegister reg,
   715                     Register base, Register index, Address::ScaleFactor scale,
   716                     int disp,
   717                     RelocationHolder const& rspec);
   719   void emit_operand(XMMRegister reg, Address adr);
   721   void emit_operand(MMXRegister reg, Address adr);
   723   // workaround gcc (3.2.1-7) bug
   724   void emit_operand(Address adr, MMXRegister reg);
   727   // Immediate-to-memory forms
   728   void emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32);
   730   void emit_farith(int b1, int b2, int i);
   733  protected:
   734   #ifdef ASSERT
   735   void check_relocation(RelocationHolder const& rspec, int format);
   736   #endif
   738   void emit_data(jint data, relocInfo::relocType    rtype, int format);
   739   void emit_data(jint data, RelocationHolder const& rspec, int format);
   740   void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
   741   void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
   743   bool reachable(AddressLiteral adr) NOT_LP64({ return true;});
   745   // These are all easily abused and hence protected
   747   // 32BIT ONLY SECTION
   748 #ifndef _LP64
   749   // Make these disappear in 64bit mode since they would never be correct
   750   void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec);   // 32BIT ONLY
   751   void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec);    // 32BIT ONLY
   753   void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec);    // 32BIT ONLY
   754   void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec);     // 32BIT ONLY
   756   void push_literal32(int32_t imm32, RelocationHolder const& rspec);                 // 32BIT ONLY
   757 #else
   758   // 64BIT ONLY SECTION
   759   void mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec);   // 64BIT ONLY
   761   void cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec);
   762   void cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec);
   764   void mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec);
   765   void mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec);
   766 #endif // _LP64
   768   // These are unique in that we are ensured by the caller that the 32bit
   769   // relative in these instructions will always be able to reach the potentially
   770   // 64bit address described by entry. Since they can take a 64bit address they
   771   // don't have the 32 suffix like the other instructions in this class.
   773   void call_literal(address entry, RelocationHolder const& rspec);
   774   void jmp_literal(address entry, RelocationHolder const& rspec);
   776   // Avoid using directly section
   777   // Instructions in this section are actually usable by anyone without danger
    778   // of failure but have performance issues that are addressed by enhanced
    779   // instructions which will do the proper thing based on the particular cpu.
   780   // We protect them because we don't trust you...
   782   // Don't use next inc() and dec() methods directly. INC & DEC instructions
   783   // could cause a partial flag stall since they don't set CF flag.
   784   // Use MacroAssembler::decrement() & MacroAssembler::increment() methods
   785   // which call inc() & dec() or add() & sub() in accordance with
   786   // the product flag UseIncDec value.
   788   void decl(Register dst);
   789   void decl(Address dst);
   790   void decq(Register dst);
   791   void decq(Address dst);
   793   void incl(Register dst);
   794   void incl(Address dst);
   795   void incq(Register dst);
   796   void incq(Address dst);
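  // Illustrative sketch (assumed wrapper behaviour, not declared here): client code
  // normally goes through the MacroAssembler, e.g.
  //   __ increment(rdx);            // becomes incl/incq or addl/addq(reg, 1) per UseIncDec
  //   __ decrementl(Address(rsp, 0));
  // so the raw incl/decl forms above stay confined to this class.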
   798   // New cpus require use of movsd and movss to avoid partial register stall
   799   // when loading from memory. But for old Opteron use movlpd instead of movsd.
   800   // The selection is done in MacroAssembler::movdbl() and movflt().
   802   // Move Scalar Single-Precision Floating-Point Values
   803   void movss(XMMRegister dst, Address src);
   804   void movss(XMMRegister dst, XMMRegister src);
   805   void movss(Address dst, XMMRegister src);
   807   // Move Scalar Double-Precision Floating-Point Values
   808   void movsd(XMMRegister dst, Address src);
   809   void movsd(XMMRegister dst, XMMRegister src);
   810   void movsd(Address dst, XMMRegister src);
   811   void movlpd(XMMRegister dst, Address src);
   813   // New cpus require use of movaps and movapd to avoid partial register stall
   814   // when moving between registers.
   815   void movaps(XMMRegister dst, XMMRegister src);
   816   void movapd(XMMRegister dst, XMMRegister src);
   818   // End avoid using directly
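  // Illustrative sketch (assumed selection logic, not part of this header): code that
  // simply wants "load a double" or "copy a float register" uses the MacroAssembler
  // wrappers, e.g.
  //   __ movdbl(xmm0, Address(rsp, 0));   // movsd on modern cpus, movlpd on old Opteron
  //   __ movflt(xmm1, xmm2);              // may use movaps to avoid a partial register stall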
   821   // Instruction prefixes
   822   void prefix(Prefix p);
   824   public:
   826   // Creation
   827   Assembler(CodeBuffer* code) : AbstractAssembler(code) {}
   829   // Decoding
   830   static address locate_operand(address inst, WhichOperand which);
   831   static address locate_next_instruction(address inst);
   833   // Utilities
   834   static bool is_polling_page_far() NOT_LP64({ return false;});
   836   // Generic instructions
   837   // Does 32bit or 64bit as needed for the platform. In some sense these
   838   // belong in macro assembler but there is no need for both varieties to exist
   840   void lea(Register dst, Address src);
   842   void mov(Register dst, Register src);
   844   void pusha();
   845   void popa();
   847   void pushf();
   848   void popf();
   850   void push(int32_t imm32);
   852   void push(Register src);
   854   void pop(Register dst);
   856   // These are dummies to prevent surprise implicit conversions to Register
   857   void push(void* v);
   858   void pop(void* v);
   860   // These do register sized moves/scans
   861   void rep_mov();
   862   void rep_stos();
   863   void rep_stosb();
   864   void repne_scan();
   865 #ifdef _LP64
   866   void repne_scanl();
   867 #endif
   869   // Vanilla instructions in lexical order
   871   void adcl(Address dst, int32_t imm32);
   872   void adcl(Address dst, Register src);
   873   void adcl(Register dst, int32_t imm32);
   874   void adcl(Register dst, Address src);
   875   void adcl(Register dst, Register src);
   877   void adcq(Register dst, int32_t imm32);
   878   void adcq(Register dst, Address src);
   879   void adcq(Register dst, Register src);
   881   void addl(Address dst, int32_t imm32);
   882   void addl(Address dst, Register src);
   883   void addl(Register dst, int32_t imm32);
   884   void addl(Register dst, Address src);
   885   void addl(Register dst, Register src);
   887   void addq(Address dst, int32_t imm32);
   888   void addq(Address dst, Register src);
   889   void addq(Register dst, int32_t imm32);
   890   void addq(Register dst, Address src);
   891   void addq(Register dst, Register src);
   893 #ifdef _LP64
   894  //Add Unsigned Integers with Carry Flag
   895   void adcxq(Register dst, Register src);
   897  //Add Unsigned Integers with Overflow Flag
   898   void adoxq(Register dst, Register src);
   899 #endif
   901   void addr_nop_4();
   902   void addr_nop_5();
   903   void addr_nop_7();
   904   void addr_nop_8();
   906   // Add Scalar Double-Precision Floating-Point Values
   907   void addsd(XMMRegister dst, Address src);
   908   void addsd(XMMRegister dst, XMMRegister src);
   910   // Add Scalar Single-Precision Floating-Point Values
   911   void addss(XMMRegister dst, Address src);
   912   void addss(XMMRegister dst, XMMRegister src);
   914   // AES instructions
   915   void aesdec(XMMRegister dst, Address src);
   916   void aesdec(XMMRegister dst, XMMRegister src);
   917   void aesdeclast(XMMRegister dst, Address src);
   918   void aesdeclast(XMMRegister dst, XMMRegister src);
   919   void aesenc(XMMRegister dst, Address src);
   920   void aesenc(XMMRegister dst, XMMRegister src);
   921   void aesenclast(XMMRegister dst, Address src);
   922   void aesenclast(XMMRegister dst, XMMRegister src);
   925   void andl(Address  dst, int32_t imm32);
   926   void andl(Register dst, int32_t imm32);
   927   void andl(Register dst, Address src);
   928   void andl(Register dst, Register src);
   930   void andq(Address  dst, int32_t imm32);
   931   void andq(Register dst, int32_t imm32);
   932   void andq(Register dst, Address src);
   933   void andq(Register dst, Register src);
   935   // BMI instructions
   936   void andnl(Register dst, Register src1, Register src2);
   937   void andnl(Register dst, Register src1, Address src2);
   938   void andnq(Register dst, Register src1, Register src2);
   939   void andnq(Register dst, Register src1, Address src2);
   941   void blsil(Register dst, Register src);
   942   void blsil(Register dst, Address src);
   943   void blsiq(Register dst, Register src);
   944   void blsiq(Register dst, Address src);
   946   void blsmskl(Register dst, Register src);
   947   void blsmskl(Register dst, Address src);
   948   void blsmskq(Register dst, Register src);
   949   void blsmskq(Register dst, Address src);
   951   void blsrl(Register dst, Register src);
   952   void blsrl(Register dst, Address src);
   953   void blsrq(Register dst, Register src);
   954   void blsrq(Register dst, Address src);
   956   void bsfl(Register dst, Register src);
   957   void bsrl(Register dst, Register src);
   959 #ifdef _LP64
   960   void bsfq(Register dst, Register src);
   961   void bsrq(Register dst, Register src);
   962 #endif
   964   void bswapl(Register reg);
   966   void bswapq(Register reg);
   968   void call(Label& L, relocInfo::relocType rtype);
   969   void call(Register reg);  // push pc; pc <- reg
   970   void call(Address adr);   // push pc; pc <- adr
   972   void cdql();
   974   void cdqq();
   976   void cld();
   978   void clflush(Address adr);
   980   void cmovl(Condition cc, Register dst, Register src);
   981   void cmovl(Condition cc, Register dst, Address src);
   983   void cmovq(Condition cc, Register dst, Register src);
   984   void cmovq(Condition cc, Register dst, Address src);
   987   void cmpb(Address dst, int imm8);
   989   void cmpl(Address dst, int32_t imm32);
   991   void cmpl(Register dst, int32_t imm32);
   992   void cmpl(Register dst, Register src);
   993   void cmpl(Register dst, Address src);
   995   void cmpq(Address dst, int32_t imm32);
   996   void cmpq(Address dst, Register src);
   998   void cmpq(Register dst, int32_t imm32);
   999   void cmpq(Register dst, Register src);
  1000   void cmpq(Register dst, Address src);
   1002   // These are dummies used to catch attempts to convert NULL to Register
  1003   void cmpl(Register dst, void* junk); // dummy
  1004   void cmpq(Register dst, void* junk); // dummy
  1006   void cmpw(Address dst, int imm16);
  1008   void cmpxchg8 (Address adr);
  1010   void cmpxchgl(Register reg, Address adr);
  1012   void cmpxchgq(Register reg, Address adr);
  1014   // Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
  1015   void comisd(XMMRegister dst, Address src);
  1016   void comisd(XMMRegister dst, XMMRegister src);
  1018   // Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
  1019   void comiss(XMMRegister dst, Address src);
  1020   void comiss(XMMRegister dst, XMMRegister src);
  1022   // Identify processor type and features
  1023   void cpuid();
  1025   // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value
  1026   void cvtsd2ss(XMMRegister dst, XMMRegister src);
  1027   void cvtsd2ss(XMMRegister dst, Address src);
  1029   // Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value
  1030   void cvtsi2sdl(XMMRegister dst, Register src);
  1031   void cvtsi2sdl(XMMRegister dst, Address src);
  1032   void cvtsi2sdq(XMMRegister dst, Register src);
  1033   void cvtsi2sdq(XMMRegister dst, Address src);
  1035   // Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value
  1036   void cvtsi2ssl(XMMRegister dst, Register src);
  1037   void cvtsi2ssl(XMMRegister dst, Address src);
  1038   void cvtsi2ssq(XMMRegister dst, Register src);
  1039   void cvtsi2ssq(XMMRegister dst, Address src);
  1041   // Convert Packed Signed Doubleword Integers to Packed Double-Precision Floating-Point Value
  1042   void cvtdq2pd(XMMRegister dst, XMMRegister src);
  1044   // Convert Packed Signed Doubleword Integers to Packed Single-Precision Floating-Point Value
  1045   void cvtdq2ps(XMMRegister dst, XMMRegister src);
  1047   // Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value
  1048   void cvtss2sd(XMMRegister dst, XMMRegister src);
  1049   void cvtss2sd(XMMRegister dst, Address src);
  1051   // Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer
  1052   void cvttsd2sil(Register dst, Address src);
  1053   void cvttsd2sil(Register dst, XMMRegister src);
  1054   void cvttsd2siq(Register dst, XMMRegister src);
  1056   // Convert with Truncation Scalar Single-Precision Floating-Point Value to Doubleword Integer
  1057   void cvttss2sil(Register dst, XMMRegister src);
  1058   void cvttss2siq(Register dst, XMMRegister src);
  1060   // Divide Scalar Double-Precision Floating-Point Values
  1061   void divsd(XMMRegister dst, Address src);
  1062   void divsd(XMMRegister dst, XMMRegister src);
  1064   // Divide Scalar Single-Precision Floating-Point Values
  1065   void divss(XMMRegister dst, Address src);
  1066   void divss(XMMRegister dst, XMMRegister src);
  1068   void emms();
  1070   void fabs();
  1072   void fadd(int i);
  1074   void fadd_d(Address src);
  1075   void fadd_s(Address src);
  1077   // "Alternate" versions of x87 instructions place result down in FPU
  1078   // stack instead of on TOS
  1080   void fadda(int i); // "alternate" fadd
  1081   void faddp(int i = 1);
  1083   void fchs();
  1085   void fcom(int i);
  1087   void fcomp(int i = 1);
  1088   void fcomp_d(Address src);
  1089   void fcomp_s(Address src);
  1091   void fcompp();
  1093   void fcos();
  1095   void fdecstp();
  1097   void fdiv(int i);
  1098   void fdiv_d(Address src);
  1099   void fdivr_s(Address src);
  1100   void fdiva(int i);  // "alternate" fdiv
  1101   void fdivp(int i = 1);
  1103   void fdivr(int i);
  1104   void fdivr_d(Address src);
  1105   void fdiv_s(Address src);
  1107   void fdivra(int i); // "alternate" reversed fdiv
  1109   void fdivrp(int i = 1);
  1111   void ffree(int i = 0);
  1113   void fild_d(Address adr);
  1114   void fild_s(Address adr);
  1116   void fincstp();
  1118   void finit();
  1120   void fist_s (Address adr);
  1121   void fistp_d(Address adr);
  1122   void fistp_s(Address adr);
  1124   void fld1();
  1126   void fld_d(Address adr);
  1127   void fld_s(Address adr);
  1128   void fld_s(int index);
  1129   void fld_x(Address adr);  // extended-precision (80-bit) format
  1131   void fldcw(Address src);
  1133   void fldenv(Address src);
  1135   void fldlg2();
  1137   void fldln2();
  1139   void fldz();
  1141   void flog();
  1142   void flog10();
  1144   void fmul(int i);
  1146   void fmul_d(Address src);
  1147   void fmul_s(Address src);
  1149   void fmula(int i);  // "alternate" fmul
  1151   void fmulp(int i = 1);
  1153   void fnsave(Address dst);
  1155   void fnstcw(Address src);
  1157   void fnstsw_ax();
  1159   void fprem();
  1160   void fprem1();
  1162   void frstor(Address src);
  1164   void fsin();
  1166   void fsqrt();
  1168   void fst_d(Address adr);
  1169   void fst_s(Address adr);
  1171   void fstp_d(Address adr);
  1172   void fstp_d(int index);
  1173   void fstp_s(Address adr);
  1174   void fstp_x(Address adr); // extended-precision (80-bit) format
  1176   void fsub(int i);
  1177   void fsub_d(Address src);
  1178   void fsub_s(Address src);
  1180   void fsuba(int i);  // "alternate" fsub
  1182   void fsubp(int i = 1);
  1184   void fsubr(int i);
  1185   void fsubr_d(Address src);
  1186   void fsubr_s(Address src);
  1188   void fsubra(int i); // "alternate" reversed fsub
  1190   void fsubrp(int i = 1);
  1192   void ftan();
  1194   void ftst();
  1196   void fucomi(int i = 1);
  1197   void fucomip(int i = 1);
  1199   void fwait();
  1201   void fxch(int i = 1);
  1203   void fxrstor(Address src);
  1205   void fxsave(Address dst);
  1207   void fyl2x();
  1208   void frndint();
  1209   void f2xm1();
  1210   void fldl2e();
  1212   void hlt();
  1214   void idivl(Register src);
  1215   void divl(Register src); // Unsigned division
  1217 #ifdef _LP64
  1218   void idivq(Register src);
  1219 #endif
  1221   void imull(Register dst, Register src);
  1222   void imull(Register dst, Register src, int value);
  1223   void imull(Register dst, Address src);
  1225 #ifdef _LP64
  1226   void imulq(Register dst, Register src);
  1227   void imulq(Register dst, Register src, int value);
  1228   void imulq(Register dst, Address src);
  1229 #endif
   1231   // jcc is the generic conditional branch generator; it is used both for
   1232   // branches to run-time routines and for branches to labels. jcc
  1233   // takes a branch opcode (cc) and a label (L) and generates
  1234   // either a backward branch or a forward branch and links it
  1235   // to the label fixup chain. Usage:
  1236   //
  1237   // Label L;      // unbound label
  1238   // jcc(cc, L);   // forward branch to unbound label
  1239   // bind(L);      // bind label to the current pc
  1240   // jcc(cc, L);   // backward branch to bound label
  1241   // bind(L);      // illegal: a label may be bound only once
  1242   //
  1243   // Note: The same Label can be used for forward and backward branches
  1244   // but it may be bound only once.
  1246   void jcc(Condition cc, Label& L, bool maybe_short = true);
   1248   // Conditional jump to an 8-bit offset to L.
  1249   // WARNING: be very careful using this for forward jumps.  If the label is
  1250   // not bound within an 8-bit offset of this instruction, a run-time error
  1251   // will occur.
  1252   void jccb(Condition cc, Label& L);
  1254   void jmp(Address entry);    // pc <- entry
  1256   // Label operations & relative jumps (PPUM Appendix D)
  1257   void jmp(Label& L, bool maybe_short = true);   // unconditional jump to L
  1259   void jmp(Register entry); // pc <- entry
  1261   // Unconditional 8-bit offset jump to L.
  1262   // WARNING: be very careful using this for forward jumps.  If the label is
  1263   // not bound within an 8-bit offset of this instruction, a run-time error
  1264   // will occur.
  1265   void jmpb(Label& L);
  1267   void ldmxcsr( Address src );
  1269   void leal(Register dst, Address src);
  1271   void leaq(Register dst, Address src);
  1273   void lfence();
  1275   void lock();
  1277   void lzcntl(Register dst, Register src);
  1279 #ifdef _LP64
  1280   void lzcntq(Register dst, Register src);
  1281 #endif
  1283   enum Membar_mask_bits {
  1284     StoreStore = 1 << 3,
  1285     LoadStore  = 1 << 2,
  1286     StoreLoad  = 1 << 1,
  1287     LoadLoad   = 1 << 0
  1288   };
  1290   // Serializes memory and blows flags
  1291   void membar(Membar_mask_bits order_constraint) {
  1292     if (os::is_MP()) {
  1293       // We only have to handle StoreLoad
  1294       if (order_constraint & StoreLoad) {
  1295         // All usable chips support "locked" instructions which suffice
  1296         // as barriers, and are much faster than the alternative of
  1297         // using cpuid instruction. We use here a locked add [esp],0.
  1298         // This is conveniently otherwise a no-op except for blowing
  1299         // flags.
  1300         // Any change to this code may need to revisit other places in
  1301         // the code where this idiom is used, in particular the
  1302         // orderAccess code.
  1303         lock();
   1304         addl(Address(rsp, 0), 0);// Assert the lock# signal here
   1305       }
   1306     }
   1307   }
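  // Illustrative usage sketch (assumed call site, not part of this header): a StoreLoad
  // barrier after a volatile store would typically be emitted as
  //   __ membar(Assembler::StoreLoad);
  // which, on MP systems, expands to the lock; addl [rsp], 0 idiom above.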
  1309   void mfence();
  1311   // Moves
  1313   void mov64(Register dst, int64_t imm64);
  1315   void movb(Address dst, Register src);
  1316   void movb(Address dst, int imm8);
  1317   void movb(Register dst, Address src);
  1319   void movdl(XMMRegister dst, Register src);
  1320   void movdl(Register dst, XMMRegister src);
  1321   void movdl(XMMRegister dst, Address src);
  1322   void movdl(Address dst, XMMRegister src);
  1324   // Move Double Quadword
  1325   void movdq(XMMRegister dst, Register src);
  1326   void movdq(Register dst, XMMRegister src);
  1328   // Move Aligned Double Quadword
  1329   void movdqa(XMMRegister dst, XMMRegister src);
  1330   void movdqa(XMMRegister dst, Address src);
  1332   // Move Unaligned Double Quadword
  1333   void movdqu(Address     dst, XMMRegister src);
  1334   void movdqu(XMMRegister dst, Address src);
  1335   void movdqu(XMMRegister dst, XMMRegister src);
  1337   // Move Unaligned 256bit Vector
  1338   void vmovdqu(Address dst, XMMRegister src);
  1339   void vmovdqu(XMMRegister dst, Address src);
  1340   void vmovdqu(XMMRegister dst, XMMRegister src);
  1342   // Move lower 64bit to high 64bit in 128bit register
  1343   void movlhps(XMMRegister dst, XMMRegister src);
  1345   void movl(Register dst, int32_t imm32);
  1346   void movl(Address dst, int32_t imm32);
  1347   void movl(Register dst, Register src);
  1348   void movl(Register dst, Address src);
  1349   void movl(Address dst, Register src);
  1351   // These dummies prevent using movl from converting a zero (like NULL) into Register
  1352   // by giving the compiler two choices it can't resolve
  1354   void movl(Address  dst, void* junk);
  1355   void movl(Register dst, void* junk);
  1357 #ifdef _LP64
  1358   void movq(Register dst, Register src);
  1359   void movq(Register dst, Address src);
  1360   void movq(Address  dst, Register src);
  1361 #endif
  1363   void movq(Address     dst, MMXRegister src );
  1364   void movq(MMXRegister dst, Address src );
  1366 #ifdef _LP64
  1367   // These dummies prevent using movq from converting a zero (like NULL) into Register
  1368   // by giving the compiler two choices it can't resolve
  1370   void movq(Address  dst, void* dummy);
  1371   void movq(Register dst, void* dummy);
  1372 #endif
  1374   // Move Quadword
  1375   void movq(Address     dst, XMMRegister src);
  1376   void movq(XMMRegister dst, Address src);
  1378   void movsbl(Register dst, Address src);
  1379   void movsbl(Register dst, Register src);
  1381 #ifdef _LP64
  1382   void movsbq(Register dst, Address src);
  1383   void movsbq(Register dst, Register src);
  1385   // Move signed 32bit immediate to 64bit extending sign
  1386   void movslq(Address  dst, int32_t imm64);
  1387   void movslq(Register dst, int32_t imm64);
  1389   void movslq(Register dst, Address src);
  1390   void movslq(Register dst, Register src);
  1391   void movslq(Register dst, void* src); // Dummy declaration to cause NULL to be ambiguous
  1392 #endif
  1394   void movswl(Register dst, Address src);
  1395   void movswl(Register dst, Register src);
  1397 #ifdef _LP64
  1398   void movswq(Register dst, Address src);
  1399   void movswq(Register dst, Register src);
  1400 #endif
  1402   void movw(Address dst, int imm16);
  1403   void movw(Register dst, Address src);
  1404   void movw(Address dst, Register src);
  1406   void movzbl(Register dst, Address src);
  1407   void movzbl(Register dst, Register src);
  1409 #ifdef _LP64
  1410   void movzbq(Register dst, Address src);
  1411   void movzbq(Register dst, Register src);
  1412 #endif
  1414   void movzwl(Register dst, Address src);
  1415   void movzwl(Register dst, Register src);
  1417 #ifdef _LP64
  1418   void movzwq(Register dst, Address src);
  1419   void movzwq(Register dst, Register src);
  1420 #endif
  1422   // Unsigned multiply with RAX destination register
  1423   void mull(Address src);
  1424   void mull(Register src);
  1426 #ifdef _LP64
  1427   void mulq(Address src);
  1428   void mulq(Register src);
  1429   void mulxq(Register dst1, Register dst2, Register src);
  1430 #endif
  1432   // Multiply Scalar Double-Precision Floating-Point Values
  1433   void mulsd(XMMRegister dst, Address src);
  1434   void mulsd(XMMRegister dst, XMMRegister src);
  1436   // Multiply Scalar Single-Precision Floating-Point Values
  1437   void mulss(XMMRegister dst, Address src);
  1438   void mulss(XMMRegister dst, XMMRegister src);
  1440   void negl(Register dst);
  1442 #ifdef _LP64
  1443   void negq(Register dst);
  1444 #endif
  1446   void nop(int i = 1);
  1448   void notl(Register dst);
  1450 #ifdef _LP64
  1451   void notq(Register dst);
  1452 #endif
  1454   void orl(Address dst, int32_t imm32);
  1455   void orl(Register dst, int32_t imm32);
  1456   void orl(Register dst, Address src);
  1457   void orl(Register dst, Register src);
  1458   void orl(Address dst, Register src);
  1460   void orq(Address dst, int32_t imm32);
  1461   void orq(Register dst, int32_t imm32);
  1462   void orq(Register dst, Address src);
  1463   void orq(Register dst, Register src);
  1465   // Pack with unsigned saturation
  1466   void packuswb(XMMRegister dst, XMMRegister src);
  1467   void packuswb(XMMRegister dst, Address src);
  1468   void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
   1470   // Permutation of 64bit words
  1471   void vpermq(XMMRegister dst, XMMRegister src, int imm8, bool vector256);
  1473   void pause();
  1475   // SSE4.2 string instructions
  1476   void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
  1477   void pcmpestri(XMMRegister xmm1, Address src, int imm8);
  1479   // SSE 4.1 extract
  1480   void pextrd(Register dst, XMMRegister src, int imm8);
  1481   void pextrq(Register dst, XMMRegister src, int imm8);
  1483   // SSE 4.1 insert
  1484   void pinsrd(XMMRegister dst, Register src, int imm8);
  1485   void pinsrq(XMMRegister dst, Register src, int imm8);
  1487   // SSE4.1 packed move
  1488   void pmovzxbw(XMMRegister dst, XMMRegister src);
  1489   void pmovzxbw(XMMRegister dst, Address src);
  1491 #ifndef _LP64 // no 32bit push/pop on amd64
  1492   void popl(Address dst);
  1493 #endif
  1495 #ifdef _LP64
  1496   void popq(Address dst);
  1497 #endif
  1499   void popcntl(Register dst, Address src);
  1500   void popcntl(Register dst, Register src);
  1502 #ifdef _LP64
  1503   void popcntq(Register dst, Address src);
  1504   void popcntq(Register dst, Register src);
  1505 #endif
  1507   // Prefetches (SSE, SSE2, 3DNOW only)
  1509   void prefetchnta(Address src);
  1510   void prefetchr(Address src);
  1511   void prefetcht0(Address src);
  1512   void prefetcht1(Address src);
  1513   void prefetcht2(Address src);
  1514   void prefetchw(Address src);
  1516   // Shuffle Bytes
  1517   void pshufb(XMMRegister dst, XMMRegister src);
  1518   void pshufb(XMMRegister dst, Address src);
  1520   // Shuffle Packed Doublewords
  1521   void pshufd(XMMRegister dst, XMMRegister src, int mode);
  1522   void pshufd(XMMRegister dst, Address src,     int mode);
  1524   // Shuffle Packed Low Words
  1525   void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  1526   void pshuflw(XMMRegister dst, Address src,     int mode);
  1528   // Shift Right by bytes Logical DoubleQuadword Immediate
  1529   void psrldq(XMMRegister dst, int shift);
  1531   // Logical Compare 128bit
  1532   void ptest(XMMRegister dst, XMMRegister src);
  1533   void ptest(XMMRegister dst, Address src);
  1534   // Logical Compare 256bit
  1535   void vptest(XMMRegister dst, XMMRegister src);
  1536   void vptest(XMMRegister dst, Address src);
  1538   // Interleave Low Bytes
  1539   void punpcklbw(XMMRegister dst, XMMRegister src);
  1540   void punpcklbw(XMMRegister dst, Address src);
  1542   // Interleave Low Doublewords
  1543   void punpckldq(XMMRegister dst, XMMRegister src);
  1544   void punpckldq(XMMRegister dst, Address src);
  1546   // Interleave Low Quadwords
  1547   void punpcklqdq(XMMRegister dst, XMMRegister src);
  1549 #ifndef _LP64 // no 32bit push/pop on amd64
  1550   void pushl(Address src);
  1551 #endif
  1553   void pushq(Address src);
  1555   void rcll(Register dst, int imm8);
  1557   void rclq(Register dst, int imm8);
  1559   void rcrq(Register dst, int imm8);
  1561   void rdtsc();
  1563   void ret(int imm16);
  1565 #ifdef _LP64
  1566   void rorq(Register dst, int imm8);
  1567   void rorxq(Register dst, Register src, int imm8);
  1568 #endif
  1570   void sahf();
  1572   void sarl(Register dst, int imm8);
  1573   void sarl(Register dst);
  1575   void sarq(Register dst, int imm8);
  1576   void sarq(Register dst);
  1578   void sbbl(Address dst, int32_t imm32);
  1579   void sbbl(Register dst, int32_t imm32);
  1580   void sbbl(Register dst, Address src);
  1581   void sbbl(Register dst, Register src);
  1583   void sbbq(Address dst, int32_t imm32);
  1584   void sbbq(Register dst, int32_t imm32);
  1585   void sbbq(Register dst, Address src);
  1586   void sbbq(Register dst, Register src);
  1588   void setb(Condition cc, Register dst);
  1590   void shldl(Register dst, Register src);
  1592   void shll(Register dst, int imm8);
  1593   void shll(Register dst);
  1595   void shlq(Register dst, int imm8);
  1596   void shlq(Register dst);
  1598   void shrdl(Register dst, Register src);
  1600   void shrl(Register dst, int imm8);
  1601   void shrl(Register dst);
  1603   void shrq(Register dst, int imm8);
  1604   void shrq(Register dst);
  1606   void smovl(); // QQQ generic?
  1608   // Compute Square Root of Scalar Double-Precision Floating-Point Value
  1609   void sqrtsd(XMMRegister dst, Address src);
  1610   void sqrtsd(XMMRegister dst, XMMRegister src);
  1612   // Compute Square Root of Scalar Single-Precision Floating-Point Value
  1613   void sqrtss(XMMRegister dst, Address src);
  1614   void sqrtss(XMMRegister dst, XMMRegister src);
  1616   void std();
  1618   void stmxcsr( Address dst );
  1620   void subl(Address dst, int32_t imm32);
  1621   void subl(Address dst, Register src);
  1622   void subl(Register dst, int32_t imm32);
  1623   void subl(Register dst, Address src);
  1624   void subl(Register dst, Register src);
  1626   void subq(Address dst, int32_t imm32);
  1627   void subq(Address dst, Register src);
  1628   void subq(Register dst, int32_t imm32);
  1629   void subq(Register dst, Address src);
  1630   void subq(Register dst, Register src);
  1632   // Force generation of a 4-byte immediate value even if it fits into 8 bits
  1633   void subl_imm32(Register dst, int32_t imm32);
  1634   void subq_imm32(Register dst, int32_t imm32);
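         // A hedged note on the _imm32 forms: one plausible reason to force the
         // 4-byte encoding is a code site whose length must not depend on the
         // value, e.g.
         //   subq_imm32(rsp, frame_size);  // fixed-length encoding even when
         //                                 // 'frame_size' (illustrative) fits in 8 bits
         // whereas plain subq() may pick the shorter sign-extended imm8 form.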
  1636   // Subtract Scalar Double-Precision Floating-Point Values
  1637   void subsd(XMMRegister dst, Address src);
  1638   void subsd(XMMRegister dst, XMMRegister src);
  1640   // Subtract Scalar Single-Precision Floating-Point Values
  1641   void subss(XMMRegister dst, Address src);
  1642   void subss(XMMRegister dst, XMMRegister src);
  1644   void testb(Register dst, int imm8);
  1646   void testl(Register dst, int32_t imm32);
  1647   void testl(Register dst, Register src);
  1648   void testl(Register dst, Address src);
  1650   void testq(Register dst, int32_t imm32);
  1651   void testq(Register dst, Register src);
  1653   // BMI1 - count trailing zeros
  1654   void tzcntl(Register dst, Register src);
  1655   void tzcntq(Register dst, Register src);
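         // Assumption, not stated in this header: the tzcnt encoding decodes as
         // a plain bsf on CPUs without BMI1, so callers are expected to emit
         // these only after a BMI1 feature check (for example a
         // VM_Version::supports_bmi1()-style predicate).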
  1657   // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
  1658   void ucomisd(XMMRegister dst, Address src);
  1659   void ucomisd(XMMRegister dst, XMMRegister src);
  1661   // Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
  1662   void ucomiss(XMMRegister dst, Address src);
  1663   void ucomiss(XMMRegister dst, XMMRegister src);
  1665   void xabort(int8_t imm8);
  1667   void xaddl(Address dst, Register src);
  1669   void xaddq(Address dst, Register src);
  1671   void xbegin(Label& abort, relocInfo::relocType rtype = relocInfo::none);
  1673   void xchgl(Register reg, Address adr);
  1674   void xchgl(Register dst, Register src);
  1676   void xchgq(Register reg, Address adr);
  1677   void xchgq(Register dst, Register src);
  1679   void xend();
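         // A minimal RTM shape (sketch only; the label name is made up):
         //   Label abort;
         //   xbegin(abort);   // start transaction; control reaches 'abort' on abort
         //   ...              // transactional region
         //   xend();          // commit
         //   bind(abort);     // abort handler; EAX holds the abort status
         // xabort(imm8) forces an abort and makes imm8 visible in that status.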
  1681   // Get Value of Extended Control Register
  1682   void xgetbv();
  1684   void xorl(Register dst, int32_t imm32);
  1685   void xorl(Register dst, Address src);
  1686   void xorl(Register dst, Register src);
  1688   void xorq(Register dst, Address src);
  1689   void xorq(Register dst, Register src);
  1691   void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0
  1693   // AVX 3-operand scalar instructions (encoded with VEX prefix)
  1695   void vaddsd(XMMRegister dst, XMMRegister nds, Address src);
  1696   void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  1697   void vaddss(XMMRegister dst, XMMRegister nds, Address src);
  1698   void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  1699   void vdivsd(XMMRegister dst, XMMRegister nds, Address src);
  1700   void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  1701   void vdivss(XMMRegister dst, XMMRegister nds, Address src);
  1702   void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  1703   void vmulsd(XMMRegister dst, XMMRegister nds, Address src);
  1704   void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  1705   void vmulss(XMMRegister dst, XMMRegister nds, Address src);
  1706   void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  1707   void vsubsd(XMMRegister dst, XMMRegister nds, Address src);
  1708   void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  1709   void vsubss(XMMRegister dst, XMMRegister nds, Address src);
  1710   void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
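         // Reading the 3-operand forms (a note, not additional API): 'nds' is the
         // first source and 'dst' is written without being read, so for example
         //   vaddsd(xmm0, xmm1, xmm2);   // xmm0 = xmm1 + xmm2 (scalar double)
         // leaves xmm1 and xmm2 unchanged, unlike the two-operand SSE addsd.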
  1713   //====================VECTOR ARITHMETIC=====================================
  1715   // Add Packed Floating-Point Values
  1716   void addpd(XMMRegister dst, XMMRegister src);
  1717   void addps(XMMRegister dst, XMMRegister src);
  1718   void vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1719   void vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1720   void vaddpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1721   void vaddps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1723   // Subtract Packed Floating-Point Values
  1724   void subpd(XMMRegister dst, XMMRegister src);
  1725   void subps(XMMRegister dst, XMMRegister src);
  1726   void vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1727   void vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1728   void vsubpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1729   void vsubps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1731   // Multiply Packed Floating-Point Values
  1732   void mulpd(XMMRegister dst, XMMRegister src);
  1733   void mulps(XMMRegister dst, XMMRegister src);
  1734   void vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1735   void vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1736   void vmulpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1737   void vmulps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1739   // Divide Packed Floating-Point Values
  1740   void divpd(XMMRegister dst, XMMRegister src);
  1741   void divps(XMMRegister dst, XMMRegister src);
  1742   void vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1743   void vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1744   void vdivpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1745   void vdivps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1747   // Bitwise Logical AND of Packed Floating-Point Values
  1748   void andpd(XMMRegister dst, XMMRegister src);
  1749   void andps(XMMRegister dst, XMMRegister src);
  1750   void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1751   void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1752   void vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1753   void vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1755   // Bitwise Logical XOR of Packed Floating-Point Values
  1756   void xorpd(XMMRegister dst, XMMRegister src);
  1757   void xorps(XMMRegister dst, XMMRegister src);
  1758   void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1759   void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1760   void vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1761   void vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1763   // Add packed integers
  1764   void paddb(XMMRegister dst, XMMRegister src);
  1765   void paddw(XMMRegister dst, XMMRegister src);
  1766   void paddd(XMMRegister dst, XMMRegister src);
  1767   void paddq(XMMRegister dst, XMMRegister src);
  1768   void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1769   void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1770   void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1771   void vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1772   void vpaddb(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1773   void vpaddw(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1774   void vpaddd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1775   void vpaddq(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1777   // Sub packed integers
  1778   void psubb(XMMRegister dst, XMMRegister src);
  1779   void psubw(XMMRegister dst, XMMRegister src);
  1780   void psubd(XMMRegister dst, XMMRegister src);
  1781   void psubq(XMMRegister dst, XMMRegister src);
  1782   void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1783   void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1784   void vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1785   void vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1786   void vpsubb(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1787   void vpsubw(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1788   void vpsubd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1789   void vpsubq(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1791   // Multiply packed integers (only shorts and ints)
  1792   void pmullw(XMMRegister dst, XMMRegister src);
  1793   void pmulld(XMMRegister dst, XMMRegister src);
  1794   void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1795   void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1796   void vpmullw(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1797   void vpmulld(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1799   // Shift left packed integers
  1800   void psllw(XMMRegister dst, int shift);
  1801   void pslld(XMMRegister dst, int shift);
  1802   void psllq(XMMRegister dst, int shift);
  1803   void psllw(XMMRegister dst, XMMRegister shift);
  1804   void pslld(XMMRegister dst, XMMRegister shift);
  1805   void psllq(XMMRegister dst, XMMRegister shift);
  1806   void vpsllw(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  1807   void vpslld(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  1808   void vpsllq(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  1809   void vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  1810   void vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  1811   void vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  1813   // Logical shift right packed integers
  1814   void psrlw(XMMRegister dst, int shift);
  1815   void psrld(XMMRegister dst, int shift);
  1816   void psrlq(XMMRegister dst, int shift);
  1817   void psrlw(XMMRegister dst, XMMRegister shift);
  1818   void psrld(XMMRegister dst, XMMRegister shift);
  1819   void psrlq(XMMRegister dst, XMMRegister shift);
  1820   void vpsrlw(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  1821   void vpsrld(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  1822   void vpsrlq(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  1823   void vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  1824   void vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  1825   void vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  1827   // Arithmetic shift right packed integers (only shorts and ints, no instructions for longs)
  1828   void psraw(XMMRegister dst, int shift);
  1829   void psrad(XMMRegister dst, int shift);
  1830   void psraw(XMMRegister dst, XMMRegister shift);
  1831   void psrad(XMMRegister dst, XMMRegister shift);
  1832   void vpsraw(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  1833   void vpsrad(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  1834   void vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  1835   void vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  1837   // And packed integers
  1838   void pand(XMMRegister dst, XMMRegister src);
  1839   void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1840   void vpand(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1842   // Or packed integers
  1843   void por(XMMRegister dst, XMMRegister src);
  1844   void vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1845   void vpor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  1847   // Xor packed integers
  1848   void pxor(XMMRegister dst, XMMRegister src);
  1849   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  1850   void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
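         // Common idiom (no new instruction implied): xor-ing a register with
         // itself clears it, e.g. pxor(xmm0, xmm0) zeroes all 128 bits of xmm0
         // and is the usual way to materialize a zero vector.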
  1852   // Copy the low 128bit into the high 128bit of a YMM register.
  1853   void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
  1854   void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
  1856   // Load/store the high 128bit of a YMM register without destroying the other half.
  1857   void vinsertf128h(XMMRegister dst, Address src);
  1858   void vinserti128h(XMMRegister dst, Address src);
  1859   void vextractf128h(Address dst, XMMRegister src);
  1860   void vextracti128h(Address dst, XMMRegister src);
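         // A sketch of the intended use (register/address choices illustrative):
         //   vinsertf128h(ymm_dst, Address(rsp, 0));   // load 16 bytes into the
         //                                             // high half, low half kept
         //   vextractf128h(Address(rsp, 0), ymm_src);  // store the high half
         // These helpers appear to correspond to vinsertf128/vextractf128 with
         // the lane selector pointing at the upper 128 bits.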
  1862   // Duplicate 4-byte integer data from src into 8 locations in dst
  1863   void vpbroadcastd(XMMRegister dst, XMMRegister src);
  1865   // Carry-Less Multiplication Quadword
  1866   void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
  1867   void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
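         // The 'mask' immediate selects which quadword of each operand is used:
         // bit 0 picks the quadword of the destination operand and bit 4 picks
         // the quadword of the source (per the instruction's definition), e.g.
         //   pclmulqdq(xmm1, xmm2, 0x00);   // low(xmm1) clmul low(xmm2)
         //   pclmulqdq(xmm1, xmm2, 0x11);   // high(xmm1) clmul high(xmm2)
         // Register numbers here are illustrative only.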
  1869   // AVX instruction which is used to clear the upper 128 bits of YMM registers and
  1870   // to avoid the transition penalty between AVX and SSE states. There is no
  1871   // penalty if legacy SSE instructions are encoded using a VEX prefix because
  1872   // they always clear the upper 128 bits. It should be used before calling
  1873   // runtime code and native libraries.
  1874   void vzeroupper();
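         // A hedged usage sketch (caller side, not part of this class): code that
         // has executed 256-bit AVX instructions would typically emit
         //   vzeroupper();
         // just before calling into the C runtime or a native library, so that
         // legacy-SSE code on the other side does not pay the AVX/SSE
         // state-transition penalty described above.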
  1876  protected:
  1877   // The following instructions require 16-byte address alignment in SSE mode.
  1878   // They should be called only from the corresponding MacroAssembler instructions.
  1879   void andpd(XMMRegister dst, Address src);
  1880   void andps(XMMRegister dst, Address src);
  1881   void xorpd(XMMRegister dst, Address src);
  1882   void xorps(XMMRegister dst, Address src);
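         // A note on the alignment requirement above (assumption about callers,
         // not an API guarantee): the legacy-SSE memory forms of andpd/andps/
         // xorpd/xorps fault on operands that are not 16-byte aligned, so the
         // MacroAssembler wrappers are expected to point 'src' at suitably
         // aligned data, for example a 16-byte-aligned constant.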
  1884 };
  1886 #endif // CPU_X86_VM_ASSEMBLER_X86_HPP
