src/cpu/x86/vm/assembler_x86.hpp

author       goetz
date         Wed, 21 Jan 2015 12:38:11 +0100
changeset    7574:a51071796915
parent       7152:166d744df0de
child        7535:7ae4e26cb1e0
child        7854:e8260b6328fb
permissions  -rw-r--r--

8068013: [TESTBUG] Aix support in hotspot jtreg tests
Reviewed-by: ctornqvi, fzhinkin, farvidsson

duke@435 1 /*
zgu@4492 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef CPU_X86_VM_ASSEMBLER_X86_HPP
stefank@2314 26 #define CPU_X86_VM_ASSEMBLER_X86_HPP
stefank@2314 27
twisti@4318 28 #include "asm/register.hpp"
twisti@4318 29
duke@435 30 class BiasedLockingCounters;
duke@435 31
duke@435 32 // Contains all the definitions needed for x86 assembly code generation.
duke@435 33
duke@435 34 // Calling convention
duke@435 35 class Argument VALUE_OBJ_CLASS_SPEC {
duke@435 36 public:
duke@435 37 enum {
duke@435 38 #ifdef _LP64
duke@435 39 #ifdef _WIN64
duke@435 40 n_int_register_parameters_c = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
duke@435 41 n_float_register_parameters_c = 4, // xmm0 - xmm3 (c_farg0, c_farg1, ... )
duke@435 42 #else
duke@435 43 n_int_register_parameters_c = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
duke@435 44 n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ... )
duke@435 45 #endif // _WIN64
duke@435 46 n_int_register_parameters_j = 6, // j_rarg0, j_rarg1, ...
duke@435 47 n_float_register_parameters_j = 8 // j_farg0, j_farg1, ...
duke@435 48 #else
duke@435 49 n_register_parameters = 0 // 0 registers used to pass arguments
duke@435 50 #endif // _LP64
duke@435 51 };
duke@435 52 };
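
// Illustrative sketch: the counts above are what a caller checks when deciding
// whether the next outgoing argument still fits in a register, e.g.
//
//   if (int_args < Argument::n_int_register_parameters_c) {
//     // next integer argument goes in a register (c_rarg0, c_rarg1, ...)
//   } else {
//     // argument goes on the stack
//   }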
duke@435 53
duke@435 54
duke@435 55 #ifdef _LP64
duke@435 56 // Symbolically name the register arguments used by the c calling convention.
duke@435 57 // Windows is different from linux/solaris. So much for standards...
duke@435 58
duke@435 59 #ifdef _WIN64
duke@435 60
duke@435 61 REGISTER_DECLARATION(Register, c_rarg0, rcx);
duke@435 62 REGISTER_DECLARATION(Register, c_rarg1, rdx);
duke@435 63 REGISTER_DECLARATION(Register, c_rarg2, r8);
duke@435 64 REGISTER_DECLARATION(Register, c_rarg3, r9);
duke@435 65
never@739 66 REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
never@739 67 REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
never@739 68 REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
never@739 69 REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);
duke@435 70
duke@435 71 #else
duke@435 72
duke@435 73 REGISTER_DECLARATION(Register, c_rarg0, rdi);
duke@435 74 REGISTER_DECLARATION(Register, c_rarg1, rsi);
duke@435 75 REGISTER_DECLARATION(Register, c_rarg2, rdx);
duke@435 76 REGISTER_DECLARATION(Register, c_rarg3, rcx);
duke@435 77 REGISTER_DECLARATION(Register, c_rarg4, r8);
duke@435 78 REGISTER_DECLARATION(Register, c_rarg5, r9);
duke@435 79
never@739 80 REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
never@739 81 REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
never@739 82 REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
never@739 83 REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);
never@739 84 REGISTER_DECLARATION(XMMRegister, c_farg4, xmm4);
never@739 85 REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5);
never@739 86 REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6);
never@739 87 REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7);
duke@435 88
duke@435 89 #endif // _WIN64
duke@435 90
duke@435 91 // Symbolically name the register arguments used by the Java calling convention.
duke@435 92 // We have control over the convention for java so we can do what we please.
duke@435 93 // What pleases us is to offset the java calling convention so that when
duke@435 94 // we call a suitable jni method the arguments are lined up and we don't
duke@435 95 // have to do any shuffling. A suitable jni method is non-static and takes a
duke@435 96 // small number of arguments (two fewer register args on windows)
duke@435 97 //
duke@435 98 // |-------------------------------------------------------|
duke@435 99 // | c_rarg0 c_rarg1 c_rarg2 c_rarg3 c_rarg4 c_rarg5 |
duke@435 100 // |-------------------------------------------------------|
duke@435 101 // | rcx rdx r8 r9 rdi* rsi* | windows (* not a c_rarg)
duke@435 102 // | rdi rsi rdx rcx r8 r9 | solaris/linux
duke@435 103 // |-------------------------------------------------------|
duke@435 104 // | j_rarg5 j_rarg0 j_rarg1 j_rarg2 j_rarg3 j_rarg4 |
duke@435 105 // |-------------------------------------------------------|
duke@435 106
duke@435 107 REGISTER_DECLARATION(Register, j_rarg0, c_rarg1);
duke@435 108 REGISTER_DECLARATION(Register, j_rarg1, c_rarg2);
duke@435 109 REGISTER_DECLARATION(Register, j_rarg2, c_rarg3);
duke@435 110 // Windows runs out of register args here
duke@435 111 #ifdef _WIN64
duke@435 112 REGISTER_DECLARATION(Register, j_rarg3, rdi);
duke@435 113 REGISTER_DECLARATION(Register, j_rarg4, rsi);
duke@435 114 #else
duke@435 115 REGISTER_DECLARATION(Register, j_rarg3, c_rarg4);
duke@435 116 REGISTER_DECLARATION(Register, j_rarg4, c_rarg5);
duke@435 117 #endif /* _WIN64 */
duke@435 118 REGISTER_DECLARATION(Register, j_rarg5, c_rarg0);
duke@435 119
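// Example of the intended line-up (illustrative sketch): for a non-static
// native call the JNI convention is (JNIEnv*, receiver, args...), so a
// receiver already sitting in j_rarg0 (== c_rarg1) is exactly where the
// jobject argument of the jni method goes; only c_rarg0 (the JNIEnv*)
// still has to be filled in before the call.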
never@739 120 REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0);
never@739 121 REGISTER_DECLARATION(XMMRegister, j_farg1, xmm1);
never@739 122 REGISTER_DECLARATION(XMMRegister, j_farg2, xmm2);
never@739 123 REGISTER_DECLARATION(XMMRegister, j_farg3, xmm3);
never@739 124 REGISTER_DECLARATION(XMMRegister, j_farg4, xmm4);
never@739 125 REGISTER_DECLARATION(XMMRegister, j_farg5, xmm5);
never@739 126 REGISTER_DECLARATION(XMMRegister, j_farg6, xmm6);
never@739 127 REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7);
duke@435 128
duke@435 129 REGISTER_DECLARATION(Register, rscratch1, r10); // volatile
duke@435 130 REGISTER_DECLARATION(Register, rscratch2, r11); // volatile
duke@435 131
never@739 132 REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved
duke@435 133 REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
duke@435 134
never@739 135 #else
never@739 136 // rscratch1 will appear in 32bit code that is dead but of course must compile.
never@739 137 // Using noreg ensures that if the dead code is incorrectly live and executed it
never@739 138 // will cause an assertion failure.
never@739 139 #define rscratch1 noreg
iveresov@2344 140 #define rscratch2 noreg
never@739 141
duke@435 142 #endif // _LP64
duke@435 143
twisti@1919 144 // JSR 292 fixed register usages:
twisti@1919 145 REGISTER_DECLARATION(Register, rbp_mh_SP_save, rbp);
twisti@1919 146
duke@435 147 // Address is an abstraction used to represent a memory location
duke@435 148 // using any of the amd64 addressing modes with one object.
duke@435 149 //
duke@435 150 // Note: A register location is represented via a Register, not
duke@435 151 // via an address for efficiency & simplicity reasons.
duke@435 152
duke@435 153 class ArrayAddress;
duke@435 154
duke@435 155 class Address VALUE_OBJ_CLASS_SPEC {
duke@435 156 public:
duke@435 157 enum ScaleFactor {
duke@435 158 no_scale = -1,
duke@435 159 times_1 = 0,
duke@435 160 times_2 = 1,
duke@435 161 times_4 = 2,
never@739 162 times_8 = 3,
never@739 163 times_ptr = LP64_ONLY(times_8) NOT_LP64(times_4)
duke@435 164 };
jrose@1057 165 static ScaleFactor times(int size) {
jrose@1057 166 assert(size >= 1 && size <= 8 && is_power_of_2(size), "bad scale size");
jrose@1057 167 if (size == 8) return times_8;
jrose@1057 168 if (size == 4) return times_4;
jrose@1057 169 if (size == 2) return times_2;
jrose@1057 170 return times_1;
jrose@1057 171 }
jrose@1057 172 static int scale_size(ScaleFactor scale) {
jrose@1057 173 assert(scale != no_scale, "");
jrose@1057 174 assert(((1 << (int)times_1) == 1 &&
jrose@1057 175 (1 << (int)times_2) == 2 &&
jrose@1057 176 (1 << (int)times_4) == 4 &&
jrose@1057 177 (1 << (int)times_8) == 8), "");
jrose@1057 178 return (1 << (int)scale);
jrose@1057 179 }
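// Examples (follow directly from the definitions above):
//   Address::times(sizeof(jint))       // == times_4
//   Address::scale_size(times_8)       // == 8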
duke@435 180
duke@435 181 private:
duke@435 182 Register _base;
duke@435 183 Register _index;
duke@435 184 ScaleFactor _scale;
duke@435 185 int _disp;
duke@435 186 RelocationHolder _rspec;
duke@435 187
never@739 188 // Easily misused constructors make them private
never@739 189 // %%% can we make these go away?
never@739 190 NOT_LP64(Address(address loc, RelocationHolder spec);)
never@739 191 Address(int disp, address loc, relocInfo::relocType rtype);
never@739 192 Address(int disp, address loc, RelocationHolder spec);
duke@435 193
duke@435 194 public:
never@739 195
never@739 196 int disp() { return _disp; }
duke@435 197 // creation
duke@435 198 Address()
duke@435 199 : _base(noreg),
duke@435 200 _index(noreg),
duke@435 201 _scale(no_scale),
duke@435 202 _disp(0) {
duke@435 203 }
duke@435 204
duke@435 205 // No default displacement, otherwise Register can be implicitly
duke@435 206 // converted to 0(Register), which is quite a different animal.
duke@435 207
duke@435 208 Address(Register base, int disp)
duke@435 209 : _base(base),
duke@435 210 _index(noreg),
duke@435 211 _scale(no_scale),
duke@435 212 _disp(disp) {
duke@435 213 }
duke@435 214
duke@435 215 Address(Register base, Register index, ScaleFactor scale, int disp = 0)
duke@435 216 : _base (base),
duke@435 217 _index(index),
duke@435 218 _scale(scale),
duke@435 219 _disp (disp) {
duke@435 220 assert(!index->is_valid() == (scale == Address::no_scale),
duke@435 221 "inconsistent address");
duke@435 222 }
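// Typical forms (usage sketch only; register choices are arbitrary):
//
//   Address(rsp, 2 * wordSize)                 // base + displacement
//   Address(rbx, rcx, Address::times_8, 16)    // base + index*8 + 16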
duke@435 223
jrose@1100 224 Address(Register base, RegisterOrConstant index, ScaleFactor scale = times_1, int disp = 0)
jrose@1057 225 : _base (base),
jrose@1057 226 _index(index.register_or_noreg()),
jrose@1057 227 _scale(scale),
jrose@1057 228 _disp (disp + (index.constant_or_zero() * scale_size(scale))) {
jrose@1057 229 if (!index.is_register()) scale = Address::no_scale;
jrose@1057 230 assert(!_index->is_valid() == (scale == Address::no_scale),
jrose@1057 231 "inconsistent address");
jrose@1057 232 }
jrose@1057 233
jrose@1057 234 Address plus_disp(int disp) const {
jrose@1057 235 Address a = (*this);
jrose@1057 236 a._disp += disp;
jrose@1057 237 return a;
jrose@1057 238 }
never@2895 239 Address plus_disp(RegisterOrConstant disp, ScaleFactor scale = times_1) const {
never@2895 240 Address a = (*this);
never@2895 241 a._disp += disp.constant_or_zero() * scale_size(scale);
never@2895 242 if (disp.is_register()) {
never@2895 243 assert(!a.index()->is_valid(), "competing indexes");
never@2895 244 a._index = disp.as_register();
never@2895 245 a._scale = scale;
never@2895 246 }
never@2895 247 return a;
never@2895 248 }
never@2895 249 bool is_same_address(Address a) const {
never@2895 250 // disregard _rspec
never@2895 251 return _base == a._base && _disp == a._disp && _index == a._index && _scale == a._scale;
never@2895 252 }
jrose@1057 253
duke@435 254 // The following two overloads are used in connection with the
duke@435 255 // ByteSize type (see sizes.hpp). They simplify the use of
duke@435 256 // ByteSize'd arguments in assembly code. Note that their equivalent
duke@435 257 // for the optimized build are the member functions with int disp
duke@435 258 // argument since ByteSize is mapped to an int type in that case.
duke@435 259 //
duke@435 260 // Note: DO NOT introduce similar overloaded functions for WordSize
duke@435 261 // arguments as in the optimized mode, both ByteSize and WordSize
duke@435 262 // are mapped to the same type and thus the compiler cannot make a
duke@435 263 // distinction anymore (=> compiler errors).
duke@435 264
duke@435 265 #ifdef ASSERT
duke@435 266 Address(Register base, ByteSize disp)
duke@435 267 : _base(base),
duke@435 268 _index(noreg),
duke@435 269 _scale(no_scale),
duke@435 270 _disp(in_bytes(disp)) {
duke@435 271 }
duke@435 272
duke@435 273 Address(Register base, Register index, ScaleFactor scale, ByteSize disp)
duke@435 274 : _base(base),
duke@435 275 _index(index),
duke@435 276 _scale(scale),
duke@435 277 _disp(in_bytes(disp)) {
duke@435 278 assert(!index->is_valid() == (scale == Address::no_scale),
duke@435 279 "inconsistent address");
duke@435 280 }
jrose@1057 281
jrose@1100 282 Address(Register base, RegisterOrConstant index, ScaleFactor scale, ByteSize disp)
jrose@1057 283 : _base (base),
jrose@1057 284 _index(index.register_or_noreg()),
jrose@1057 285 _scale(scale),
jrose@1057 286 _disp (in_bytes(disp) + (index.constant_or_zero() * scale_size(scale))) {
jrose@1057 287 if (!index.is_register()) scale = Address::no_scale;
jrose@1057 288 assert(!_index->is_valid() == (scale == Address::no_scale),
jrose@1057 289 "inconsistent address");
jrose@1057 290 }
jrose@1057 291
duke@435 292 #endif // ASSERT
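// Usage sketch for the ByteSize overloads above (the offset function is a
// hypothetical placeholder):
//
//   Address(rbx, SomeClass::some_offset())   // some_offset() returns a ByteSize
//
// In product builds ByteSize is mapped to int, so the int-displacement
// constructors above cover the same call sites.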
duke@435 293
duke@435 294 // accessors
ysr@777 295 bool uses(Register reg) const { return _base == reg || _index == reg; }
ysr@777 296 Register base() const { return _base; }
ysr@777 297 Register index() const { return _index; }
ysr@777 298 ScaleFactor scale() const { return _scale; }
ysr@777 299 int disp() const { return _disp; }
duke@435 300
duke@435 301 // Convert the raw encoding form into the form expected by the constructor for
duke@435 302 // Address. An index of 4 (rsp) corresponds to having no index, so convert
duke@435 303 // that to noreg for the Address constructor.
coleenp@4037 304 static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc);
duke@435 305
duke@435 306 static Address make_array(ArrayAddress);
duke@435 307
duke@435 308 private:
duke@435 309 bool base_needs_rex() const {
duke@435 310 return _base != noreg && _base->encoding() >= 8;
duke@435 311 }
duke@435 312
duke@435 313 bool index_needs_rex() const {
duke@435 314 return _index != noreg &&_index->encoding() >= 8;
duke@435 315 }
duke@435 316
duke@435 317 relocInfo::relocType reloc() const { return _rspec.type(); }
duke@435 318
duke@435 319 friend class Assembler;
duke@435 320 friend class MacroAssembler;
duke@435 321 friend class LIR_Assembler; // base/index/scale/disp
duke@435 322 };
duke@435 323
duke@435 324 //
duke@435 325 // AddressLiteral has been split out from Address because operands of this type
duke@435 326 // need to be treated specially on 32bit vs. 64bit platforms. By splitting it out
duke@435 327 // the few instructions that need to deal with address literals are unique and the
duke@435 328 // MacroAssembler does not have to implement every instruction in the Assembler
duke@435 329 // in order to search for address literals that may need special handling depending
duke@435 330 // on the instruction and the platform. As a small step on the way to merging the i486/amd64
duke@435 331 // directories.
duke@435 332 //
duke@435 333 class AddressLiteral VALUE_OBJ_CLASS_SPEC {
duke@435 334 friend class ArrayAddress;
duke@435 335 RelocationHolder _rspec;
duke@435 336 // Typically when we use AddressLiterals we want to use their rval.
duke@435 337 // However in some situations we want the lval (effective address) of the item.
duke@435 338 // We provide a special factory for making those lvals.
duke@435 339 bool _is_lval;
duke@435 340
duke@435 341 // If the target is far we'll need to load the ea of this to
duke@435 342 // a register to reach it. Otherwise if near we can do rip
duke@435 343 // relative addressing.
duke@435 344
duke@435 345 address _target;
duke@435 346
duke@435 347 protected:
duke@435 348 // creation
duke@435 349 AddressLiteral()
duke@435 350 : _is_lval(false),
duke@435 351 _target(NULL)
duke@435 352 {}
duke@435 353
duke@435 354 public:
duke@435 355
duke@435 356
duke@435 357 AddressLiteral(address target, relocInfo::relocType rtype);
duke@435 358
duke@435 359 AddressLiteral(address target, RelocationHolder const& rspec)
duke@435 360 : _rspec(rspec),
duke@435 361 _is_lval(false),
duke@435 362 _target(target)
duke@435 363 {}
duke@435 364
duke@435 365 AddressLiteral addr() {
duke@435 366 AddressLiteral ret = *this;
duke@435 367 ret._is_lval = true;
duke@435 368 return ret;
duke@435 369 }
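// Sketch of the rval/lval distinction described above (target name is
// hypothetical):
//
//   AddressLiteral lit(some_target, relocInfo::external_word_type); // use the value at some_target (rval)
//   AddressLiteral ea = lit.addr();                                 // use the effective address itself (lval)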
duke@435 370
duke@435 371
duke@435 372 private:
duke@435 373
duke@435 374 address target() { return _target; }
duke@435 375 bool is_lval() { return _is_lval; }
duke@435 376
duke@435 377 relocInfo::relocType reloc() const { return _rspec.type(); }
duke@435 378 const RelocationHolder& rspec() const { return _rspec; }
duke@435 379
duke@435 380 friend class Assembler;
duke@435 381 friend class MacroAssembler;
duke@435 382 friend class Address;
duke@435 383 friend class LIR_Assembler;
duke@435 384 };
duke@435 385
duke@435 386 // Convenience classes
duke@435 387 class RuntimeAddress: public AddressLiteral {
duke@435 388
duke@435 389 public:
duke@435 390
duke@435 391 RuntimeAddress(address target) : AddressLiteral(target, relocInfo::runtime_call_type) {}
duke@435 392
duke@435 393 };
duke@435 394
duke@435 395 class ExternalAddress: public AddressLiteral {
never@2737 396 private:
never@2737 397 static relocInfo::relocType reloc_for_target(address target) {
never@2737 398 // Sometimes ExternalAddress is used for values which aren't
never@2737 399 // exactly addresses, like the card table base.
never@2737 400 // external_word_type can't be used for values in the first page
never@2737 401 // so just skip the reloc in that case.
never@2737 402 return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
never@2737 403 }
never@2737 404
never@2737 405 public:
never@2737 406
never@2737 407 ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(target)) {}
duke@435 408
duke@435 409 };
duke@435 410
duke@435 411 class InternalAddress: public AddressLiteral {
duke@435 412
duke@435 413 public:
duke@435 414
duke@435 415 InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {}
duke@435 416
duke@435 417 };
duke@435 418
duke@435 419 // x86 can do array addressing as a single operation since disp can be an absolute
duke@435 420 // address; amd64 can't. We create a class that expresses the concept but does extra
duke@435 421 // magic on amd64 to get the final result.
duke@435 422
duke@435 423 class ArrayAddress VALUE_OBJ_CLASS_SPEC {
duke@435 424 private:
duke@435 425
duke@435 426 AddressLiteral _base;
duke@435 427 Address _index;
duke@435 428
duke@435 429 public:
duke@435 430
duke@435 431 ArrayAddress() {};
duke@435 432 ArrayAddress(AddressLiteral base, Address index): _base(base), _index(index) {};
duke@435 433 AddressLiteral base() { return _base; }
duke@435 434 Address index() { return _index; }
duke@435 435
duke@435 436 };
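// Usage sketch (table name is hypothetical): index a static table by a
// register; on x86_32 the table address folds into disp, on x86_64 the
// MacroAssembler materializes it first:
//
//   ArrayAddress(ExternalAddress((address)the_table),
//                Address(noreg, rbx, Address::times_ptr))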
duke@435 437
never@739 438 const int FPUStateSizeInWords = NOT_LP64(27) LP64_ONLY( 512 / wordSize);
duke@435 439
duke@435 440 // The Intel x86/Amd64 Assembler: Pure assembler doing NO optimizations on the instruction
duke@435 441 // level (e.g. mov rax, 0 is not translated into xor rax, rax!); i.e., what you write
duke@435 442 // is what you get. The Assembler is generating code into a CodeBuffer.
duke@435 443
duke@435 444 class Assembler : public AbstractAssembler {
duke@435 445 friend class AbstractAssembler; // for the non-virtual hack
duke@435 446 friend class LIR_Assembler; // as_Address()
never@739 447 friend class StubGenerator;
duke@435 448
duke@435 449 public:
duke@435 450 enum Condition { // The x86 condition codes used for conditional jumps/moves.
duke@435 451 zero = 0x4,
duke@435 452 notZero = 0x5,
duke@435 453 equal = 0x4,
duke@435 454 notEqual = 0x5,
duke@435 455 less = 0xc,
duke@435 456 lessEqual = 0xe,
duke@435 457 greater = 0xf,
duke@435 458 greaterEqual = 0xd,
duke@435 459 below = 0x2,
duke@435 460 belowEqual = 0x6,
duke@435 461 above = 0x7,
duke@435 462 aboveEqual = 0x3,
duke@435 463 overflow = 0x0,
duke@435 464 noOverflow = 0x1,
duke@435 465 carrySet = 0x2,
duke@435 466 carryClear = 0x3,
duke@435 467 negative = 0x8,
duke@435 468 positive = 0x9,
duke@435 469 parity = 0xa,
duke@435 470 noParity = 0xb
duke@435 471 };
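// Note that several mnemonics alias the same encoding above, e.g. zero/equal
// (0x4) and carrySet/below (0x2); use whichever reads best at the call site.
// A sketch (label is hypothetical):
//
//   jcc(Assembler::notEqual, L_retry);   // identical encoding to notZero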
duke@435 472
duke@435 473 enum Prefix {
duke@435 474 // segment overrides
duke@435 475 CS_segment = 0x2e,
duke@435 476 SS_segment = 0x36,
duke@435 477 DS_segment = 0x3e,
duke@435 478 ES_segment = 0x26,
duke@435 479 FS_segment = 0x64,
duke@435 480 GS_segment = 0x65,
duke@435 481
duke@435 482 REX = 0x40,
duke@435 483
duke@435 484 REX_B = 0x41,
duke@435 485 REX_X = 0x42,
duke@435 486 REX_XB = 0x43,
duke@435 487 REX_R = 0x44,
duke@435 488 REX_RB = 0x45,
duke@435 489 REX_RX = 0x46,
duke@435 490 REX_RXB = 0x47,
duke@435 491
duke@435 492 REX_W = 0x48,
duke@435 493
duke@435 494 REX_WB = 0x49,
duke@435 495 REX_WX = 0x4A,
duke@435 496 REX_WXB = 0x4B,
duke@435 497 REX_WR = 0x4C,
duke@435 498 REX_WRB = 0x4D,
duke@435 499 REX_WRX = 0x4E,
kvn@3388 500 REX_WRXB = 0x4F,
kvn@3388 501
kvn@3388 502 VEX_3bytes = 0xC4,
kvn@3388 503 VEX_2bytes = 0xC5
kvn@3388 504 };
kvn@3388 505
kvn@3388 506 enum VexPrefix {
kvn@3388 507 VEX_B = 0x20,
kvn@3388 508 VEX_X = 0x40,
kvn@3388 509 VEX_R = 0x80,
kvn@3388 510 VEX_W = 0x80
kvn@3388 511 };
kvn@3388 512
kvn@3388 513 enum VexSimdPrefix {
kvn@3388 514 VEX_SIMD_NONE = 0x0,
kvn@3388 515 VEX_SIMD_66 = 0x1,
kvn@3388 516 VEX_SIMD_F3 = 0x2,
kvn@3388 517 VEX_SIMD_F2 = 0x3
kvn@3388 518 };
kvn@3388 519
kvn@3388 520 enum VexOpcode {
kvn@3388 521 VEX_OPCODE_NONE = 0x0,
kvn@3388 522 VEX_OPCODE_0F = 0x1,
kvn@3388 523 VEX_OPCODE_0F_38 = 0x2,
kvn@3388 524 VEX_OPCODE_0F_3A = 0x3
duke@435 525 };
duke@435 526
duke@435 527 enum WhichOperand {
duke@435 528 // input to locate_operand, and format code for relocations
never@739 529 imm_operand = 0, // embedded 32-bit|64-bit immediate operand
duke@435 530 disp32_operand = 1, // embedded 32-bit displacement or address
duke@435 531 call32_operand = 2, // embedded 32-bit self-relative displacement
never@739 532 #ifndef _LP64
duke@435 533 _WhichOperand_limit = 3
never@739 534 #else
never@739 535 narrow_oop_operand = 3, // embedded 32-bit immediate narrow oop
never@739 536 _WhichOperand_limit = 4
never@739 537 #endif
duke@435 538 };
duke@435 539
never@739 540
never@739 541
never@739 542 // NOTE: The general philosophy of the declarations here is that 64bit versions
never@739 543 // of instructions are freely declared without the need for wrapping them in an ifdef.
never@739 544 // (Some dangerous instructions are ifdef'd out of inappropriate jvms.)
never@739 545 // In the .cpp file the implementations are wrapped so that they are dropped out
zgu@4492 546 // of the resulting jvm. This is done mostly to keep the footprint of MINIMAL
never@739 547 // to the size it was prior to merging up the 32bit and 64bit assemblers.
never@739 548 //
never@739 549 // This does mean you'll get a linker/runtime error if you use a 64bit only instruction
never@739 550 // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down.
never@739 551
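// A minimal caller-side sketch (assuming the usual LP64_ONLY/NOT_LP64 macros
// already used elsewhere in this file): the caller, not this header, guards
// the 64bit-only instruction:
//
//   LP64_ONLY(addq(dst, 1);) NOT_LP64(addl(dst, 1);)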
never@739 552 private:
never@739 553
never@739 554
never@739 555 // 64bit prefixes
never@739 556 int prefix_and_encode(int reg_enc, bool byteinst = false);
never@739 557 int prefixq_and_encode(int reg_enc);
never@739 558
never@739 559 int prefix_and_encode(int dst_enc, int src_enc, bool byteinst = false);
never@739 560 int prefixq_and_encode(int dst_enc, int src_enc);
never@739 561
never@739 562 void prefix(Register reg);
never@739 563 void prefix(Address adr);
never@739 564 void prefixq(Address adr);
never@739 565
never@739 566 void prefix(Address adr, Register reg, bool byteinst = false);
kvn@3388 567 void prefix(Address adr, XMMRegister reg);
never@739 568 void prefixq(Address adr, Register reg);
kvn@3388 569 void prefixq(Address adr, XMMRegister reg);
never@739 570
never@739 571 void prefetch_prefix(Address src);
never@739 572
kvn@3388 573 void rex_prefix(Address adr, XMMRegister xreg,
kvn@3388 574 VexSimdPrefix pre, VexOpcode opc, bool rex_w);
kvn@3388 575 int rex_prefix_and_encode(int dst_enc, int src_enc,
kvn@3388 576 VexSimdPrefix pre, VexOpcode opc, bool rex_w);
kvn@3388 577
kvn@3388 578 void vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w,
kvn@3388 579 int nds_enc, VexSimdPrefix pre, VexOpcode opc,
kvn@3388 580 bool vector256);
kvn@3388 581
kvn@3388 582 void vex_prefix(Address adr, int nds_enc, int xreg_enc,
kvn@3388 583 VexSimdPrefix pre, VexOpcode opc,
kvn@3388 584 bool vex_w, bool vector256);
kvn@3388 585
kvn@3390 586 void vex_prefix(XMMRegister dst, XMMRegister nds, Address src,
kvn@3390 587 VexSimdPrefix pre, bool vector256 = false) {
kvn@3882 588 int dst_enc = dst->encoding();
kvn@3882 589 int nds_enc = nds->is_valid() ? nds->encoding() : 0;
kvn@3882 590 vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, false, vector256);
kvn@3390 591 }
kvn@3390 592
iveresov@6378 593 void vex_prefix_0F38(Register dst, Register nds, Address src) {
iveresov@6378 594 bool vex_w = false;
iveresov@6378 595 bool vector256 = false;
iveresov@6378 596 vex_prefix(src, nds->encoding(), dst->encoding(),
iveresov@6378 597 VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
iveresov@6378 598 }
iveresov@6378 599
iveresov@6378 600 void vex_prefix_0F38_q(Register dst, Register nds, Address src) {
iveresov@6378 601 bool vex_w = true;
iveresov@6378 602 bool vector256 = false;
iveresov@6378 603 vex_prefix(src, nds->encoding(), dst->encoding(),
iveresov@6378 604 VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
iveresov@6378 605 }
kvn@3388 606 int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
kvn@3388 607 VexSimdPrefix pre, VexOpcode opc,
kvn@3388 608 bool vex_w, bool vector256);
kvn@3388 609
iveresov@6378 610 int vex_prefix_0F38_and_encode(Register dst, Register nds, Register src) {
iveresov@6378 611 bool vex_w = false;
iveresov@6378 612 bool vector256 = false;
iveresov@6378 613 return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
iveresov@6378 614 VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
iveresov@6378 615 }
iveresov@6378 616 int vex_prefix_0F38_and_encode_q(Register dst, Register nds, Register src) {
iveresov@6378 617 bool vex_w = true;
iveresov@6378 618 bool vector256 = false;
iveresov@6378 619 return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
iveresov@6378 620 VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
iveresov@6378 621 }
kvn@3390 622 int vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
kvn@3882 623 VexSimdPrefix pre, bool vector256 = false,
kvn@3882 624 VexOpcode opc = VEX_OPCODE_0F) {
kvn@3882 625 int src_enc = src->encoding();
kvn@3882 626 int dst_enc = dst->encoding();
kvn@3882 627 int nds_enc = nds->is_valid() ? nds->encoding() : 0;
kvn@3882 628 return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, false, vector256);
kvn@3390 629 }
kvn@3388 630
kvn@3388 631 void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr,
kvn@3388 632 VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
kvn@3388 633 bool rex_w = false, bool vector256 = false);
kvn@3388 634
kvn@3388 635 void simd_prefix(XMMRegister dst, Address src,
kvn@3388 636 VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
kvn@3388 637 simd_prefix(dst, xnoreg, src, pre, opc);
kvn@3388 638 }
kvn@4001 639
kvn@3388 640 void simd_prefix(Address dst, XMMRegister src, VexSimdPrefix pre) {
kvn@3388 641 simd_prefix(src, dst, pre);
kvn@3388 642 }
kvn@3388 643 void simd_prefix_q(XMMRegister dst, XMMRegister nds, Address src,
kvn@3388 644 VexSimdPrefix pre) {
kvn@3388 645 bool rex_w = true;
kvn@3388 646 simd_prefix(dst, nds, src, pre, VEX_OPCODE_0F, rex_w);
kvn@3388 647 }
kvn@3388 648
kvn@3388 649 int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
kvn@3388 650 VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
kvn@3388 651 bool rex_w = false, bool vector256 = false);
kvn@3388 652
kvn@3388 653 // Move/convert 32-bit integer value.
kvn@3388 654 int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, Register src,
kvn@3388 655 VexSimdPrefix pre) {
kvn@3388 656 // It is OK to cast from Register to XMMRegister to pass argument here
kvn@3388 657 // since only the encoding is used in simd_prefix_and_encode() and the number of
kvn@3388 658 // Gen and Xmm registers is the same.
kvn@3388 659 return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre);
kvn@3388 660 }
kvn@3388 661 int simd_prefix_and_encode(XMMRegister dst, Register src, VexSimdPrefix pre) {
kvn@3388 662 return simd_prefix_and_encode(dst, xnoreg, src, pre);
kvn@3388 663 }
kvn@3388 664 int simd_prefix_and_encode(Register dst, XMMRegister src,
kvn@3388 665 VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
kvn@3388 666 return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, opc);
kvn@3388 667 }
kvn@3388 668
kvn@3388 669 // Move/convert 64-bit integer value.
kvn@3388 670 int simd_prefix_and_encode_q(XMMRegister dst, XMMRegister nds, Register src,
kvn@3388 671 VexSimdPrefix pre) {
kvn@3388 672 bool rex_w = true;
kvn@3388 673 return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre, VEX_OPCODE_0F, rex_w);
kvn@3388 674 }
kvn@3388 675 int simd_prefix_and_encode_q(XMMRegister dst, Register src, VexSimdPrefix pre) {
kvn@3388 676 return simd_prefix_and_encode_q(dst, xnoreg, src, pre);
kvn@3388 677 }
kvn@3388 678 int simd_prefix_and_encode_q(Register dst, XMMRegister src,
kvn@3388 679 VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
kvn@3388 680 bool rex_w = true;
kvn@3388 681 return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, opc, rex_w);
kvn@3388 682 }
kvn@3388 683
never@739 684 // Helper functions for groups of instructions
never@739 685 void emit_arith_b(int op1, int op2, Register dst, int imm8);
never@739 686
never@739 687 void emit_arith(int op1, int op2, Register dst, int32_t imm32);
kvn@3574 688 // Force generation of a 4 byte immediate value even if it fits into 8 bits
kvn@3574 689 void emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32);
never@739 690 void emit_arith(int op1, int op2, Register dst, Register src);
never@739 691
kvn@4001 692 void emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre);
kvn@4001 693 void emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre);
kvn@4001 694 void emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre);
kvn@4001 695 void emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre);
kvn@4001 696 void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
kvn@4001 697 Address src, VexSimdPrefix pre, bool vector256);
kvn@4001 698 void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
kvn@4001 699 XMMRegister src, VexSimdPrefix pre, bool vector256);
kvn@4001 700
never@739 701 void emit_operand(Register reg,
never@739 702 Register base, Register index, Address::ScaleFactor scale,
never@739 703 int disp,
never@739 704 RelocationHolder const& rspec,
never@739 705 int rip_relative_correction = 0);
never@739 706
never@739 707 void emit_operand(Register reg, Address adr, int rip_relative_correction = 0);
never@739 708
never@739 709 // operands that only take the original 32bit registers
never@739 710 void emit_operand32(Register reg, Address adr);
never@739 711
never@739 712 void emit_operand(XMMRegister reg,
never@739 713 Register base, Register index, Address::ScaleFactor scale,
never@739 714 int disp,
never@739 715 RelocationHolder const& rspec);
never@739 716
never@739 717 void emit_operand(XMMRegister reg, Address adr);
never@739 718
never@739 719 void emit_operand(MMXRegister reg, Address adr);
never@739 720
never@739 721 // workaround gcc (3.2.1-7) bug
never@739 722 void emit_operand(Address adr, MMXRegister reg);
never@739 723
never@739 724
never@739 725 // Immediate-to-memory forms
never@739 726 void emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32);
never@739 727
never@739 728 void emit_farith(int b1, int b2, int i);
never@739 729
duke@435 730
duke@435 731 protected:
never@739 732 #ifdef ASSERT
never@739 733 void check_relocation(RelocationHolder const& rspec, int format);
never@739 734 #endif
never@739 735
never@739 736 void emit_data(jint data, relocInfo::relocType rtype, int format);
never@739 737 void emit_data(jint data, RelocationHolder const& rspec, int format);
never@739 738 void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
never@739 739 void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
never@739 740
never@739 741 bool reachable(AddressLiteral adr) NOT_LP64({ return true;});
never@739 742
never@739 743 // These are all easily abused and hence protected
never@739 744
never@739 745 // 32BIT ONLY SECTION
never@739 746 #ifndef _LP64
never@739 747 // Make these disappear in 64bit mode since they would never be correct
never@739 748 void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY
never@739 749 void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY
never@739 750
kvn@1077 751 void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY
never@739 752 void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY
never@739 753
never@739 754 void push_literal32(int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY
never@739 755 #else
never@739 756 // 64BIT ONLY SECTION
never@739 757 void mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec); // 64BIT ONLY
kvn@1077 758
kvn@1077 759 void cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec);
kvn@1077 760 void cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec);
kvn@1077 761
kvn@1077 762 void mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec);
kvn@1077 763 void mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec);
never@739 764 #endif // _LP64
never@739 765
never@739 766 // These are unique in that we are ensured by the caller that the 32bit
never@739 767 // relative offset in these instructions will always be able to reach the potentially
never@739 768 // 64bit address described by entry. Since they can take a 64bit address they
never@739 769 // don't have the 32 suffix like the other instructions in this class.
never@739 770
never@739 771 void call_literal(address entry, RelocationHolder const& rspec);
never@739 772 void jmp_literal(address entry, RelocationHolder const& rspec);
never@739 773
never@739 774 // Avoid using directly section
never@739 775 // Instructions in this section are actually usable by anyone without danger
never@739 776 // of failure but have performance issues that are addressed by enhanced
never@739 777 // instructions which will do the proper thing based on the particular cpu.
never@739 778 // We protect them because we don't trust you...
never@739 779
duke@435 780 // Don't use the next inc() and dec() methods directly. INC & DEC instructions
duke@435 781 // could cause a partial flag stall since they don't set the CF flag.
duke@435 782 // Use MacroAssembler::decrement() & MacroAssembler::increment() methods
duke@435 783 // which call inc() & dec() or add() & sub() in accordance with
duke@435 784 // the product flag UseIncDec value.
duke@435 785
duke@435 786 void decl(Register dst);
duke@435 787 void decl(Address dst);
never@739 788 void decq(Register dst);
never@739 789 void decq(Address dst);
duke@435 790
duke@435 791 void incl(Register dst);
duke@435 792 void incl(Address dst);
never@739 793 void incq(Register dst);
never@739 794 void incq(Address dst);
never@739 795
never@739 796 // New cpus require use of movsd and movss to avoid a partial register stall
never@739 797 // when loading from memory. But for old Opteron use movlpd instead of movsd.
never@739 798 // The selection is done in MacroAssembler::movdbl() and movflt().
never@739 799
never@739 800 // Move Scalar Single-Precision Floating-Point Values
never@739 801 void movss(XMMRegister dst, Address src);
never@739 802 void movss(XMMRegister dst, XMMRegister src);
never@739 803 void movss(Address dst, XMMRegister src);
never@739 804
never@739 805 // Move Scalar Double-Precision Floating-Point Values
never@739 806 void movsd(XMMRegister dst, Address src);
never@739 807 void movsd(XMMRegister dst, XMMRegister src);
never@739 808 void movsd(Address dst, XMMRegister src);
never@739 809 void movlpd(XMMRegister dst, Address src);
never@739 810
never@739 811 // New cpus require use of movaps and movapd to avoid a partial register stall
never@739 812 // when moving between registers.
never@739 813 void movaps(XMMRegister dst, XMMRegister src);
never@739 814 void movapd(XMMRegister dst, XMMRegister src);
never@739 815
never@739 816 // End avoid using directly
never@739 817
never@739 818
never@739 819 // Instruction prefixes
never@739 820 void prefix(Prefix p);
never@739 821
never@739 822 public:
never@739 823
never@739 824 // Creation
never@739 825 Assembler(CodeBuffer* code) : AbstractAssembler(code) {}
never@739 826
never@739 827 // Decoding
never@739 828 static address locate_operand(address inst, WhichOperand which);
never@739 829 static address locate_next_instruction(address inst);
never@739 830
never@739 831 // Utilities
iveresov@2686 832 static bool is_polling_page_far() NOT_LP64({ return false;});
iveresov@2686 833
never@739 834 // Generic instructions
never@739 835 // Does 32bit or 64bit as needed for the platform. In some sense these
never@739 836 // belong in macro assembler but there is no need for both varieties to exist
never@739 837
never@739 838 void lea(Register dst, Address src);
never@739 839
never@739 840 void mov(Register dst, Register src);
never@739 841
never@739 842 void pusha();
never@739 843 void popa();
never@739 844
never@739 845 void pushf();
never@739 846 void popf();
never@739 847
never@739 848 void push(int32_t imm32);
never@739 849
never@739 850 void push(Register src);
never@739 851
never@739 852 void pop(Register dst);
never@739 853
never@739 854 // These are dummies to prevent surprise implicit conversions to Register
never@739 855 void push(void* v);
never@739 856 void pop(void* v);
never@739 857
never@739 858 // These do register sized moves/scans
never@739 859 void rep_mov();
kvn@4410 860 void rep_stos();
kvn@4410 861 void rep_stosb();
never@739 862 void repne_scan();
never@739 863 #ifdef _LP64
never@739 864 void repne_scanl();
never@739 865 #endif
never@739 866
never@739 867 // Vanilla instructions in lexical order
never@739 868
phh@2423 869 void adcl(Address dst, int32_t imm32);
phh@2423 870 void adcl(Address dst, Register src);
never@739 871 void adcl(Register dst, int32_t imm32);
never@739 872 void adcl(Register dst, Address src);
never@739 873 void adcl(Register dst, Register src);
never@739 874
never@739 875 void adcq(Register dst, int32_t imm32);
never@739 876 void adcq(Register dst, Address src);
never@739 877 void adcq(Register dst, Register src);
never@739 878
never@739 879 void addl(Address dst, int32_t imm32);
never@739 880 void addl(Address dst, Register src);
never@739 881 void addl(Register dst, int32_t imm32);
never@739 882 void addl(Register dst, Address src);
never@739 883 void addl(Register dst, Register src);
never@739 884
never@739 885 void addq(Address dst, int32_t imm32);
never@739 886 void addq(Address dst, Register src);
never@739 887 void addq(Register dst, int32_t imm32);
never@739 888 void addq(Register dst, Address src);
never@739 889 void addq(Register dst, Register src);
never@739 890
kvn@7152 891 #ifdef _LP64
kvn@7152 892 //Add Unsigned Integers with Carry Flag
kvn@7152 893 void adcxq(Register dst, Register src);
kvn@7152 894
kvn@7152 895 //Add Unsigned Integers with Overflow Flag
kvn@7152 896 void adoxq(Register dst, Register src);
kvn@7152 897 #endif
kvn@7152 898
duke@435 899 void addr_nop_4();
duke@435 900 void addr_nop_5();
duke@435 901 void addr_nop_7();
duke@435 902 void addr_nop_8();
duke@435 903
never@739 904 // Add Scalar Double-Precision Floating-Point Values
never@739 905 void addsd(XMMRegister dst, Address src);
never@739 906 void addsd(XMMRegister dst, XMMRegister src);
never@739 907
never@739 908 // Add Scalar Single-Precision Floating-Point Values
never@739 909 void addss(XMMRegister dst, Address src);
never@739 910 void addss(XMMRegister dst, XMMRegister src);
never@739 911
kvn@4205 912 // AES instructions
kvn@4205 913 void aesdec(XMMRegister dst, Address src);
kvn@4205 914 void aesdec(XMMRegister dst, XMMRegister src);
kvn@4205 915 void aesdeclast(XMMRegister dst, Address src);
kvn@4205 916 void aesdeclast(XMMRegister dst, XMMRegister src);
kvn@4205 917 void aesenc(XMMRegister dst, Address src);
kvn@4205 918 void aesenc(XMMRegister dst, XMMRegister src);
kvn@4205 919 void aesenclast(XMMRegister dst, Address src);
kvn@4205 920 void aesenclast(XMMRegister dst, XMMRegister src);
kvn@4205 921
kvn@4205 922
kvn@3388 923 void andl(Address dst, int32_t imm32);
never@739 924 void andl(Register dst, int32_t imm32);
never@739 925 void andl(Register dst, Address src);
never@739 926 void andl(Register dst, Register src);
never@739 927
never@2980 928 void andq(Address dst, int32_t imm32);
never@739 929 void andq(Register dst, int32_t imm32);
never@739 930 void andq(Register dst, Address src);
never@739 931 void andq(Register dst, Register src);
never@739 932
iveresov@6378 933 // BMI instructions
iveresov@6378 934 void andnl(Register dst, Register src1, Register src2);
iveresov@6378 935 void andnl(Register dst, Register src1, Address src2);
iveresov@6378 936 void andnq(Register dst, Register src1, Register src2);
iveresov@6378 937 void andnq(Register dst, Register src1, Address src2);
iveresov@6378 938
iveresov@6378 939 void blsil(Register dst, Register src);
iveresov@6378 940 void blsil(Register dst, Address src);
iveresov@6378 941 void blsiq(Register dst, Register src);
iveresov@6378 942 void blsiq(Register dst, Address src);
iveresov@6378 943
iveresov@6378 944 void blsmskl(Register dst, Register src);
iveresov@6378 945 void blsmskl(Register dst, Address src);
iveresov@6378 946 void blsmskq(Register dst, Register src);
iveresov@6378 947 void blsmskq(Register dst, Address src);
iveresov@6378 948
iveresov@6378 949 void blsrl(Register dst, Register src);
iveresov@6378 950 void blsrl(Register dst, Address src);
iveresov@6378 951 void blsrq(Register dst, Register src);
iveresov@6378 952 void blsrq(Register dst, Address src);
iveresov@6378 953
twisti@1210 954 void bsfl(Register dst, Register src);
twisti@1210 955 void bsrl(Register dst, Register src);
twisti@1210 956
twisti@1210 957 #ifdef _LP64
twisti@1210 958 void bsfq(Register dst, Register src);
twisti@1210 959 void bsrq(Register dst, Register src);
twisti@1210 960 #endif
twisti@1210 961
never@739 962 void bswapl(Register reg);
never@739 963
never@739 964 void bswapq(Register reg);
never@739 965
duke@435 966 void call(Label& L, relocInfo::relocType rtype);
duke@435 967 void call(Register reg); // push pc; pc <- reg
duke@435 968 void call(Address adr); // push pc; pc <- adr
duke@435 969
never@739 970 void cdql();
never@739 971
never@739 972 void cdqq();
never@739 973
twisti@4318 974 void cld();
never@739 975
never@739 976 void clflush(Address adr);
never@739 977
never@739 978 void cmovl(Condition cc, Register dst, Register src);
never@739 979 void cmovl(Condition cc, Register dst, Address src);
never@739 980
never@739 981 void cmovq(Condition cc, Register dst, Register src);
never@739 982 void cmovq(Condition cc, Register dst, Address src);
never@739 983
never@739 984
never@739 985 void cmpb(Address dst, int imm8);
never@739 986
never@739 987 void cmpl(Address dst, int32_t imm32);
never@739 988
never@739 989 void cmpl(Register dst, int32_t imm32);
never@739 990 void cmpl(Register dst, Register src);
never@739 991 void cmpl(Register dst, Address src);
never@739 992
never@739 993 void cmpq(Address dst, int32_t imm32);
never@739 994 void cmpq(Address dst, Register src);
never@739 995
never@739 996 void cmpq(Register dst, int32_t imm32);
never@739 997 void cmpq(Register dst, Register src);
never@739 998 void cmpq(Register dst, Address src);
never@739 999
never@739 1000 // These are dummies used to catch attempts to convert NULL to Register
never@739 1001 void cmpl(Register dst, void* junk); // dummy
never@739 1002 void cmpq(Register dst, void* junk); // dummy
never@739 1003
never@739 1004 void cmpw(Address dst, int imm16);
never@739 1005
never@739 1006 void cmpxchg8 (Address adr);
never@739 1007
never@739 1008 void cmpxchgl(Register reg, Address adr);
never@739 1009
never@739 1010 void cmpxchgq(Register reg, Address adr);
never@739 1011
never@739 1012 // Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
never@739 1013 void comisd(XMMRegister dst, Address src);
kvn@3388 1014 void comisd(XMMRegister dst, XMMRegister src);
never@739 1015
never@739 1016 // Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
never@739 1017 void comiss(XMMRegister dst, Address src);
kvn@3388 1018 void comiss(XMMRegister dst, XMMRegister src);
never@739 1019
never@739 1020 // Identify processor type and features
twisti@4318 1021 void cpuid();
never@739 1022
never@739 1023 // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value
never@739 1024 void cvtsd2ss(XMMRegister dst, XMMRegister src);
kvn@3388 1025 void cvtsd2ss(XMMRegister dst, Address src);
never@739 1026
never@739 1027 // Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value
never@739 1028 void cvtsi2sdl(XMMRegister dst, Register src);
kvn@3388 1029 void cvtsi2sdl(XMMRegister dst, Address src);
never@739 1030 void cvtsi2sdq(XMMRegister dst, Register src);
kvn@3388 1031 void cvtsi2sdq(XMMRegister dst, Address src);
never@739 1032
never@739 1033 // Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value
never@739 1034 void cvtsi2ssl(XMMRegister dst, Register src);
kvn@3388 1035 void cvtsi2ssl(XMMRegister dst, Address src);
never@739 1036 void cvtsi2ssq(XMMRegister dst, Register src);
kvn@3388 1037 void cvtsi2ssq(XMMRegister dst, Address src);
never@739 1038
never@739 1039 // Convert Packed Signed Doubleword Integers to Packed Double-Precision Floating-Point Value
never@739 1040 void cvtdq2pd(XMMRegister dst, XMMRegister src);
never@739 1041
never@739 1042 // Convert Packed Signed Doubleword Integers to Packed Single-Precision Floating-Point Value
never@739 1043 void cvtdq2ps(XMMRegister dst, XMMRegister src);
never@739 1044
never@739 1045 // Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value
never@739 1046 void cvtss2sd(XMMRegister dst, XMMRegister src);
kvn@3388 1047 void cvtss2sd(XMMRegister dst, Address src);
never@739 1048
never@739 1049 // Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer
never@739 1050 void cvttsd2sil(Register dst, Address src);
never@739 1051 void cvttsd2sil(Register dst, XMMRegister src);
never@739 1052 void cvttsd2siq(Register dst, XMMRegister src);
never@739 1053
never@739 1054 // Convert with Truncation Scalar Single-Precision Floating-Point Value to Doubleword Integer
never@739 1055 void cvttss2sil(Register dst, XMMRegister src);
never@739 1056 void cvttss2siq(Register dst, XMMRegister src);
never@739 1057
never@739 1058 // Divide Scalar Double-Precision Floating-Point Values
never@739 1059 void divsd(XMMRegister dst, Address src);
never@739 1060 void divsd(XMMRegister dst, XMMRegister src);
never@739 1061
never@739 1062 // Divide Scalar Single-Precision Floating-Point Values
never@739 1063 void divss(XMMRegister dst, Address src);
never@739 1064 void divss(XMMRegister dst, XMMRegister src);
never@739 1065
never@739 1066 void emms();
never@739 1067
never@739 1068 void fabs();
never@739 1069
never@739 1070 void fadd(int i);
never@739 1071
never@739 1072 void fadd_d(Address src);
never@739 1073 void fadd_s(Address src);
never@739 1074
never@739 1075 // "Alternate" versions of x87 instructions place result down in FPU
never@739 1076 // stack instead of on TOS
never@739 1077
never@739 1078 void fadda(int i); // "alternate" fadd
never@739 1079 void faddp(int i = 1);
never@739 1080
never@739 1081 void fchs();
never@739 1082
never@739 1083 void fcom(int i);
never@739 1084
never@739 1085 void fcomp(int i = 1);
never@739 1086 void fcomp_d(Address src);
never@739 1087 void fcomp_s(Address src);
never@739 1088
never@739 1089 void fcompp();
never@739 1090
never@739 1091 void fcos();
never@739 1092
never@739 1093 void fdecstp();
never@739 1094
never@739 1095 void fdiv(int i);
never@739 1096 void fdiv_d(Address src);
never@739 1097 void fdivr_s(Address src);
never@739 1098 void fdiva(int i); // "alternate" fdiv
never@739 1099 void fdivp(int i = 1);
never@739 1100
never@739 1101 void fdivr(int i);
never@739 1102 void fdivr_d(Address src);
never@739 1103 void fdiv_s(Address src);
never@739 1104
never@739 1105 void fdivra(int i); // "alternate" reversed fdiv
never@739 1106
never@739 1107 void fdivrp(int i = 1);
never@739 1108
never@739 1109 void ffree(int i = 0);
never@739 1110
never@739 1111 void fild_d(Address adr);
never@739 1112 void fild_s(Address adr);
never@739 1113
never@739 1114 void fincstp();
never@739 1115
never@739 1116 void finit();
never@739 1117
never@739 1118 void fist_s (Address adr);
never@739 1119 void fistp_d(Address adr);
never@739 1120 void fistp_s(Address adr);
never@739 1121
never@739 1122 void fld1();
never@739 1123
never@739 1124 void fld_d(Address adr);
never@739 1125 void fld_s(Address adr);
never@739 1126 void fld_s(int index);
never@739 1127 void fld_x(Address adr); // extended-precision (80-bit) format
never@739 1128
never@739 1129 void fldcw(Address src);
never@739 1130
never@739 1131 void fldenv(Address src);
never@739 1132
never@739 1133 void fldlg2();
never@739 1134
never@739 1135 void fldln2();
never@739 1136
never@739 1137 void fldz();
never@739 1138
never@739 1139 void flog();
never@739 1140 void flog10();
never@739 1141
never@739 1142 void fmul(int i);
never@739 1143
never@739 1144 void fmul_d(Address src);
never@739 1145 void fmul_s(Address src);
never@739 1146
never@739 1147 void fmula(int i); // "alternate" fmul
never@739 1148
never@739 1149 void fmulp(int i = 1);
never@739 1150
never@739 1151 void fnsave(Address dst);
never@739 1152
never@739 1153 void fnstcw(Address src);
never@739 1154
never@739 1155 void fnstsw_ax();
never@739 1156
never@739 1157 void fprem();
never@739 1158 void fprem1();
never@739 1159
never@739 1160 void frstor(Address src);
never@739 1161
never@739 1162 void fsin();
never@739 1163
never@739 1164 void fsqrt();
never@739 1165
never@739 1166 void fst_d(Address adr);
never@739 1167 void fst_s(Address adr);
never@739 1168
never@739 1169 void fstp_d(Address adr);
never@739 1170 void fstp_d(int index);
never@739 1171 void fstp_s(Address adr);
never@739 1172 void fstp_x(Address adr); // extended-precision (80-bit) format
never@739 1173
never@739 1174 void fsub(int i);
never@739 1175 void fsub_d(Address src);
never@739 1176 void fsub_s(Address src);
never@739 1177
never@739 1178 void fsuba(int i); // "alternate" fsub
never@739 1179
never@739 1180 void fsubp(int i = 1);
never@739 1181
never@739 1182 void fsubr(int i);
never@739 1183 void fsubr_d(Address src);
never@739 1184 void fsubr_s(Address src);
never@739 1185
never@739 1186 void fsubra(int i); // "alternate" reversed fsub
never@739 1187
never@739 1188 void fsubrp(int i = 1);
never@739 1189
never@739 1190 void ftan();
never@739 1191
never@739 1192 void ftst();
never@739 1193
never@739 1194 void fucomi(int i = 1);
never@739 1195 void fucomip(int i = 1);
never@739 1196
never@739 1197 void fwait();
never@739 1198
never@739 1199 void fxch(int i = 1);
never@739 1200
never@739 1201 void fxrstor(Address src);
never@739 1202
never@739 1203 void fxsave(Address dst);
never@739 1204
never@739 1205 void fyl2x();
roland@3787 1206 void frndint();
roland@3787 1207 void f2xm1();
roland@3787 1208 void fldl2e();
never@739 1209
never@739 1210 void hlt();
never@739 1211
never@739 1212 void idivl(Register src);
kvn@2275 1213 void divl(Register src); // Unsigned division
never@739 1214
kvn@7152 1215 #ifdef _LP64
never@739 1216 void idivq(Register src);
kvn@7152 1217 #endif
never@739 1218
never@739 1219 void imull(Register dst, Register src);
never@739 1220 void imull(Register dst, Register src, int value);
rbackman@5997 1221 void imull(Register dst, Address src);
never@739 1222
kvn@7152 1223 #ifdef _LP64
never@739 1224 void imulq(Register dst, Register src);
never@739 1225 void imulq(Register dst, Register src, int value);
rbackman@5997 1226 void imulq(Register dst, Address src);
rbackman@5997 1227 #endif
never@739 1228
duke@435 1229 // jcc is the generic conditional branch generator to run-
duke@435 1230 // time routines, jcc is used for branches to labels. jcc
duke@435 1231 // takes a branch opcode (cc) and a label (L) and generates
duke@435 1232 // either a backward branch or a forward branch and links it
duke@435 1233 // to the label fixup chain. Usage:
duke@435 1234 //
duke@435 1235 // Label L; // unbound label
duke@435 1236 // jcc(cc, L); // forward branch to unbound label
duke@435 1237 // bind(L); // bind label to the current pc
duke@435 1238 // jcc(cc, L); // backward branch to bound label
duke@435 1239 // bind(L); // illegal: a label may be bound only once
duke@435 1240 //
duke@435 1241 // Note: The same Label can be used for forward and backward branches
duke@435 1242 // but it may be bound only once.
duke@435 1243
kvn@3049 1244 void jcc(Condition cc, Label& L, bool maybe_short = true);
duke@435 1245
duke@435 1246 // Conditional jump to a 8-bit offset to L.
duke@435 1247 // WARNING: be very careful using this for forward jumps. If the label is
duke@435 1248 // not bound within an 8-bit offset of this instruction, a run-time error
duke@435 1249 // will occur.
duke@435 1250 void jccb(Condition cc, Label& L);
duke@435 1251
never@739 1252 void jmp(Address entry); // pc <- entry
never@739 1253
never@739 1254 // Label operations & relative jumps (PPUM Appendix D)
kvn@3049 1255 void jmp(Label& L, bool maybe_short = true); // unconditional jump to L
never@739 1256
never@739 1257 void jmp(Register entry); // pc <- entry
never@739 1258
never@739 1259 // Unconditional 8-bit offset jump to L.
never@739 1260 // WARNING: be very careful using this for forward jumps. If the label is
never@739 1261 // not bound within an 8-bit offset of this instruction, a run-time error
never@739 1262 // will occur.
never@739 1263 void jmpb(Label& L);
never@739 1264
never@739 1265 void ldmxcsr( Address src );
never@739 1266
never@739 1267 void leal(Register dst, Address src);
never@739 1268
never@739 1269 void leaq(Register dst, Address src);
never@739 1270
twisti@4318 1271 void lfence();
never@739 1272
never@739 1273 void lock();
never@739 1274
twisti@1210 1275 void lzcntl(Register dst, Register src);
twisti@1210 1276
twisti@1210 1277 #ifdef _LP64
twisti@1210 1278 void lzcntq(Register dst, Register src);
twisti@1210 1279 #endif
twisti@1210 1280
never@739 1281 enum Membar_mask_bits {
never@739 1282 StoreStore = 1 << 3,
never@739 1283 LoadStore = 1 << 2,
never@739 1284 StoreLoad = 1 << 1,
never@739 1285 LoadLoad = 1 << 0
never@739 1286 };
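// The mask bits above combine with '|' (usage sketch; see membar() below):
//
//   membar(Membar_mask_bits(LoadLoad | LoadStore));  // acquire-style ordering
//   membar(StoreLoad);   // the only case that emits code on x86, per membar() below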
never@739 1287
never@1106 1288 // Serializes memory and blows flags
never@739 1289 void membar(Membar_mask_bits order_constraint) {
never@1106 1290 if (os::is_MP()) {
never@1106 1291 // We only have to handle StoreLoad
never@1106 1292 if (order_constraint & StoreLoad) {
never@1106 1293 // All usable chips support "locked" instructions which suffice
never@1106 1294 // as barriers, and are much faster than the alternative of
never@1106 1295 // using cpuid instruction. We use here a locked add [esp],0.
never@1106 1296 // This is conveniently otherwise a no-op except for blowing
never@1106 1297 // flags.
never@1106 1298 // Any change to this code may need to revisit other places in
never@1106 1299 // the code where this idiom is used, in particular the
never@1106 1300 // orderAccess code.
never@1106 1301 lock();
never@1106 1302 addl(Address(rsp, 0), 0); // Assert the lock# signal here
never@1106 1303 }
never@1106 1304 }
never@739 1305 }
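// Hedged usage sketch (the surrounding stores/loads are assumptions, not code
// from this file): only a StoreLoad constraint actually emits an instruction,
// e.g.
//
//   movl(Address(rbx, 0), rax);      // store
//   membar(Assembler::StoreLoad);    // emits lock addl [rsp], 0
//   movl(rcx, Address(rdx, 0));      // this load cannot pass the store above
//
// The other constraints (LoadLoad, LoadStore, StoreStore) are already honored
// by x86's memory model, so they expand to nothing here.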
never@739 1306
never@739 1307 void mfence();
never@739 1308
never@739 1309 // Moves
never@739 1310
never@739 1311 void mov64(Register dst, int64_t imm64);
never@739 1312
never@739 1313 void movb(Address dst, Register src);
never@739 1314 void movb(Address dst, int imm8);
never@739 1315 void movb(Register dst, Address src);
never@739 1316
never@739 1317 void movdl(XMMRegister dst, Register src);
never@739 1318 void movdl(Register dst, XMMRegister src);
kvn@2602 1319 void movdl(XMMRegister dst, Address src);
kvn@3882 1320 void movdl(Address dst, XMMRegister src);
never@739 1321
never@739 1322 // Move Double Quadword
never@739 1323 void movdq(XMMRegister dst, Register src);
never@739 1324 void movdq(Register dst, XMMRegister src);
never@739 1325
never@739 1326 // Move Aligned Double Quadword
never@739 1327 void movdqa(XMMRegister dst, XMMRegister src);
drchase@5353 1328 void movdqa(XMMRegister dst, Address src);
never@739 1329
kvn@840 1330 // Move Unaligned Double Quadword
kvn@840 1331 void movdqu(Address dst, XMMRegister src);
kvn@840 1332 void movdqu(XMMRegister dst, Address src);
kvn@840 1333 void movdqu(XMMRegister dst, XMMRegister src);
kvn@840 1334
kvn@3882 1335 // Move Unaligned 256bit Vector
kvn@3882 1336 void vmovdqu(Address dst, XMMRegister src);
kvn@3882 1337 void vmovdqu(XMMRegister dst, Address src);
kvn@3882 1338 void vmovdqu(XMMRegister dst, XMMRegister src);
kvn@3882 1339
kvn@3882 1340 // Move lower 64bit to high 64bit in 128bit register
kvn@3882 1341 void movlhps(XMMRegister dst, XMMRegister src);
kvn@3882 1342
never@739 1343 void movl(Register dst, int32_t imm32);
never@739 1344 void movl(Address dst, int32_t imm32);
never@739 1345 void movl(Register dst, Register src);
never@739 1346 void movl(Register dst, Address src);
never@739 1347 void movl(Address dst, Register src);
never@739 1348
never@739 1349 // These dummies prevent calls to movl from silently converting a zero (like NULL) into a Register
never@739 1350 // by giving the compiler two choices it can't resolve
never@739 1351
never@739 1352 void movl(Address dst, void* junk);
never@739 1353 void movl(Register dst, void* junk);
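// Illustrative reading of the comment above (overload-resolution details are an
// assumption, not spelled out here): with the void* overloads declared, a call
// such as
//   movl(rax, NULL);
// matches both movl(Register, int32_t) and movl(Register, void*), so it is
// ambiguous and rejected at compile time instead of quietly loading a 32-bit zero.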
never@739 1354
never@739 1355 #ifdef _LP64
never@739 1356 void movq(Register dst, Register src);
never@739 1357 void movq(Register dst, Address src);
phh@2423 1358 void movq(Address dst, Register src);
never@739 1359 #endif
never@739 1360
never@739 1361 void movq(Address dst, MMXRegister src );
never@739 1362 void movq(MMXRegister dst, Address src );
never@739 1363
never@739 1364 #ifdef _LP64
never@739 1365 // These dummies prevent calls to movq from silently converting a zero (like NULL) into a Register
never@739 1366 // by giving the compiler two choices it can't resolve
never@739 1367
never@739 1368 void movq(Address dst, void* dummy);
never@739 1369 void movq(Register dst, void* dummy);
never@739 1370 #endif
never@739 1371
never@739 1372 // Move Quadword
never@739 1373 void movq(Address dst, XMMRegister src);
never@739 1374 void movq(XMMRegister dst, Address src);
never@739 1375
never@739 1376 void movsbl(Register dst, Address src);
never@739 1377 void movsbl(Register dst, Register src);
never@739 1378
never@739 1379 #ifdef _LP64
twisti@1059 1380 void movsbq(Register dst, Address src);
twisti@1059 1381 void movsbq(Register dst, Register src);
twisti@1059 1382
never@739 1383 // Move signed 32bit immediate to 64bit, sign-extending it
phh@2423 1384 void movslq(Address dst, int32_t imm64);
never@739 1385 void movslq(Register dst, int32_t imm64);
never@739 1386
never@739 1387 void movslq(Register dst, Address src);
never@739 1388 void movslq(Register dst, Register src);
never@739 1389 void movslq(Register dst, void* src); // Dummy declaration to cause NULL to be ambiguous
never@739 1390 #endif
never@739 1391
never@739 1392 void movswl(Register dst, Address src);
never@739 1393 void movswl(Register dst, Register src);
never@739 1394
twisti@1059 1395 #ifdef _LP64
twisti@1059 1396 void movswq(Register dst, Address src);
twisti@1059 1397 void movswq(Register dst, Register src);
twisti@1059 1398 #endif
twisti@1059 1399
never@739 1400 void movw(Address dst, int imm16);
never@739 1401 void movw(Register dst, Address src);
never@739 1402 void movw(Address dst, Register src);
never@739 1403
never@739 1404 void movzbl(Register dst, Address src);
never@739 1405 void movzbl(Register dst, Register src);
never@739 1406
twisti@1059 1407 #ifdef _LP64
twisti@1059 1408 void movzbq(Register dst, Address src);
twisti@1059 1409 void movzbq(Register dst, Register src);
twisti@1059 1410 #endif
twisti@1059 1411
never@739 1412 void movzwl(Register dst, Address src);
never@739 1413 void movzwl(Register dst, Register src);
never@739 1414
twisti@1059 1415 #ifdef _LP64
twisti@1059 1416 void movzwq(Register dst, Address src);
twisti@1059 1417 void movzwq(Register dst, Register src);
twisti@1059 1418 #endif
twisti@1059 1419
kvn@7152 1420 // Unsigned multiply with RAX destination register
never@739 1421 void mull(Address src);
never@739 1422 void mull(Register src);
never@739 1423
kvn@7152 1424 #ifdef _LP64
kvn@7152 1425 void mulq(Address src);
kvn@7152 1426 void mulq(Register src);
kvn@7152 1427 void mulxq(Register dst1, Register dst2, Register src);
kvn@7152 1428 #endif
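// Sketch of the implicit operands (a reading of the ISA, not code from this
// file): mull/mulq multiply by EAX/RAX and leave the double-width product in
// EDX:EAX / RDX:RAX, e.g.
//
//   movq(rax, rbx);   // multiplicand
//   mulq(rcx);        // RDX:RAX = RAX * RCX; flags are clobbered
//
// mulxq is the BMI2 flag-preserving form with RDX as its implicit source
// (operand roles follow the MULX encoding).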
kvn@7152 1429
never@739 1430 // Multiply Scalar Double-Precision Floating-Point Values
never@739 1431 void mulsd(XMMRegister dst, Address src);
never@739 1432 void mulsd(XMMRegister dst, XMMRegister src);
never@739 1433
never@739 1434 // Multiply Scalar Single-Precision Floating-Point Values
never@739 1435 void mulss(XMMRegister dst, Address src);
never@739 1436 void mulss(XMMRegister dst, XMMRegister src);
never@739 1437
never@739 1438 void negl(Register dst);
never@739 1439
never@739 1440 #ifdef _LP64
never@739 1441 void negq(Register dst);
never@739 1442 #endif
never@739 1443
never@739 1444 void nop(int i = 1);
never@739 1445
never@739 1446 void notl(Register dst);
never@739 1447
never@739 1448 #ifdef _LP64
never@739 1449 void notq(Register dst);
never@739 1450 #endif
never@739 1451
never@739 1452 void orl(Address dst, int32_t imm32);
never@739 1453 void orl(Register dst, int32_t imm32);
never@739 1454 void orl(Register dst, Address src);
never@739 1455 void orl(Register dst, Register src);
never@739 1456
never@739 1457 void orq(Address dst, int32_t imm32);
never@739 1458 void orq(Register dst, int32_t imm32);
never@739 1459 void orq(Register dst, Address src);
never@739 1460 void orq(Register dst, Register src);
never@739 1461
kvn@3388 1462 // Pack with unsigned saturation
kvn@3388 1463 void packuswb(XMMRegister dst, XMMRegister src);
kvn@3388 1464 void packuswb(XMMRegister dst, Address src);
kvn@4479 1465 void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4479 1466
kvn@4479 1467 // Permutation of 64bit words
kvn@4479 1468 void vpermq(XMMRegister dst, XMMRegister src, int imm8, bool vector256);
kvn@3388 1469
kvn@6429 1470 void pause();
kvn@6429 1471
cfang@1116 1472 // SSE4.2 string instructions
cfang@1116 1473 void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
cfang@1116 1474 void pcmpestri(XMMRegister xmm1, Address src, int imm8);
cfang@1116 1475
drchase@5353 1476 // SSE 4.1 extract
drchase@5353 1477 void pextrd(Register dst, XMMRegister src, int imm8);
drchase@5353 1478 void pextrq(Register dst, XMMRegister src, int imm8);
drchase@5353 1479
drchase@5353 1480 // SSE 4.1 insert
drchase@5353 1481 void pinsrd(XMMRegister dst, Register src, int imm8);
drchase@5353 1482 void pinsrq(XMMRegister dst, Register src, int imm8);
drchase@5353 1483
kvn@3388 1484 // SSE4.1 packed move
kvn@3388 1485 void pmovzxbw(XMMRegister dst, XMMRegister src);
kvn@3388 1486 void pmovzxbw(XMMRegister dst, Address src);
kvn@3388 1487
roland@1495 1488 #ifndef _LP64 // no 32bit push/pop on amd64
never@739 1489 void popl(Address dst);
roland@1495 1490 #endif
never@739 1491
never@739 1492 #ifdef _LP64
never@739 1493 void popq(Address dst);
never@739 1494 #endif
never@739 1495
twisti@1078 1496 void popcntl(Register dst, Address src);
twisti@1078 1497 void popcntl(Register dst, Register src);
twisti@1078 1498
twisti@1078 1499 #ifdef _LP64
twisti@1078 1500 void popcntq(Register dst, Address src);
twisti@1078 1501 void popcntq(Register dst, Register src);
twisti@1078 1502 #endif
twisti@1078 1503
never@739 1504 // Prefetches (SSE, SSE2, 3DNOW only)
never@739 1505
never@739 1506 void prefetchnta(Address src);
never@739 1507 void prefetchr(Address src);
never@739 1508 void prefetcht0(Address src);
never@739 1509 void prefetcht1(Address src);
never@739 1510 void prefetcht2(Address src);
never@739 1511 void prefetchw(Address src);
never@739 1512
kvn@4205 1513 // Shuffle Bytes
kvn@4205 1514 void pshufb(XMMRegister dst, XMMRegister src);
kvn@4205 1515 void pshufb(XMMRegister dst, Address src);
kvn@4205 1516
never@739 1517 // Shuffle Packed Doublewords
never@739 1518 void pshufd(XMMRegister dst, XMMRegister src, int mode);
never@739 1519 void pshufd(XMMRegister dst, Address src, int mode);
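// The mode byte packs four 2-bit source indices, one per destination dword (an
// ISA reading, not restated elsewhere in this file), e.g.
//   pshufd(xmm0, xmm1, 0x00);   // replicate dword 0 of xmm1 into all four lanes
//   pshufd(xmm0, xmm1, 0xE4);   // binary 11 10 01 00, i.e. the identity shuffle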
never@739 1520
never@739 1521 // Shuffle Packed Low Words
never@739 1522 void pshuflw(XMMRegister dst, XMMRegister src, int mode);
never@739 1523 void pshuflw(XMMRegister dst, Address src, int mode);
never@739 1524
kvn@2602 1525 // Shift Right Logical DoubleQuadword by bytes (immediate)
kvn@2602 1526 void psrldq(XMMRegister dst, int shift);
kvn@2602 1527
kvn@4413 1528 // Logical Compare 128bit
cfang@1116 1529 void ptest(XMMRegister dst, XMMRegister src);
cfang@1116 1530 void ptest(XMMRegister dst, Address src);
kvn@4413 1531 // Logical Compare 256bit
kvn@4413 1532 void vptest(XMMRegister dst, XMMRegister src);
kvn@4413 1533 void vptest(XMMRegister dst, Address src);
cfang@1116 1534
never@739 1535 // Interleave Low Bytes
never@739 1536 void punpcklbw(XMMRegister dst, XMMRegister src);
kvn@3388 1537 void punpcklbw(XMMRegister dst, Address src);
kvn@3388 1538
kvn@3388 1539 // Interleave Low Doublewords
kvn@3388 1540 void punpckldq(XMMRegister dst, XMMRegister src);
kvn@3388 1541 void punpckldq(XMMRegister dst, Address src);
never@739 1542
kvn@3929 1543 // Interleave Low Quadwords
kvn@3929 1544 void punpcklqdq(XMMRegister dst, XMMRegister src);
kvn@3929 1545
roland@1495 1546 #ifndef _LP64 // no 32bit push/pop on amd64
never@739 1547 void pushl(Address src);
roland@1495 1548 #endif
never@739 1549
never@739 1550 void pushq(Address src);
never@739 1551
never@739 1552 void rcll(Register dst, int imm8);
never@739 1553
never@739 1554 void rclq(Register dst, int imm8);
never@739 1555
kvn@6429 1556 void rdtsc();
kvn@6429 1557
never@739 1558 void ret(int imm16);
duke@435 1559
kvn@7152 1560 #ifdef _LP64
kvn@7152 1561 void rorq(Register dst, int imm8);
kvn@7152 1562 void rorxq(Register dst, Register src, int imm8);
kvn@7152 1563 #endif
kvn@7152 1564
duke@435 1565 void sahf();
duke@435 1566
never@739 1567 void sarl(Register dst, int imm8);
never@739 1568 void sarl(Register dst);
never@739 1569
never@739 1570 void sarq(Register dst, int imm8);
never@739 1571 void sarq(Register dst);
never@739 1572
never@739 1573 void sbbl(Address dst, int32_t imm32);
never@739 1574 void sbbl(Register dst, int32_t imm32);
never@739 1575 void sbbl(Register dst, Address src);
never@739 1576 void sbbl(Register dst, Register src);
never@739 1577
never@739 1578 void sbbq(Address dst, int32_t imm32);
never@739 1579 void sbbq(Register dst, int32_t imm32);
never@739 1580 void sbbq(Register dst, Address src);
never@739 1581 void sbbq(Register dst, Register src);
never@739 1582
never@739 1583 void setb(Condition cc, Register dst);
never@739 1584
never@739 1585 void shldl(Register dst, Register src);
never@739 1586
never@739 1587 void shll(Register dst, int imm8);
never@739 1588 void shll(Register dst);
never@739 1589
never@739 1590 void shlq(Register dst, int imm8);
never@739 1591 void shlq(Register dst);
never@739 1592
never@739 1593 void shrdl(Register dst, Register src);
never@739 1594
never@739 1595 void shrl(Register dst, int imm8);
never@739 1596 void shrl(Register dst);
never@739 1597
never@739 1598 void shrq(Register dst, int imm8);
never@739 1599 void shrq(Register dst);
never@739 1600
never@739 1601 void smovl(); // QQQ generic?
never@739 1602
never@739 1603 // Compute Square Root of Scalar Double-Precision Floating-Point Value
never@739 1604 void sqrtsd(XMMRegister dst, Address src);
never@739 1605 void sqrtsd(XMMRegister dst, XMMRegister src);
never@739 1606
twisti@2350 1607 // Compute Square Root of Scalar Single-Precision Floating-Point Value
twisti@2350 1608 void sqrtss(XMMRegister dst, Address src);
twisti@2350 1609 void sqrtss(XMMRegister dst, XMMRegister src);
twisti@2350 1610
twisti@4318 1611 void std();
never@739 1612
never@739 1613 void stmxcsr( Address dst );
never@739 1614
never@739 1615 void subl(Address dst, int32_t imm32);
never@739 1616 void subl(Address dst, Register src);
never@739 1617 void subl(Register dst, int32_t imm32);
never@739 1618 void subl(Register dst, Address src);
never@739 1619 void subl(Register dst, Register src);
never@739 1620
never@739 1621 void subq(Address dst, int32_t imm32);
never@739 1622 void subq(Address dst, Register src);
never@739 1623 void subq(Register dst, int32_t imm32);
never@739 1624 void subq(Register dst, Address src);
never@739 1625 void subq(Register dst, Register src);
never@739 1626
kvn@3574 1627 // Force generation of a 4-byte immediate value even if it fits into 8 bits
kvn@3574 1628 void subl_imm32(Register dst, int32_t imm32);
kvn@3574 1629 void subq_imm32(Register dst, int32_t imm32);
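// Rationale sketch (assumed, not stated above): forcing the long form keeps the
// encoded length independent of the immediate's value, which matters when the
// emitted size must be predictable, e.g.
//   subq_imm32(rsp, 16);   // always the 7-byte imm32 encoding
//   subq(rsp, 16);         // may shrink to the 4-byte imm8 form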
never@739 1630
never@739 1631 // Subtract Scalar Double-Precision Floating-Point Values
never@739 1632 void subsd(XMMRegister dst, Address src);
never@739 1633 void subsd(XMMRegister dst, XMMRegister src);
never@739 1634
never@739 1635 // Subtract Scalar Single-Precision Floating-Point Values
never@739 1636 void subss(XMMRegister dst, Address src);
duke@435 1637 void subss(XMMRegister dst, XMMRegister src);
never@739 1638
never@739 1639 void testb(Register dst, int imm8);
never@739 1640
never@739 1641 void testl(Register dst, int32_t imm32);
never@739 1642 void testl(Register dst, Register src);
never@739 1643 void testl(Register dst, Address src);
never@739 1644
never@739 1645 void testq(Register dst, int32_t imm32);
never@739 1646 void testq(Register dst, Register src);
never@739 1647
iveresov@6378 1648 // BMI - count trailing zeros
iveresov@6378 1649 void tzcntl(Register dst, Register src);
iveresov@6378 1650 void tzcntq(Register dst, Register src);
never@739 1651
never@739 1652 // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
never@739 1653 void ucomisd(XMMRegister dst, Address src);
never@739 1654 void ucomisd(XMMRegister dst, XMMRegister src);
never@739 1655
never@739 1656 // Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
never@739 1657 void ucomiss(XMMRegister dst, Address src);
duke@435 1658 void ucomiss(XMMRegister dst, XMMRegister src);
never@739 1659
kvn@6429 1660 void xabort(int8_t imm8);
kvn@6429 1661
never@739 1662 void xaddl(Address dst, Register src);
never@739 1663
never@739 1664 void xaddq(Address dst, Register src);
never@739 1665
kvn@6429 1666 void xbegin(Label& abort, relocInfo::relocType rtype = relocInfo::none);
kvn@6429 1667
never@739 1668 void xchgl(Register reg, Address adr);
never@739 1669 void xchgl(Register dst, Register src);
never@739 1670
never@739 1671 void xchgq(Register reg, Address adr);
never@739 1672 void xchgq(Register dst, Register src);
never@739 1673
kvn@6429 1674 void xend();
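// Hedged sketch of how xbegin/xend (and xabort above) fit together; the abort
// handling shown is an assumption, not part of this file:
//
//   Label abort;
//   xbegin(abort);     // enter an RTM transaction; any abort jumps to 'abort'
//   ... speculative work ...
//   xend();            // commit the transaction
//   ...
//   bind(abort);       // on this path EAX carries the abort status bits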
kvn@6429 1675
kvn@3388 1676 // Get Value of Extended Control Register
twisti@4318 1677 void xgetbv();
kvn@3388 1678
never@739 1679 void xorl(Register dst, int32_t imm32);
never@739 1680 void xorl(Register dst, Address src);
never@739 1681 void xorl(Register dst, Register src);
never@739 1682
never@739 1683 void xorq(Register dst, Address src);
never@739 1684 void xorq(Register dst, Register src);
never@739 1685
kvn@3388 1686 void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0
kvn@3388 1687
kvn@3929 1688 // AVX 3-operand scalar instructions (encoded with VEX prefix)
kvn@4001 1689
kvn@3390 1690 void vaddsd(XMMRegister dst, XMMRegister nds, Address src);
kvn@3390 1691 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
kvn@3390 1692 void vaddss(XMMRegister dst, XMMRegister nds, Address src);
kvn@3390 1693 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src);
kvn@3390 1694 void vdivsd(XMMRegister dst, XMMRegister nds, Address src);
kvn@3390 1695 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
kvn@3390 1696 void vdivss(XMMRegister dst, XMMRegister nds, Address src);
kvn@3390 1697 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src);
kvn@3390 1698 void vmulsd(XMMRegister dst, XMMRegister nds, Address src);
kvn@3390 1699 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
kvn@3390 1700 void vmulss(XMMRegister dst, XMMRegister nds, Address src);
kvn@3390 1701 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src);
kvn@3390 1702 void vsubsd(XMMRegister dst, XMMRegister nds, Address src);
kvn@3390 1703 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
kvn@3390 1704 void vsubss(XMMRegister dst, XMMRegister nds, Address src);
kvn@3390 1705 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
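// A reading of the 3-operand form (semantics come from the AVX spec, they are
// not restated in this file): dst, nds and src may all differ, e.g.
//   vaddsd(xmm0, xmm1, xmm2);   // xmm0[63:0] = xmm1[63:0] + xmm2[63:0]
// is non-destructive; the untouched upper bits of dst are copied from nds.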
kvn@3929 1706
kvn@4001 1707
kvn@4001 1708 //====================VECTOR ARITHMETIC=====================================
kvn@4001 1709
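// Note on the bool vector256 parameter used throughout this section (an
// interpretation of the VEX.L encoding, not new behavior): false selects the
// 128-bit XMM form, true the 256-bit YMM form, e.g.
//   vaddpd(xmm0, xmm1, xmm2, /*vector256=*/true);   // four packed doubles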
kvn@4001 1710 // Add Packed Floating-Point Values
kvn@4001 1711 void addpd(XMMRegister dst, XMMRegister src);
kvn@4001 1712 void addps(XMMRegister dst, XMMRegister src);
kvn@4001 1713 void vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1714 void vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1715 void vaddpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1716 void vaddps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1717
kvn@4001 1718 // Subtract Packed Floating-Point Values
kvn@4001 1719 void subpd(XMMRegister dst, XMMRegister src);
kvn@4001 1720 void subps(XMMRegister dst, XMMRegister src);
kvn@4001 1721 void vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1722 void vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1723 void vsubpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1724 void vsubps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1725
kvn@4001 1726 // Multiply Packed Floating-Point Values
kvn@4001 1727 void mulpd(XMMRegister dst, XMMRegister src);
kvn@4001 1728 void mulps(XMMRegister dst, XMMRegister src);
kvn@4001 1729 void vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1730 void vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1731 void vmulpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1732 void vmulps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1733
kvn@4001 1734 // Divide Packed Floating-Point Values
kvn@4001 1735 void divpd(XMMRegister dst, XMMRegister src);
kvn@4001 1736 void divps(XMMRegister dst, XMMRegister src);
kvn@4001 1737 void vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1738 void vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1739 void vdivpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1740 void vdivps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1741
kvn@4001 1742 // Bitwise Logical AND of Packed Floating-Point Values
kvn@4001 1743 void andpd(XMMRegister dst, XMMRegister src);
kvn@4001 1744 void andps(XMMRegister dst, XMMRegister src);
kvn@4001 1745 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1746 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1747 void vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1748 void vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1749
kvn@4001 1750 // Bitwise Logical XOR of Packed Floating-Point Values
kvn@4001 1751 void xorpd(XMMRegister dst, XMMRegister src);
kvn@4001 1752 void xorps(XMMRegister dst, XMMRegister src);
kvn@3882 1753 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@3882 1754 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1755 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1756 void vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1757
kvn@4001 1758 // Add packed integers
kvn@4001 1759 void paddb(XMMRegister dst, XMMRegister src);
kvn@4001 1760 void paddw(XMMRegister dst, XMMRegister src);
kvn@4001 1761 void paddd(XMMRegister dst, XMMRegister src);
kvn@4001 1762 void paddq(XMMRegister dst, XMMRegister src);
kvn@4001 1763 void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1764 void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1765 void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1766 void vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1767 void vpaddb(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1768 void vpaddw(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1769 void vpaddd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1770 void vpaddq(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1771
kvn@4001 1772 // Sub packed integers
kvn@4001 1773 void psubb(XMMRegister dst, XMMRegister src);
kvn@4001 1774 void psubw(XMMRegister dst, XMMRegister src);
kvn@4001 1775 void psubd(XMMRegister dst, XMMRegister src);
kvn@4001 1776 void psubq(XMMRegister dst, XMMRegister src);
kvn@4001 1777 void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1778 void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1779 void vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1780 void vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1781 void vpsubb(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1782 void vpsubw(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1783 void vpsubd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1784 void vpsubq(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1785
kvn@4001 1786 // Multiply packed integers (only shorts and ints)
kvn@4001 1787 void pmullw(XMMRegister dst, XMMRegister src);
kvn@4001 1788 void pmulld(XMMRegister dst, XMMRegister src);
kvn@4001 1789 void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1790 void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1791 void vpmullw(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1792 void vpmulld(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1793
kvn@4001 1794 // Shift left packed integers
kvn@4001 1795 void psllw(XMMRegister dst, int shift);
kvn@4001 1796 void pslld(XMMRegister dst, int shift);
kvn@4001 1797 void psllq(XMMRegister dst, int shift);
kvn@4001 1798 void psllw(XMMRegister dst, XMMRegister shift);
kvn@4001 1799 void pslld(XMMRegister dst, XMMRegister shift);
kvn@4001 1800 void psllq(XMMRegister dst, XMMRegister shift);
kvn@4001 1801 void vpsllw(XMMRegister dst, XMMRegister src, int shift, bool vector256);
kvn@4001 1802 void vpslld(XMMRegister dst, XMMRegister src, int shift, bool vector256);
kvn@4001 1803 void vpsllq(XMMRegister dst, XMMRegister src, int shift, bool vector256);
kvn@4001 1804 void vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
kvn@4001 1805 void vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
kvn@4001 1806 void vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
kvn@4001 1807
kvn@4001 1808 // Logical shift right packed integers
kvn@4001 1809 void psrlw(XMMRegister dst, int shift);
kvn@4001 1810 void psrld(XMMRegister dst, int shift);
kvn@4001 1811 void psrlq(XMMRegister dst, int shift);
kvn@4001 1812 void psrlw(XMMRegister dst, XMMRegister shift);
kvn@4001 1813 void psrld(XMMRegister dst, XMMRegister shift);
kvn@4001 1814 void psrlq(XMMRegister dst, XMMRegister shift);
kvn@4001 1815 void vpsrlw(XMMRegister dst, XMMRegister src, int shift, bool vector256);
kvn@4001 1816 void vpsrld(XMMRegister dst, XMMRegister src, int shift, bool vector256);
kvn@4001 1817 void vpsrlq(XMMRegister dst, XMMRegister src, int shift, bool vector256);
kvn@4001 1818 void vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
kvn@4001 1819 void vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
kvn@4001 1820 void vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
kvn@4001 1821
kvn@4001 1822 // Arithmetic shift right packed integers (only shorts and ints, no instructions for longs)
kvn@4001 1823 void psraw(XMMRegister dst, int shift);
kvn@4001 1824 void psrad(XMMRegister dst, int shift);
kvn@4001 1825 void psraw(XMMRegister dst, XMMRegister shift);
kvn@4001 1826 void psrad(XMMRegister dst, XMMRegister shift);
kvn@4001 1827 void vpsraw(XMMRegister dst, XMMRegister src, int shift, bool vector256);
kvn@4001 1828 void vpsrad(XMMRegister dst, XMMRegister src, int shift, bool vector256);
kvn@4001 1829 void vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
kvn@4001 1830 void vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
kvn@4001 1831
kvn@4001 1832 // And packed integers
kvn@4001 1833 void pand(XMMRegister dst, XMMRegister src);
kvn@4001 1834 void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1835 void vpand(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1836
kvn@4001 1837 // Or packed integers
kvn@4001 1838 void por(XMMRegister dst, XMMRegister src);
kvn@4001 1839 void vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1840 void vpor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1841
kvn@4001 1842 // Xor packed integers
kvn@4001 1843 void pxor(XMMRegister dst, XMMRegister src);
kvn@3929 1844 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
kvn@4001 1845 void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
kvn@4001 1846
kvn@4001 1847 // Copy low 128bit into high 128bit of YMM registers.
kvn@3882 1848 void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
kvn@3929 1849 void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
kvn@3882 1850
kvn@4103 1851 // Load/store the high 128bit of YMM registers without destroying the other half.
kvn@4103 1852 void vinsertf128h(XMMRegister dst, Address src);
kvn@4103 1853 void vinserti128h(XMMRegister dst, Address src);
kvn@4103 1854 void vextractf128h(Address dst, XMMRegister src);
kvn@4103 1855 void vextracti128h(Address dst, XMMRegister src);
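// Hedged sketch (the stack slot is illustrative, not from this file) of
// spilling only the upper half of a YMM register around 128-bit-only code:
//
//   vextractf128h(Address(rsp, 0), xmm0);   // save ymm0[255:128]
//   ... 128-bit work on xmm0 ...
//   vinsertf128h(xmm0, Address(rsp, 0));    // restore ymm0[255:128]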
kvn@4103 1856
kvn@4411 1857 // Duplicate 4-byte integer data from src into 8 locations in dest
kvn@4411 1858 void vpbroadcastd(XMMRegister dst, XMMRegister src);
kvn@4411 1859
drchase@5353 1860 // Carry-Less Multiplication Quadword
kvn@7025 1861 void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
drchase@5353 1862 void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
drchase@5353 1863
kvn@3882 1864 // AVX instruction used to clear the upper 128 bits of YMM registers and
kvn@3882 1865 // to avoid the transition penalty between AVX and SSE states. There is no
kvn@3882 1866 // penalty if legacy SSE instructions are encoded using the VEX prefix because
kvn@3882 1867 // they always clear the upper 128 bits. It should be used before calling
kvn@3882 1868 // runtime code and native libraries.
kvn@3882 1869 void vzeroupper();
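// Typical placement (a sketch; the call that follows is assumed, not shown):
//   ... 256-bit AVX work on ymm registers ...
//   vzeroupper();   // leave the dirty-upper state
//   // call into runtime code or a native library encoded with legacy SSE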
kvn@3390 1870
kvn@3388 1871 protected:
kvn@3388 1872 // The following instructions require 16-byte address alignment in SSE mode.
kvn@3388 1873 // They should be called only from the corresponding MacroAssembler instructions.
kvn@3388 1874 void andpd(XMMRegister dst, Address src);
kvn@3388 1875 void andps(XMMRegister dst, Address src);
never@739 1876 void xorpd(XMMRegister dst, Address src);
never@739 1877 void xorps(XMMRegister dst, Address src);
kvn@3388 1878
duke@435 1879 };
duke@435 1880
stefank@2314 1881 #endif // CPU_X86_VM_ASSEMBLER_X86_HPP
