Sat, 07 Nov 2020 10:30:02 +0800
Added tag mips-jdk8u275-b01 for changeset d3b4d62f391f
duke@435 | 1 | /* |
zgu@4492 | 2 | * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #ifndef CPU_X86_VM_ASSEMBLER_X86_HPP |
stefank@2314 | 26 | #define CPU_X86_VM_ASSEMBLER_X86_HPP |
stefank@2314 | 27 | |
twisti@4318 | 28 | #include "asm/register.hpp" |
twisti@4318 | 29 | |
duke@435 | 30 | class BiasedLockingCounters; |
duke@435 | 31 | |
duke@435 | 32 | // Contains all the definitions needed for x86 assembly code generation. |
duke@435 | 33 | |
duke@435 | 34 | // Calling convention |
duke@435 | 35 | class Argument VALUE_OBJ_CLASS_SPEC { |
duke@435 | 36 | public: |
duke@435 | 37 | enum { |
duke@435 | 38 | #ifdef _LP64 |
duke@435 | 39 | #ifdef _WIN64 |
duke@435 | 40 | n_int_register_parameters_c = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) |
duke@435 | 41 | n_float_register_parameters_c = 4, // xmm0 - xmm3 (c_farg0, c_farg1, ... ) |
duke@435 | 42 | #else |
duke@435 | 43 | n_int_register_parameters_c = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) |
duke@435 | 44 | n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ... ) |
duke@435 | 45 | #endif // _WIN64 |
duke@435 | 46 | n_int_register_parameters_j = 6, // j_rarg0, j_rarg1, ... |
duke@435 | 47 | n_float_register_parameters_j = 8 // j_farg0, j_farg1, ... |
duke@435 | 48 | #else |
duke@435 | 49 | n_register_parameters = 0 // 0 registers used to pass arguments |
duke@435 | 50 | #endif // _LP64 |
duke@435 | 51 | }; |
duke@435 | 52 | }; |
duke@435 | 53 | |
duke@435 | 54 | |
duke@435 | 55 | #ifdef _LP64 |
duke@435 | 56 | // Symbolically name the register arguments used by the c calling convention. |
duke@435 | 57 | // Windows is different from linux/solaris. So much for standards... |
duke@435 | 58 | |
duke@435 | 59 | #ifdef _WIN64 |
duke@435 | 60 | |
duke@435 | 61 | REGISTER_DECLARATION(Register, c_rarg0, rcx); |
duke@435 | 62 | REGISTER_DECLARATION(Register, c_rarg1, rdx); |
duke@435 | 63 | REGISTER_DECLARATION(Register, c_rarg2, r8); |
duke@435 | 64 | REGISTER_DECLARATION(Register, c_rarg3, r9); |
duke@435 | 65 | |
never@739 | 66 | REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0); |
never@739 | 67 | REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1); |
never@739 | 68 | REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2); |
never@739 | 69 | REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3); |
duke@435 | 70 | |
duke@435 | 71 | #else |
duke@435 | 72 | |
duke@435 | 73 | REGISTER_DECLARATION(Register, c_rarg0, rdi); |
duke@435 | 74 | REGISTER_DECLARATION(Register, c_rarg1, rsi); |
duke@435 | 75 | REGISTER_DECLARATION(Register, c_rarg2, rdx); |
duke@435 | 76 | REGISTER_DECLARATION(Register, c_rarg3, rcx); |
duke@435 | 77 | REGISTER_DECLARATION(Register, c_rarg4, r8); |
duke@435 | 78 | REGISTER_DECLARATION(Register, c_rarg5, r9); |
duke@435 | 79 | |
never@739 | 80 | REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0); |
never@739 | 81 | REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1); |
never@739 | 82 | REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2); |
never@739 | 83 | REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3); |
never@739 | 84 | REGISTER_DECLARATION(XMMRegister, c_farg4, xmm4); |
never@739 | 85 | REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5); |
never@739 | 86 | REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6); |
never@739 | 87 | REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7); |
duke@435 | 88 | |
duke@435 | 89 | #endif // _WIN64 |
duke@435 | 90 | |
duke@435 | 91 | // Symbolically name the register arguments used by the Java calling convention. |
duke@435 | 92 | // We have control over the convention for java so we can do what we please. |
duke@435 | 93 | // What pleases us is to offset the java calling convention so that when |
duke@435 | 94 | // we call a suitable jni method the arguments are lined up and we don't |
duke@435 | 95 | // have to do little shuffling. A suitable jni method is non-static and a |
duke@435 | 96 | // small number of arguments (two fewer args on windows) |
duke@435 | 97 | // |
duke@435 | 98 | // |-------------------------------------------------------| |
duke@435 | 99 | // | c_rarg0 c_rarg1 c_rarg2 c_rarg3 c_rarg4 c_rarg5 | |
duke@435 | 100 | // |-------------------------------------------------------| |
duke@435 | 101 | // | rcx rdx r8 r9 rdi* rsi* | windows (* not a c_rarg) |
duke@435 | 102 | // | rdi rsi rdx rcx r8 r9 | solaris/linux |
duke@435 | 103 | // |-------------------------------------------------------| |
duke@435 | 104 | // | j_rarg5 j_rarg0 j_rarg1 j_rarg2 j_rarg3 j_rarg4 | |
duke@435 | 105 | // |-------------------------------------------------------| |
duke@435 | 106 | |
duke@435 | 107 | REGISTER_DECLARATION(Register, j_rarg0, c_rarg1); |
duke@435 | 108 | REGISTER_DECLARATION(Register, j_rarg1, c_rarg2); |
duke@435 | 109 | REGISTER_DECLARATION(Register, j_rarg2, c_rarg3); |
duke@435 | 110 | // Windows runs out of register args here |
duke@435 | 111 | #ifdef _WIN64 |
duke@435 | 112 | REGISTER_DECLARATION(Register, j_rarg3, rdi); |
duke@435 | 113 | REGISTER_DECLARATION(Register, j_rarg4, rsi); |
duke@435 | 114 | #else |
duke@435 | 115 | REGISTER_DECLARATION(Register, j_rarg3, c_rarg4); |
duke@435 | 116 | REGISTER_DECLARATION(Register, j_rarg4, c_rarg5); |
duke@435 | 117 | #endif /* _WIN64 */ |
duke@435 | 118 | REGISTER_DECLARATION(Register, j_rarg5, c_rarg0); |
duke@435 | 119 | |
never@739 | 120 | REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0); |
never@739 | 121 | REGISTER_DECLARATION(XMMRegister, j_farg1, xmm1); |
never@739 | 122 | REGISTER_DECLARATION(XMMRegister, j_farg2, xmm2); |
never@739 | 123 | REGISTER_DECLARATION(XMMRegister, j_farg3, xmm3); |
never@739 | 124 | REGISTER_DECLARATION(XMMRegister, j_farg4, xmm4); |
never@739 | 125 | REGISTER_DECLARATION(XMMRegister, j_farg5, xmm5); |
never@739 | 126 | REGISTER_DECLARATION(XMMRegister, j_farg6, xmm6); |
never@739 | 127 | REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7); |
duke@435 | 128 | |
duke@435 | 129 | REGISTER_DECLARATION(Register, rscratch1, r10); // volatile |
duke@435 | 130 | REGISTER_DECLARATION(Register, rscratch2, r11); // volatile |
duke@435 | 131 | |
never@739 | 132 | REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved |
duke@435 | 133 | REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved |
duke@435 | 134 | |
never@739 | 135 | #else |
never@739 | 136 | // rscratch1 will apear in 32bit code that is dead but of course must compile |
never@739 | 137 | // Using noreg ensures if the dead code is incorrectly live and executed it |
never@739 | 138 | // will cause an assertion failure |
never@739 | 139 | #define rscratch1 noreg |
iveresov@2344 | 140 | #define rscratch2 noreg |
never@739 | 141 | |
duke@435 | 142 | #endif // _LP64 |
duke@435 | 143 | |
zmajo@7854 | 144 | // JSR 292 |
zmajo@7854 | 145 | // On x86, the SP does not have to be saved when invoking method handle intrinsics |
zmajo@7854 | 146 | // or compiled lambda forms. We indicate that by setting rbp_mh_SP_save to noreg. |
zmajo@7854 | 147 | REGISTER_DECLARATION(Register, rbp_mh_SP_save, noreg); |
twisti@1919 | 148 | |
duke@435 | 149 | // Address is an abstraction used to represent a memory location |
duke@435 | 150 | // using any of the amd64 addressing modes with one object. |
duke@435 | 151 | // |
duke@435 | 152 | // Note: A register location is represented via a Register, not |
duke@435 | 153 | // via an address for efficiency & simplicity reasons. |
duke@435 | 154 | |
duke@435 | 155 | class ArrayAddress; |
duke@435 | 156 | |
duke@435 | 157 | class Address VALUE_OBJ_CLASS_SPEC { |
duke@435 | 158 | public: |
duke@435 | 159 | enum ScaleFactor { |
duke@435 | 160 | no_scale = -1, |
duke@435 | 161 | times_1 = 0, |
duke@435 | 162 | times_2 = 1, |
duke@435 | 163 | times_4 = 2, |
never@739 | 164 | times_8 = 3, |
never@739 | 165 | times_ptr = LP64_ONLY(times_8) NOT_LP64(times_4) |
duke@435 | 166 | }; |
jrose@1057 | 167 | static ScaleFactor times(int size) { |
jrose@1057 | 168 | assert(size >= 1 && size <= 8 && is_power_of_2(size), "bad scale size"); |
jrose@1057 | 169 | if (size == 8) return times_8; |
jrose@1057 | 170 | if (size == 4) return times_4; |
jrose@1057 | 171 | if (size == 2) return times_2; |
jrose@1057 | 172 | return times_1; |
jrose@1057 | 173 | } |
jrose@1057 | 174 | static int scale_size(ScaleFactor scale) { |
jrose@1057 | 175 | assert(scale != no_scale, ""); |
jrose@1057 | 176 | assert(((1 << (int)times_1) == 1 && |
jrose@1057 | 177 | (1 << (int)times_2) == 2 && |
jrose@1057 | 178 | (1 << (int)times_4) == 4 && |
jrose@1057 | 179 | (1 << (int)times_8) == 8), ""); |
jrose@1057 | 180 | return (1 << (int)scale); |
jrose@1057 | 181 | } |
duke@435 | 182 | |
duke@435 | 183 | private: |
duke@435 | 184 | Register _base; |
duke@435 | 185 | Register _index; |
duke@435 | 186 | ScaleFactor _scale; |
duke@435 | 187 | int _disp; |
duke@435 | 188 | RelocationHolder _rspec; |
duke@435 | 189 | |
never@739 | 190 | // Easily misused constructors make them private |
never@739 | 191 | // %%% can we make these go away? |
never@739 | 192 | NOT_LP64(Address(address loc, RelocationHolder spec);) |
never@739 | 193 | Address(int disp, address loc, relocInfo::relocType rtype); |
never@739 | 194 | Address(int disp, address loc, RelocationHolder spec); |
duke@435 | 195 | |
duke@435 | 196 | public: |
never@739 | 197 | |
never@739 | 198 | int disp() { return _disp; } |
duke@435 | 199 | // creation |
duke@435 | 200 | Address() |
duke@435 | 201 | : _base(noreg), |
duke@435 | 202 | _index(noreg), |
duke@435 | 203 | _scale(no_scale), |
duke@435 | 204 | _disp(0) { |
duke@435 | 205 | } |
duke@435 | 206 | |
duke@435 | 207 | // No default displacement otherwise Register can be implicitly |
duke@435 | 208 | // converted to 0(Register) which is quite a different animal. |
duke@435 | 209 | |
duke@435 | 210 | Address(Register base, int disp) |
duke@435 | 211 | : _base(base), |
duke@435 | 212 | _index(noreg), |
duke@435 | 213 | _scale(no_scale), |
duke@435 | 214 | _disp(disp) { |
duke@435 | 215 | } |
duke@435 | 216 | |
duke@435 | 217 | Address(Register base, Register index, ScaleFactor scale, int disp = 0) |
duke@435 | 218 | : _base (base), |
duke@435 | 219 | _index(index), |
duke@435 | 220 | _scale(scale), |
duke@435 | 221 | _disp (disp) { |
duke@435 | 222 | assert(!index->is_valid() == (scale == Address::no_scale), |
duke@435 | 223 | "inconsistent address"); |
duke@435 | 224 | } |
duke@435 | 225 | |
jrose@1100 | 226 | Address(Register base, RegisterOrConstant index, ScaleFactor scale = times_1, int disp = 0) |
jrose@1057 | 227 | : _base (base), |
jrose@1057 | 228 | _index(index.register_or_noreg()), |
jrose@1057 | 229 | _scale(scale), |
jrose@1057 | 230 | _disp (disp + (index.constant_or_zero() * scale_size(scale))) { |
jrose@1057 | 231 | if (!index.is_register()) scale = Address::no_scale; |
jrose@1057 | 232 | assert(!_index->is_valid() == (scale == Address::no_scale), |
jrose@1057 | 233 | "inconsistent address"); |
jrose@1057 | 234 | } |
jrose@1057 | 235 | |
jrose@1057 | 236 | Address plus_disp(int disp) const { |
jrose@1057 | 237 | Address a = (*this); |
jrose@1057 | 238 | a._disp += disp; |
jrose@1057 | 239 | return a; |
jrose@1057 | 240 | } |
never@2895 | 241 | Address plus_disp(RegisterOrConstant disp, ScaleFactor scale = times_1) const { |
never@2895 | 242 | Address a = (*this); |
never@2895 | 243 | a._disp += disp.constant_or_zero() * scale_size(scale); |
never@2895 | 244 | if (disp.is_register()) { |
never@2895 | 245 | assert(!a.index()->is_valid(), "competing indexes"); |
never@2895 | 246 | a._index = disp.as_register(); |
never@2895 | 247 | a._scale = scale; |
never@2895 | 248 | } |
never@2895 | 249 | return a; |
never@2895 | 250 | } |
never@2895 | 251 | bool is_same_address(Address a) const { |
never@2895 | 252 | // disregard _rspec |
never@2895 | 253 | return _base == a._base && _disp == a._disp && _index == a._index && _scale == a._scale; |
never@2895 | 254 | } |
jrose@1057 | 255 | |
duke@435 | 256 | // The following two overloads are used in connection with the |
duke@435 | 257 | // ByteSize type (see sizes.hpp). They simplify the use of |
duke@435 | 258 | // ByteSize'd arguments in assembly code. Note that their equivalent |
duke@435 | 259 | // for the optimized build are the member functions with int disp |
duke@435 | 260 | // argument since ByteSize is mapped to an int type in that case. |
duke@435 | 261 | // |
duke@435 | 262 | // Note: DO NOT introduce similar overloaded functions for WordSize |
duke@435 | 263 | // arguments as in the optimized mode, both ByteSize and WordSize |
duke@435 | 264 | // are mapped to the same type and thus the compiler cannot make a |
duke@435 | 265 | // distinction anymore (=> compiler errors). |
duke@435 | 266 | |
duke@435 | 267 | #ifdef ASSERT |
duke@435 | 268 | Address(Register base, ByteSize disp) |
duke@435 | 269 | : _base(base), |
duke@435 | 270 | _index(noreg), |
duke@435 | 271 | _scale(no_scale), |
duke@435 | 272 | _disp(in_bytes(disp)) { |
duke@435 | 273 | } |
duke@435 | 274 | |
duke@435 | 275 | Address(Register base, Register index, ScaleFactor scale, ByteSize disp) |
duke@435 | 276 | : _base(base), |
duke@435 | 277 | _index(index), |
duke@435 | 278 | _scale(scale), |
duke@435 | 279 | _disp(in_bytes(disp)) { |
duke@435 | 280 | assert(!index->is_valid() == (scale == Address::no_scale), |
duke@435 | 281 | "inconsistent address"); |
duke@435 | 282 | } |
jrose@1057 | 283 | |
jrose@1100 | 284 | Address(Register base, RegisterOrConstant index, ScaleFactor scale, ByteSize disp) |
jrose@1057 | 285 | : _base (base), |
jrose@1057 | 286 | _index(index.register_or_noreg()), |
jrose@1057 | 287 | _scale(scale), |
jrose@1057 | 288 | _disp (in_bytes(disp) + (index.constant_or_zero() * scale_size(scale))) { |
jrose@1057 | 289 | if (!index.is_register()) scale = Address::no_scale; |
jrose@1057 | 290 | assert(!_index->is_valid() == (scale == Address::no_scale), |
jrose@1057 | 291 | "inconsistent address"); |
jrose@1057 | 292 | } |
jrose@1057 | 293 | |
duke@435 | 294 | #endif // ASSERT |
duke@435 | 295 | |
duke@435 | 296 | // accessors |
ysr@777 | 297 | bool uses(Register reg) const { return _base == reg || _index == reg; } |
ysr@777 | 298 | Register base() const { return _base; } |
ysr@777 | 299 | Register index() const { return _index; } |
ysr@777 | 300 | ScaleFactor scale() const { return _scale; } |
ysr@777 | 301 | int disp() const { return _disp; } |
duke@435 | 302 | |
duke@435 | 303 | // Convert the raw encoding form into the form expected by the constructor for |
duke@435 | 304 | // Address. An index of 4 (rsp) corresponds to having no index, so convert |
duke@435 | 305 | // that to noreg for the Address constructor. |
coleenp@4037 | 306 | static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc); |
duke@435 | 307 | |
duke@435 | 308 | static Address make_array(ArrayAddress); |
duke@435 | 309 | |
duke@435 | 310 | private: |
duke@435 | 311 | bool base_needs_rex() const { |
duke@435 | 312 | return _base != noreg && _base->encoding() >= 8; |
duke@435 | 313 | } |
duke@435 | 314 | |
duke@435 | 315 | bool index_needs_rex() const { |
duke@435 | 316 | return _index != noreg &&_index->encoding() >= 8; |
duke@435 | 317 | } |
duke@435 | 318 | |
duke@435 | 319 | relocInfo::relocType reloc() const { return _rspec.type(); } |
duke@435 | 320 | |
duke@435 | 321 | friend class Assembler; |
duke@435 | 322 | friend class MacroAssembler; |
duke@435 | 323 | friend class LIR_Assembler; // base/index/scale/disp |
duke@435 | 324 | }; |
duke@435 | 325 | |
duke@435 | 326 | // |
duke@435 | 327 | // AddressLiteral has been split out from Address because operands of this type |
duke@435 | 328 | // need to be treated specially on 32bit vs. 64bit platforms. By splitting it out |
duke@435 | 329 | // the few instructions that need to deal with address literals are unique and the |
duke@435 | 330 | // MacroAssembler does not have to implement every instruction in the Assembler |
duke@435 | 331 | // in order to search for address literals that may need special handling depending |
duke@435 | 332 | // on the instruction and the platform. As small step on the way to merging i486/amd64 |
duke@435 | 333 | // directories. |
duke@435 | 334 | // |
duke@435 | 335 | class AddressLiteral VALUE_OBJ_CLASS_SPEC { |
duke@435 | 336 | friend class ArrayAddress; |
duke@435 | 337 | RelocationHolder _rspec; |
duke@435 | 338 | // Typically we use AddressLiterals we want to use their rval |
duke@435 | 339 | // However in some situations we want the lval (effect address) of the item. |
duke@435 | 340 | // We provide a special factory for making those lvals. |
duke@435 | 341 | bool _is_lval; |
duke@435 | 342 | |
duke@435 | 343 | // If the target is far we'll need to load the ea of this to |
duke@435 | 344 | // a register to reach it. Otherwise if near we can do rip |
duke@435 | 345 | // relative addressing. |
duke@435 | 346 | |
duke@435 | 347 | address _target; |
duke@435 | 348 | |
duke@435 | 349 | protected: |
duke@435 | 350 | // creation |
duke@435 | 351 | AddressLiteral() |
duke@435 | 352 | : _is_lval(false), |
duke@435 | 353 | _target(NULL) |
duke@435 | 354 | {} |
duke@435 | 355 | |
duke@435 | 356 | public: |
duke@435 | 357 | |
duke@435 | 358 | |
duke@435 | 359 | AddressLiteral(address target, relocInfo::relocType rtype); |
duke@435 | 360 | |
duke@435 | 361 | AddressLiteral(address target, RelocationHolder const& rspec) |
duke@435 | 362 | : _rspec(rspec), |
duke@435 | 363 | _is_lval(false), |
duke@435 | 364 | _target(target) |
duke@435 | 365 | {} |
duke@435 | 366 | |
duke@435 | 367 | AddressLiteral addr() { |
duke@435 | 368 | AddressLiteral ret = *this; |
duke@435 | 369 | ret._is_lval = true; |
duke@435 | 370 | return ret; |
duke@435 | 371 | } |
duke@435 | 372 | |
duke@435 | 373 | |
duke@435 | 374 | private: |
duke@435 | 375 | |
duke@435 | 376 | address target() { return _target; } |
duke@435 | 377 | bool is_lval() { return _is_lval; } |
duke@435 | 378 | |
duke@435 | 379 | relocInfo::relocType reloc() const { return _rspec.type(); } |
duke@435 | 380 | const RelocationHolder& rspec() const { return _rspec; } |
duke@435 | 381 | |
duke@435 | 382 | friend class Assembler; |
duke@435 | 383 | friend class MacroAssembler; |
duke@435 | 384 | friend class Address; |
duke@435 | 385 | friend class LIR_Assembler; |
duke@435 | 386 | }; |
duke@435 | 387 | |
duke@435 | 388 | // Convience classes |
duke@435 | 389 | class RuntimeAddress: public AddressLiteral { |
duke@435 | 390 | |
duke@435 | 391 | public: |
duke@435 | 392 | |
duke@435 | 393 | RuntimeAddress(address target) : AddressLiteral(target, relocInfo::runtime_call_type) {} |
duke@435 | 394 | |
duke@435 | 395 | }; |
duke@435 | 396 | |
duke@435 | 397 | class ExternalAddress: public AddressLiteral { |
never@2737 | 398 | private: |
never@2737 | 399 | static relocInfo::relocType reloc_for_target(address target) { |
never@2737 | 400 | // Sometimes ExternalAddress is used for values which aren't |
never@2737 | 401 | // exactly addresses, like the card table base. |
never@2737 | 402 | // external_word_type can't be used for values in the first page |
never@2737 | 403 | // so just skip the reloc in that case. |
never@2737 | 404 | return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none; |
never@2737 | 405 | } |
never@2737 | 406 | |
never@2737 | 407 | public: |
never@2737 | 408 | |
never@2737 | 409 | ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(target)) {} |
duke@435 | 410 | |
duke@435 | 411 | }; |
duke@435 | 412 | |
duke@435 | 413 | class InternalAddress: public AddressLiteral { |
duke@435 | 414 | |
duke@435 | 415 | public: |
duke@435 | 416 | |
duke@435 | 417 | InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {} |
duke@435 | 418 | |
duke@435 | 419 | }; |
duke@435 | 420 | |
duke@435 | 421 | // x86 can do array addressing as a single operation since disp can be an absolute |
duke@435 | 422 | // address amd64 can't. We create a class that expresses the concept but does extra |
duke@435 | 423 | // magic on amd64 to get the final result |
duke@435 | 424 | |
duke@435 | 425 | class ArrayAddress VALUE_OBJ_CLASS_SPEC { |
duke@435 | 426 | private: |
duke@435 | 427 | |
duke@435 | 428 | AddressLiteral _base; |
duke@435 | 429 | Address _index; |
duke@435 | 430 | |
duke@435 | 431 | public: |
duke@435 | 432 | |
duke@435 | 433 | ArrayAddress() {}; |
duke@435 | 434 | ArrayAddress(AddressLiteral base, Address index): _base(base), _index(index) {}; |
duke@435 | 435 | AddressLiteral base() { return _base; } |
duke@435 | 436 | Address index() { return _index; } |
duke@435 | 437 | |
duke@435 | 438 | }; |
duke@435 | 439 | |
never@739 | 440 | const int FPUStateSizeInWords = NOT_LP64(27) LP64_ONLY( 512 / wordSize); |
duke@435 | 441 | |
duke@435 | 442 | // The Intel x86/Amd64 Assembler: Pure assembler doing NO optimizations on the instruction |
duke@435 | 443 | // level (e.g. mov rax, 0 is not translated into xor rax, rax!); i.e., what you write |
duke@435 | 444 | // is what you get. The Assembler is generating code into a CodeBuffer. |
duke@435 | 445 | |
duke@435 | 446 | class Assembler : public AbstractAssembler { |
duke@435 | 447 | friend class AbstractAssembler; // for the non-virtual hack |
duke@435 | 448 | friend class LIR_Assembler; // as_Address() |
never@739 | 449 | friend class StubGenerator; |
duke@435 | 450 | |
duke@435 | 451 | public: |
duke@435 | 452 | enum Condition { // The x86 condition codes used for conditional jumps/moves. |
duke@435 | 453 | zero = 0x4, |
duke@435 | 454 | notZero = 0x5, |
duke@435 | 455 | equal = 0x4, |
duke@435 | 456 | notEqual = 0x5, |
duke@435 | 457 | less = 0xc, |
duke@435 | 458 | lessEqual = 0xe, |
duke@435 | 459 | greater = 0xf, |
duke@435 | 460 | greaterEqual = 0xd, |
duke@435 | 461 | below = 0x2, |
duke@435 | 462 | belowEqual = 0x6, |
duke@435 | 463 | above = 0x7, |
duke@435 | 464 | aboveEqual = 0x3, |
duke@435 | 465 | overflow = 0x0, |
duke@435 | 466 | noOverflow = 0x1, |
duke@435 | 467 | carrySet = 0x2, |
duke@435 | 468 | carryClear = 0x3, |
duke@435 | 469 | negative = 0x8, |
duke@435 | 470 | positive = 0x9, |
duke@435 | 471 | parity = 0xa, |
duke@435 | 472 | noParity = 0xb |
duke@435 | 473 | }; |
duke@435 | 474 | |
duke@435 | 475 | enum Prefix { |
duke@435 | 476 | // segment overrides |
duke@435 | 477 | CS_segment = 0x2e, |
duke@435 | 478 | SS_segment = 0x36, |
duke@435 | 479 | DS_segment = 0x3e, |
duke@435 | 480 | ES_segment = 0x26, |
duke@435 | 481 | FS_segment = 0x64, |
duke@435 | 482 | GS_segment = 0x65, |
duke@435 | 483 | |
duke@435 | 484 | REX = 0x40, |
duke@435 | 485 | |
duke@435 | 486 | REX_B = 0x41, |
duke@435 | 487 | REX_X = 0x42, |
duke@435 | 488 | REX_XB = 0x43, |
duke@435 | 489 | REX_R = 0x44, |
duke@435 | 490 | REX_RB = 0x45, |
duke@435 | 491 | REX_RX = 0x46, |
duke@435 | 492 | REX_RXB = 0x47, |
duke@435 | 493 | |
duke@435 | 494 | REX_W = 0x48, |
duke@435 | 495 | |
duke@435 | 496 | REX_WB = 0x49, |
duke@435 | 497 | REX_WX = 0x4A, |
duke@435 | 498 | REX_WXB = 0x4B, |
duke@435 | 499 | REX_WR = 0x4C, |
duke@435 | 500 | REX_WRB = 0x4D, |
duke@435 | 501 | REX_WRX = 0x4E, |
kvn@3388 | 502 | REX_WRXB = 0x4F, |
kvn@3388 | 503 | |
kvn@3388 | 504 | VEX_3bytes = 0xC4, |
kvn@3388 | 505 | VEX_2bytes = 0xC5 |
kvn@3388 | 506 | }; |
kvn@3388 | 507 | |
kvn@3388 | 508 | enum VexPrefix { |
kvn@3388 | 509 | VEX_B = 0x20, |
kvn@3388 | 510 | VEX_X = 0x40, |
kvn@3388 | 511 | VEX_R = 0x80, |
kvn@3388 | 512 | VEX_W = 0x80 |
kvn@3388 | 513 | }; |
kvn@3388 | 514 | |
kvn@3388 | 515 | enum VexSimdPrefix { |
kvn@3388 | 516 | VEX_SIMD_NONE = 0x0, |
kvn@3388 | 517 | VEX_SIMD_66 = 0x1, |
kvn@3388 | 518 | VEX_SIMD_F3 = 0x2, |
kvn@3388 | 519 | VEX_SIMD_F2 = 0x3 |
kvn@3388 | 520 | }; |
kvn@3388 | 521 | |
kvn@3388 | 522 | enum VexOpcode { |
kvn@3388 | 523 | VEX_OPCODE_NONE = 0x0, |
kvn@3388 | 524 | VEX_OPCODE_0F = 0x1, |
kvn@3388 | 525 | VEX_OPCODE_0F_38 = 0x2, |
kvn@3388 | 526 | VEX_OPCODE_0F_3A = 0x3 |
duke@435 | 527 | }; |
duke@435 | 528 | |
duke@435 | 529 | enum WhichOperand { |
duke@435 | 530 | // input to locate_operand, and format code for relocations |
never@739 | 531 | imm_operand = 0, // embedded 32-bit|64-bit immediate operand |
duke@435 | 532 | disp32_operand = 1, // embedded 32-bit displacement or address |
duke@435 | 533 | call32_operand = 2, // embedded 32-bit self-relative displacement |
never@739 | 534 | #ifndef _LP64 |
duke@435 | 535 | _WhichOperand_limit = 3 |
never@739 | 536 | #else |
never@739 | 537 | narrow_oop_operand = 3, // embedded 32-bit immediate narrow oop |
never@739 | 538 | _WhichOperand_limit = 4 |
never@739 | 539 | #endif |
duke@435 | 540 | }; |
duke@435 | 541 | |
never@739 | 542 | |
never@739 | 543 | |
never@739 | 544 | // NOTE: The general philopsophy of the declarations here is that 64bit versions |
never@739 | 545 | // of instructions are freely declared without the need for wrapping them an ifdef. |
never@739 | 546 | // (Some dangerous instructions are ifdef's out of inappropriate jvm's.) |
never@739 | 547 | // In the .cpp file the implementations are wrapped so that they are dropped out |
zgu@4492 | 548 | // of the resulting jvm. This is done mostly to keep the footprint of MINIMAL |
never@739 | 549 | // to the size it was prior to merging up the 32bit and 64bit assemblers. |
never@739 | 550 | // |
never@739 | 551 | // This does mean you'll get a linker/runtime error if you use a 64bit only instruction |
never@739 | 552 | // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down. |
never@739 | 553 | |
never@739 | 554 | private: |
never@739 | 555 | |
never@739 | 556 | |
never@739 | 557 | // 64bit prefixes |
never@739 | 558 | int prefix_and_encode(int reg_enc, bool byteinst = false); |
never@739 | 559 | int prefixq_and_encode(int reg_enc); |
never@739 | 560 | |
never@739 | 561 | int prefix_and_encode(int dst_enc, int src_enc, bool byteinst = false); |
never@739 | 562 | int prefixq_and_encode(int dst_enc, int src_enc); |
never@739 | 563 | |
never@739 | 564 | void prefix(Register reg); |
never@739 | 565 | void prefix(Address adr); |
never@739 | 566 | void prefixq(Address adr); |
never@739 | 567 | |
never@739 | 568 | void prefix(Address adr, Register reg, bool byteinst = false); |
kvn@3388 | 569 | void prefix(Address adr, XMMRegister reg); |
never@739 | 570 | void prefixq(Address adr, Register reg); |
kvn@3388 | 571 | void prefixq(Address adr, XMMRegister reg); |
never@739 | 572 | |
never@739 | 573 | void prefetch_prefix(Address src); |
never@739 | 574 | |
kvn@3388 | 575 | void rex_prefix(Address adr, XMMRegister xreg, |
kvn@3388 | 576 | VexSimdPrefix pre, VexOpcode opc, bool rex_w); |
kvn@3388 | 577 | int rex_prefix_and_encode(int dst_enc, int src_enc, |
kvn@3388 | 578 | VexSimdPrefix pre, VexOpcode opc, bool rex_w); |
kvn@3388 | 579 | |
kvn@3388 | 580 | void vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, |
kvn@3388 | 581 | int nds_enc, VexSimdPrefix pre, VexOpcode opc, |
kvn@3388 | 582 | bool vector256); |
kvn@3388 | 583 | |
kvn@3388 | 584 | void vex_prefix(Address adr, int nds_enc, int xreg_enc, |
kvn@3388 | 585 | VexSimdPrefix pre, VexOpcode opc, |
kvn@3388 | 586 | bool vex_w, bool vector256); |
kvn@3388 | 587 | |
kvn@3390 | 588 | void vex_prefix(XMMRegister dst, XMMRegister nds, Address src, |
kvn@3390 | 589 | VexSimdPrefix pre, bool vector256 = false) { |
kvn@3882 | 590 | int dst_enc = dst->encoding(); |
kvn@3882 | 591 | int nds_enc = nds->is_valid() ? nds->encoding() : 0; |
kvn@3882 | 592 | vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, false, vector256); |
kvn@3390 | 593 | } |
kvn@3390 | 594 | |
iveresov@6378 | 595 | void vex_prefix_0F38(Register dst, Register nds, Address src) { |
iveresov@6378 | 596 | bool vex_w = false; |
iveresov@6378 | 597 | bool vector256 = false; |
iveresov@6378 | 598 | vex_prefix(src, nds->encoding(), dst->encoding(), |
iveresov@6378 | 599 | VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256); |
iveresov@6378 | 600 | } |
iveresov@6378 | 601 | |
iveresov@6378 | 602 | void vex_prefix_0F38_q(Register dst, Register nds, Address src) { |
iveresov@6378 | 603 | bool vex_w = true; |
iveresov@6378 | 604 | bool vector256 = false; |
iveresov@6378 | 605 | vex_prefix(src, nds->encoding(), dst->encoding(), |
iveresov@6378 | 606 | VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256); |
iveresov@6378 | 607 | } |
kvn@3388 | 608 | int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, |
kvn@3388 | 609 | VexSimdPrefix pre, VexOpcode opc, |
kvn@3388 | 610 | bool vex_w, bool vector256); |
kvn@3388 | 611 | |
iveresov@6378 | 612 | int vex_prefix_0F38_and_encode(Register dst, Register nds, Register src) { |
iveresov@6378 | 613 | bool vex_w = false; |
iveresov@6378 | 614 | bool vector256 = false; |
iveresov@6378 | 615 | return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), |
iveresov@6378 | 616 | VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256); |
iveresov@6378 | 617 | } |
iveresov@6378 | 618 | int vex_prefix_0F38_and_encode_q(Register dst, Register nds, Register src) { |
iveresov@6378 | 619 | bool vex_w = true; |
iveresov@6378 | 620 | bool vector256 = false; |
iveresov@6378 | 621 | return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), |
iveresov@6378 | 622 | VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256); |
iveresov@6378 | 623 | } |
kvn@3390 | 624 | int vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, |
kvn@3882 | 625 | VexSimdPrefix pre, bool vector256 = false, |
kvn@3882 | 626 | VexOpcode opc = VEX_OPCODE_0F) { |
kvn@3882 | 627 | int src_enc = src->encoding(); |
kvn@3882 | 628 | int dst_enc = dst->encoding(); |
kvn@3882 | 629 | int nds_enc = nds->is_valid() ? nds->encoding() : 0; |
kvn@3882 | 630 | return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, false, vector256); |
kvn@3390 | 631 | } |
kvn@3388 | 632 | |
kvn@3388 | 633 | void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, |
kvn@3388 | 634 | VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F, |
kvn@3388 | 635 | bool rex_w = false, bool vector256 = false); |
kvn@3388 | 636 | |
kvn@3388 | 637 | void simd_prefix(XMMRegister dst, Address src, |
kvn@3388 | 638 | VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) { |
kvn@3388 | 639 | simd_prefix(dst, xnoreg, src, pre, opc); |
kvn@3388 | 640 | } |
kvn@4001 | 641 | |
kvn@3388 | 642 | void simd_prefix(Address dst, XMMRegister src, VexSimdPrefix pre) { |
kvn@3388 | 643 | simd_prefix(src, dst, pre); |
kvn@3388 | 644 | } |
kvn@3388 | 645 | void simd_prefix_q(XMMRegister dst, XMMRegister nds, Address src, // "q" variant: 64-bit operand size via REX.W/VEX.W
kvn@3388 | 646 | VexSimdPrefix pre) {
kvn@3388 | 647 | bool rex_w = true; // force W=1 (64-bit)
kvn@3388 | 648 | simd_prefix(dst, nds, src, pre, VEX_OPCODE_0F, rex_w);
kvn@3388 | 649 | }
kvn@3388 | 650 | |
kvn@3388 | 651 | int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, |
kvn@3388 | 652 | VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F, |
kvn@3388 | 653 | bool rex_w = false, bool vector256 = false); |
kvn@3388 | 654 | |
kvn@3388 | 655 | // Move/convert 32-bit integer value. |
kvn@3388 | 656 | int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, Register src, // GPR source form (e.g. int->XMM moves/converts); returns encoding byte
kvn@3388 | 657 | VexSimdPrefix pre) {
kvn@3388 | 658 | // It is OK to cast from Register to XMMRegister to pass argument here
kvn@3388 | 659 | // since only encoding is used in simd_prefix_and_encode() and number of
kvn@3388 | 660 | // Gen and Xmm registers are the same.
kvn@3388 | 661 | return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre);
kvn@3388 | 662 | }
kvn@3388 | 663 | int simd_prefix_and_encode(XMMRegister dst, Register src, VexSimdPrefix pre) { // two-operand form: no NDS register, so pass xnoreg
kvn@3388 | 664 | return simd_prefix_and_encode(dst, xnoreg, src, pre);
kvn@3388 | 665 | }
kvn@3388 | 666 | int simd_prefix_and_encode(Register dst, XMMRegister src, // GPR destination form (e.g. XMM->int moves/converts)
kvn@3388 | 667 | VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
kvn@3388 | 668 | return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, opc); // same Register->XMMRegister encoding cast as above
kvn@3388 | 669 | }
kvn@3388 | 670 | |
kvn@3388 | 671 | // Move/convert 64-bit integer value. |
kvn@3388 | 672 | int simd_prefix_and_encode_q(XMMRegister dst, XMMRegister nds, Register src, // 64-bit GPR source form (W=1)
kvn@3388 | 673 | VexSimdPrefix pre) {
kvn@3388 | 674 | bool rex_w = true; // force REX.W/VEX.W for 64-bit operand size
kvn@3388 | 675 | return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre, VEX_OPCODE_0F, rex_w);
kvn@3388 | 676 | }
kvn@3388 | 677 | int simd_prefix_and_encode_q(XMMRegister dst, Register src, VexSimdPrefix pre) { // two-operand 64-bit form: no NDS register
kvn@3388 | 678 | return simd_prefix_and_encode_q(dst, xnoreg, src, pre);
kvn@3388 | 679 | }
kvn@3388 | 680 | int simd_prefix_and_encode_q(Register dst, XMMRegister src, // 64-bit GPR destination form (W=1)
kvn@3388 | 681 | VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
kvn@3388 | 682 | bool rex_w = true; // force REX.W/VEX.W for 64-bit operand size
kvn@3388 | 683 | return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, opc, rex_w);
kvn@3388 | 684 | }
kvn@3388 | 685 | |
never@739 | 686 | // Helper functions for groups of instructions |
never@739 | 687 | void emit_arith_b(int op1, int op2, Register dst, int imm8); |
never@739 | 688 | |
never@739 | 689 | void emit_arith(int op1, int op2, Register dst, int32_t imm32); |
kvn@3574 | 690 | // Force generation of a 4 byte immediate value even if it fits into 8bit |
kvn@3574 | 691 | void emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32); |
never@739 | 692 | void emit_arith(int op1, int op2, Register dst, Register src); |
never@739 | 693 | |
kvn@4001 | 694 | void emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre); |
kvn@4001 | 695 | void emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre); |
kvn@4001 | 696 | void emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre); |
kvn@4001 | 697 | void emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre); |
kvn@4001 | 698 | void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds, |
kvn@4001 | 699 | Address src, VexSimdPrefix pre, bool vector256); |
kvn@4001 | 700 | void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds, |
kvn@4001 | 701 | XMMRegister src, VexSimdPrefix pre, bool vector256); |
kvn@4001 | 702 | |
never@739 | 703 | void emit_operand(Register reg, |
never@739 | 704 | Register base, Register index, Address::ScaleFactor scale, |
never@739 | 705 | int disp, |
never@739 | 706 | RelocationHolder const& rspec, |
never@739 | 707 | int rip_relative_correction = 0); |
never@739 | 708 | |
never@739 | 709 | void emit_operand(Register reg, Address adr, int rip_relative_correction = 0); |
never@739 | 710 | |
never@739 | 711 | // operands that only take the original 32bit registers |
never@739 | 712 | void emit_operand32(Register reg, Address adr); |
never@739 | 713 | |
never@739 | 714 | void emit_operand(XMMRegister reg, |
never@739 | 715 | Register base, Register index, Address::ScaleFactor scale, |
never@739 | 716 | int disp, |
never@739 | 717 | RelocationHolder const& rspec); |
never@739 | 718 | |
never@739 | 719 | void emit_operand(XMMRegister reg, Address adr); |
never@739 | 720 | |
never@739 | 721 | void emit_operand(MMXRegister reg, Address adr); |
never@739 | 722 | |
never@739 | 723 | // workaround gcc (3.2.1-7) bug |
never@739 | 724 | void emit_operand(Address adr, MMXRegister reg); |
never@739 | 725 | |
never@739 | 726 | |
never@739 | 727 | // Immediate-to-memory forms |
never@739 | 728 | void emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32); |
never@739 | 729 | |
never@739 | 730 | void emit_farith(int b1, int b2, int i); |
never@739 | 731 | |
duke@435 | 732 | |
duke@435 | 733 | protected: |
never@739 | 734 | #ifdef ASSERT |
never@739 | 735 | void check_relocation(RelocationHolder const& rspec, int format); |
never@739 | 736 | #endif |
never@739 | 737 | |
never@739 | 738 | void emit_data(jint data, relocInfo::relocType rtype, int format); |
never@739 | 739 | void emit_data(jint data, RelocationHolder const& rspec, int format); |
never@739 | 740 | void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0); |
never@739 | 741 | void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0); |
never@739 | 742 | |
never@739 | 743 | bool reachable(AddressLiteral adr) NOT_LP64({ return true;}); |
never@739 | 744 | |
never@739 | 745 | // These are all easily abused and hence protected |
never@739 | 746 | |
never@739 | 747 | // 32BIT ONLY SECTION |
never@739 | 748 | #ifndef _LP64 |
never@739 | 749 | // Make these disappear in 64bit mode since they would never be correct |
never@739 | 750 | void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY |
never@739 | 751 | void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY |
never@739 | 752 | |
kvn@1077 | 753 | void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY |
never@739 | 754 | void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY |
never@739 | 755 | |
never@739 | 756 | void push_literal32(int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY |
never@739 | 757 | #else |
never@739 | 758 | // 64BIT ONLY SECTION |
never@739 | 759 | void mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec); // 64BIT ONLY |
kvn@1077 | 760 | |
kvn@1077 | 761 | void cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec); |
kvn@1077 | 762 | void cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec); |
kvn@1077 | 763 | |
kvn@1077 | 764 | void mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec); |
kvn@1077 | 765 | void mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec); |
never@739 | 766 | #endif // _LP64 |
never@739 | 767 | |
never@739 | 768 | // These are unique in that we are ensured by the caller that the 32bit |
never@739 | 769 | // relative in these instructions will always be able to reach the potentially |
never@739 | 770 | // 64bit address described by entry. Since they can take a 64bit address they |
never@739 | 771 | // don't have the 32 suffix like the other instructions in this class. |
never@739 | 772 | |
never@739 | 773 | void call_literal(address entry, RelocationHolder const& rspec); |
never@739 | 774 | void jmp_literal(address entry, RelocationHolder const& rspec); |
never@739 | 775 | |
never@739 | 776 | // Avoid using directly section |
never@739 | 777 | // Instructions in this section are actually usable by anyone without danger |
never@739 | 778 | // of failure but have performance issues that are addressed by enhanced
never@739 | 779 | // instructions which will do the proper thing based on the particular cpu.
never@739 | 780 | // We protect them because we don't trust you... |
never@739 | 781 | |
duke@435 | 782 | // Don't use next inc() and dec() methods directly. INC & DEC instructions |
duke@435 | 783 | // could cause a partial flag stall since they don't set CF flag. |
duke@435 | 784 | // Use MacroAssembler::decrement() & MacroAssembler::increment() methods |
duke@435 | 785 | // which call inc() & dec() or add() & sub() in accordance with |
duke@435 | 786 | // the product flag UseIncDec value. |
duke@435 | 787 | |
duke@435 | 788 | void decl(Register dst); |
duke@435 | 789 | void decl(Address dst); |
never@739 | 790 | void decq(Register dst); |
never@739 | 791 | void decq(Address dst); |
duke@435 | 792 | |
duke@435 | 793 | void incl(Register dst); |
duke@435 | 794 | void incl(Address dst); |
never@739 | 795 | void incq(Register dst); |
never@739 | 796 | void incq(Address dst); |
never@739 | 797 | |
never@739 | 798 | // New cpus require use of movsd and movss to avoid partial register stall |
never@739 | 799 | // when loading from memory. But for old Opteron use movlpd instead of movsd. |
never@739 | 800 | // The selection is done in MacroAssembler::movdbl() and movflt(). |
never@739 | 801 | |
never@739 | 802 | // Move Scalar Single-Precision Floating-Point Values |
never@739 | 803 | void movss(XMMRegister dst, Address src); |
never@739 | 804 | void movss(XMMRegister dst, XMMRegister src); |
never@739 | 805 | void movss(Address dst, XMMRegister src); |
never@739 | 806 | |
never@739 | 807 | // Move Scalar Double-Precision Floating-Point Values |
never@739 | 808 | void movsd(XMMRegister dst, Address src); |
never@739 | 809 | void movsd(XMMRegister dst, XMMRegister src); |
never@739 | 810 | void movsd(Address dst, XMMRegister src); |
never@739 | 811 | void movlpd(XMMRegister dst, Address src); |
never@739 | 812 | |
never@739 | 813 | // New cpus require use of movaps and movapd to avoid partial register stall |
never@739 | 814 | // when moving between registers. |
never@739 | 815 | void movaps(XMMRegister dst, XMMRegister src); |
never@739 | 816 | void movapd(XMMRegister dst, XMMRegister src); |
never@739 | 817 | |
never@739 | 818 | // End avoid using directly |
never@739 | 819 | |
never@739 | 820 | |
never@739 | 821 | // Instruction prefixes |
never@739 | 822 | void prefix(Prefix p); |
never@739 | 823 | |
never@739 | 824 | public: |
never@739 | 825 | |
never@739 | 826 | // Creation |
never@739 | 827 | Assembler(CodeBuffer* code) : AbstractAssembler(code) {} // emits into the given CodeBuffer; no state beyond the base class
never@739 | 828 | |
never@739 | 829 | // Decoding |
never@739 | 830 | static address locate_operand(address inst, WhichOperand which); |
never@739 | 831 | static address locate_next_instruction(address inst); |
never@739 | 832 | |
never@739 | 833 | // Utilities |
iveresov@2686 | 834 | static bool is_polling_page_far() NOT_LP64({ return false;}); |
iveresov@2686 | 835 | |
never@739 | 836 | // Generic instructions |
never@739 | 837 | // Does 32bit or 64bit as needed for the platform. In some sense these |
never@739 | 838 | // belong in macro assembler but there is no need for both varieties to exist |
never@739 | 839 | |
never@739 | 840 | void lea(Register dst, Address src); |
never@739 | 841 | |
never@739 | 842 | void mov(Register dst, Register src); |
never@739 | 843 | |
never@739 | 844 | void pusha(); |
never@739 | 845 | void popa(); |
never@739 | 846 | |
never@739 | 847 | void pushf(); |
never@739 | 848 | void popf(); |
never@739 | 849 | |
never@739 | 850 | void push(int32_t imm32); |
never@739 | 851 | |
never@739 | 852 | void push(Register src); |
never@739 | 853 | |
never@739 | 854 | void pop(Register dst); |
never@739 | 855 | |
never@739 | 856 | // These are dummies to prevent surprise implicit conversions to Register |
never@739 | 857 | void push(void* v); |
never@739 | 858 | void pop(void* v); |
never@739 | 859 | |
never@739 | 860 | // These do register sized moves/scans |
never@739 | 861 | void rep_mov(); |
kvn@4410 | 862 | void rep_stos(); |
kvn@4410 | 863 | void rep_stosb(); |
never@739 | 864 | void repne_scan(); |
never@739 | 865 | #ifdef _LP64 |
never@739 | 866 | void repne_scanl(); |
never@739 | 867 | #endif |
never@739 | 868 | |
never@739 | 869 | // Vanilla instructions in lexical order |
never@739 | 870 | |
phh@2423 | 871 | void adcl(Address dst, int32_t imm32); |
phh@2423 | 872 | void adcl(Address dst, Register src); |
never@739 | 873 | void adcl(Register dst, int32_t imm32); |
never@739 | 874 | void adcl(Register dst, Address src); |
never@739 | 875 | void adcl(Register dst, Register src); |
never@739 | 876 | |
never@739 | 877 | void adcq(Register dst, int32_t imm32); |
never@739 | 878 | void adcq(Register dst, Address src); |
never@739 | 879 | void adcq(Register dst, Register src); |
never@739 | 880 | |
never@739 | 881 | void addl(Address dst, int32_t imm32); |
never@739 | 882 | void addl(Address dst, Register src); |
never@739 | 883 | void addl(Register dst, int32_t imm32); |
never@739 | 884 | void addl(Register dst, Address src); |
never@739 | 885 | void addl(Register dst, Register src); |
never@739 | 886 | |
never@739 | 887 | void addq(Address dst, int32_t imm32); |
never@739 | 888 | void addq(Address dst, Register src); |
never@739 | 889 | void addq(Register dst, int32_t imm32); |
never@739 | 890 | void addq(Register dst, Address src); |
never@739 | 891 | void addq(Register dst, Register src); |
never@739 | 892 | |
kvn@7152 | 893 | #ifdef _LP64 |
kvn@7152 | 894 | //Add Unsigned Integers with Carry Flag |
kvn@7152 | 895 | void adcxq(Register dst, Register src); |
kvn@7152 | 896 | |
kvn@7152 | 897 | //Add Unsigned Integers with Overflow Flag |
kvn@7152 | 898 | void adoxq(Register dst, Register src); |
kvn@7152 | 899 | #endif |
kvn@7152 | 900 | |
duke@435 | 901 | void addr_nop_4(); |
duke@435 | 902 | void addr_nop_5(); |
duke@435 | 903 | void addr_nop_7(); |
duke@435 | 904 | void addr_nop_8(); |
duke@435 | 905 | |
never@739 | 906 | // Add Scalar Double-Precision Floating-Point Values |
never@739 | 907 | void addsd(XMMRegister dst, Address src); |
never@739 | 908 | void addsd(XMMRegister dst, XMMRegister src); |
never@739 | 909 | |
never@739 | 910 | // Add Scalar Single-Precision Floating-Point Values |
never@739 | 911 | void addss(XMMRegister dst, Address src); |
never@739 | 912 | void addss(XMMRegister dst, XMMRegister src); |
never@739 | 913 | |
kvn@4205 | 914 | // AES instructions |
kvn@4205 | 915 | void aesdec(XMMRegister dst, Address src); |
kvn@4205 | 916 | void aesdec(XMMRegister dst, XMMRegister src); |
kvn@4205 | 917 | void aesdeclast(XMMRegister dst, Address src); |
kvn@4205 | 918 | void aesdeclast(XMMRegister dst, XMMRegister src); |
kvn@4205 | 919 | void aesenc(XMMRegister dst, Address src); |
kvn@4205 | 920 | void aesenc(XMMRegister dst, XMMRegister src); |
kvn@4205 | 921 | void aesenclast(XMMRegister dst, Address src); |
kvn@4205 | 922 | void aesenclast(XMMRegister dst, XMMRegister src); |
kvn@4205 | 923 | |
kvn@4205 | 924 | |
kvn@3388 | 925 | void andl(Address dst, int32_t imm32); |
never@739 | 926 | void andl(Register dst, int32_t imm32); |
never@739 | 927 | void andl(Register dst, Address src); |
never@739 | 928 | void andl(Register dst, Register src); |
never@739 | 929 | |
never@2980 | 930 | void andq(Address dst, int32_t imm32); |
never@739 | 931 | void andq(Register dst, int32_t imm32); |
never@739 | 932 | void andq(Register dst, Address src); |
never@739 | 933 | void andq(Register dst, Register src); |
never@739 | 934 | |
iveresov@6378 | 935 | // BMI instructions |
iveresov@6378 | 936 | void andnl(Register dst, Register src1, Register src2); |
iveresov@6378 | 937 | void andnl(Register dst, Register src1, Address src2); |
iveresov@6378 | 938 | void andnq(Register dst, Register src1, Register src2); |
iveresov@6378 | 939 | void andnq(Register dst, Register src1, Address src2); |
iveresov@6378 | 940 | |
iveresov@6378 | 941 | void blsil(Register dst, Register src); |
iveresov@6378 | 942 | void blsil(Register dst, Address src); |
iveresov@6378 | 943 | void blsiq(Register dst, Register src); |
iveresov@6378 | 944 | void blsiq(Register dst, Address src); |
iveresov@6378 | 945 | |
iveresov@6378 | 946 | void blsmskl(Register dst, Register src); |
iveresov@6378 | 947 | void blsmskl(Register dst, Address src); |
iveresov@6378 | 948 | void blsmskq(Register dst, Register src); |
iveresov@6378 | 949 | void blsmskq(Register dst, Address src); |
iveresov@6378 | 950 | |
iveresov@6378 | 951 | void blsrl(Register dst, Register src); |
iveresov@6378 | 952 | void blsrl(Register dst, Address src); |
iveresov@6378 | 953 | void blsrq(Register dst, Register src); |
iveresov@6378 | 954 | void blsrq(Register dst, Address src); |
iveresov@6378 | 955 | |
twisti@1210 | 956 | void bsfl(Register dst, Register src); |
twisti@1210 | 957 | void bsrl(Register dst, Register src); |
twisti@1210 | 958 | |
twisti@1210 | 959 | #ifdef _LP64 |
twisti@1210 | 960 | void bsfq(Register dst, Register src); |
twisti@1210 | 961 | void bsrq(Register dst, Register src); |
twisti@1210 | 962 | #endif |
twisti@1210 | 963 | |
never@739 | 964 | void bswapl(Register reg); |
never@739 | 965 | |
never@739 | 966 | void bswapq(Register reg); |
never@739 | 967 | |
duke@435 | 968 | void call(Label& L, relocInfo::relocType rtype); |
duke@435 | 969 | void call(Register reg); // push pc; pc <- reg |
duke@435 | 970 | void call(Address adr); // push pc; pc <- adr |
duke@435 | 971 | |
never@739 | 972 | void cdql(); |
never@739 | 973 | |
never@739 | 974 | void cdqq(); |
never@739 | 975 | |
twisti@4318 | 976 | void cld(); |
never@739 | 977 | |
never@739 | 978 | void clflush(Address adr); |
never@739 | 979 | |
never@739 | 980 | void cmovl(Condition cc, Register dst, Register src); |
never@739 | 981 | void cmovl(Condition cc, Register dst, Address src); |
never@739 | 982 | |
never@739 | 983 | void cmovq(Condition cc, Register dst, Register src); |
never@739 | 984 | void cmovq(Condition cc, Register dst, Address src); |
never@739 | 985 | |
never@739 | 986 | |
never@739 | 987 | void cmpb(Address dst, int imm8); |
never@739 | 988 | |
never@739 | 989 | void cmpl(Address dst, int32_t imm32); |
never@739 | 990 | |
never@739 | 991 | void cmpl(Register dst, int32_t imm32); |
never@739 | 992 | void cmpl(Register dst, Register src); |
never@739 | 993 | void cmpl(Register dst, Address src); |
never@739 | 994 | |
never@739 | 995 | void cmpq(Address dst, int32_t imm32); |
never@739 | 996 | void cmpq(Address dst, Register src); |
never@739 | 997 | |
never@739 | 998 | void cmpq(Register dst, int32_t imm32); |
never@739 | 999 | void cmpq(Register dst, Register src); |
never@739 | 1000 | void cmpq(Register dst, Address src); |
never@739 | 1001 | |
never@739 | 1002 | // these are dummies used to catch attempting to convert NULL to Register |
never@739 | 1003 | void cmpl(Register dst, void* junk); // dummy |
never@739 | 1004 | void cmpq(Register dst, void* junk); // dummy |
never@739 | 1005 | |
never@739 | 1006 | void cmpw(Address dst, int imm16); |
never@739 | 1007 | |
never@739 | 1008 | void cmpxchg8 (Address adr); |
never@739 | 1009 | |
never@739 | 1010 | void cmpxchgl(Register reg, Address adr); |
never@739 | 1011 | |
never@739 | 1012 | void cmpxchgq(Register reg, Address adr); |
never@739 | 1013 | |
never@739 | 1014 | // Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS |
never@739 | 1015 | void comisd(XMMRegister dst, Address src); |
kvn@3388 | 1016 | void comisd(XMMRegister dst, XMMRegister src); |
never@739 | 1017 | |
never@739 | 1018 | // Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS |
never@739 | 1019 | void comiss(XMMRegister dst, Address src); |
kvn@3388 | 1020 | void comiss(XMMRegister dst, XMMRegister src); |
never@739 | 1021 | |
never@739 | 1022 | // Identify processor type and features |
twisti@4318 | 1023 | void cpuid(); |
never@739 | 1024 | |
never@739 | 1025 | // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value |
never@739 | 1026 | void cvtsd2ss(XMMRegister dst, XMMRegister src); |
kvn@3388 | 1027 | void cvtsd2ss(XMMRegister dst, Address src); |
never@739 | 1028 | |
never@739 | 1029 | // Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value |
never@739 | 1030 | void cvtsi2sdl(XMMRegister dst, Register src); |
kvn@3388 | 1031 | void cvtsi2sdl(XMMRegister dst, Address src); |
never@739 | 1032 | void cvtsi2sdq(XMMRegister dst, Register src); |
kvn@3388 | 1033 | void cvtsi2sdq(XMMRegister dst, Address src); |
never@739 | 1034 | |
never@739 | 1035 | // Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value |
never@739 | 1036 | void cvtsi2ssl(XMMRegister dst, Register src); |
kvn@3388 | 1037 | void cvtsi2ssl(XMMRegister dst, Address src); |
never@739 | 1038 | void cvtsi2ssq(XMMRegister dst, Register src); |
kvn@3388 | 1039 | void cvtsi2ssq(XMMRegister dst, Address src); |
never@739 | 1040 | |
never@739 | 1041 | // Convert Packed Signed Doubleword Integers to Packed Double-Precision Floating-Point Value |
never@739 | 1042 | void cvtdq2pd(XMMRegister dst, XMMRegister src); |
never@739 | 1043 | |
never@739 | 1044 | // Convert Packed Signed Doubleword Integers to Packed Single-Precision Floating-Point Value |
never@739 | 1045 | void cvtdq2ps(XMMRegister dst, XMMRegister src); |
never@739 | 1046 | |
never@739 | 1047 | // Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value |
never@739 | 1048 | void cvtss2sd(XMMRegister dst, XMMRegister src); |
kvn@3388 | 1049 | void cvtss2sd(XMMRegister dst, Address src); |
never@739 | 1050 | |
never@739 | 1051 | // Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer |
never@739 | 1052 | void cvttsd2sil(Register dst, Address src); |
never@739 | 1053 | void cvttsd2sil(Register dst, XMMRegister src); |
never@739 | 1054 | void cvttsd2siq(Register dst, XMMRegister src); |
never@739 | 1055 | |
never@739 | 1056 | // Convert with Truncation Scalar Single-Precision Floating-Point Value to Doubleword Integer |
never@739 | 1057 | void cvttss2sil(Register dst, XMMRegister src); |
never@739 | 1058 | void cvttss2siq(Register dst, XMMRegister src); |
never@739 | 1059 | |
never@739 | 1060 | // Divide Scalar Double-Precision Floating-Point Values |
never@739 | 1061 | void divsd(XMMRegister dst, Address src); |
never@739 | 1062 | void divsd(XMMRegister dst, XMMRegister src); |
never@739 | 1063 | |
never@739 | 1064 | // Divide Scalar Single-Precision Floating-Point Values |
never@739 | 1065 | void divss(XMMRegister dst, Address src); |
never@739 | 1066 | void divss(XMMRegister dst, XMMRegister src); |
never@739 | 1067 | |
never@739 | 1068 | void emms(); |
never@739 | 1069 | |
never@739 | 1070 | void fabs(); |
never@739 | 1071 | |
never@739 | 1072 | void fadd(int i); |
never@739 | 1073 | |
never@739 | 1074 | void fadd_d(Address src); |
never@739 | 1075 | void fadd_s(Address src); |
never@739 | 1076 | |
never@739 | 1077 | // "Alternate" versions of x87 instructions place result down in FPU |
never@739 | 1078 | // stack instead of on TOS |
never@739 | 1079 | |
never@739 | 1080 | void fadda(int i); // "alternate" fadd |
never@739 | 1081 | void faddp(int i = 1); |
never@739 | 1082 | |
never@739 | 1083 | void fchs(); |
never@739 | 1084 | |
never@739 | 1085 | void fcom(int i); |
never@739 | 1086 | |
never@739 | 1087 | void fcomp(int i = 1); |
never@739 | 1088 | void fcomp_d(Address src); |
never@739 | 1089 | void fcomp_s(Address src); |
never@739 | 1090 | |
never@739 | 1091 | void fcompp(); |
never@739 | 1092 | |
never@739 | 1093 | void fcos(); |
never@739 | 1094 | |
never@739 | 1095 | void fdecstp(); |
never@739 | 1096 | |
never@739 | 1097 | void fdiv(int i); |
never@739 | 1098 | void fdiv_d(Address src); |
never@739 | 1099 | void fdivr_s(Address src); |
never@739 | 1100 | void fdiva(int i); // "alternate" fdiv |
never@739 | 1101 | void fdivp(int i = 1); |
never@739 | 1102 | |
never@739 | 1103 | void fdivr(int i); |
never@739 | 1104 | void fdivr_d(Address src); |
never@739 | 1105 | void fdiv_s(Address src); |
never@739 | 1106 | |
never@739 | 1107 | void fdivra(int i); // "alternate" reversed fdiv |
never@739 | 1108 | |
never@739 | 1109 | void fdivrp(int i = 1); |
never@739 | 1110 | |
never@739 | 1111 | void ffree(int i = 0); |
never@739 | 1112 | |
never@739 | 1113 | void fild_d(Address adr); |
never@739 | 1114 | void fild_s(Address adr); |
never@739 | 1115 | |
never@739 | 1116 | void fincstp(); |
never@739 | 1117 | |
never@739 | 1118 | void finit(); |
never@739 | 1119 | |
never@739 | 1120 | void fist_s (Address adr); |
never@739 | 1121 | void fistp_d(Address adr); |
never@739 | 1122 | void fistp_s(Address adr); |
never@739 | 1123 | |
never@739 | 1124 | void fld1(); |
never@739 | 1125 | |
never@739 | 1126 | void fld_d(Address adr); |
never@739 | 1127 | void fld_s(Address adr); |
never@739 | 1128 | void fld_s(int index); |
never@739 | 1129 | void fld_x(Address adr); // extended-precision (80-bit) format |
never@739 | 1130 | |
never@739 | 1131 | void fldcw(Address src); |
never@739 | 1132 | |
never@739 | 1133 | void fldenv(Address src); |
never@739 | 1134 | |
never@739 | 1135 | void fldlg2(); |
never@739 | 1136 | |
never@739 | 1137 | void fldln2(); |
never@739 | 1138 | |
never@739 | 1139 | void fldz(); |
never@739 | 1140 | |
never@739 | 1141 | void flog(); |
never@739 | 1142 | void flog10(); |
never@739 | 1143 | |
never@739 | 1144 | void fmul(int i); |
never@739 | 1145 | |
never@739 | 1146 | void fmul_d(Address src); |
never@739 | 1147 | void fmul_s(Address src); |
never@739 | 1148 | |
never@739 | 1149 | void fmula(int i); // "alternate" fmul |
never@739 | 1150 | |
never@739 | 1151 | void fmulp(int i = 1); |
never@739 | 1152 | |
never@739 | 1153 | void fnsave(Address dst); |
never@739 | 1154 | |
never@739 | 1155 | void fnstcw(Address src); |
never@739 | 1156 | |
never@739 | 1157 | void fnstsw_ax(); |
never@739 | 1158 | |
never@739 | 1159 | void fprem(); |
never@739 | 1160 | void fprem1(); |
never@739 | 1161 | |
never@739 | 1162 | void frstor(Address src); |
never@739 | 1163 | |
never@739 | 1164 | void fsin(); |
never@739 | 1165 | |
never@739 | 1166 | void fsqrt(); |
never@739 | 1167 | |
never@739 | 1168 | void fst_d(Address adr); |
never@739 | 1169 | void fst_s(Address adr); |
never@739 | 1170 | |
never@739 | 1171 | void fstp_d(Address adr); |
never@739 | 1172 | void fstp_d(int index); |
never@739 | 1173 | void fstp_s(Address adr); |
never@739 | 1174 | void fstp_x(Address adr); // extended-precision (80-bit) format |
never@739 | 1175 | |
never@739 | 1176 | void fsub(int i); |
never@739 | 1177 | void fsub_d(Address src); |
never@739 | 1178 | void fsub_s(Address src); |
never@739 | 1179 | |
never@739 | 1180 | void fsuba(int i); // "alternate" fsub |
never@739 | 1181 | |
never@739 | 1182 | void fsubp(int i = 1); |
never@739 | 1183 | |
never@739 | 1184 | void fsubr(int i); |
never@739 | 1185 | void fsubr_d(Address src); |
never@739 | 1186 | void fsubr_s(Address src); |
never@739 | 1187 | |
never@739 | 1188 | void fsubra(int i); // "alternate" reversed fsub |
never@739 | 1189 | |
never@739 | 1190 | void fsubrp(int i = 1); |
never@739 | 1191 | |
never@739 | 1192 | void ftan(); |
never@739 | 1193 | |
never@739 | 1194 | void ftst(); |
never@739 | 1195 | |
never@739 | 1196 | void fucomi(int i = 1); |
never@739 | 1197 | void fucomip(int i = 1); |
never@739 | 1198 | |
never@739 | 1199 | void fwait(); |
never@739 | 1200 | |
never@739 | 1201 | void fxch(int i = 1); |
never@739 | 1202 | |
never@739 | 1203 | void fxrstor(Address src); |
never@739 | 1204 | |
never@739 | 1205 | void fxsave(Address dst); |
never@739 | 1206 | |
never@739 | 1207 | void fyl2x(); |
roland@3787 | 1208 | void frndint(); |
roland@3787 | 1209 | void f2xm1(); |
roland@3787 | 1210 | void fldl2e(); |
never@739 | 1211 | |
never@739 | 1212 | void hlt(); |
never@739 | 1213 | |
never@739 | 1214 | void idivl(Register src); |
kvn@2275 | 1215 | void divl(Register src); // Unsigned division |
never@739 | 1216 | |
kvn@7152 | 1217 | #ifdef _LP64 |
never@739 | 1218 | void idivq(Register src); |
kvn@7152 | 1219 | #endif |
never@739 | 1220 | |
never@739 | 1221 | void imull(Register dst, Register src); |
never@739 | 1222 | void imull(Register dst, Register src, int value); |
rbackman@5997 | 1223 | void imull(Register dst, Address src); |
never@739 | 1224 | |
kvn@7152 | 1225 | #ifdef _LP64 |
never@739 | 1226 | void imulq(Register dst, Register src); |
never@739 | 1227 | void imulq(Register dst, Register src, int value); |
rbackman@5997 | 1228 | void imulq(Register dst, Address src); |
rbackman@5997 | 1229 | #endif |
never@739 | 1230 | |
duke@435 | 1231 | // jcc is the generic conditional branch generator to run- |
duke@435 | 1232 | // time routines, jcc is used for branches to labels. jcc |
duke@435 | 1233 | // takes a branch opcode (cc) and a label (L) and generates |
duke@435 | 1234 | // either a backward branch or a forward branch and links it |
duke@435 | 1235 | // to the label fixup chain. Usage: |
duke@435 | 1236 | // |
duke@435 | 1237 | // Label L; // unbound label |
duke@435 | 1238 | // jcc(cc, L); // forward branch to unbound label |
duke@435 | 1239 | // bind(L); // bind label to the current pc |
duke@435 | 1240 | // jcc(cc, L); // backward branch to bound label |
duke@435 | 1241 | // bind(L); // illegal: a label may be bound only once |
duke@435 | 1242 | // |
duke@435 | 1243 | // Note: The same Label can be used for forward and backward branches |
duke@435 | 1244 | // but it may be bound only once. |
duke@435 | 1245 | |
kvn@3049 | 1246 | void jcc(Condition cc, Label& L, bool maybe_short = true); |
duke@435 | 1247 | |
duke@435 | 1248 | // Conditional jump to a 8-bit offset to L. |
duke@435 | 1249 | // WARNING: be very careful using this for forward jumps. If the label is |
duke@435 | 1250 | // not bound within an 8-bit offset of this instruction, a run-time error |
duke@435 | 1251 | // will occur. |
duke@435 | 1252 | void jccb(Condition cc, Label& L); |
duke@435 | 1253 | |
never@739 | 1254 | void jmp(Address entry); // pc <- entry |
never@739 | 1255 | |
never@739 | 1256 | // Label operations & relative jumps (PPUM Appendix D) |
kvn@3049 | 1257 | void jmp(Label& L, bool maybe_short = true); // unconditional jump to L |
never@739 | 1258 | |
never@739 | 1259 | void jmp(Register entry); // pc <- entry |
never@739 | 1260 | |
never@739 | 1261 | // Unconditional 8-bit offset jump to L. |
never@739 | 1262 | // WARNING: be very careful using this for forward jumps. If the label is |
never@739 | 1263 | // not bound within an 8-bit offset of this instruction, a run-time error |
never@739 | 1264 | // will occur. |
never@739 | 1265 | void jmpb(Label& L); |
never@739 | 1266 | |
never@739 | 1267 | void ldmxcsr( Address src ); |
never@739 | 1268 | |
never@739 | 1269 | void leal(Register dst, Address src); |
never@739 | 1270 | |
never@739 | 1271 | void leaq(Register dst, Address src); |
never@739 | 1272 | |
twisti@4318 | 1273 | void lfence(); |
never@739 | 1274 | |
never@739 | 1275 | void lock(); |
never@739 | 1276 | |
twisti@1210 | 1277 | void lzcntl(Register dst, Register src); |
twisti@1210 | 1278 | |
twisti@1210 | 1279 | #ifdef _LP64 |
twisti@1210 | 1280 | void lzcntq(Register dst, Register src); |
twisti@1210 | 1281 | #endif |
twisti@1210 | 1282 | |
never@739 | 1283 | enum Membar_mask_bits { // bit mask of ordering constraints for membar(); bits may be OR'ed together
never@739 | 1284 | StoreStore = 1 << 3, // order stores before later stores
never@739 | 1285 | LoadStore = 1 << 2, // order loads before later stores
never@739 | 1286 | StoreLoad = 1 << 1, // order stores before later loads (the only one x86 needs a fence for — see membar())
never@739 | 1287 | LoadLoad = 1 << 0 // order loads before later loads
never@739 | 1288 | };
never@739 | 1289 | |
never@1106 | 1290 | // Serializes memory and blows flags |
never@739 | 1291 | void membar(Membar_mask_bits order_constraint) { // emit a memory barrier satisfying the requested constraints; may emit nothing
never@1106 | 1292 | if (os::is_MP()) { // uniprocessor: no fence needed, all constraints hold trivially
never@1106 | 1293 | // We only have to handle StoreLoad
never@1106 | 1294 | if (order_constraint & StoreLoad) {
never@1106 | 1295 | // All usable chips support "locked" instructions which suffice
never@1106 | 1296 | // as barriers, and are much faster than the alternative of
never@1106 | 1297 | // using cpuid instruction. We use here a locked add [esp],0.
never@1106 | 1298 | // This is conveniently otherwise a no-op except for blowing
never@1106 | 1299 | // flags.
never@1106 | 1300 | // Any change to this code may need to revisit other places in
never@1106 | 1301 | // the code where this idiom is used, in particular the
never@1106 | 1302 | // orderAccess code.
never@1106 | 1303 | lock();
never@1106 | 1304 | addl(Address(rsp, 0), 0);// Assert the lock# signal here
never@1106 | 1305 | }
never@1106 | 1306 | }
never@739 | 1307 | }
never@739 | 1308 | |
  // MFENCE - serializes all preceding load and store operations.
  void mfence();

  // Moves

  // Move a full 64-bit immediate into a register.
  void mov64(Register dst, int64_t imm64);

  // Byte moves.
  void movb(Address dst, Register src);
  void movb(Address dst, int imm8);
  void movb(Register dst, Address src);

  // MOVD - move doubleword between XMM register and GP register/memory.
  void movdl(XMMRegister dst, Register src);
  void movdl(Register dst, XMMRegister src);
  void movdl(XMMRegister dst, Address src);
  void movdl(Address dst, XMMRegister src);

  // Move Double Quadword
  void movdq(XMMRegister dst, Register src);
  void movdq(Register dst, XMMRegister src);

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, XMMRegister src);
  void movdqa(XMMRegister dst, Address src);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, XMMRegister src);

  // Move Unaligned 256bit Vector
  void vmovdqu(Address dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, Address src);
  void vmovdqu(XMMRegister dst, XMMRegister src);

  // Move lower 64bit to high 64bit in 128bit register
  void movlhps(XMMRegister dst, XMMRegister src);

  // Doubleword (32-bit) moves.
  void movl(Register dst, int32_t imm32);
  void movl(Address dst, int32_t imm32);
  void movl(Register dst, Register src);
  void movl(Register dst, Address src);
  void movl(Address dst, Register src);

  // These dummies prevent using movl from converting a zero (like NULL) into Register
  // by giving the compiler two choices it can't resolve

  void movl(Address dst, void* junk);
  void movl(Register dst, void* junk);

#ifdef _LP64
  // Quadword (64-bit) moves.
  void movq(Register dst, Register src);
  void movq(Register dst, Address src);
  void movq(Address dst, Register src);
#endif

  // Quadword moves through MMX registers.
  void movq(Address dst, MMXRegister src );
  void movq(MMXRegister dst, Address src );

#ifdef _LP64
  // These dummies prevent using movq from converting a zero (like NULL) into Register
  // by giving the compiler two choices it can't resolve

  void movq(Address dst, void* dummy);
  void movq(Register dst, void* dummy);
#endif

  // Move Quadword
  void movq(Address dst, XMMRegister src);
  void movq(XMMRegister dst, Address src);

  // Move byte with sign extension to 32 bits.
  void movsbl(Register dst, Address src);
  void movsbl(Register dst, Register src);

#ifdef _LP64
  // Move byte with sign extension to 64 bits.
  void movsbq(Register dst, Address src);
  void movsbq(Register dst, Register src);

  // Move signed 32bit immediate to 64bit extending sign
  void movslq(Address dst, int32_t imm64);
  void movslq(Register dst, int32_t imm64);

  // Move doubleword with sign extension to 64 bits.
  void movslq(Register dst, Address src);
  void movslq(Register dst, Register src);
  void movslq(Register dst, void* src); // Dummy declaration to cause NULL to be ambiguous
#endif

  // Move word with sign extension to 32 bits.
  void movswl(Register dst, Address src);
  void movswl(Register dst, Register src);

#ifdef _LP64
  // Move word with sign extension to 64 bits.
  void movswq(Register dst, Address src);
  void movswq(Register dst, Register src);
#endif

  // Word (16-bit) moves.
  void movw(Address dst, int imm16);
  void movw(Register dst, Address src);
  void movw(Address dst, Register src);

  // Move byte with zero extension to 32 bits.
  void movzbl(Register dst, Address src);
  void movzbl(Register dst, Register src);

#ifdef _LP64
  // Move byte with zero extension to 64 bits.
  void movzbq(Register dst, Address src);
  void movzbq(Register dst, Register src);
#endif

  // Move word with zero extension to 32 bits.
  void movzwl(Register dst, Address src);
  void movzwl(Register dst, Register src);

#ifdef _LP64
  // Move word with zero extension to 64 bits.
  void movzwq(Register dst, Address src);
  void movzwq(Register dst, Register src);
#endif
twisti@1059 | 1421 | |
  // Unsigned multiply with RAX destination register
  void mull(Address src);
  void mull(Register src);

#ifdef _LP64
  void mulq(Address src);
  void mulq(Register src);
  // MULX (BMI2): unsigned multiply that does not affect flags.
  void mulxq(Register dst1, Register dst2, Register src);
#endif

  // Multiply Scalar Double-Precision Floating-Point Values
  void mulsd(XMMRegister dst, Address src);
  void mulsd(XMMRegister dst, XMMRegister src);

  // Multiply Scalar Single-Precision Floating-Point Values
  void mulss(XMMRegister dst, Address src);
  void mulss(XMMRegister dst, XMMRegister src);

  // Two's complement negation.
  void negl(Register dst);

#ifdef _LP64
  void negq(Register dst);
#endif

  // No-op padding; 'i' selects the amount emitted (see the .cpp for
  // the exact encoding chosen).
  void nop(int i = 1);

  // One's complement (bitwise NOT).
  void notl(Register dst);

#ifdef _LP64
  void notq(Register dst);
#endif

  // Bitwise OR, 32-bit operands.
  void orl(Address dst, int32_t imm32);
  void orl(Register dst, int32_t imm32);
  void orl(Register dst, Address src);
  void orl(Register dst, Register src);
  void orl(Address dst, Register src);

  // Bitwise OR, 64-bit operands.
  void orq(Address dst, int32_t imm32);
  void orq(Register dst, int32_t imm32);
  void orq(Register dst, Address src);
  void orq(Register dst, Register src);

  // Pack with unsigned saturation
  void packuswb(XMMRegister dst, XMMRegister src);
  void packuswb(XMMRegister dst, Address src);
  void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);

  // Permutation of 64bit words
  void vpermq(XMMRegister dst, XMMRegister src, int imm8, bool vector256);

  // PAUSE - spin-wait loop hint.
  void pause();

  // SSE4.2 string instructions
  void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
  void pcmpestri(XMMRegister xmm1, Address src, int imm8);

  // SSE 4.1 extract
  void pextrd(Register dst, XMMRegister src, int imm8);
  void pextrq(Register dst, XMMRegister src, int imm8);

  // SSE 4.1 insert
  void pinsrd(XMMRegister dst, Register src, int imm8);
  void pinsrq(XMMRegister dst, Register src, int imm8);

  // SSE4.1 packed move
  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

#ifndef _LP64 // no 32bit push/pop on amd64
  void popl(Address dst);
#endif

#ifdef _LP64
  void popq(Address dst);
#endif

  // Population count (POPCNT; requires hardware support).
  void popcntl(Register dst, Address src);
  void popcntl(Register dst, Register src);

#ifdef _LP64
  void popcntq(Register dst, Address src);
  void popcntq(Register dst, Register src);
#endif
twisti@1078 | 1506 | |
  // Prefetches (SSE, SSE2, 3DNOW only)

  void prefetchnta(Address src);
  void prefetchr(Address src);
  void prefetcht0(Address src);
  void prefetcht1(Address src);
  void prefetcht2(Address src);
  void prefetchw(Address src);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src);
  void pshufb(XMMRegister dst, Address src);

  // Shuffle Packed Doublewords
  void pshufd(XMMRegister dst, XMMRegister src, int mode);
  void pshufd(XMMRegister dst, Address src, int mode);

  // Shuffle Packed Low Words
  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src, int mode);

  // Shift Right by bytes Logical DoubleQuadword Immediate
  void psrldq(XMMRegister dst, int shift);
  // Shift Left by bytes Logical DoubleQuadword Immediate
  void pslldq(XMMRegister dst, int shift);

  // Logical Compare 128bit
  void ptest(XMMRegister dst, XMMRegister src);
  void ptest(XMMRegister dst, Address src);
  // Logical Compare 256bit
  void vptest(XMMRegister dst, XMMRegister src);
  void vptest(XMMRegister dst, Address src);

  // Interleave Low Bytes
  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src);

  // Interleave Low Doublewords
  void punpckldq(XMMRegister dst, XMMRegister src);
  void punpckldq(XMMRegister dst, Address src);

  // Interleave Low Quadwords
  void punpcklqdq(XMMRegister dst, XMMRegister src);

#ifndef _LP64 // no 32bit push/pop on amd64
  void pushl(Address src);
#endif

  void pushq(Address src);
never@739 | 1556 | |
  // Rotate left through carry.
  void rcll(Register dst, int imm8);

  void rclq(Register dst, int imm8);

  // Rotate right through carry.
  void rcrq(Register dst, int imm8);

  // Read time-stamp counter into EDX:EAX.
  void rdtsc();

  // Return, popping imm16 extra bytes of arguments off the stack.
  void ret(int imm16);

#ifdef _LP64
  // Rotate right; rorxq (BMI2) additionally does not affect flags.
  void rorq(Register dst, int imm8);
  void rorxq(Register dst, Register src, int imm8);
#endif

  // Store AH into the low byte of EFLAGS.
  void sahf();

  // Arithmetic (sign-preserving) shift right, 32-bit.
  void sarl(Register dst, int imm8);
  void sarl(Register dst);

  // Arithmetic shift right, 64-bit.
  void sarq(Register dst, int imm8);
  void sarq(Register dst);

  // Subtract with borrow, 32-bit operands.
  void sbbl(Address dst, int32_t imm32);
  void sbbl(Register dst, int32_t imm32);
  void sbbl(Register dst, Address src);
  void sbbl(Register dst, Register src);

  // Subtract with borrow, 64-bit operands.
  void sbbq(Address dst, int32_t imm32);
  void sbbq(Register dst, int32_t imm32);
  void sbbq(Register dst, Address src);
  void sbbq(Register dst, Register src);

  // Set byte in dst according to condition cc.
  void setb(Condition cc, Register dst);

  // Double-precision shift left (SHLD), count in CL.
  void shldl(Register dst, Register src);

  // Logical shift left, 32-bit.
  void shll(Register dst, int imm8);
  void shll(Register dst);

  // Logical shift left, 64-bit.
  void shlq(Register dst, int imm8);
  void shlq(Register dst);

  // Double-precision shift right (SHRD), count in CL.
  void shrdl(Register dst, Register src);

  // Logical shift right, 32-bit.
  void shrl(Register dst, int imm8);
  void shrl(Register dst);

  // Logical shift right, 64-bit.
  void shrq(Register dst, int imm8);
  void shrq(Register dst);

  void smovl(); // QQQ generic?

  // Compute Square Root of Scalar Double-Precision Floating-Point Value
  void sqrtsd(XMMRegister dst, Address src);
  void sqrtsd(XMMRegister dst, XMMRegister src);

  // Compute Square Root of Scalar Single-Precision Floating-Point Value
  void sqrtss(XMMRegister dst, Address src);
  void sqrtss(XMMRegister dst, XMMRegister src);

  // Set direction flag.
  void std();

  // Store the MXCSR (SSE control/status) register to memory.
  void stmxcsr( Address dst );

  // Subtract, 32-bit operands.
  void subl(Address dst, int32_t imm32);
  void subl(Address dst, Register src);
  void subl(Register dst, int32_t imm32);
  void subl(Register dst, Address src);
  void subl(Register dst, Register src);

  // Subtract, 64-bit operands.
  void subq(Address dst, int32_t imm32);
  void subq(Address dst, Register src);
  void subq(Register dst, int32_t imm32);
  void subq(Register dst, Address src);
  void subq(Register dst, Register src);

  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subl_imm32(Register dst, int32_t imm32);
  void subq_imm32(Register dst, int32_t imm32);

  // Subtract Scalar Double-Precision Floating-Point Values
  void subsd(XMMRegister dst, Address src);
  void subsd(XMMRegister dst, XMMRegister src);

  // Subtract Scalar Single-Precision Floating-Point Values
  void subss(XMMRegister dst, Address src);
  void subss(XMMRegister dst, XMMRegister src);

  // Logical compare (AND that only sets flags).
  void testb(Register dst, int imm8);

  void testl(Register dst, int32_t imm32);
  void testl(Register dst, Register src);
  void testl(Register dst, Address src);

  void testq(Register dst, int32_t imm32);
  void testq(Register dst, Register src);

  // BMI - count trailing zeros
  void tzcntl(Register dst, Register src);
  void tzcntq(Register dst, Register src);
never@739 | 1658 | |
  // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
  void ucomisd(XMMRegister dst, Address src);
  void ucomisd(XMMRegister dst, XMMRegister src);

  // Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
  void ucomiss(XMMRegister dst, Address src);
  void ucomiss(XMMRegister dst, XMMRegister src);

  // Abort an RTM transaction with the given status code (TSX).
  void xabort(int8_t imm8);

  // Exchange and add; combined with lock() gives an atomic fetch-and-add.
  void xaddl(Address dst, Register src);

  void xaddq(Address dst, Register src);

  // Begin an RTM transaction; control transfers to 'abort' on
  // transactional abort (TSX).
  void xbegin(Label& abort, relocInfo::relocType rtype = relocInfo::none);

  // Exchange register with register/memory contents.
  void xchgl(Register reg, Address adr);
  void xchgl(Register dst, Register src);

  void xchgq(Register reg, Address adr);
  void xchgq(Register dst, Register src);

  // Commit the current RTM transaction (TSX).
  void xend();

  // Get Value of Extended Control Register
  void xgetbv();

  // Bitwise XOR, 32-bit operands.
  void xorl(Register dst, int32_t imm32);
  void xorl(Register dst, Address src);
  void xorl(Register dst, Register src);

  // Bitwise XOR, 64-bit operands.
  void xorq(Register dst, Address src);
  void xorq(Register dst, Register src);

  void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0
kvn@3388 | 1694 | |
  // AVX 3-operands scalar instructions (encoded with VEX prefix)
  // dst receives (nds op src); nds is the first (non-destructive) source.

  void vaddsd(XMMRegister dst, XMMRegister nds, Address src);
  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vaddss(XMMRegister dst, XMMRegister nds, Address src);
  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src);
  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vdivss(XMMRegister dst, XMMRegister nds, Address src);
  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src);
  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vmulss(XMMRegister dst, XMMRegister nds, Address src);
  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src);
  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vsubss(XMMRegister dst, XMMRegister nds, Address src);
  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
kvn@3929 | 1713 | |
kvn@4001 | 1714 | |
  //====================VECTOR ARITHMETIC=====================================

  // In the v* declarations below, the 'vector256' flag selects the 256-bit
  // (YMM) encoding of the operation; false encodes the 128-bit form.

  // Add Packed Floating-Point Values
  void addpd(XMMRegister dst, XMMRegister src);
  void addps(XMMRegister dst, XMMRegister src);
  void vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vaddpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vaddps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);

  // Subtract Packed Floating-Point Values
  void subpd(XMMRegister dst, XMMRegister src);
  void subps(XMMRegister dst, XMMRegister src);
  void vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vsubpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vsubps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);

  // Multiply Packed Floating-Point Values
  void mulpd(XMMRegister dst, XMMRegister src);
  void mulps(XMMRegister dst, XMMRegister src);
  void vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vmulpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vmulps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);

  // Divide Packed Floating-Point Values
  void divpd(XMMRegister dst, XMMRegister src);
  void divps(XMMRegister dst, XMMRegister src);
  void vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vdivpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vdivps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);

  // Bitwise Logical AND of Packed Floating-Point Values
  void andpd(XMMRegister dst, XMMRegister src);
  void andps(XMMRegister dst, XMMRegister src);
  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);

  // Bitwise Logical XOR of Packed Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, XMMRegister src);
  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);

  // Add packed integers
  void paddb(XMMRegister dst, XMMRegister src);
  void paddw(XMMRegister dst, XMMRegister src);
  void paddd(XMMRegister dst, XMMRegister src);
  void paddq(XMMRegister dst, XMMRegister src);
  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vpaddd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vpaddq(XMMRegister dst, XMMRegister nds, Address src, bool vector256);

  // Sub packed integers
  void psubb(XMMRegister dst, XMMRegister src);
  void psubw(XMMRegister dst, XMMRegister src);
  void psubd(XMMRegister dst, XMMRegister src);
  void psubq(XMMRegister dst, XMMRegister src);
  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vpsubd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vpsubq(XMMRegister dst, XMMRegister nds, Address src, bool vector256);

  // Multiply packed integers (only shorts and ints)
  void pmullw(XMMRegister dst, XMMRegister src);
  void pmulld(XMMRegister dst, XMMRegister src);
  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vpmulld(XMMRegister dst, XMMRegister nds, Address src, bool vector256);

  // Shift left packed integers
  void psllw(XMMRegister dst, int shift);
  void pslld(XMMRegister dst, int shift);
  void psllq(XMMRegister dst, int shift);
  void psllw(XMMRegister dst, XMMRegister shift);
  void pslld(XMMRegister dst, XMMRegister shift);
  void psllq(XMMRegister dst, XMMRegister shift);
  void vpsllw(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  void vpslld(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  void vpsllq(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  void vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  void vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  void vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);

  // Logical shift right packed integers
  void psrlw(XMMRegister dst, int shift);
  void psrld(XMMRegister dst, int shift);
  void psrlq(XMMRegister dst, int shift);
  void psrlw(XMMRegister dst, XMMRegister shift);
  void psrld(XMMRegister dst, XMMRegister shift);
  void psrlq(XMMRegister dst, XMMRegister shift);
  void vpsrlw(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  void vpsrld(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  void vpsrlq(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  void vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  void vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  void vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);

  // Arithmetic shift right packed integers (only shorts and ints, no instructions for longs)
  void psraw(XMMRegister dst, int shift);
  void psrad(XMMRegister dst, int shift);
  void psraw(XMMRegister dst, XMMRegister shift);
  void psrad(XMMRegister dst, XMMRegister shift);
  void vpsraw(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  void vpsrad(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  void vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  void vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);

  // And packed integers
  void pand(XMMRegister dst, XMMRegister src);
  void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpand(XMMRegister dst, XMMRegister nds, Address src, bool vector256);

  // Or packed integers
  void por(XMMRegister dst, XMMRegister src);
  void vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);

  // Xor packed integers
  void pxor(XMMRegister dst, XMMRegister src);
  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);

  // Copy low 128bit into high 128bit of YMM registers.
  void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src);

  // Load/store high 128bit of YMM registers which does not destroy other half.
  void vinsertf128h(XMMRegister dst, Address src);
  void vinserti128h(XMMRegister dst, Address src);
  void vextractf128h(Address dst, XMMRegister src);
  void vextracti128h(Address dst, XMMRegister src);

  // duplicate 4-bytes integer data from src into 8 locations in dest
  void vpbroadcastd(XMMRegister dst, XMMRegister src);

  // Carry-Less Multiplication Quadword
  void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
  void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);

  // AVX instruction which is used to clear upper 128 bits of YMM registers and
  // to avoid transaction penalty between AVX and SSE states. There is no
  // penalty if legacy SSE instructions are encoded using VEX prefix because
  // they always clear upper 128 bits. It should be used before calling
  // runtime code and native libraries.
  void vzeroupper();
kvn@3390 | 1877 | |
 protected:
  // Next instructions require address alignment 16 bytes SSE mode.
  // They should be called only from corresponding MacroAssembler instructions.
  void andpd(XMMRegister dst, Address src);
  void andps(XMMRegister dst, Address src);
  void xorpd(XMMRegister dst, Address src);
  void xorps(XMMRegister dst, Address src);
kvn@3388 | 1885 | |
duke@435 | 1886 | }; |
duke@435 | 1887 | |
stefank@2314 | 1888 | #endif // CPU_X86_VM_ASSEMBLER_X86_HPP |