Mon, 17 Sep 2012 19:39:07 -0700
7196199: java/text/Bidi/Bug6665028.java failed: Bidi run count incorrect
Summary: Save whole XMM/YMM registers in safepoint interrupt handler.
Reviewed-by: roland, twisti
duke@435 | 1 | /* |
kvn@3882 | 2 | * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #ifndef CPU_X86_VM_ASSEMBLER_X86_HPP |
stefank@2314 | 26 | #define CPU_X86_VM_ASSEMBLER_X86_HPP |
stefank@2314 | 27 | |
duke@435 | 28 | class BiasedLockingCounters; |
duke@435 | 29 | |
duke@435 | 30 | // Contains all the definitions needed for x86 assembly code generation. |
duke@435 | 31 | |
duke@435 | 32 | // Calling convention |
duke@435 | 33 | class Argument VALUE_OBJ_CLASS_SPEC { |
duke@435 | 34 | public: |
duke@435 | 35 | enum { |
duke@435 | 36 | #ifdef _LP64 |
duke@435 | 37 | #ifdef _WIN64 |
duke@435 | 38 | n_int_register_parameters_c = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) |
duke@435 | 39 | n_float_register_parameters_c = 4, // xmm0 - xmm3 (c_farg0, c_farg1, ... ) |
duke@435 | 40 | #else |
duke@435 | 41 | n_int_register_parameters_c = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) |
duke@435 | 42 | n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ... ) |
duke@435 | 43 | #endif // _WIN64 |
duke@435 | 44 | n_int_register_parameters_j = 6, // j_rarg0, j_rarg1, ... |
duke@435 | 45 | n_float_register_parameters_j = 8 // j_farg0, j_farg1, ... |
duke@435 | 46 | #else |
duke@435 | 47 | n_register_parameters = 0 // 0 registers used to pass arguments |
duke@435 | 48 | #endif // _LP64 |
duke@435 | 49 | }; |
duke@435 | 50 | }; |
duke@435 | 51 | |
duke@435 | 52 | |
duke@435 | 53 | #ifdef _LP64 |
duke@435 | 54 | // Symbolically name the register arguments used by the c calling convention. |
duke@435 | 55 | // Windows is different from linux/solaris. So much for standards... |
duke@435 | 56 | |
duke@435 | 57 | #ifdef _WIN64 |
duke@435 | 58 | |
duke@435 | 59 | REGISTER_DECLARATION(Register, c_rarg0, rcx); |
duke@435 | 60 | REGISTER_DECLARATION(Register, c_rarg1, rdx); |
duke@435 | 61 | REGISTER_DECLARATION(Register, c_rarg2, r8); |
duke@435 | 62 | REGISTER_DECLARATION(Register, c_rarg3, r9); |
duke@435 | 63 | |
never@739 | 64 | REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0); |
never@739 | 65 | REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1); |
never@739 | 66 | REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2); |
never@739 | 67 | REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3); |
duke@435 | 68 | |
duke@435 | 69 | #else |
duke@435 | 70 | |
duke@435 | 71 | REGISTER_DECLARATION(Register, c_rarg0, rdi); |
duke@435 | 72 | REGISTER_DECLARATION(Register, c_rarg1, rsi); |
duke@435 | 73 | REGISTER_DECLARATION(Register, c_rarg2, rdx); |
duke@435 | 74 | REGISTER_DECLARATION(Register, c_rarg3, rcx); |
duke@435 | 75 | REGISTER_DECLARATION(Register, c_rarg4, r8); |
duke@435 | 76 | REGISTER_DECLARATION(Register, c_rarg5, r9); |
duke@435 | 77 | |
never@739 | 78 | REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0); |
never@739 | 79 | REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1); |
never@739 | 80 | REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2); |
never@739 | 81 | REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3); |
never@739 | 82 | REGISTER_DECLARATION(XMMRegister, c_farg4, xmm4); |
never@739 | 83 | REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5); |
never@739 | 84 | REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6); |
never@739 | 85 | REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7); |
duke@435 | 86 | |
duke@435 | 87 | #endif // _WIN64 |
duke@435 | 88 | |
duke@435 | 89 | // Symbolically name the register arguments used by the Java calling convention. |
duke@435 | 90 | // We have control over the convention for java so we can do what we please. |
duke@435 | 91 | // What pleases us is to offset the java calling convention so that when |
duke@435 | 92 | // we call a suitable jni method the arguments are lined up and we don't |
duke@435 | 93 | // have to do little shuffling. A suitable jni method is non-static and a |
duke@435 | 94 | // small number of arguments (two fewer args on windows) |
duke@435 | 95 | // |
duke@435 | 96 | // |-------------------------------------------------------| |
duke@435 | 97 | // | c_rarg0 c_rarg1 c_rarg2 c_rarg3 c_rarg4 c_rarg5 | |
duke@435 | 98 | // |-------------------------------------------------------| |
duke@435 | 99 | // | rcx rdx r8 r9 rdi* rsi* | windows (* not a c_rarg) |
duke@435 | 100 | // | rdi rsi rdx rcx r8 r9 | solaris/linux |
duke@435 | 101 | // |-------------------------------------------------------| |
duke@435 | 102 | // | j_rarg5 j_rarg0 j_rarg1 j_rarg2 j_rarg3 j_rarg4 | |
duke@435 | 103 | // |-------------------------------------------------------| |
duke@435 | 104 | |
duke@435 | 105 | REGISTER_DECLARATION(Register, j_rarg0, c_rarg1); |
duke@435 | 106 | REGISTER_DECLARATION(Register, j_rarg1, c_rarg2); |
duke@435 | 107 | REGISTER_DECLARATION(Register, j_rarg2, c_rarg3); |
duke@435 | 108 | // Windows runs out of register args here |
duke@435 | 109 | #ifdef _WIN64 |
duke@435 | 110 | REGISTER_DECLARATION(Register, j_rarg3, rdi); |
duke@435 | 111 | REGISTER_DECLARATION(Register, j_rarg4, rsi); |
duke@435 | 112 | #else |
duke@435 | 113 | REGISTER_DECLARATION(Register, j_rarg3, c_rarg4); |
duke@435 | 114 | REGISTER_DECLARATION(Register, j_rarg4, c_rarg5); |
duke@435 | 115 | #endif /* _WIN64 */ |
duke@435 | 116 | REGISTER_DECLARATION(Register, j_rarg5, c_rarg0); |
duke@435 | 117 | |
never@739 | 118 | REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0); |
never@739 | 119 | REGISTER_DECLARATION(XMMRegister, j_farg1, xmm1); |
never@739 | 120 | REGISTER_DECLARATION(XMMRegister, j_farg2, xmm2); |
never@739 | 121 | REGISTER_DECLARATION(XMMRegister, j_farg3, xmm3); |
never@739 | 122 | REGISTER_DECLARATION(XMMRegister, j_farg4, xmm4); |
never@739 | 123 | REGISTER_DECLARATION(XMMRegister, j_farg5, xmm5); |
never@739 | 124 | REGISTER_DECLARATION(XMMRegister, j_farg6, xmm6); |
never@739 | 125 | REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7); |
duke@435 | 126 | |
duke@435 | 127 | REGISTER_DECLARATION(Register, rscratch1, r10); // volatile |
duke@435 | 128 | REGISTER_DECLARATION(Register, rscratch2, r11); // volatile |
duke@435 | 129 | |
never@739 | 130 | REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved |
duke@435 | 131 | REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved |
duke@435 | 132 | |
never@739 | 133 | #else |
never@739 | 134 | // rscratch1 will apear in 32bit code that is dead but of course must compile |
never@739 | 135 | // Using noreg ensures if the dead code is incorrectly live and executed it |
never@739 | 136 | // will cause an assertion failure |
never@739 | 137 | #define rscratch1 noreg |
iveresov@2344 | 138 | #define rscratch2 noreg |
never@739 | 139 | |
duke@435 | 140 | #endif // _LP64 |
duke@435 | 141 | |
twisti@1919 | 142 | // JSR 292 fixed register usages: |
twisti@1919 | 143 | REGISTER_DECLARATION(Register, rbp_mh_SP_save, rbp); |
twisti@1919 | 144 | |
duke@435 | 145 | // Address is an abstraction used to represent a memory location |
duke@435 | 146 | // using any of the amd64 addressing modes with one object. |
duke@435 | 147 | // |
duke@435 | 148 | // Note: A register location is represented via a Register, not |
duke@435 | 149 | // via an address for efficiency & simplicity reasons. |
duke@435 | 150 | |
duke@435 | 151 | class ArrayAddress; |
duke@435 | 152 | |
duke@435 | 153 | class Address VALUE_OBJ_CLASS_SPEC { |
duke@435 | 154 | public: |
duke@435 | 155 | enum ScaleFactor { |
duke@435 | 156 | no_scale = -1, |
duke@435 | 157 | times_1 = 0, |
duke@435 | 158 | times_2 = 1, |
duke@435 | 159 | times_4 = 2, |
never@739 | 160 | times_8 = 3, |
never@739 | 161 | times_ptr = LP64_ONLY(times_8) NOT_LP64(times_4) |
duke@435 | 162 | }; |
jrose@1057 | 163 | static ScaleFactor times(int size) { |
jrose@1057 | 164 | assert(size >= 1 && size <= 8 && is_power_of_2(size), "bad scale size"); |
jrose@1057 | 165 | if (size == 8) return times_8; |
jrose@1057 | 166 | if (size == 4) return times_4; |
jrose@1057 | 167 | if (size == 2) return times_2; |
jrose@1057 | 168 | return times_1; |
jrose@1057 | 169 | } |
jrose@1057 | 170 | static int scale_size(ScaleFactor scale) { |
jrose@1057 | 171 | assert(scale != no_scale, ""); |
jrose@1057 | 172 | assert(((1 << (int)times_1) == 1 && |
jrose@1057 | 173 | (1 << (int)times_2) == 2 && |
jrose@1057 | 174 | (1 << (int)times_4) == 4 && |
jrose@1057 | 175 | (1 << (int)times_8) == 8), ""); |
jrose@1057 | 176 | return (1 << (int)scale); |
jrose@1057 | 177 | } |
duke@435 | 178 | |
duke@435 | 179 | private: |
duke@435 | 180 | Register _base; |
duke@435 | 181 | Register _index; |
duke@435 | 182 | ScaleFactor _scale; |
duke@435 | 183 | int _disp; |
duke@435 | 184 | RelocationHolder _rspec; |
duke@435 | 185 | |
never@739 | 186 | // Easily misused constructors make them private |
never@739 | 187 | // %%% can we make these go away? |
never@739 | 188 | NOT_LP64(Address(address loc, RelocationHolder spec);) |
never@739 | 189 | Address(int disp, address loc, relocInfo::relocType rtype); |
never@739 | 190 | Address(int disp, address loc, RelocationHolder spec); |
duke@435 | 191 | |
duke@435 | 192 | public: |
never@739 | 193 | |
never@739 | 194 | int disp() { return _disp; } |
duke@435 | 195 | // creation |
duke@435 | 196 | Address() |
duke@435 | 197 | : _base(noreg), |
duke@435 | 198 | _index(noreg), |
duke@435 | 199 | _scale(no_scale), |
duke@435 | 200 | _disp(0) { |
duke@435 | 201 | } |
duke@435 | 202 | |
duke@435 | 203 | // No default displacement otherwise Register can be implicitly |
duke@435 | 204 | // converted to 0(Register) which is quite a different animal. |
duke@435 | 205 | |
duke@435 | 206 | Address(Register base, int disp) |
duke@435 | 207 | : _base(base), |
duke@435 | 208 | _index(noreg), |
duke@435 | 209 | _scale(no_scale), |
duke@435 | 210 | _disp(disp) { |
duke@435 | 211 | } |
duke@435 | 212 | |
duke@435 | 213 | Address(Register base, Register index, ScaleFactor scale, int disp = 0) |
duke@435 | 214 | : _base (base), |
duke@435 | 215 | _index(index), |
duke@435 | 216 | _scale(scale), |
duke@435 | 217 | _disp (disp) { |
duke@435 | 218 | assert(!index->is_valid() == (scale == Address::no_scale), |
duke@435 | 219 | "inconsistent address"); |
duke@435 | 220 | } |
duke@435 | 221 | |
jrose@1100 | 222 | Address(Register base, RegisterOrConstant index, ScaleFactor scale = times_1, int disp = 0) |
jrose@1057 | 223 | : _base (base), |
jrose@1057 | 224 | _index(index.register_or_noreg()), |
jrose@1057 | 225 | _scale(scale), |
jrose@1057 | 226 | _disp (disp + (index.constant_or_zero() * scale_size(scale))) { |
jrose@1057 | 227 | if (!index.is_register()) scale = Address::no_scale; |
jrose@1057 | 228 | assert(!_index->is_valid() == (scale == Address::no_scale), |
jrose@1057 | 229 | "inconsistent address"); |
jrose@1057 | 230 | } |
jrose@1057 | 231 | |
jrose@1057 | 232 | Address plus_disp(int disp) const { |
jrose@1057 | 233 | Address a = (*this); |
jrose@1057 | 234 | a._disp += disp; |
jrose@1057 | 235 | return a; |
jrose@1057 | 236 | } |
never@2895 | 237 | Address plus_disp(RegisterOrConstant disp, ScaleFactor scale = times_1) const { |
never@2895 | 238 | Address a = (*this); |
never@2895 | 239 | a._disp += disp.constant_or_zero() * scale_size(scale); |
never@2895 | 240 | if (disp.is_register()) { |
never@2895 | 241 | assert(!a.index()->is_valid(), "competing indexes"); |
never@2895 | 242 | a._index = disp.as_register(); |
never@2895 | 243 | a._scale = scale; |
never@2895 | 244 | } |
never@2895 | 245 | return a; |
never@2895 | 246 | } |
never@2895 | 247 | bool is_same_address(Address a) const { |
never@2895 | 248 | // disregard _rspec |
never@2895 | 249 | return _base == a._base && _disp == a._disp && _index == a._index && _scale == a._scale; |
never@2895 | 250 | } |
jrose@1057 | 251 | |
duke@435 | 252 | // The following two overloads are used in connection with the |
duke@435 | 253 | // ByteSize type (see sizes.hpp). They simplify the use of |
duke@435 | 254 | // ByteSize'd arguments in assembly code. Note that their equivalent |
duke@435 | 255 | // for the optimized build are the member functions with int disp |
duke@435 | 256 | // argument since ByteSize is mapped to an int type in that case. |
duke@435 | 257 | // |
duke@435 | 258 | // Note: DO NOT introduce similar overloaded functions for WordSize |
duke@435 | 259 | // arguments as in the optimized mode, both ByteSize and WordSize |
duke@435 | 260 | // are mapped to the same type and thus the compiler cannot make a |
duke@435 | 261 | // distinction anymore (=> compiler errors). |
duke@435 | 262 | |
duke@435 | 263 | #ifdef ASSERT |
duke@435 | 264 | Address(Register base, ByteSize disp) |
duke@435 | 265 | : _base(base), |
duke@435 | 266 | _index(noreg), |
duke@435 | 267 | _scale(no_scale), |
duke@435 | 268 | _disp(in_bytes(disp)) { |
duke@435 | 269 | } |
duke@435 | 270 | |
duke@435 | 271 | Address(Register base, Register index, ScaleFactor scale, ByteSize disp) |
duke@435 | 272 | : _base(base), |
duke@435 | 273 | _index(index), |
duke@435 | 274 | _scale(scale), |
duke@435 | 275 | _disp(in_bytes(disp)) { |
duke@435 | 276 | assert(!index->is_valid() == (scale == Address::no_scale), |
duke@435 | 277 | "inconsistent address"); |
duke@435 | 278 | } |
jrose@1057 | 279 | |
jrose@1100 | 280 | Address(Register base, RegisterOrConstant index, ScaleFactor scale, ByteSize disp) |
jrose@1057 | 281 | : _base (base), |
jrose@1057 | 282 | _index(index.register_or_noreg()), |
jrose@1057 | 283 | _scale(scale), |
jrose@1057 | 284 | _disp (in_bytes(disp) + (index.constant_or_zero() * scale_size(scale))) { |
jrose@1057 | 285 | if (!index.is_register()) scale = Address::no_scale; |
jrose@1057 | 286 | assert(!_index->is_valid() == (scale == Address::no_scale), |
jrose@1057 | 287 | "inconsistent address"); |
jrose@1057 | 288 | } |
jrose@1057 | 289 | |
duke@435 | 290 | #endif // ASSERT |
duke@435 | 291 | |
duke@435 | 292 | // accessors |
ysr@777 | 293 | bool uses(Register reg) const { return _base == reg || _index == reg; } |
ysr@777 | 294 | Register base() const { return _base; } |
ysr@777 | 295 | Register index() const { return _index; } |
ysr@777 | 296 | ScaleFactor scale() const { return _scale; } |
ysr@777 | 297 | int disp() const { return _disp; } |
duke@435 | 298 | |
duke@435 | 299 | // Convert the raw encoding form into the form expected by the constructor for |
duke@435 | 300 | // Address. An index of 4 (rsp) corresponds to having no index, so convert |
duke@435 | 301 | // that to noreg for the Address constructor. |
coleenp@4037 | 302 | static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc); |
duke@435 | 303 | |
duke@435 | 304 | static Address make_array(ArrayAddress); |
duke@435 | 305 | |
duke@435 | 306 | private: |
duke@435 | 307 | bool base_needs_rex() const { |
duke@435 | 308 | return _base != noreg && _base->encoding() >= 8; |
duke@435 | 309 | } |
duke@435 | 310 | |
duke@435 | 311 | bool index_needs_rex() const { |
duke@435 | 312 | return _index != noreg &&_index->encoding() >= 8; |
duke@435 | 313 | } |
duke@435 | 314 | |
duke@435 | 315 | relocInfo::relocType reloc() const { return _rspec.type(); } |
duke@435 | 316 | |
duke@435 | 317 | friend class Assembler; |
duke@435 | 318 | friend class MacroAssembler; |
duke@435 | 319 | friend class LIR_Assembler; // base/index/scale/disp |
duke@435 | 320 | }; |
duke@435 | 321 | |
duke@435 | 322 | // |
duke@435 | 323 | // AddressLiteral has been split out from Address because operands of this type |
duke@435 | 324 | // need to be treated specially on 32bit vs. 64bit platforms. By splitting it out |
duke@435 | 325 | // the few instructions that need to deal with address literals are unique and the |
duke@435 | 326 | // MacroAssembler does not have to implement every instruction in the Assembler |
duke@435 | 327 | // in order to search for address literals that may need special handling depending |
duke@435 | 328 | // on the instruction and the platform. As small step on the way to merging i486/amd64 |
duke@435 | 329 | // directories. |
duke@435 | 330 | // |
duke@435 | 331 | class AddressLiteral VALUE_OBJ_CLASS_SPEC { |
duke@435 | 332 | friend class ArrayAddress; |
duke@435 | 333 | RelocationHolder _rspec; |
duke@435 | 334 | // Typically we use AddressLiterals we want to use their rval |
duke@435 | 335 | // However in some situations we want the lval (effect address) of the item. |
duke@435 | 336 | // We provide a special factory for making those lvals. |
duke@435 | 337 | bool _is_lval; |
duke@435 | 338 | |
duke@435 | 339 | // If the target is far we'll need to load the ea of this to |
duke@435 | 340 | // a register to reach it. Otherwise if near we can do rip |
duke@435 | 341 | // relative addressing. |
duke@435 | 342 | |
duke@435 | 343 | address _target; |
duke@435 | 344 | |
duke@435 | 345 | protected: |
duke@435 | 346 | // creation |
duke@435 | 347 | AddressLiteral() |
duke@435 | 348 | : _is_lval(false), |
duke@435 | 349 | _target(NULL) |
duke@435 | 350 | {} |
duke@435 | 351 | |
duke@435 | 352 | public: |
duke@435 | 353 | |
duke@435 | 354 | |
duke@435 | 355 | AddressLiteral(address target, relocInfo::relocType rtype); |
duke@435 | 356 | |
duke@435 | 357 | AddressLiteral(address target, RelocationHolder const& rspec) |
duke@435 | 358 | : _rspec(rspec), |
duke@435 | 359 | _is_lval(false), |
duke@435 | 360 | _target(target) |
duke@435 | 361 | {} |
duke@435 | 362 | |
duke@435 | 363 | AddressLiteral addr() { |
duke@435 | 364 | AddressLiteral ret = *this; |
duke@435 | 365 | ret._is_lval = true; |
duke@435 | 366 | return ret; |
duke@435 | 367 | } |
duke@435 | 368 | |
duke@435 | 369 | |
duke@435 | 370 | private: |
duke@435 | 371 | |
duke@435 | 372 | address target() { return _target; } |
duke@435 | 373 | bool is_lval() { return _is_lval; } |
duke@435 | 374 | |
duke@435 | 375 | relocInfo::relocType reloc() const { return _rspec.type(); } |
duke@435 | 376 | const RelocationHolder& rspec() const { return _rspec; } |
duke@435 | 377 | |
duke@435 | 378 | friend class Assembler; |
duke@435 | 379 | friend class MacroAssembler; |
duke@435 | 380 | friend class Address; |
duke@435 | 381 | friend class LIR_Assembler; |
duke@435 | 382 | }; |
duke@435 | 383 | |
duke@435 | 384 | // Convenience classes |
duke@435 | 385 | class RuntimeAddress: public AddressLiteral { |
duke@435 | 386 | |
duke@435 | 387 | public: |
duke@435 | 388 | |
duke@435 | 389 | RuntimeAddress(address target) : AddressLiteral(target, relocInfo::runtime_call_type) {} |
duke@435 | 390 | |
duke@435 | 391 | }; |
duke@435 | 392 | |
duke@435 | 393 | class ExternalAddress: public AddressLiteral { |
never@2737 | 394 | private: |
never@2737 | 395 | static relocInfo::relocType reloc_for_target(address target) { |
never@2737 | 396 | // Sometimes ExternalAddress is used for values which aren't |
never@2737 | 397 | // exactly addresses, like the card table base. |
never@2737 | 398 | // external_word_type can't be used for values in the first page |
never@2737 | 399 | // so just skip the reloc in that case. |
never@2737 | 400 | return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none; |
never@2737 | 401 | } |
never@2737 | 402 | |
never@2737 | 403 | public: |
never@2737 | 404 | |
never@2737 | 405 | ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(target)) {} |
duke@435 | 406 | |
duke@435 | 407 | }; |
duke@435 | 408 | |
duke@435 | 409 | class InternalAddress: public AddressLiteral { |
duke@435 | 410 | |
duke@435 | 411 | public: |
duke@435 | 412 | |
duke@435 | 413 | InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {} |
duke@435 | 414 | |
duke@435 | 415 | }; |
duke@435 | 416 | |
duke@435 | 417 | // x86 can do array addressing as a single operation since disp can be an absolute |
duke@435 | 418 | // address amd64 can't. We create a class that expresses the concept but does extra |
duke@435 | 419 | // magic on amd64 to get the final result |
duke@435 | 420 | |
duke@435 | 421 | class ArrayAddress VALUE_OBJ_CLASS_SPEC { |
duke@435 | 422 | private: |
duke@435 | 423 | |
duke@435 | 424 | AddressLiteral _base; |
duke@435 | 425 | Address _index; |
duke@435 | 426 | |
duke@435 | 427 | public: |
duke@435 | 428 | |
duke@435 | 429 | ArrayAddress() {}; |
duke@435 | 430 | ArrayAddress(AddressLiteral base, Address index): _base(base), _index(index) {}; |
duke@435 | 431 | AddressLiteral base() { return _base; } |
duke@435 | 432 | Address index() { return _index; } |
duke@435 | 433 | |
duke@435 | 434 | }; |
duke@435 | 435 | |
never@739 | 436 | const int FPUStateSizeInWords = NOT_LP64(27) LP64_ONLY( 512 / wordSize); |
duke@435 | 437 | |
duke@435 | 438 | // The Intel x86/Amd64 Assembler: Pure assembler doing NO optimizations on the instruction |
duke@435 | 439 | // level (e.g. mov rax, 0 is not translated into xor rax, rax!); i.e., what you write |
duke@435 | 440 | // is what you get. The Assembler is generating code into a CodeBuffer. |
duke@435 | 441 | |
duke@435 | 442 | class Assembler : public AbstractAssembler { |
duke@435 | 443 | friend class AbstractAssembler; // for the non-virtual hack |
duke@435 | 444 | friend class LIR_Assembler; // as_Address() |
never@739 | 445 | friend class StubGenerator; |
duke@435 | 446 | |
duke@435 | 447 | public: |
duke@435 | 448 | enum Condition { // The x86 condition codes used for conditional jumps/moves. |
duke@435 | 449 | zero = 0x4, |
duke@435 | 450 | notZero = 0x5, |
duke@435 | 451 | equal = 0x4, |
duke@435 | 452 | notEqual = 0x5, |
duke@435 | 453 | less = 0xc, |
duke@435 | 454 | lessEqual = 0xe, |
duke@435 | 455 | greater = 0xf, |
duke@435 | 456 | greaterEqual = 0xd, |
duke@435 | 457 | below = 0x2, |
duke@435 | 458 | belowEqual = 0x6, |
duke@435 | 459 | above = 0x7, |
duke@435 | 460 | aboveEqual = 0x3, |
duke@435 | 461 | overflow = 0x0, |
duke@435 | 462 | noOverflow = 0x1, |
duke@435 | 463 | carrySet = 0x2, |
duke@435 | 464 | carryClear = 0x3, |
duke@435 | 465 | negative = 0x8, |
duke@435 | 466 | positive = 0x9, |
duke@435 | 467 | parity = 0xa, |
duke@435 | 468 | noParity = 0xb |
duke@435 | 469 | }; |
duke@435 | 470 | |
duke@435 | 471 | enum Prefix { |
duke@435 | 472 | // segment overrides |
duke@435 | 473 | CS_segment = 0x2e, |
duke@435 | 474 | SS_segment = 0x36, |
duke@435 | 475 | DS_segment = 0x3e, |
duke@435 | 476 | ES_segment = 0x26, |
duke@435 | 477 | FS_segment = 0x64, |
duke@435 | 478 | GS_segment = 0x65, |
duke@435 | 479 | |
duke@435 | 480 | REX = 0x40, |
duke@435 | 481 | |
duke@435 | 482 | REX_B = 0x41, |
duke@435 | 483 | REX_X = 0x42, |
duke@435 | 484 | REX_XB = 0x43, |
duke@435 | 485 | REX_R = 0x44, |
duke@435 | 486 | REX_RB = 0x45, |
duke@435 | 487 | REX_RX = 0x46, |
duke@435 | 488 | REX_RXB = 0x47, |
duke@435 | 489 | |
duke@435 | 490 | REX_W = 0x48, |
duke@435 | 491 | |
duke@435 | 492 | REX_WB = 0x49, |
duke@435 | 493 | REX_WX = 0x4A, |
duke@435 | 494 | REX_WXB = 0x4B, |
duke@435 | 495 | REX_WR = 0x4C, |
duke@435 | 496 | REX_WRB = 0x4D, |
duke@435 | 497 | REX_WRX = 0x4E, |
kvn@3388 | 498 | REX_WRXB = 0x4F, |
kvn@3388 | 499 | |
kvn@3388 | 500 | VEX_3bytes = 0xC4, |
kvn@3388 | 501 | VEX_2bytes = 0xC5 |
kvn@3388 | 502 | }; |
kvn@3388 | 503 | |
kvn@3388 | 504 | enum VexPrefix { |
kvn@3388 | 505 | VEX_B = 0x20, |
kvn@3388 | 506 | VEX_X = 0x40, |
kvn@3388 | 507 | VEX_R = 0x80, |
kvn@3388 | 508 | VEX_W = 0x80 |
kvn@3388 | 509 | }; |
kvn@3388 | 510 | |
kvn@3388 | 511 | enum VexSimdPrefix { |
kvn@3388 | 512 | VEX_SIMD_NONE = 0x0, |
kvn@3388 | 513 | VEX_SIMD_66 = 0x1, |
kvn@3388 | 514 | VEX_SIMD_F3 = 0x2, |
kvn@3388 | 515 | VEX_SIMD_F2 = 0x3 |
kvn@3388 | 516 | }; |
kvn@3388 | 517 | |
kvn@3388 | 518 | enum VexOpcode { |
kvn@3388 | 519 | VEX_OPCODE_NONE = 0x0, |
kvn@3388 | 520 | VEX_OPCODE_0F = 0x1, |
kvn@3388 | 521 | VEX_OPCODE_0F_38 = 0x2, |
kvn@3388 | 522 | VEX_OPCODE_0F_3A = 0x3 |
duke@435 | 523 | }; |
duke@435 | 524 | |
duke@435 | 525 | enum WhichOperand { |
duke@435 | 526 | // input to locate_operand, and format code for relocations |
never@739 | 527 | imm_operand = 0, // embedded 32-bit|64-bit immediate operand |
duke@435 | 528 | disp32_operand = 1, // embedded 32-bit displacement or address |
duke@435 | 529 | call32_operand = 2, // embedded 32-bit self-relative displacement |
never@739 | 530 | #ifndef _LP64 |
duke@435 | 531 | _WhichOperand_limit = 3 |
never@739 | 532 | #else |
never@739 | 533 | narrow_oop_operand = 3, // embedded 32-bit immediate narrow oop |
never@739 | 534 | _WhichOperand_limit = 4 |
never@739 | 535 | #endif |
duke@435 | 536 | }; |
duke@435 | 537 | |
never@739 | 538 | |
never@739 | 539 | |
never@739 | 540 | // NOTE: The general philosophy of the declarations here is that 64bit versions |
never@739 | 541 | // of instructions are freely declared without the need for wrapping them in an ifdef. |
never@739 | 542 | // (Some dangerous instructions are ifdef's out of inappropriate jvm's.) |
never@739 | 543 | // In the .cpp file the implementations are wrapped so that they are dropped out |
never@739 | 544 | // of the resulting jvm. This is done mostly to keep the footprint of KERNEL |
never@739 | 545 | // to the size it was prior to merging up the 32bit and 64bit assemblers. |
never@739 | 546 | // |
never@739 | 547 | // This does mean you'll get a linker/runtime error if you use a 64bit only instruction |
never@739 | 548 | // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down. |
never@739 | 549 | |
never@739 | 550 | private: |
never@739 | 551 | |
never@739 | 552 | |
never@739 | 553 | // 64bit prefixes |
never@739 | 554 | int prefix_and_encode(int reg_enc, bool byteinst = false); |
never@739 | 555 | int prefixq_and_encode(int reg_enc); |
never@739 | 556 | |
never@739 | 557 | int prefix_and_encode(int dst_enc, int src_enc, bool byteinst = false); |
never@739 | 558 | int prefixq_and_encode(int dst_enc, int src_enc); |
never@739 | 559 | |
never@739 | 560 | void prefix(Register reg); |
never@739 | 561 | void prefix(Address adr); |
never@739 | 562 | void prefixq(Address adr); |
never@739 | 563 | |
never@739 | 564 | void prefix(Address adr, Register reg, bool byteinst = false); |
kvn@3388 | 565 | void prefix(Address adr, XMMRegister reg); |
never@739 | 566 | void prefixq(Address adr, Register reg); |
kvn@3388 | 567 | void prefixq(Address adr, XMMRegister reg); |
never@739 | 568 | |
never@739 | 569 | void prefetch_prefix(Address src); |
never@739 | 570 | |
kvn@3388 | 571 | void rex_prefix(Address adr, XMMRegister xreg, |
kvn@3388 | 572 | VexSimdPrefix pre, VexOpcode opc, bool rex_w); |
kvn@3388 | 573 | int rex_prefix_and_encode(int dst_enc, int src_enc, |
kvn@3388 | 574 | VexSimdPrefix pre, VexOpcode opc, bool rex_w); |
kvn@3388 | 575 | |
kvn@3388 | 576 | void vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, |
kvn@3388 | 577 | int nds_enc, VexSimdPrefix pre, VexOpcode opc, |
kvn@3388 | 578 | bool vector256); |
kvn@3388 | 579 | |
kvn@3388 | 580 | void vex_prefix(Address adr, int nds_enc, int xreg_enc, |
kvn@3388 | 581 | VexSimdPrefix pre, VexOpcode opc, |
kvn@3388 | 582 | bool vex_w, bool vector256); |
kvn@3388 | 583 | |
kvn@3390 | 584 | void vex_prefix(XMMRegister dst, XMMRegister nds, Address src, |
kvn@3390 | 585 | VexSimdPrefix pre, bool vector256 = false) { |
kvn@3882 | 586 | int dst_enc = dst->encoding(); |
kvn@3882 | 587 | int nds_enc = nds->is_valid() ? nds->encoding() : 0; |
kvn@3882 | 588 | vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, false, vector256); |
kvn@3390 | 589 | } |
kvn@3390 | 590 | |
kvn@3388 | 591 | int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, |
kvn@3388 | 592 | VexSimdPrefix pre, VexOpcode opc, |
kvn@3388 | 593 | bool vex_w, bool vector256); |
kvn@3388 | 594 | |
kvn@3390 | 595 | int vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, |
kvn@3882 | 596 | VexSimdPrefix pre, bool vector256 = false, |
kvn@3882 | 597 | VexOpcode opc = VEX_OPCODE_0F) { |
kvn@3882 | 598 | int src_enc = src->encoding(); |
kvn@3882 | 599 | int dst_enc = dst->encoding(); |
kvn@3882 | 600 | int nds_enc = nds->is_valid() ? nds->encoding() : 0; |
kvn@3882 | 601 | return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, false, vector256); |
kvn@3390 | 602 | } |
kvn@3388 | 603 | |
kvn@3388 | 604 | void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, |
kvn@3388 | 605 | VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F, |
kvn@3388 | 606 | bool rex_w = false, bool vector256 = false); |
kvn@3388 | 607 | |
kvn@3388 | 608 | void simd_prefix(XMMRegister dst, Address src, |
kvn@3388 | 609 | VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) { |
kvn@3388 | 610 | simd_prefix(dst, xnoreg, src, pre, opc); |
kvn@3388 | 611 | } |
kvn@4001 | 612 | |
kvn@3388 | 613 | void simd_prefix(Address dst, XMMRegister src, VexSimdPrefix pre) { |
kvn@3388 | 614 | simd_prefix(src, dst, pre); |
kvn@3388 | 615 | } |
kvn@3388 | 616 | void simd_prefix_q(XMMRegister dst, XMMRegister nds, Address src, |
kvn@3388 | 617 | VexSimdPrefix pre) { |
kvn@3388 | 618 | bool rex_w = true; |
kvn@3388 | 619 | simd_prefix(dst, nds, src, pre, VEX_OPCODE_0F, rex_w); |
kvn@3388 | 620 | } |
kvn@3388 | 621 | |
// General register-to-register form; defined out of line in the .cpp file.
kvn@3388 | 622 | int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
kvn@3388 | 623 | VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
kvn@3388 | 624 | bool rex_w = false, bool vector256 = false);
kvn@3388 | 625 | 
kvn@3388 | 626 | // Move/convert 32-bit integer value.
kvn@3388 | 627 | int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, Register src,
kvn@3388 | 628 | VexSimdPrefix pre) {
kvn@3388 | 629 | // It is OK to cast from Register to XMMRegister to pass argument here
kvn@3388 | 630 | // since only encoding is used in simd_prefix_and_encode() and number of
kvn@3388 | 631 | // Gen and Xmm registers are the same.
kvn@3388 | 632 | return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre);
kvn@3388 | 633 | }
// Convenience overload: no NDS register (passes xnoreg).
kvn@3388 | 634 | int simd_prefix_and_encode(XMMRegister dst, Register src, VexSimdPrefix pre) {
kvn@3388 | 635 | return simd_prefix_and_encode(dst, xnoreg, src, pre);
kvn@3388 | 636 | }
// GP-register destination: dst's encoding is cast to an XMMRegister
// (see the cast note above).
kvn@3388 | 637 | int simd_prefix_and_encode(Register dst, XMMRegister src,
kvn@3388 | 638 | VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
kvn@3388 | 639 | return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, opc);
kvn@3388 | 640 | }
kvn@3388 | 641 | 
kvn@3388 | 642 | // Move/convert 64-bit integer value.
// These _q forms pass rex_w == true to request 64-bit operand size.
kvn@3388 | 643 | int simd_prefix_and_encode_q(XMMRegister dst, XMMRegister nds, Register src,
kvn@3388 | 644 | VexSimdPrefix pre) {
kvn@3388 | 645 | bool rex_w = true;
kvn@3388 | 646 | return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre, VEX_OPCODE_0F, rex_w);
kvn@3388 | 647 | }
kvn@3388 | 648 | int simd_prefix_and_encode_q(XMMRegister dst, Register src, VexSimdPrefix pre) {
kvn@3388 | 649 | return simd_prefix_and_encode_q(dst, xnoreg, src, pre);
kvn@3388 | 650 | }
kvn@3388 | 651 | int simd_prefix_and_encode_q(Register dst, XMMRegister src,
kvn@3388 | 652 | VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
kvn@3388 | 653 | bool rex_w = true;
kvn@3388 | 654 | return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, opc, rex_w);
kvn@3388 | 655 | }
kvn@3388 | 656 | |
never@739 | 657 | // Helper functions for groups of instructions |
never@739 | 658 | void emit_arith_b(int op1, int op2, Register dst, int imm8); |
never@739 | 659 | |
never@739 | 660 | void emit_arith(int op1, int op2, Register dst, int32_t imm32); |
kvn@3574 | 661 | // Force generation of a 4 byte immediate value even if it fits into 8bit |
kvn@3574 | 662 | void emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32); |
never@739 | 663 | void emit_arith(int op1, int op2, Register dst, Register src); |
never@739 | 664 | |
kvn@4001 | 665 | void emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre); |
kvn@4001 | 666 | void emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre); |
kvn@4001 | 667 | void emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre); |
kvn@4001 | 668 | void emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre); |
kvn@4001 | 669 | void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds, |
kvn@4001 | 670 | Address src, VexSimdPrefix pre, bool vector256); |
kvn@4001 | 671 | void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds, |
kvn@4001 | 672 | XMMRegister src, VexSimdPrefix pre, bool vector256); |
kvn@4001 | 673 | |
never@739 | 674 | void emit_operand(Register reg, |
never@739 | 675 | Register base, Register index, Address::ScaleFactor scale, |
never@739 | 676 | int disp, |
never@739 | 677 | RelocationHolder const& rspec, |
never@739 | 678 | int rip_relative_correction = 0); |
never@739 | 679 | |
never@739 | 680 | void emit_operand(Register reg, Address adr, int rip_relative_correction = 0); |
never@739 | 681 | |
never@739 | 682 | // operands that only take the original 32bit registers |
never@739 | 683 | void emit_operand32(Register reg, Address adr); |
never@739 | 684 | |
never@739 | 685 | void emit_operand(XMMRegister reg, |
never@739 | 686 | Register base, Register index, Address::ScaleFactor scale, |
never@739 | 687 | int disp, |
never@739 | 688 | RelocationHolder const& rspec); |
never@739 | 689 | |
never@739 | 690 | void emit_operand(XMMRegister reg, Address adr); |
never@739 | 691 | |
never@739 | 692 | void emit_operand(MMXRegister reg, Address adr); |
never@739 | 693 | |
never@739 | 694 | // workaround gcc (3.2.1-7) bug |
never@739 | 695 | void emit_operand(Address adr, MMXRegister reg); |
never@739 | 696 | |
never@739 | 697 | |
never@739 | 698 | // Immediate-to-memory forms |
never@739 | 699 | void emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32); |
never@739 | 700 | |
never@739 | 701 | void emit_farith(int b1, int b2, int i); |
never@739 | 702 | |
duke@435 | 703 | |
duke@435 | 704 | protected: |
never@739 | 705 | #ifdef ASSERT |
never@739 | 706 | void check_relocation(RelocationHolder const& rspec, int format); |
never@739 | 707 | #endif |
never@739 | 708 | |
never@739 | 709 | inline void emit_long64(jlong x); |
never@739 | 710 | |
never@739 | 711 | void emit_data(jint data, relocInfo::relocType rtype, int format); |
never@739 | 712 | void emit_data(jint data, RelocationHolder const& rspec, int format); |
never@739 | 713 | void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0); |
never@739 | 714 | void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0); |
never@739 | 715 | |
never@739 | 716 | bool reachable(AddressLiteral adr) NOT_LP64({ return true;}); |
never@739 | 717 | |
never@739 | 718 | // These are all easily abused and hence protected |
never@739 | 719 | |
never@739 | 720 | // 32BIT ONLY SECTION |
never@739 | 721 | #ifndef _LP64 |
never@739 | 722 | // Make these disappear in 64bit mode since they would never be correct |
never@739 | 723 | void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY |
never@739 | 724 | void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY |
never@739 | 725 | |
kvn@1077 | 726 | void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY |
never@739 | 727 | void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY |
never@739 | 728 | |
never@739 | 729 | void push_literal32(int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY |
never@739 | 730 | #else |
never@739 | 731 | // 64BIT ONLY SECTION |
never@739 | 732 | void mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec); // 64BIT ONLY |
kvn@1077 | 733 | |
kvn@1077 | 734 | void cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec); |
kvn@1077 | 735 | void cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec); |
kvn@1077 | 736 | |
kvn@1077 | 737 | void mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec); |
kvn@1077 | 738 | void mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec); |
never@739 | 739 | #endif // _LP64 |
never@739 | 740 | |
never@739 | 741 | // These are unique in that we are ensured by the caller that the 32bit |
never@739 | 742 | // relative in these instructions will always be able to reach the potentially |
never@739 | 743 | // 64bit address described by entry. Since they can take a 64bit address they |
never@739 | 744 | // don't have the 32 suffix like the other instructions in this class. |
never@739 | 745 | |
never@739 | 746 | void call_literal(address entry, RelocationHolder const& rspec); |
never@739 | 747 | void jmp_literal(address entry, RelocationHolder const& rspec); |
never@739 | 748 | |
never@739 | 749 | // Avoid using directly section |
never@739 | 750 | // Instructions in this section are actually usable by anyone without danger |
never@739 | 751 | // of failure but have performance issues that are addressed by enhanced
never@739 | 752 | // instructions which will do the proper thing based on the particular cpu.
never@739 | 753 | // We protect them because we don't trust you... |
never@739 | 754 | |
duke@435 | 755 | // Don't use next inc() and dec() methods directly. INC & DEC instructions |
duke@435 | 756 | // could cause a partial flag stall since they don't set CF flag. |
duke@435 | 757 | // Use MacroAssembler::decrement() & MacroAssembler::increment() methods |
duke@435 | 758 | // which call inc() & dec() or add() & sub() in accordance with |
duke@435 | 759 | // the product flag UseIncDec value. |
duke@435 | 760 | |
duke@435 | 761 | void decl(Register dst); |
duke@435 | 762 | void decl(Address dst); |
never@739 | 763 | void decq(Register dst); |
never@739 | 764 | void decq(Address dst); |
duke@435 | 765 | |
duke@435 | 766 | void incl(Register dst); |
duke@435 | 767 | void incl(Address dst); |
never@739 | 768 | void incq(Register dst); |
never@739 | 769 | void incq(Address dst); |
never@739 | 770 | |
never@739 | 771 | // New cpus require use of movsd and movss to avoid partial register stall |
never@739 | 772 | // when loading from memory. But for old Opteron use movlpd instead of movsd. |
never@739 | 773 | // The selection is done in MacroAssembler::movdbl() and movflt(). |
never@739 | 774 | |
never@739 | 775 | // Move Scalar Single-Precision Floating-Point Values |
never@739 | 776 | void movss(XMMRegister dst, Address src); |
never@739 | 777 | void movss(XMMRegister dst, XMMRegister src); |
never@739 | 778 | void movss(Address dst, XMMRegister src); |
never@739 | 779 | |
never@739 | 780 | // Move Scalar Double-Precision Floating-Point Values |
never@739 | 781 | void movsd(XMMRegister dst, Address src); |
never@739 | 782 | void movsd(XMMRegister dst, XMMRegister src); |
never@739 | 783 | void movsd(Address dst, XMMRegister src); |
never@739 | 784 | void movlpd(XMMRegister dst, Address src); |
never@739 | 785 | |
never@739 | 786 | // New cpus require use of movaps and movapd to avoid partial register stall |
never@739 | 787 | // when moving between registers. |
never@739 | 788 | void movaps(XMMRegister dst, XMMRegister src); |
never@739 | 789 | void movapd(XMMRegister dst, XMMRegister src); |
never@739 | 790 | |
never@739 | 791 | // End avoid using directly |
never@739 | 792 | |
never@739 | 793 | |
never@739 | 794 | // Instruction prefixes |
never@739 | 795 | void prefix(Prefix p); |
never@739 | 796 | |
never@739 | 797 | public: |
never@739 | 798 | |
never@739 | 799 | // Creation |
// Construct an Assembler that emits instructions into the given CodeBuffer.
never@739 | 800 | Assembler(CodeBuffer* code) : AbstractAssembler(code) {}
never@739 | 801 | |
never@739 | 802 | // Decoding |
never@739 | 803 | static address locate_operand(address inst, WhichOperand which); |
never@739 | 804 | static address locate_next_instruction(address inst); |
never@739 | 805 | |
never@739 | 806 | // Utilities |
iveresov@2686 | 807 | static bool is_polling_page_far() NOT_LP64({ return false;}); |
iveresov@2686 | 808 | |
never@739 | 809 | // Generic instructions |
never@739 | 810 | // Does 32bit or 64bit as needed for the platform. In some sense these |
never@739 | 811 | // belong in macro assembler but there is no need for both varieties to exist |
never@739 | 812 | |
never@739 | 813 | void lea(Register dst, Address src); |
never@739 | 814 | |
never@739 | 815 | void mov(Register dst, Register src); |
never@739 | 816 | |
never@739 | 817 | void pusha(); |
never@739 | 818 | void popa(); |
never@739 | 819 | |
never@739 | 820 | void pushf(); |
never@739 | 821 | void popf(); |
never@739 | 822 | |
never@739 | 823 | void push(int32_t imm32); |
never@739 | 824 | |
never@739 | 825 | void push(Register src); |
never@739 | 826 | |
never@739 | 827 | void pop(Register dst); |
never@739 | 828 | |
never@739 | 829 | // These are dummies to prevent surprise implicit conversions to Register |
never@739 | 830 | void push(void* v); |
never@739 | 831 | void pop(void* v); |
never@739 | 832 | |
never@739 | 833 | // These do register sized moves/scans |
never@739 | 834 | void rep_mov(); |
never@739 | 835 | void rep_set(); |
never@739 | 836 | void repne_scan(); |
never@739 | 837 | #ifdef _LP64 |
never@739 | 838 | void repne_scanl(); |
never@739 | 839 | #endif |
never@739 | 840 | |
never@739 | 841 | // Vanilla instructions in lexical order |
never@739 | 842 | |
phh@2423 | 843 | void adcl(Address dst, int32_t imm32); |
phh@2423 | 844 | void adcl(Address dst, Register src); |
never@739 | 845 | void adcl(Register dst, int32_t imm32); |
never@739 | 846 | void adcl(Register dst, Address src); |
never@739 | 847 | void adcl(Register dst, Register src); |
never@739 | 848 | |
never@739 | 849 | void adcq(Register dst, int32_t imm32); |
never@739 | 850 | void adcq(Register dst, Address src); |
never@739 | 851 | void adcq(Register dst, Register src); |
never@739 | 852 | |
never@739 | 853 | void addl(Address dst, int32_t imm32); |
never@739 | 854 | void addl(Address dst, Register src); |
never@739 | 855 | void addl(Register dst, int32_t imm32); |
never@739 | 856 | void addl(Register dst, Address src); |
never@739 | 857 | void addl(Register dst, Register src); |
never@739 | 858 | |
never@739 | 859 | void addq(Address dst, int32_t imm32); |
never@739 | 860 | void addq(Address dst, Register src); |
never@739 | 861 | void addq(Register dst, int32_t imm32); |
never@739 | 862 | void addq(Register dst, Address src); |
never@739 | 863 | void addq(Register dst, Register src); |
never@739 | 864 | |
duke@435 | 865 | void addr_nop_4(); |
duke@435 | 866 | void addr_nop_5(); |
duke@435 | 867 | void addr_nop_7(); |
duke@435 | 868 | void addr_nop_8(); |
duke@435 | 869 | |
never@739 | 870 | // Add Scalar Double-Precision Floating-Point Values |
never@739 | 871 | void addsd(XMMRegister dst, Address src); |
never@739 | 872 | void addsd(XMMRegister dst, XMMRegister src); |
never@739 | 873 | |
never@739 | 874 | // Add Scalar Single-Precision Floating-Point Values |
never@739 | 875 | void addss(XMMRegister dst, Address src); |
never@739 | 876 | void addss(XMMRegister dst, XMMRegister src); |
never@739 | 877 | |
kvn@3388 | 878 | void andl(Address dst, int32_t imm32); |
never@739 | 879 | void andl(Register dst, int32_t imm32); |
never@739 | 880 | void andl(Register dst, Address src); |
never@739 | 881 | void andl(Register dst, Register src); |
never@739 | 882 | |
never@2980 | 883 | void andq(Address dst, int32_t imm32); |
never@739 | 884 | void andq(Register dst, int32_t imm32); |
never@739 | 885 | void andq(Register dst, Address src); |
never@739 | 886 | void andq(Register dst, Register src); |
never@739 | 887 | |
twisti@1210 | 888 | void bsfl(Register dst, Register src); |
twisti@1210 | 889 | void bsrl(Register dst, Register src); |
twisti@1210 | 890 | |
twisti@1210 | 891 | #ifdef _LP64 |
twisti@1210 | 892 | void bsfq(Register dst, Register src); |
twisti@1210 | 893 | void bsrq(Register dst, Register src); |
twisti@1210 | 894 | #endif |
twisti@1210 | 895 | |
never@739 | 896 | void bswapl(Register reg); |
never@739 | 897 | |
never@739 | 898 | void bswapq(Register reg); |
never@739 | 899 | |
duke@435 | 900 | void call(Label& L, relocInfo::relocType rtype); |
duke@435 | 901 | void call(Register reg); // push pc; pc <- reg |
duke@435 | 902 | void call(Address adr); // push pc; pc <- adr |
duke@435 | 903 | |
never@739 | 904 | void cdql(); |
never@739 | 905 | |
never@739 | 906 | void cdqq(); |
never@739 | 907 | |
// CLD — clear the x86 direction flag (single-byte opcode 0xFC).
never@739 | 908 | void cld() { emit_byte(0xfc); }
never@739 | 909 | |
never@739 | 910 | void clflush(Address adr); |
never@739 | 911 | |
never@739 | 912 | void cmovl(Condition cc, Register dst, Register src); |
never@739 | 913 | void cmovl(Condition cc, Register dst, Address src); |
never@739 | 914 | |
never@739 | 915 | void cmovq(Condition cc, Register dst, Register src); |
never@739 | 916 | void cmovq(Condition cc, Register dst, Address src); |
never@739 | 917 | |
never@739 | 918 | |
never@739 | 919 | void cmpb(Address dst, int imm8); |
never@739 | 920 | |
never@739 | 921 | void cmpl(Address dst, int32_t imm32); |
never@739 | 922 | |
never@739 | 923 | void cmpl(Register dst, int32_t imm32); |
never@739 | 924 | void cmpl(Register dst, Register src); |
never@739 | 925 | void cmpl(Register dst, Address src); |
never@739 | 926 | |
never@739 | 927 | void cmpq(Address dst, int32_t imm32); |
never@739 | 928 | void cmpq(Address dst, Register src); |
never@739 | 929 | |
never@739 | 930 | void cmpq(Register dst, int32_t imm32); |
never@739 | 931 | void cmpq(Register dst, Register src); |
never@739 | 932 | void cmpq(Register dst, Address src); |
never@739 | 933 | |
never@739 | 934 | // these are dummies used to catch attempting to convert NULL to Register |
never@739 | 935 | void cmpl(Register dst, void* junk); // dummy |
never@739 | 936 | void cmpq(Register dst, void* junk); // dummy |
never@739 | 937 | |
never@739 | 938 | void cmpw(Address dst, int imm16); |
never@739 | 939 | |
never@739 | 940 | void cmpxchg8 (Address adr); |
never@739 | 941 | |
never@739 | 942 | void cmpxchgl(Register reg, Address adr); |
never@739 | 943 | |
never@739 | 944 | void cmpxchgq(Register reg, Address adr); |
never@739 | 945 | |
never@739 | 946 | // Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS |
never@739 | 947 | void comisd(XMMRegister dst, Address src); |
kvn@3388 | 948 | void comisd(XMMRegister dst, XMMRegister src); |
never@739 | 949 | |
never@739 | 950 | // Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS |
never@739 | 951 | void comiss(XMMRegister dst, Address src); |
kvn@3388 | 952 | void comiss(XMMRegister dst, XMMRegister src); |
never@739 | 953 | |
never@739 | 954 | // Identify processor type and features |
never@739 | 955 | void cpuid() {
never@739 | 956 | emit_byte(0x0F); // CPUID is encoded as 0F A2
never@739 | 957 | emit_byte(0xA2);
never@739 | 958 | }
never@739 | 959 | |
never@739 | 960 | // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value |
never@739 | 961 | void cvtsd2ss(XMMRegister dst, XMMRegister src); |
kvn@3388 | 962 | void cvtsd2ss(XMMRegister dst, Address src); |
never@739 | 963 | |
never@739 | 964 | // Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value |
never@739 | 965 | void cvtsi2sdl(XMMRegister dst, Register src); |
kvn@3388 | 966 | void cvtsi2sdl(XMMRegister dst, Address src); |
never@739 | 967 | void cvtsi2sdq(XMMRegister dst, Register src); |
kvn@3388 | 968 | void cvtsi2sdq(XMMRegister dst, Address src); |
never@739 | 969 | |
never@739 | 970 | // Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value |
never@739 | 971 | void cvtsi2ssl(XMMRegister dst, Register src); |
kvn@3388 | 972 | void cvtsi2ssl(XMMRegister dst, Address src); |
never@739 | 973 | void cvtsi2ssq(XMMRegister dst, Register src); |
kvn@3388 | 974 | void cvtsi2ssq(XMMRegister dst, Address src); |
never@739 | 975 | |
never@739 | 976 | // Convert Packed Signed Doubleword Integers to Packed Double-Precision Floating-Point Value |
never@739 | 977 | void cvtdq2pd(XMMRegister dst, XMMRegister src); |
never@739 | 978 | |
never@739 | 979 | // Convert Packed Signed Doubleword Integers to Packed Single-Precision Floating-Point Value |
never@739 | 980 | void cvtdq2ps(XMMRegister dst, XMMRegister src); |
never@739 | 981 | |
never@739 | 982 | // Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value |
never@739 | 983 | void cvtss2sd(XMMRegister dst, XMMRegister src); |
kvn@3388 | 984 | void cvtss2sd(XMMRegister dst, Address src); |
never@739 | 985 | |
never@739 | 986 | // Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer |
never@739 | 987 | void cvttsd2sil(Register dst, Address src); |
never@739 | 988 | void cvttsd2sil(Register dst, XMMRegister src); |
never@739 | 989 | void cvttsd2siq(Register dst, XMMRegister src); |
never@739 | 990 | |
never@739 | 991 | // Convert with Truncation Scalar Single-Precision Floating-Point Value to Doubleword Integer |
never@739 | 992 | void cvttss2sil(Register dst, XMMRegister src); |
never@739 | 993 | void cvttss2siq(Register dst, XMMRegister src); |
never@739 | 994 | |
never@739 | 995 | // Divide Scalar Double-Precision Floating-Point Values |
never@739 | 996 | void divsd(XMMRegister dst, Address src); |
never@739 | 997 | void divsd(XMMRegister dst, XMMRegister src); |
never@739 | 998 | |
never@739 | 999 | // Divide Scalar Single-Precision Floating-Point Values |
never@739 | 1000 | void divss(XMMRegister dst, Address src); |
never@739 | 1001 | void divss(XMMRegister dst, XMMRegister src); |
never@739 | 1002 | |
never@739 | 1003 | void emms(); |
never@739 | 1004 | |
never@739 | 1005 | void fabs(); |
never@739 | 1006 | |
never@739 | 1007 | void fadd(int i); |
never@739 | 1008 | |
never@739 | 1009 | void fadd_d(Address src); |
never@739 | 1010 | void fadd_s(Address src); |
never@739 | 1011 | |
never@739 | 1012 | // "Alternate" versions of x87 instructions place result down in FPU |
never@739 | 1013 | // stack instead of on TOS |
never@739 | 1014 | |
never@739 | 1015 | void fadda(int i); // "alternate" fadd |
never@739 | 1016 | void faddp(int i = 1); |
never@739 | 1017 | |
never@739 | 1018 | void fchs(); |
never@739 | 1019 | |
never@739 | 1020 | void fcom(int i); |
never@739 | 1021 | |
never@739 | 1022 | void fcomp(int i = 1); |
never@739 | 1023 | void fcomp_d(Address src); |
never@739 | 1024 | void fcomp_s(Address src); |
never@739 | 1025 | |
never@739 | 1026 | void fcompp(); |
never@739 | 1027 | |
never@739 | 1028 | void fcos(); |
never@739 | 1029 | |
never@739 | 1030 | void fdecstp(); |
never@739 | 1031 | |
never@739 | 1032 | void fdiv(int i); |
never@739 | 1033 | void fdiv_d(Address src); |
never@739 | 1034 | void fdivr_s(Address src); |
never@739 | 1035 | void fdiva(int i); // "alternate" fdiv |
never@739 | 1036 | void fdivp(int i = 1); |
never@739 | 1037 | |
never@739 | 1038 | void fdivr(int i); |
never@739 | 1039 | void fdivr_d(Address src); |
never@739 | 1040 | void fdiv_s(Address src); |
never@739 | 1041 | |
never@739 | 1042 | void fdivra(int i); // "alternate" reversed fdiv |
never@739 | 1043 | |
never@739 | 1044 | void fdivrp(int i = 1); |
never@739 | 1045 | |
never@739 | 1046 | void ffree(int i = 0); |
never@739 | 1047 | |
never@739 | 1048 | void fild_d(Address adr); |
never@739 | 1049 | void fild_s(Address adr); |
never@739 | 1050 | |
never@739 | 1051 | void fincstp(); |
never@739 | 1052 | |
never@739 | 1053 | void finit(); |
never@739 | 1054 | |
never@739 | 1055 | void fist_s (Address adr); |
never@739 | 1056 | void fistp_d(Address adr); |
never@739 | 1057 | void fistp_s(Address adr); |
never@739 | 1058 | |
never@739 | 1059 | void fld1(); |
never@739 | 1060 | |
never@739 | 1061 | void fld_d(Address adr); |
never@739 | 1062 | void fld_s(Address adr); |
never@739 | 1063 | void fld_s(int index); |
never@739 | 1064 | void fld_x(Address adr); // extended-precision (80-bit) format |
never@739 | 1065 | |
never@739 | 1066 | void fldcw(Address src); |
never@739 | 1067 | |
never@739 | 1068 | void fldenv(Address src); |
never@739 | 1069 | |
never@739 | 1070 | void fldlg2(); |
never@739 | 1071 | |
never@739 | 1072 | void fldln2(); |
never@739 | 1073 | |
never@739 | 1074 | void fldz(); |
never@739 | 1075 | |
never@739 | 1076 | void flog(); |
never@739 | 1077 | void flog10(); |
never@739 | 1078 | |
never@739 | 1079 | void fmul(int i); |
never@739 | 1080 | |
never@739 | 1081 | void fmul_d(Address src); |
never@739 | 1082 | void fmul_s(Address src); |
never@739 | 1083 | |
never@739 | 1084 | void fmula(int i); // "alternate" fmul |
never@739 | 1085 | |
never@739 | 1086 | void fmulp(int i = 1); |
never@739 | 1087 | |
never@739 | 1088 | void fnsave(Address dst); |
never@739 | 1089 | |
never@739 | 1090 | void fnstcw(Address src); |
never@739 | 1091 | |
never@739 | 1092 | void fnstsw_ax(); |
never@739 | 1093 | |
never@739 | 1094 | void fprem(); |
never@739 | 1095 | void fprem1(); |
never@739 | 1096 | |
never@739 | 1097 | void frstor(Address src); |
never@739 | 1098 | |
never@739 | 1099 | void fsin(); |
never@739 | 1100 | |
never@739 | 1101 | void fsqrt(); |
never@739 | 1102 | |
never@739 | 1103 | void fst_d(Address adr); |
never@739 | 1104 | void fst_s(Address adr); |
never@739 | 1105 | |
never@739 | 1106 | void fstp_d(Address adr); |
never@739 | 1107 | void fstp_d(int index); |
never@739 | 1108 | void fstp_s(Address adr); |
never@739 | 1109 | void fstp_x(Address adr); // extended-precision (80-bit) format |
never@739 | 1110 | |
never@739 | 1111 | void fsub(int i); |
never@739 | 1112 | void fsub_d(Address src); |
never@739 | 1113 | void fsub_s(Address src); |
never@739 | 1114 | |
never@739 | 1115 | void fsuba(int i); // "alternate" fsub |
never@739 | 1116 | |
never@739 | 1117 | void fsubp(int i = 1); |
never@739 | 1118 | |
never@739 | 1119 | void fsubr(int i); |
never@739 | 1120 | void fsubr_d(Address src); |
never@739 | 1121 | void fsubr_s(Address src); |
never@739 | 1122 | |
never@739 | 1123 | void fsubra(int i); // "alternate" reversed fsub |
never@739 | 1124 | |
never@739 | 1125 | void fsubrp(int i = 1); |
never@739 | 1126 | |
never@739 | 1127 | void ftan(); |
never@739 | 1128 | |
never@739 | 1129 | void ftst(); |
never@739 | 1130 | |
never@739 | 1131 | void fucomi(int i = 1); |
never@739 | 1132 | void fucomip(int i = 1); |
never@739 | 1133 | |
never@739 | 1134 | void fwait(); |
never@739 | 1135 | |
never@739 | 1136 | void fxch(int i = 1); |
never@739 | 1137 | |
never@739 | 1138 | void fxrstor(Address src); |
never@739 | 1139 | |
never@739 | 1140 | void fxsave(Address dst); |
never@739 | 1141 | |
never@739 | 1142 | void fyl2x(); |
roland@3787 | 1143 | void frndint(); |
roland@3787 | 1144 | void f2xm1(); |
roland@3787 | 1145 | void fldl2e(); |
never@739 | 1146 | |
never@739 | 1147 | void hlt(); |
never@739 | 1148 | |
never@739 | 1149 | void idivl(Register src); |
kvn@2275 | 1150 | void divl(Register src); // Unsigned division |
never@739 | 1151 | |
never@739 | 1152 | void idivq(Register src); |
never@739 | 1153 | |
never@739 | 1154 | void imull(Register dst, Register src); |
never@739 | 1155 | void imull(Register dst, Register src, int value); |
never@739 | 1156 | |
never@739 | 1157 | void imulq(Register dst, Register src); |
never@739 | 1158 | void imulq(Register dst, Register src, int value); |
never@739 | 1159 | |
duke@435 | 1160 | |
duke@435 | 1161 | // jcc is the generic conditional branch generator to run- |
duke@435 | 1162 | // time routines, jcc is used for branches to labels. jcc |
duke@435 | 1163 | // takes a branch opcode (cc) and a label (L) and generates |
duke@435 | 1164 | // either a backward branch or a forward branch and links it |
duke@435 | 1165 | // to the label fixup chain. Usage: |
duke@435 | 1166 | // |
duke@435 | 1167 | // Label L; // unbound label |
duke@435 | 1168 | // jcc(cc, L); // forward branch to unbound label |
duke@435 | 1169 | // bind(L); // bind label to the current pc |
duke@435 | 1170 | // jcc(cc, L); // backward branch to bound label |
duke@435 | 1171 | // bind(L); // illegal: a label may be bound only once |
duke@435 | 1172 | // |
duke@435 | 1173 | // Note: The same Label can be used for forward and backward branches |
duke@435 | 1174 | // but it may be bound only once. |
duke@435 | 1175 | |
kvn@3049 | 1176 | void jcc(Condition cc, Label& L, bool maybe_short = true); |
duke@435 | 1177 | |
duke@435 | 1178 | // Conditional jump to a 8-bit offset to L. |
duke@435 | 1179 | // WARNING: be very careful using this for forward jumps. If the label is |
duke@435 | 1180 | // not bound within an 8-bit offset of this instruction, a run-time error |
duke@435 | 1181 | // will occur. |
duke@435 | 1182 | void jccb(Condition cc, Label& L); |
duke@435 | 1183 | |
never@739 | 1184 | void jmp(Address entry); // pc <- entry |
never@739 | 1185 | |
never@739 | 1186 | // Label operations & relative jumps (PPUM Appendix D) |
kvn@3049 | 1187 | void jmp(Label& L, bool maybe_short = true); // unconditional jump to L |
never@739 | 1188 | |
never@739 | 1189 | void jmp(Register entry); // pc <- entry |
never@739 | 1190 | |
never@739 | 1191 | // Unconditional 8-bit offset jump to L. |
never@739 | 1192 | // WARNING: be very careful using this for forward jumps. If the label is |
never@739 | 1193 | // not bound within an 8-bit offset of this instruction, a run-time error |
never@739 | 1194 | // will occur. |
never@739 | 1195 | void jmpb(Label& L); |
never@739 | 1196 | |
never@739 | 1197 | void ldmxcsr( Address src ); |
never@739 | 1198 | |
never@739 | 1199 | void leal(Register dst, Address src); |
never@739 | 1200 | |
never@739 | 1201 | void leaq(Register dst, Address src); |
never@739 | 1202 | |
// LFENCE (load fence), encoded as 0F AE E8.
never@739 | 1203 | void lfence() {
never@739 | 1204 | emit_byte(0x0F);
never@739 | 1205 | emit_byte(0xAE);
never@739 | 1206 | emit_byte(0xE8);
never@739 | 1207 | }
never@739 | 1208 | |
never@739 | 1209 | void lock(); |
never@739 | 1210 | |
twisti@1210 | 1211 | void lzcntl(Register dst, Register src); |
twisti@1210 | 1212 | |
twisti@1210 | 1213 | #ifdef _LP64 |
twisti@1210 | 1214 | void lzcntq(Register dst, Register src); |
twisti@1210 | 1215 | #endif |
twisti@1210 | 1216 | |
// Ordering constraints for membar(); individual bits may be OR'ed together
// to request several constraints in one call.
never@739 | 1217 | enum Membar_mask_bits {
never@739 | 1218 | StoreStore = 1 << 3,
never@739 | 1219 | LoadStore = 1 << 2,
never@739 | 1220 | StoreLoad = 1 << 1,
never@739 | 1221 | LoadLoad = 1 << 0
never@739 | 1222 | };
never@739 | 1223 | |
never@1106 | 1224 | // Serializes memory and blows flags |
// Emit a memory barrier for the given (possibly OR'ed) ordering constraints.
// Emits nothing on uniprocessors; on MP only a StoreLoad constraint requires
// an actual instruction here.
never@739 | 1225 | void membar(Membar_mask_bits order_constraint) {
never@1106 | 1226 | if (os::is_MP()) {
never@1106 | 1227 | // We only have to handle StoreLoad
never@1106 | 1228 | if (order_constraint & StoreLoad) {
never@1106 | 1229 | // All usable chips support "locked" instructions which suffice
never@1106 | 1230 | // as barriers, and are much faster than the alternative of
never@1106 | 1231 | // using cpuid instruction. We use here a locked add [esp],0.
never@1106 | 1232 | // This is conveniently otherwise a no-op except for blowing
never@1106 | 1233 | // flags.
never@1106 | 1234 | // Any change to this code may need to revisit other places in
never@1106 | 1235 | // the code where this idiom is used, in particular the
never@1106 | 1236 | // orderAccess code.
never@1106 | 1237 | lock();
never@1106 | 1238 | addl(Address(rsp, 0), 0);// Assert the lock# signal here
never@1106 | 1239 | }
never@1106 | 1240 | }
never@739 | 1241 | }
never@739 | 1242 | |
never@739 | 1243 | void mfence(); |
never@739 | 1244 | |
never@739 | 1245 | // Moves |
never@739 | 1246 | |
never@739 | 1247 | void mov64(Register dst, int64_t imm64); |
never@739 | 1248 | |
never@739 | 1249 | void movb(Address dst, Register src); |
never@739 | 1250 | void movb(Address dst, int imm8); |
never@739 | 1251 | void movb(Register dst, Address src); |
never@739 | 1252 | |
never@739 | 1253 | void movdl(XMMRegister dst, Register src); |
never@739 | 1254 | void movdl(Register dst, XMMRegister src); |
kvn@2602 | 1255 | void movdl(XMMRegister dst, Address src); |
kvn@3882 | 1256 | void movdl(Address dst, XMMRegister src); |
never@739 | 1257 | |
never@739 | 1258 | // Move Double Quadword |
never@739 | 1259 | void movdq(XMMRegister dst, Register src); |
never@739 | 1260 | void movdq(Register dst, XMMRegister src); |
never@739 | 1261 | |
never@739 | 1262 | // Move Aligned Double Quadword |
never@739 | 1263 | void movdqa(XMMRegister dst, XMMRegister src); |
never@739 | 1264 | |
kvn@840 | 1265 | // Move Unaligned Double Quadword |
// Move Unaligned 128bit Vector (SSE2 MOVDQU)
kvn@840 | 1266 | void movdqu(Address dst, XMMRegister src); |
kvn@840 | 1267 | void movdqu(XMMRegister dst, Address src); |
kvn@840 | 1268 | void movdqu(XMMRegister dst, XMMRegister src); |
kvn@840 | 1269 | |
kvn@3882 | 1270 | // Move Unaligned 256bit Vector |
kvn@3882 | 1271 | void vmovdqu(Address dst, XMMRegister src); |
kvn@3882 | 1272 | void vmovdqu(XMMRegister dst, Address src); |
kvn@3882 | 1273 | void vmovdqu(XMMRegister dst, XMMRegister src); |
kvn@3882 | 1274 | |
kvn@3882 | 1275 | // Move lower 64bit to high 64bit in 128bit register |
kvn@3882 | 1276 | void movlhps(XMMRegister dst, XMMRegister src); |
kvn@3882 | 1277 | |
// Move doubleword (32bit): immediate, register and memory forms
never@739 | 1278 | void movl(Register dst, int32_t imm32); |
never@739 | 1279 | void movl(Address dst, int32_t imm32); |
never@739 | 1280 | void movl(Register dst, Register src); |
never@739 | 1281 | void movl(Register dst, Address src); |
never@739 | 1282 | void movl(Address dst, Register src); |
never@739 | 1283 | |
never@739 | 1284 | // These dummies prevent using movl from converting a zero (like NULL) into Register |
never@739 | 1285 | // by giving the compiler two choices it can't resolve |
never@739 | 1286 | |
never@739 | 1287 | void movl(Address dst, void* junk); |
never@739 | 1288 | void movl(Register dst, void* junk); |
never@739 | 1289 | |
never@739 | 1290 | #ifdef _LP64 |
never@739 | 1291 | void movq(Register dst, Register src); |
never@739 | 1292 | void movq(Register dst, Address src); |
phh@2423 | 1293 | void movq(Address dst, Register src); |
never@739 | 1294 | #endif |
never@739 | 1295 | |
// Move Quadword to/from MMX register
never@739 | 1296 | void movq(Address dst, MMXRegister src ); |
never@739 | 1297 | void movq(MMXRegister dst, Address src ); |
never@739 | 1298 | |
never@739 | 1299 | #ifdef _LP64 |
never@739 | 1300 | // These dummies prevent using movq from converting a zero (like NULL) into Register |
never@739 | 1301 | // by giving the compiler two choices it can't resolve |
never@739 | 1302 | |
never@739 | 1303 | void movq(Address dst, void* dummy); |
never@739 | 1304 | void movq(Register dst, void* dummy); |
never@739 | 1305 | #endif |
never@739 | 1306 | |
never@739 | 1307 | // Move Quadword |
never@739 | 1308 | void movq(Address dst, XMMRegister src); |
never@739 | 1309 | void movq(XMMRegister dst, Address src); |
never@739 | 1310 | |
// Move byte to doubleword with sign extension (MOVSX)
never@739 | 1311 | void movsbl(Register dst, Address src); |
never@739 | 1312 | void movsbl(Register dst, Register src); |
never@739 | 1313 | |
never@739 | 1314 | #ifdef _LP64 |
twisti@1059 | 1315 | void movsbq(Register dst, Address src); |
twisti@1059 | 1316 | void movsbq(Register dst, Register src); |
twisti@1059 | 1317 | |
never@739 | 1318 | // Move signed 32bit immediate to 64bit extending sign |
phh@2423 | 1319 | void movslq(Address dst, int32_t imm64); |
never@739 | 1320 | void movslq(Register dst, int32_t imm64); |
never@739 | 1321 | |
never@739 | 1322 | void movslq(Register dst, Address src); |
never@739 | 1323 | void movslq(Register dst, Register src); |
never@739 | 1324 | void movslq(Register dst, void* src); // Dummy declaration to cause NULL to be ambiguous |
never@739 | 1325 | #endif |
never@739 | 1326 | |
// Move word to doubleword with sign extension
never@739 | 1327 | void movswl(Register dst, Address src); |
never@739 | 1328 | void movswl(Register dst, Register src); |
never@739 | 1329 | |
twisti@1059 | 1330 | #ifdef _LP64 |
twisti@1059 | 1331 | void movswq(Register dst, Address src); |
twisti@1059 | 1332 | void movswq(Register dst, Register src); |
twisti@1059 | 1333 | #endif |
twisti@1059 | 1334 | |
// Move word (16bit)
never@739 | 1335 | void movw(Address dst, int imm16); |
never@739 | 1336 | void movw(Register dst, Address src); |
never@739 | 1337 | void movw(Address dst, Register src); |
never@739 | 1338 | |
// Move byte to doubleword with zero extension (MOVZX)
never@739 | 1339 | void movzbl(Register dst, Address src); |
never@739 | 1340 | void movzbl(Register dst, Register src); |
never@739 | 1341 | |
twisti@1059 | 1342 | #ifdef _LP64 |
twisti@1059 | 1343 | void movzbq(Register dst, Address src); |
twisti@1059 | 1344 | void movzbq(Register dst, Register src); |
twisti@1059 | 1345 | #endif |
twisti@1059 | 1346 | |
// Move word to doubleword with zero extension
never@739 | 1347 | void movzwl(Register dst, Address src); |
never@739 | 1348 | void movzwl(Register dst, Register src); |
never@739 | 1349 | |
twisti@1059 | 1350 | #ifdef _LP64 |
twisti@1059 | 1351 | void movzwq(Register dst, Address src); |
twisti@1059 | 1352 | void movzwq(Register dst, Register src); |
twisti@1059 | 1353 | #endif |
twisti@1059 | 1354 | |
// Unsigned multiply: EDX:EAX = EAX * src (x86 MUL)
never@739 | 1355 | void mull(Address src); |
never@739 | 1356 | void mull(Register src); |
never@739 | 1357 | |
never@739 | 1358 | // Multiply Scalar Double-Precision Floating-Point Values |
never@739 | 1359 | void mulsd(XMMRegister dst, Address src); |
never@739 | 1360 | void mulsd(XMMRegister dst, XMMRegister src); |
never@739 | 1361 | |
never@739 | 1362 | // Multiply Scalar Single-Precision Floating-Point Values |
never@739 | 1363 | void mulss(XMMRegister dst, Address src); |
never@739 | 1364 | void mulss(XMMRegister dst, XMMRegister src); |
never@739 | 1365 | |
// Two's complement negation (NEG)
never@739 | 1366 | void negl(Register dst); |
never@739 | 1367 | |
never@739 | 1368 | #ifdef _LP64 |
never@739 | 1369 | void negq(Register dst); |
never@739 | 1370 | #endif |
never@739 | 1371 | |
// No-op padding; 'i' is a repeat count (presumably bytes of padding — see assembler_x86.cpp)
never@739 | 1372 | void nop(int i = 1); |
never@739 | 1373 | |
// One's complement negation (NOT)
never@739 | 1374 | void notl(Register dst); |
never@739 | 1375 | |
never@739 | 1376 | #ifdef _LP64 |
never@739 | 1377 | void notq(Register dst); |
never@739 | 1378 | #endif |
never@739 | 1379 | |
// Logical inclusive OR
never@739 | 1380 | void orl(Address dst, int32_t imm32); |
never@739 | 1381 | void orl(Register dst, int32_t imm32); |
never@739 | 1382 | void orl(Register dst, Address src); |
never@739 | 1383 | void orl(Register dst, Register src); |
never@739 | 1384 | |
never@739 | 1385 | void orq(Address dst, int32_t imm32); |
never@739 | 1386 | void orq(Register dst, int32_t imm32); |
never@739 | 1387 | void orq(Register dst, Address src); |
never@739 | 1388 | void orq(Register dst, Register src); |
never@739 | 1389 | |
kvn@3388 | 1390 | // Pack with unsigned saturation |
kvn@3388 | 1391 | void packuswb(XMMRegister dst, XMMRegister src); |
kvn@3388 | 1392 | void packuswb(XMMRegister dst, Address src); |
kvn@3388 | 1393 | |
cfang@1116 | 1394 | // SSE4.2 string instructions |
cfang@1116 | 1395 | void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8); |
cfang@1116 | 1396 | void pcmpestri(XMMRegister xmm1, Address src, int imm8); |
cfang@1116 | 1397 | |
kvn@3388 | 1398 | // SSE4.1 packed move |
kvn@3388 | 1399 | void pmovzxbw(XMMRegister dst, XMMRegister src); |
kvn@3388 | 1400 | void pmovzxbw(XMMRegister dst, Address src); |
kvn@3388 | 1401 | |
roland@1495 | 1402 | #ifndef _LP64 // no 32bit push/pop on amd64 |
roland@1495 | 1403 | void popl(Address dst); |
roland@1495 | 1404 | #endif |
never@739 | 1405 | |
never@739 | 1406 | #ifdef _LP64 |
never@739 | 1407 | void popq(Address dst); |
never@739 | 1408 | #endif |
never@739 | 1409 | |
// Bit population count (POPCNT)
twisti@1078 | 1410 | void popcntl(Register dst, Address src); |
twisti@1078 | 1411 | void popcntl(Register dst, Register src); |
twisti@1078 | 1412 | |
twisti@1078 | 1413 | #ifdef _LP64 |
twisti@1078 | 1414 | void popcntq(Register dst, Address src); |
twisti@1078 | 1415 | void popcntq(Register dst, Register src); |
twisti@1078 | 1416 | #endif |
twisti@1078 | 1417 | |
never@739 | 1418 | // Prefetches (SSE, SSE2, 3DNOW only) |
never@739 | 1419 | |
never@739 | 1420 | void prefetchnta(Address src); |
never@739 | 1421 | void prefetchr(Address src); |
never@739 | 1422 | void prefetcht0(Address src); |
never@739 | 1423 | void prefetcht1(Address src); |
never@739 | 1424 | void prefetcht2(Address src); |
never@739 | 1425 | void prefetchw(Address src); |
never@739 | 1426 | |
never@739 | 1427 | // Shuffle Packed Doublewords |
never@739 | 1428 | void pshufd(XMMRegister dst, XMMRegister src, int mode); |
never@739 | 1429 | void pshufd(XMMRegister dst, Address src, int mode); |
never@739 | 1430 | |
never@739 | 1431 | // Shuffle Packed Low Words |
never@739 | 1432 | void pshuflw(XMMRegister dst, XMMRegister src, int mode); |
never@739 | 1433 | void pshuflw(XMMRegister dst, Address src, int mode); |
never@739 | 1434 | |
kvn@2602 | 1435 | // Shift Right by bytes Logical DoubleQuadword Immediate |
kvn@2602 | 1436 | void psrldq(XMMRegister dst, int shift); |
kvn@2602 | 1437 | |
cfang@1116 | 1438 | // Logical Compare Double Quadword |
cfang@1116 | 1439 | void ptest(XMMRegister dst, XMMRegister src); |
cfang@1116 | 1440 | void ptest(XMMRegister dst, Address src); |
cfang@1116 | 1441 | |
never@739 | 1442 | // Interleave Low Bytes |
never@739 | 1443 | void punpcklbw(XMMRegister dst, XMMRegister src); |
kvn@3388 | 1444 | void punpcklbw(XMMRegister dst, Address src); |
kvn@3388 | 1445 | |
kvn@3388 | 1446 | // Interleave Low Doublewords |
kvn@3388 | 1447 | void punpckldq(XMMRegister dst, XMMRegister src); |
kvn@3388 | 1448 | void punpckldq(XMMRegister dst, Address src); |
never@739 | 1449 | |
kvn@3929 | 1450 | // Interleave Low Quadwords |
kvn@3929 | 1451 | void punpcklqdq(XMMRegister dst, XMMRegister src); |
kvn@3929 | 1452 | |
roland@1495 | 1453 | #ifndef _LP64 // no 32bit push/pop on amd64 |
never@739 | 1454 | void pushl(Address src); |
roland@1495 | 1455 | #endif |
never@739 | 1456 | |
never@739 | 1457 | void pushq(Address src); |
never@739 | 1458 | |
// Rotate left through carry (RCL)
never@739 | 1459 | void rcll(Register dst, int imm8); |
never@739 | 1460 | |
never@739 | 1461 | void rclq(Register dst, int imm8); |
never@739 | 1462 | |
// Return, popping imm16 additional bytes of arguments (RET imm16)
never@739 | 1463 | void ret(int imm16); |
duke@435 | 1464 | |
// Store AH into the low byte of EFLAGS (SAHF)
duke@435 | 1465 | void sahf(); |
duke@435 | 1466 | |
// Arithmetic (sign-propagating) shift right; the one-operand forms shift by CL
never@739 | 1467 | void sarl(Register dst, int imm8); |
never@739 | 1468 | void sarl(Register dst); |
never@739 | 1469 | |
never@739 | 1470 | void sarq(Register dst, int imm8); |
never@739 | 1471 | void sarq(Register dst); |
never@739 | 1472 | |
// Subtract with borrow (SBB)
never@739 | 1473 | void sbbl(Address dst, int32_t imm32); |
never@739 | 1474 | void sbbl(Register dst, int32_t imm32); |
never@739 | 1475 | void sbbl(Register dst, Address src); |
never@739 | 1476 | void sbbl(Register dst, Register src); |
never@739 | 1477 | |
never@739 | 1478 | void sbbq(Address dst, int32_t imm32); |
never@739 | 1479 | void sbbq(Register dst, int32_t imm32); |
never@739 | 1480 | void sbbq(Register dst, Address src); |
never@739 | 1481 | void sbbq(Register dst, Register src); |
never@739 | 1482 | |
// Set byte to 1 if condition cc holds, else 0 (SETcc)
never@739 | 1483 | void setb(Condition cc, Register dst); |
never@739 | 1484 | |
// Double-precision shift left (SHLD); shift count in CL
never@739 | 1485 | void shldl(Register dst, Register src); |
never@739 | 1486 | |
// Logical shift left; one-operand forms shift by CL
never@739 | 1487 | void shll(Register dst, int imm8); |
never@739 | 1488 | void shll(Register dst); |
never@739 | 1489 | |
never@739 | 1490 | void shlq(Register dst, int imm8); |
never@739 | 1491 | void shlq(Register dst); |
never@739 | 1492 | |
// Double-precision shift right (SHRD); shift count in CL
never@739 | 1493 | void shrdl(Register dst, Register src); |
never@739 | 1494 | |
// Logical shift right; one-operand forms shift by CL
never@739 | 1495 | void shrl(Register dst, int imm8); |
never@739 | 1496 | void shrl(Register dst); |
never@739 | 1497 | |
never@739 | 1498 | void shrq(Register dst, int imm8); |
never@739 | 1499 | void shrq(Register dst); |
never@739 | 1500 | |
never@739 | 1501 | void smovl(); // QQQ generic? |
never@739 | 1502 | |
never@739 | 1503 | // Compute Square Root of Scalar Double-Precision Floating-Point Value |
never@739 | 1504 | void sqrtsd(XMMRegister dst, Address src); |
never@739 | 1505 | void sqrtsd(XMMRegister dst, XMMRegister src); |
never@739 | 1506 | |
twisti@2350 | 1507 | // Compute Square Root of Scalar Single-Precision Floating-Point Value |
twisti@2350 | 1508 | void sqrtss(XMMRegister dst, Address src); |
twisti@2350 | 1509 | void sqrtss(XMMRegister dst, XMMRegister src); |
twisti@2350 | 1510 | |
// Set direction flag (DF = 1); 0xFD is the one-byte STD opcode
never@739 | 1511 | void std() { emit_byte(0xfd); } |
never@739 | 1512 | |
// Store SSE control/status register MXCSR to memory
never@739 | 1513 | void stmxcsr( Address dst ); |
never@739 | 1514 | |
never@739 | 1515 | void subl(Address dst, int32_t imm32); |
never@739 | 1516 | void subl(Address dst, Register src); |
never@739 | 1517 | void subl(Register dst, int32_t imm32); |
never@739 | 1518 | void subl(Register dst, Address src); |
never@739 | 1519 | void subl(Register dst, Register src); |
never@739 | 1520 | |
never@739 | 1521 | void subq(Address dst, int32_t imm32); |
never@739 | 1522 | void subq(Address dst, Register src); |
never@739 | 1523 | void subq(Register dst, int32_t imm32); |
never@739 | 1524 | void subq(Register dst, Address src); |
never@739 | 1525 | void subq(Register dst, Register src); |
never@739 | 1526 | |
kvn@3574 | 1527 | // Force generation of a 4 byte immediate value even if it fits into 8bit |
kvn@3574 | 1528 | void subl_imm32(Register dst, int32_t imm32); |
kvn@3574 | 1529 | void subq_imm32(Register dst, int32_t imm32); |
never@739 | 1530 | |
never@739 | 1531 | // Subtract Scalar Double-Precision Floating-Point Values |
never@739 | 1532 | void subsd(XMMRegister dst, Address src); |
never@739 | 1533 | void subsd(XMMRegister dst, XMMRegister src); |
never@739 | 1534 | |
never@739 | 1535 | // Subtract Scalar Single-Precision Floating-Point Values |
never@739 | 1536 | void subss(XMMRegister dst, Address src); |
duke@435 | 1537 | void subss(XMMRegister dst, XMMRegister src); |
never@739 | 1538 | |
// Logical compare: AND that only updates flags (TEST)
never@739 | 1539 | void testb(Register dst, int imm8); |
never@739 | 1540 | |
never@739 | 1541 | void testl(Register dst, int32_t imm32); |
never@739 | 1542 | void testl(Register dst, Register src); |
never@739 | 1543 | void testl(Register dst, Address src); |
never@739 | 1544 | |
never@739 | 1545 | void testq(Register dst, int32_t imm32); |
never@739 | 1546 | void testq(Register dst, Register src); |
never@739 | 1547 | |
never@739 | 1548 | |
never@739 | 1549 | // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS |
never@739 | 1550 | void ucomisd(XMMRegister dst, Address src); |
never@739 | 1551 | void ucomisd(XMMRegister dst, XMMRegister src); |
never@739 | 1552 | |
never@739 | 1553 | // Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS |
never@739 | 1554 | void ucomiss(XMMRegister dst, Address src); |
duke@435 | 1555 | void ucomiss(XMMRegister dst, XMMRegister src); |
never@739 | 1556 | |
// Exchange and add (XADD)
never@739 | 1557 | void xaddl(Address dst, Register src); |
never@739 | 1558 | |
never@739 | 1559 | void xaddq(Address dst, Register src); |
never@739 | 1560 | |
// Exchange register and register/memory contents (XCHG)
never@739 | 1561 | void xchgl(Register reg, Address adr); |
never@739 | 1562 | void xchgl(Register dst, Register src); |
never@739 | 1563 | |
never@739 | 1564 | void xchgq(Register reg, Address adr); |
never@739 | 1565 | void xchgq(Register dst, Register src); |
never@739 | 1566 | |
kvn@3388 | 1567 | // Get Value of Extended Control Register |
// Encoding 0F 01 D0 is XGETBV: reads the XCR selected by ECX into EDX:EAX
kvn@3388 | 1568 | void xgetbv() { |
kvn@3388 | 1569 | emit_byte(0x0F); |
kvn@3388 | 1570 | emit_byte(0x01); |
kvn@3388 | 1571 | emit_byte(0xD0); |
kvn@3388 | 1572 | } |
kvn@3388 | 1573 | |
// Logical exclusive OR
never@739 | 1574 | void xorl(Register dst, int32_t imm32); |
never@739 | 1575 | void xorl(Register dst, Address src); |
never@739 | 1576 | void xorl(Register dst, Register src); |
never@739 | 1577 | |
never@739 | 1578 | void xorq(Register dst, Address src); |
never@739 | 1579 | void xorq(Register dst, Register src); |
never@739 | 1580 | |
kvn@3388 | 1581 | void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0 |
kvn@3388 | 1582 | |
kvn@3929 | 1583 | // AVX 3-operands scalar instructions (encoded with VEX prefix) |
kvn@4001 | 1584 | |
kvn@3390 | 1585 | void vaddsd(XMMRegister dst, XMMRegister nds, Address src); |
kvn@3390 | 1586 | void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src); |
kvn@3390 | 1587 | void vaddss(XMMRegister dst, XMMRegister nds, Address src); |
kvn@3390 | 1588 | void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src); |
kvn@3390 | 1589 | void vdivsd(XMMRegister dst, XMMRegister nds, Address src); |
kvn@3390 | 1590 | void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src); |
kvn@3390 | 1591 | void vdivss(XMMRegister dst, XMMRegister nds, Address src); |
kvn@3390 | 1592 | void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src); |
kvn@3390 | 1593 | void vmulsd(XMMRegister dst, XMMRegister nds, Address src); |
kvn@3390 | 1594 | void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src); |
kvn@3390 | 1595 | void vmulss(XMMRegister dst, XMMRegister nds, Address src); |
kvn@3390 | 1596 | void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src); |
kvn@3390 | 1597 | void vsubsd(XMMRegister dst, XMMRegister nds, Address src); |
kvn@3390 | 1598 | void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src); |
kvn@3390 | 1599 | void vsubss(XMMRegister dst, XMMRegister nds, Address src); |
kvn@3390 | 1600 | void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src); |
kvn@3929 | 1601 | |
kvn@4001 | 1602 | |
kvn@4001 | 1603 | //====================VECTOR ARITHMETIC===================================== |
kvn@4001 | 1604 | |
// For the v* (VEX-encoded) forms below, 'vector256' selects a 256bit (YMM)
// operation when true and a 128bit (XMM) operation when false.
kvn@4001 | 1605 | // Add Packed Floating-Point Values |
kvn@4001 | 1606 | void addpd(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1607 | void addps(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1608 | void vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1609 | void vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1610 | void vaddpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1611 | void vaddps(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1612 | |
kvn@4001 | 1613 | // Subtract Packed Floating-Point Values |
kvn@4001 | 1614 | void subpd(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1615 | void subps(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1616 | void vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1617 | void vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1618 | void vsubpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1619 | void vsubps(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1620 | |
kvn@4001 | 1621 | // Multiply Packed Floating-Point Values |
kvn@4001 | 1622 | void mulpd(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1623 | void mulps(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1624 | void vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1625 | void vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1626 | void vmulpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1627 | void vmulps(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1628 | |
kvn@4001 | 1629 | // Divide Packed Floating-Point Values |
kvn@4001 | 1630 | void divpd(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1631 | void divps(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1632 | void vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1633 | void vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1634 | void vdivpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1635 | void vdivps(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1636 | |
kvn@4001 | 1637 | // Bitwise Logical AND of Packed Floating-Point Values |
kvn@4001 | 1638 | void andpd(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1639 | void andps(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1640 | void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1641 | void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1642 | void vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1643 | void vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1644 | |
kvn@4001 | 1645 | // Bitwise Logical XOR of Packed Floating-Point Values |
kvn@4001 | 1646 | void xorpd(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1647 | void xorps(XMMRegister dst, XMMRegister src); |
kvn@3882 | 1648 | void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@3882 | 1649 | void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1650 | void vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1651 | void vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1652 | |
kvn@4001 | 1653 | // Add packed integers |
kvn@4001 | 1654 | void paddb(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1655 | void paddw(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1656 | void paddd(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1657 | void paddq(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1658 | void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1659 | void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1660 | void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1661 | void vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1662 | void vpaddb(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1663 | void vpaddw(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1664 | void vpaddd(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1665 | void vpaddq(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1666 | |
kvn@4001 | 1667 | // Sub packed integers |
kvn@4001 | 1668 | void psubb(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1669 | void psubw(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1670 | void psubd(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1671 | void psubq(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1672 | void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1673 | void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1674 | void vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1675 | void vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1676 | void vpsubb(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1677 | void vpsubw(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1678 | void vpsubd(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1679 | void vpsubq(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1680 | |
kvn@4001 | 1681 | // Multiply packed integers (only shorts and ints) |
kvn@4001 | 1682 | void pmullw(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1683 | void pmulld(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1684 | void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1685 | void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1686 | void vpmullw(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1687 | void vpmulld(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1688 | |
kvn@4001 | 1689 | // Shift left packed integers |
// Each shift has an immediate-count form and a form taking the count
// from an XMM register.
kvn@4001 | 1690 | void psllw(XMMRegister dst, int shift); |
kvn@4001 | 1691 | void pslld(XMMRegister dst, int shift); |
kvn@4001 | 1692 | void psllq(XMMRegister dst, int shift); |
kvn@4001 | 1693 | void psllw(XMMRegister dst, XMMRegister shift); |
kvn@4001 | 1694 | void pslld(XMMRegister dst, XMMRegister shift); |
kvn@4001 | 1695 | void psllq(XMMRegister dst, XMMRegister shift); |
kvn@4001 | 1696 | void vpsllw(XMMRegister dst, XMMRegister src, int shift, bool vector256); |
kvn@4001 | 1697 | void vpslld(XMMRegister dst, XMMRegister src, int shift, bool vector256); |
kvn@4001 | 1698 | void vpsllq(XMMRegister dst, XMMRegister src, int shift, bool vector256); |
kvn@4001 | 1699 | void vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256); |
kvn@4001 | 1700 | void vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256); |
kvn@4001 | 1701 | void vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256); |
kvn@4001 | 1702 | |
kvn@4001 | 1703 | // Logical shift right packed integers |
kvn@4001 | 1704 | void psrlw(XMMRegister dst, int shift); |
kvn@4001 | 1705 | void psrld(XMMRegister dst, int shift); |
kvn@4001 | 1706 | void psrlq(XMMRegister dst, int shift); |
kvn@4001 | 1707 | void psrlw(XMMRegister dst, XMMRegister shift); |
kvn@4001 | 1708 | void psrld(XMMRegister dst, XMMRegister shift); |
kvn@4001 | 1709 | void psrlq(XMMRegister dst, XMMRegister shift); |
kvn@4001 | 1710 | void vpsrlw(XMMRegister dst, XMMRegister src, int shift, bool vector256); |
kvn@4001 | 1711 | void vpsrld(XMMRegister dst, XMMRegister src, int shift, bool vector256); |
kvn@4001 | 1712 | void vpsrlq(XMMRegister dst, XMMRegister src, int shift, bool vector256); |
kvn@4001 | 1713 | void vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256); |
kvn@4001 | 1714 | void vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256); |
kvn@4001 | 1715 | void vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256); |
kvn@4001 | 1716 | |
kvn@4001 | 1717 | // Arithmetic shift right packed integers (only shorts and ints, no instructions for longs) |
kvn@4001 | 1718 | void psraw(XMMRegister dst, int shift); |
kvn@4001 | 1719 | void psrad(XMMRegister dst, int shift); |
kvn@4001 | 1720 | void psraw(XMMRegister dst, XMMRegister shift); |
kvn@4001 | 1721 | void psrad(XMMRegister dst, XMMRegister shift); |
kvn@4001 | 1722 | void vpsraw(XMMRegister dst, XMMRegister src, int shift, bool vector256); |
kvn@4001 | 1723 | void vpsrad(XMMRegister dst, XMMRegister src, int shift, bool vector256); |
kvn@4001 | 1724 | void vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256); |
kvn@4001 | 1725 | void vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256); |
kvn@4001 | 1726 | |
kvn@4001 | 1727 | // And packed integers |
kvn@4001 | 1728 | void pand(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1729 | void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1730 | void vpand(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1731 | |
kvn@4001 | 1732 | // Or packed integers |
kvn@4001 | 1733 | void por(XMMRegister dst, XMMRegister src); |
kvn@4001 | 1734 | void vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1735 | void vpor(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1736 | |
kvn@4001 | 1737 | // Xor packed integers |
kvn@4001 | 1738 | void pxor(XMMRegister dst, XMMRegister src); |
kvn@3929 | 1739 | void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); |
kvn@4001 | 1740 | void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256); |
kvn@4001 | 1741 | |
kvn@4001 | 1742 | // Copy low 128bit into high 128bit of YMM registers. |
kvn@3882 | 1743 | void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src); |
kvn@3929 | 1744 | void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src); |
kvn@3882 | 1745 | |
kvn@4103 | 1746 | // Load/store high 128bit of YMM registers which does not destroy other half. |
kvn@4103 | 1747 | void vinsertf128h(XMMRegister dst, Address src); |
kvn@4103 | 1748 | void vinserti128h(XMMRegister dst, Address src); |
kvn@4103 | 1749 | void vextractf128h(Address dst, XMMRegister src); |
kvn@4103 | 1750 | void vextracti128h(Address dst, XMMRegister src); |
kvn@4103 | 1751 | |
kvn@3882 | 1752 | // AVX instruction which is used to clear upper 128 bits of YMM registers and |
kvn@3882 | 1753 | // to avoid transaction penalty between AVX and SSE states. There is no |
kvn@3882 | 1754 | // penalty if legacy SSE instructions are encoded using VEX prefix because |
kvn@3882 | 1755 | // they always clear upper 128 bits. It should be used before calling |
kvn@3882 | 1756 | // runtime code and native libraries. |
kvn@3882 | 1757 | void vzeroupper(); |
kvn@3390 | 1758 | |
kvn@3388 | 1759 | protected: |
kvn@3388 | 1760 | // Next instructions require address alignment 16 bytes SSE mode. |
kvn@3388 | 1761 | // They should be called only from corresponding MacroAssembler instructions. |
kvn@3388 | 1762 | void andpd(XMMRegister dst, Address src); |
kvn@3388 | 1763 | void andps(XMMRegister dst, Address src); |
never@739 | 1764 | void xorpd(XMMRegister dst, Address src); |
never@739 | 1765 | void xorps(XMMRegister dst, Address src); |
kvn@3388 | 1766 | |
kvn@3388 | 1766 | |
duke@435 | 1767 | }; |
duke@435 | 1768 | |
duke@435 | 1769 | |
duke@435 | 1770 | // MacroAssembler extends Assembler by frequently used macros. |
duke@435 | 1771 | // |
duke@435 | 1772 | // Instructions for which a 'better' code sequence exists depending |
duke@435 | 1773 | // on arguments should also go in here. |
duke@435 | 1774 | |
duke@435 | 1775 | class MacroAssembler: public Assembler { |
ysr@777 | 1776 | friend class LIR_Assembler; |
ysr@777 | 1777 | friend class Runtime1; // as_Address() |
johnc@2781 | 1778 | |
duke@435 | 1779 | protected: |
duke@435 | 1780 | |
// Resolve pseudo-addresses into concrete x86 Addresses.
duke@435 | 1781 | Address as_Address(AddressLiteral adr); |
duke@435 | 1782 | Address as_Address(ArrayAddress adr); |
duke@435 | 1783 | |
duke@435 | 1784 | // Support for VM calls |
duke@435 | 1785 | // |
duke@435 | 1786 | // This is the base routine called by the different versions of call_VM_leaf. The interpreter |
duke@435 | 1787 | // may customize this version by overriding it for its purposes (e.g., to save/restore |
duke@435 | 1788 | // additional registers when doing a VM call). |
duke@435 | 1789 | #ifdef CC_INTERP |
duke@435 | 1790 | // c++ interpreter never wants to use interp_masm version of call_VM |
duke@435 | 1791 | #define VIRTUAL |
duke@435 | 1792 | #else |
duke@435 | 1793 | #define VIRTUAL virtual |
duke@435 | 1794 | #endif |
duke@435 | 1795 | |
duke@435 | 1796 | VIRTUAL void call_VM_leaf_base( |
duke@435 | 1797 | address entry_point, // the entry point |
duke@435 | 1798 | int number_of_arguments // the number of arguments to pop after the call |
duke@435 | 1799 | ); |
duke@435 | 1800 | |
duke@435 | 1801 | // This is the base routine called by the different versions of call_VM. The interpreter |
duke@435 | 1802 | // may customize this version by overriding it for its purposes (e.g., to save/restore |
duke@435 | 1803 | // additional registers when doing a VM call). |
duke@435 | 1804 | // |
duke@435 | 1805 | // If no java_thread register is specified (noreg) than rdi will be used instead. call_VM_base |
duke@435 | 1806 | // returns the register which contains the thread upon return. If a thread register has been |
duke@435 | 1807 | // specified, the return value will correspond to that register. If no last_java_sp is specified |
duke@435 | 1808 | // (noreg) than rsp will be used instead. |
duke@435 | 1809 | VIRTUAL void call_VM_base( // returns the register containing the thread upon return |
duke@435 | 1810 | Register oop_result, // where an oop-result ends up if any; use noreg otherwise |
duke@435 | 1811 | Register java_thread, // the thread if computed before ; use noreg otherwise |
duke@435 | 1812 | Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise |
duke@435 | 1813 | address entry_point, // the entry point |
duke@435 | 1814 | int number_of_arguments, // the number of arguments (w/o thread) to pop after the call |
duke@435 | 1815 | bool check_exceptions // whether to check for pending exceptions after return |
duke@435 | 1816 | ); |
duke@435 | 1817 | |
duke@435 | 1818 | // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code. |
duke@435 | 1819 | // The implementation is only non-empty for the InterpreterMacroAssembler, |
duke@435 | 1820 | // as only the interpreter handles PopFrame and ForceEarlyReturn requests. |
duke@435 | 1821 | virtual void check_and_handle_popframe(Register java_thread); |
duke@435 | 1822 | virtual void check_and_handle_earlyret(Register java_thread); |
duke@435 | 1823 | |
duke@435 | 1824 | void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true); |
duke@435 | 1825 | |
duke@435 | 1826 | // helpers for FPU flag access |
duke@435 | 1827 | // tmp is a temporary register, if none is available use noreg |
duke@435 | 1828 | void save_rax (Register tmp); |
duke@435 | 1829 | void restore_rax(Register tmp); |
duke@435 | 1830 | |
duke@435 | 1831 | public: |
// Construct a MacroAssembler that emits into the given CodeBuffer.
duke@435 | 1832 | MacroAssembler(CodeBuffer* code) : Assembler(code) {} |
duke@435 | 1833 | |
duke@435 | 1834 | // Support for NULL-checks |
duke@435 | 1835 | // |
duke@435 | 1836 | // Generates code that causes a NULL OS exception if the content of reg is NULL. |
duke@435 | 1837 | // If the accessed location is M[reg + offset] and the offset is known, provide the |
duke@435 | 1838 | // offset. No explicit code generation is needed if the offset is within a certain |
duke@435 | 1839 | // range (0 <= offset <= page_size). |
duke@435 | 1840 | |
duke@435 | 1841 | void null_check(Register reg, int offset = -1); |
kvn@603 | 1842 | static bool needs_explicit_null_check(intptr_t offset); |
duke@435 | 1843 | |
duke@435 | 1844 | // Required platform-specific helpers for Label::patch_instructions. |
duke@435 | 1845 | // They _shadow_ the declarations in AbstractAssembler, which are undefined. |
duke@435 | 1846 | void pd_patch_instruction(address branch, address target); |
duke@435 | 1847 | #ifndef PRODUCT |
duke@435 | 1848 | static void pd_print_patched_instruction(address branch); |
duke@435 | 1849 | #endif |
duke@435 | 1850 | |
duke@435 | 1851 | // The following 4 methods return the offset of the appropriate move instruction |
duke@435 | 1852 | |
jrose@1057 | 1853 | // Support for fast byte/short loading with zero extension (depending on particular CPU) |
duke@435 | 1854 | int load_unsigned_byte(Register dst, Address src); |
jrose@1057 | 1855 | int load_unsigned_short(Register dst, Address src); |
jrose@1057 | 1856 | |
jrose@1057 | 1857 | // Support for fast byte/short loading with sign extension (depending on particular CPU) |
duke@435 | 1858 | int load_signed_byte(Register dst, Address src); |
jrose@1057 | 1859 | int load_signed_short(Register dst, Address src); |
duke@435 | 1860 | |
duke@435 | 1861 | // Support for sign-extension (hi:lo = extend_sign(lo)) |
duke@435 | 1862 | void extend_sign(Register hi, Register lo); |
duke@435 | 1863 | |
twisti@2565 | 1864 | // Load and store values by size and signed-ness |
twisti@2565 | 1865 | void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg); |
twisti@2565 | 1866 | void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg); |
jrose@1057 | 1867 | |
duke@435 | 1868 | // Support for inc/dec with optimal instruction selection depending on value |
never@739 | 1869 | |
// increment/decrement select the 32bit or 64bit form matching the platform word size
never@739 | 1870 | void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; } |
never@739 | 1871 | void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; } |
never@739 | 1872 | |
never@739 | 1873 | void decrementl(Address dst, int value = 1); |
never@739 | 1874 | void decrementl(Register reg, int value = 1); |
never@739 | 1875 | |
never@739 | 1876 | void decrementq(Register reg, int value = 1); |
never@739 | 1877 | void decrementq(Address dst, int value = 1); |
never@739 | 1878 | |
never@739 | 1879 | void incrementl(Address dst, int value = 1); |
never@739 | 1880 | void incrementl(Register reg, int value = 1); |
never@739 | 1881 | |
never@739 | 1882 | void incrementq(Register reg, int value = 1); |
never@739 | 1883 | void incrementq(Address dst, int value = 1); |
never@739 | 1884 | |
duke@435 | 1885 | |
duke@435 | 1886 | // Support optimal SSE move instructions. |
duke@435 | 1887 | // Single-precision register-to-register move. With UseXmmRegToRegMoveAll |
duke@435 | 1887 | // the whole XMM register is copied (movaps); otherwise only the low |
duke@435 | 1887 | // 32 bits move (movss). |
duke@435 | 1887 | void movflt(XMMRegister dst, XMMRegister src) { |
duke@435 | 1888 |   if (UseXmmRegToRegMoveAll) { |
duke@435 | 1888 |     movaps(dst, src); |
duke@435 | 1889 |   } else { |
duke@435 | 1889 |     movss(dst, src); |
duke@435 | 1889 |   } |
duke@435 | 1890 | } |
duke@435 | 1891 | // Memory forms always use movss (moves exactly 32 bits). |
duke@435 | 1891 | void movflt(XMMRegister dst, Address src) { movss(dst, src); } |
duke@435 | 1892 | void movflt(XMMRegister dst, AddressLiteral src); |
duke@435 | 1893 | void movflt(Address dst, XMMRegister src) { movss(dst, src); } |
duke@435 | 1894 | |
duke@435 | 1895 | // Double-precision register-to-register move. With UseXmmRegToRegMoveAll |
duke@435 | 1895 | // the whole XMM register is copied (movapd); otherwise only the low |
duke@435 | 1895 | // 64 bits move (movsd). |
duke@435 | 1895 | void movdbl(XMMRegister dst, XMMRegister src) { |
duke@435 | 1896 |   if (UseXmmRegToRegMoveAll) { |
duke@435 | 1896 |     movapd(dst, src); |
duke@435 | 1897 |   } else { |
duke@435 | 1897 |     movsd(dst, src); |
duke@435 | 1897 |   } |
duke@435 | 1898 | } |
duke@435 | 1899 | |
duke@435 | 1900 | void movdbl(XMMRegister dst, AddressLiteral src); |
duke@435 | 1901 | |
duke@435 | 1901 | // Double-precision load: movsd from memory also zeroes the upper half |
duke@435 | 1901 | // of dst, movlpd leaves it untouched; which is faster is CPU-dependent, |
duke@435 | 1901 | // hence the UseXmmLoadAndClearUpper switch. |
duke@435 | 1902 | void movdbl(XMMRegister dst, Address src) { |
duke@435 | 1903 | if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; } |
duke@435 | 1904 | else { movlpd(dst, src); return; } |
duke@435 | 1905 | } |
duke@435 | 1906 | void movdbl(Address dst, XMMRegister src) { movsd(dst, src); } |
duke@435 | 1907 | |
never@739 | 1908 | void incrementl(AddressLiteral dst); |
never@739 | 1909 | void incrementl(ArrayAddress dst); |
duke@435 | 1910 | |
duke@435 | 1911 | // Alignment |
duke@435 | 1912 | void align(int modulus); |
duke@435 | 1913 | |
kvn@3574 | 1914 | // A 5 byte nop that is safe for patching (see patch_verified_entry) |
kvn@3574 | 1915 | void fat_nop(); |
duke@435 | 1916 | |
duke@435 | 1917 | // Stack frame creation/removal |
duke@435 | 1918 | void enter(); |
duke@435 | 1919 | void leave(); |
duke@435 | 1920 | |
duke@435 | 1921 | // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information) |
duke@435 | 1922 | // The pointer will be loaded into the thread register. |
duke@435 | 1923 | void get_thread(Register thread); |
duke@435 | 1924 | |
apetrusenko@797 | 1925 | |
duke@435 | 1926 | // Support for VM calls |
duke@435 | 1927 | // |
duke@435 | 1928 | // It is imperative that all calls into the VM are handled via the call_VM macros. |
duke@435 | 1929 | // They make sure that the stack linkage is setup correctly. call_VM's correspond |
duke@435 | 1930 | // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points. |
duke@435 | 1931 | |
never@739 | 1932 | |
never@739 | 1932 | // Overloads taking up to three register arguments; oop_result receives |
never@739 | 1932 | // the call's oop result (if any). |
never@739 | 1933 | void call_VM(Register oop_result, |
never@739 | 1934 | address entry_point, |
never@739 | 1935 | bool check_exceptions = true); |
never@739 | 1936 | void call_VM(Register oop_result, |
never@739 | 1937 | address entry_point, |
never@739 | 1938 | Register arg_1, |
never@739 | 1939 | bool check_exceptions = true); |
never@739 | 1940 | void call_VM(Register oop_result, |
never@739 | 1941 | address entry_point, |
never@739 | 1942 | Register arg_1, Register arg_2, |
never@739 | 1943 | bool check_exceptions = true); |
never@739 | 1944 | void call_VM(Register oop_result, |
never@739 | 1945 | address entry_point, |
never@739 | 1946 | Register arg_1, Register arg_2, Register arg_3, |
never@739 | 1947 | bool check_exceptions = true); |
never@739 | 1948 | |
never@739 | 1949 | // Overloadings with last_Java_sp |
never@739 | 1950 | void call_VM(Register oop_result, |
never@739 | 1951 | Register last_java_sp, |
never@739 | 1952 | address entry_point, |
never@739 | 1953 | int number_of_arguments = 0, |
never@739 | 1954 | bool check_exceptions = true); |
never@739 | 1955 | void call_VM(Register oop_result, |
never@739 | 1956 | Register last_java_sp, |
never@739 | 1957 | address entry_point, |
never@739 | 1958 | Register arg_1, bool |
never@739 | 1959 | check_exceptions = true); |
never@739 | 1960 | void call_VM(Register oop_result, |
never@739 | 1961 | Register last_java_sp, |
never@739 | 1962 | address entry_point, |
never@739 | 1963 | Register arg_1, Register arg_2, |
never@739 | 1964 | bool check_exceptions = true); |
never@739 | 1965 | void call_VM(Register oop_result, |
never@739 | 1966 | Register last_java_sp, |
never@739 | 1967 | address entry_point, |
never@739 | 1968 | Register arg_1, Register arg_2, Register arg_3, |
never@739 | 1969 | bool check_exceptions = true); |
never@739 | 1970 | |
coleenp@4037 | 1970 | // Retrieve the oop/metadata result a VM call left in the thread; |
coleenp@4037 | 1970 | // see the .cpp for the exact thread fields used. |
coleenp@4037 | 1971 | void get_vm_result (Register oop_result, Register thread); |
coleenp@4037 | 1972 | void get_vm_result_2(Register metadata_result, Register thread); |
coleenp@4037 | 1973 | |
jrose@2952 | 1974 | // These always tightly bind to MacroAssembler::call_VM_base |
jrose@2952 | 1975 | // bypassing the virtual implementation |
jrose@2952 | 1976 | void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true); |
jrose@2952 | 1977 | void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true); |
jrose@2952 | 1978 | void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true); |
jrose@2952 | 1979 | void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true); |
jrose@2952 | 1980 | void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true); |
jrose@2952 | 1981 | |
never@739 | 1982 | void call_VM_leaf(address entry_point, |
never@739 | 1983 | int number_of_arguments = 0); |
never@739 | 1984 | void call_VM_leaf(address entry_point, |
never@739 | 1985 | Register arg_1); |
never@739 | 1986 | void call_VM_leaf(address entry_point, |
never@739 | 1987 | Register arg_1, Register arg_2); |
never@739 | 1988 | void call_VM_leaf(address entry_point, |
never@739 | 1989 | Register arg_1, Register arg_2, Register arg_3); |
duke@435 | 1990 | |
never@2868 | 1991 | // These always tightly bind to MacroAssembler::call_VM_leaf_base |
never@2868 | 1992 | // bypassing the virtual implementation |
never@2868 | 1993 | void super_call_VM_leaf(address entry_point); |
never@2868 | 1994 | void super_call_VM_leaf(address entry_point, Register arg_1); |
never@2868 | 1995 | void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2); |
never@2868 | 1996 | void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3); |
never@2868 | 1997 | void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4); |
never@2868 | 1998 | |
duke@435 | 1999 | // last Java Frame (fills frame anchor) |
never@739 | 2000 | void set_last_Java_frame(Register thread, |
never@739 | 2001 | Register last_java_sp, |
never@739 | 2002 | Register last_java_fp, |
never@739 | 2003 | address last_java_pc); |
never@739 | 2004 | |
never@739 | 2005 | // thread in the default location (r15_thread on 64bit) |
never@739 | 2006 | void set_last_Java_frame(Register last_java_sp, |
never@739 | 2007 | Register last_java_fp, |
never@739 | 2008 | address last_java_pc); |
never@739 | 2009 | |
duke@435 | 2010 | void reset_last_Java_frame(Register thread, bool clear_fp, bool clear_pc); |
duke@435 | 2011 | |
never@739 | 2012 | // thread in the default location (r15_thread on 64bit) |
never@739 | 2013 | void reset_last_Java_frame(bool clear_fp, bool clear_pc); |
never@739 | 2014 | |
duke@435 | 2015 | // Stores |
duke@435 | 2016 | void store_check(Register obj); // store check for obj - register is destroyed afterwards |
duke@435 | 2017 | void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed) |
duke@435 | 2018 | |
johnc@2781 | 2019 | #ifndef SERIALGC |
johnc@2781 | 2020 | |
johnc@2781 | 2020 | // G1 SATB pre-barrier and card-marking post-barrier; compiled in only |
johnc@2781 | 2020 | // when a non-serial collector is built. |
apetrusenko@797 | 2021 | void g1_write_barrier_pre(Register obj, |
johnc@2781 | 2022 | Register pre_val, |
apetrusenko@797 | 2023 | Register thread, |
apetrusenko@797 | 2024 | Register tmp, |
johnc@2781 | 2025 | bool tosca_live, |
johnc@2781 | 2026 | bool expand_call); |
johnc@2781 | 2027 | |
apetrusenko@797 | 2028 | void g1_write_barrier_post(Register store_addr, |
apetrusenko@797 | 2029 | Register new_val, |
apetrusenko@797 | 2030 | Register thread, |
apetrusenko@797 | 2031 | Register tmp, |
apetrusenko@797 | 2032 | Register tmp2); |
ysr@777 | 2033 | |
johnc@2781 | 2034 | #endif // SERIALGC |
ysr@777 | 2035 | |
duke@435 | 2036 | // split store_check(Register obj) to enhance instruction interleaving |
duke@435 | 2037 | void store_check_part_1(Register obj); |
duke@435 | 2038 | void store_check_part_2(Register obj); |
duke@435 | 2039 | |
duke@435 | 2040 | // C 'boolean' to Java boolean: x == 0 ? 0 : 1 |
duke@435 | 2041 | void c2bool(Register x); |
duke@435 | 2042 | |
duke@435 | 2043 | // C++ bool manipulation |
duke@435 | 2044 | |
duke@435 | 2045 | void movbool(Register dst, Address src); |
duke@435 | 2046 | void movbool(Address dst, bool boolconst); |
duke@435 | 2047 | void movbool(Address dst, Register src); |
duke@435 | 2048 | void testbool(Register dst); |
duke@435 | 2049 | |
never@739 | 2050 | // oop manipulations |
never@739 | 2051 | void load_klass(Register dst, Register src); |
never@739 | 2052 | void store_klass(Register dst, Register src); |
never@739 | 2053 | |
twisti@2201 | 2054 | void load_heap_oop(Register dst, Address src); |
iveresov@2746 | 2055 | void load_heap_oop_not_null(Register dst, Address src); |
twisti@2201 | 2056 | void store_heap_oop(Address dst, Register src); |
twisti@3969 | 2057 | void cmp_heap_oop(Register src1, Address src2, Register tmp = noreg); |
twisti@2201 | 2058 | |
twisti@2201 | 2059 | // Used for storing NULL. All other oop constants should be |
twisti@2201 | 2060 | // stored using routines that take a jobject. |
twisti@2201 | 2061 | void store_heap_oop_null(Address dst); |
twisti@2201 | 2062 | |
never@739 | 2063 | void load_prototype_header(Register dst, Register src); |
never@739 | 2064 | |
never@739 | 2065 | #ifdef _LP64 |
never@739 | 2066 | void store_klass_gap(Register dst, Register src); |
never@739 | 2067 | |
johnc@1482 | 2068 | // This dummy is to prevent a call to store_heap_oop from |
johnc@1482 | 2069 | // converting a zero (like NULL) into a Register by giving |
johnc@1482 | 2070 | // the compiler two choices it can't resolve |
johnc@1482 | 2071 | |
johnc@1482 | 2072 | void store_heap_oop(Address dst, void* dummy); |
johnc@1482 | 2073 | |
never@739 | 2073 | // Narrow (compressed) oop encode/decode; 64-bit only. |
never@739 | 2074 | void encode_heap_oop(Register r); |
never@739 | 2075 | void decode_heap_oop(Register r); |
never@739 | 2076 | void encode_heap_oop_not_null(Register r); |
never@739 | 2077 | void decode_heap_oop_not_null(Register r); |
never@739 | 2078 | void encode_heap_oop_not_null(Register dst, Register src); |
never@739 | 2079 | void decode_heap_oop_not_null(Register dst, Register src); |
never@739 | 2080 | |
never@739 | 2081 | void set_narrow_oop(Register dst, jobject obj); |
kvn@1077 | 2082 | void set_narrow_oop(Address dst, jobject obj); |
kvn@1077 | 2083 | void cmp_narrow_oop(Register dst, jobject obj); |
kvn@1077 | 2084 | void cmp_narrow_oop(Address dst, jobject obj); |
never@739 | 2085 | |
never@739 | 2086 | // if heap base register is used - reinit it with the correct value |
never@739 | 2087 | void reinit_heapbase(); |
kvn@2039 | 2088 | |
kvn@2039 | 2089 | DEBUG_ONLY(void verify_heapbase(const char* msg);) |
kvn@2039 | 2090 | |
never@739 | 2091 | #endif // _LP64 |
never@739 | 2092 | |
never@739 | 2093 | // Int division/remainder for Java |
duke@435 | 2094 | // (as idivl, but checks for special case as described in JVM spec.) |
duke@435 | 2095 | // returns idivl instruction offset for implicit exception handling |
duke@435 | 2096 | int corrected_idivl(Register reg); |
duke@435 | 2097 | |
never@739 | 2098 | // Long division/remainder for Java |
never@739 | 2099 | // (as idivq, but checks for special case as described in JVM spec.) |
never@739 | 2100 | // returns idivq instruction offset for implicit exception handling |
never@739 | 2101 | int corrected_idivq(Register reg); |
never@739 | 2102 | |
duke@435 | 2102 | // Emit an INT3 breakpoint instruction. |
duke@435 | 2103 | void int3(); |
duke@435 | 2104 | |
never@739 | 2105 | // Long operation macros for a 32bit cpu |
duke@435 | 2106 | // Long negation for Java |
duke@435 | 2107 | void lneg(Register hi, Register lo); |
duke@435 | 2108 | |
duke@435 | 2109 | // Long multiplication for Java |
never@739 | 2110 | // (destroys contents of eax, ebx, ecx and edx) |
duke@435 | 2111 | void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y |
duke@435 | 2112 | |
duke@435 | 2113 | // Long shifts for Java |
duke@435 | 2114 | // (semantics as described in JVM spec.) |
duke@435 | 2115 | void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f) |
duke@435 | 2116 | void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f) |
duke@435 | 2117 | |
duke@435 | 2118 | // Long compare for Java |
duke@435 | 2119 | // (semantics as described in JVM spec.) |
duke@435 | 2120 | void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y) |
duke@435 | 2121 | |
never@739 | 2122 | |
never@739 | 2123 | // misc |
never@739 | 2124 | |
never@739 | 2125 | // Sign extension |
never@739 | 2126 | void sign_extend_short(Register reg); |
never@739 | 2127 | void sign_extend_byte(Register reg); |
never@739 | 2128 | |
never@739 | 2129 | // Division by power of 2, rounding towards 0 |
never@739 | 2130 | void division_with_shift(Register reg, int shift_value); |
never@739 | 2131 | |
duke@435 | 2132 | // Compares the top-most stack entries on the FPU stack and sets the eflags as follows: |
duke@435 | 2133 | // |
duke@435 | 2134 | // CF (corresponds to C0) if x < y |
duke@435 | 2135 | // PF (corresponds to C2) if unordered |
duke@435 | 2136 | // ZF (corresponds to C3) if x = y |
duke@435 | 2137 | // |
duke@435 | 2138 | // The arguments are in reversed order on the stack (i.e., top of stack is first argument). |
duke@435 | 2139 | // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code) |
duke@435 | 2140 | void fcmp(Register tmp); |
duke@435 | 2141 | // Variant of the above which allows y to be further down the stack |
duke@435 | 2142 | // and which only pops x and y if specified. If pop_right is |
duke@435 | 2143 | // specified then pop_left must also be specified. |
duke@435 | 2144 | void fcmp(Register tmp, int index, bool pop_left, bool pop_right); |
duke@435 | 2145 | |
duke@435 | 2146 | // Floating-point comparison for Java |
duke@435 | 2147 | // Compares the top-most stack entries on the FPU stack and stores the result in dst. |
duke@435 | 2148 | // The arguments are in reversed order on the stack (i.e., top of stack is first argument). |
duke@435 | 2149 | // (semantics as described in JVM spec.) |
duke@435 | 2150 | void fcmp2int(Register dst, bool unordered_is_less); |
duke@435 | 2151 | // Variant of the above which allows y to be further down the stack |
duke@435 | 2152 | // and which only pops x and y if specified. If pop_right is |
duke@435 | 2153 | // specified then pop_left must also be specified. |
duke@435 | 2154 | void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right); |
duke@435 | 2155 | |
duke@435 | 2156 | // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards) |
duke@435 | 2157 | // tmp is a temporary register, if none is available use noreg |
duke@435 | 2158 | void fremr(Register tmp); |
duke@435 | 2159 | |
duke@435 | 2160 | |
duke@435 | 2161 | // same as fcmp2int, but using SSE2 |
duke@435 | 2162 | void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less); |
duke@435 | 2163 | void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less); |
duke@435 | 2164 | |
duke@435 | 2165 | // Inlined sin/cos generator for Java; must not use CPU instruction |
duke@435 | 2166 | // directly on Intel as it does not have high enough precision |
duke@435 | 2167 | // outside of the range [-pi/4, pi/4]. Extra argument indicate the |
duke@435 | 2168 | // number of FPU stack slots in use; all but the topmost will |
duke@435 | 2169 | // require saving if a slow case is necessary. Assumes argument is |
duke@435 | 2170 | // on FP TOS; result is on FP TOS. No cpu registers are changed by |
duke@435 | 2171 | // this code. |
duke@435 | 2172 | void trigfunc(char trig, int num_fpu_regs_in_use = 1); |
duke@435 | 2173 | |
duke@435 | 2174 | // branch to L if FPU flag C2 is set/not set |
duke@435 | 2175 | // tmp is a temporary register, if none is available use noreg |
duke@435 | 2176 | void jC2 (Register tmp, Label& L); |
duke@435 | 2177 | void jnC2(Register tmp, Label& L); |
duke@435 | 2178 | |
duke@435 | 2179 | // Pop ST (ffree & fincstp combined) |
duke@435 | 2180 | void fpop(); |
duke@435 | 2181 | |
duke@435 | 2182 | // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack |
duke@435 | 2183 | void push_fTOS(); |
duke@435 | 2184 | |
duke@435 | 2185 | // pops double TOS element from CPU stack and pushes on FPU stack |
duke@435 | 2186 | void pop_fTOS(); |
duke@435 | 2187 | |
duke@435 | 2188 | void empty_FPU_stack(); |
duke@435 | 2189 | |
duke@435 | 2189 | // Bulk save/restore of register state; see the .cpp for the exact |
duke@435 | 2189 | // layout each of these uses. |
duke@435 | 2190 | void push_IU_state(); |
duke@435 | 2191 | void pop_IU_state(); |
duke@435 | 2192 | |
duke@435 | 2193 | void push_FPU_state(); |
duke@435 | 2194 | void pop_FPU_state(); |
duke@435 | 2195 | |
duke@435 | 2196 | void push_CPU_state(); |
duke@435 | 2197 | void pop_CPU_state(); |
duke@435 | 2198 | |
duke@435 | 2199 | // Round reg up to a multiple of modulus (modulus is expected to be a power of two) |
duke@435 | 2200 | void round_to(Register reg, int modulus); |
duke@435 | 2201 | |
duke@435 | 2202 | // Callee saved registers handling |
duke@435 | 2203 | void push_callee_saved_registers(); |
duke@435 | 2204 | void pop_callee_saved_registers(); |
duke@435 | 2205 | |
duke@435 | 2206 | // allocation |
duke@435 | 2207 | void eden_allocate( |
duke@435 | 2208 | Register obj, // result: pointer to object after successful allocation |
duke@435 | 2209 | Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise |
duke@435 | 2210 | int con_size_in_bytes, // object size in bytes if known at compile time |
duke@435 | 2211 | Register t1, // temp register |
duke@435 | 2212 | Label& slow_case // continuation point if fast allocation fails |
duke@435 | 2213 | ); |
duke@435 | 2214 | void tlab_allocate( |
duke@435 | 2215 | Register obj, // result: pointer to object after successful allocation |
duke@435 | 2216 | Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise |
duke@435 | 2217 | int con_size_in_bytes, // object size in bytes if known at compile time |
duke@435 | 2218 | Register t1, // temp register |
duke@435 | 2219 | Register t2, // temp register |
duke@435 | 2220 | Label& slow_case // continuation point if fast allocation fails |
duke@435 | 2221 | ); |
phh@2423 | 2222 | Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address |
phh@2423 | 2222 | // Add the (variable or constant) allocation size to the thread's allocated-bytes counter. |
phh@2423 | 2223 | void incr_allocated_bytes(Register thread, |
phh@2423 | 2224 | Register var_size_in_bytes, int con_size_in_bytes, |
phh@2423 | 2225 | Register t1 = noreg); |
duke@435 | 2226 | |
jrose@1058 | 2227 | // interface method calling |
jrose@1058 | 2228 | void lookup_interface_method(Register recv_klass, |
jrose@1058 | 2229 | Register intf_klass, |
jrose@1100 | 2230 | RegisterOrConstant itable_index, |
jrose@1058 | 2231 | Register method_result, |
jrose@1058 | 2232 | Register scan_temp, |
jrose@1058 | 2233 | Label& no_such_interface); |
jrose@1058 | 2234 | |
twisti@3969 | 2235 | // virtual method calling |
twisti@3969 | 2236 | void lookup_virtual_method(Register recv_klass, |
twisti@3969 | 2237 | RegisterOrConstant vtable_index, |
twisti@3969 | 2238 | Register method_result); |
twisti@3969 | 2239 | |
jrose@1079 | 2240 | // Test sub_klass against super_klass, with fast and slow paths. |
jrose@1079 | 2241 | |
jrose@1079 | 2242 | // The fast path produces a tri-state answer: yes / no / maybe-slow. |
jrose@1079 | 2243 | // One of the three labels can be NULL, meaning take the fall-through. |
jrose@1079 | 2244 | // If super_check_offset is -1, the value is loaded up from super_klass. |
jrose@1079 | 2245 | // No registers are killed, except temp_reg. |
jrose@1079 | 2246 | void check_klass_subtype_fast_path(Register sub_klass, |
jrose@1079 | 2247 | Register super_klass, |
jrose@1079 | 2248 | Register temp_reg, |
jrose@1079 | 2249 | Label* L_success, |
jrose@1079 | 2250 | Label* L_failure, |
jrose@1079 | 2251 | Label* L_slow_path, |
jrose@1100 | 2252 | RegisterOrConstant super_check_offset = RegisterOrConstant(-1)); |
jrose@1079 | 2253 | |
jrose@1079 | 2254 | // The rest of the type check; must be wired to a corresponding fast path. |
jrose@1079 | 2255 | // It does not repeat the fast path logic, so don't use it standalone. |
jrose@1079 | 2256 | // The temp_reg and temp2_reg can be noreg, if no temps are available. |
jrose@1079 | 2257 | // Updates the sub's secondary super cache as necessary. |
jrose@1079 | 2258 | // If set_cond_codes, condition codes will be Z on success, NZ on failure. |
jrose@1079 | 2259 | void check_klass_subtype_slow_path(Register sub_klass, |
jrose@1079 | 2260 | Register super_klass, |
jrose@1079 | 2261 | Register temp_reg, |
jrose@1079 | 2262 | Register temp2_reg, |
jrose@1079 | 2263 | Label* L_success, |
jrose@1079 | 2264 | Label* L_failure, |
jrose@1079 | 2265 | bool set_cond_codes = false); |
jrose@1079 | 2266 | |
jrose@1079 | 2267 | // Simplified, combined version, good for typical uses. |
jrose@1079 | 2268 | // Falls through on failure. |
jrose@1079 | 2269 | void check_klass_subtype(Register sub_klass, |
jrose@1079 | 2270 | Register super_klass, |
jrose@1079 | 2271 | Register temp_reg, |
jrose@1079 | 2272 | Label& L_success); |
jrose@1079 | 2273 | |
jrose@1145 | 2274 | // method handles (JSR 292) |
jrose@1145 | 2275 | Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0); |
jrose@1145 | 2276 | |
duke@435 | 2277 | //---- |
duke@435 | 2278 | void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0 |
duke@435 | 2279 | |
duke@435 | 2280 | // Debugging |
never@739 | 2281 | |
never@739 | 2282 | // only if +VerifyOops |
coleenp@4052 | 2283 | // TODO: Make these macros with file and line like sparc version! |
never@739 | 2284 | void verify_oop(Register reg, const char* s = "broken oop"); |
duke@435 | 2285 | void verify_oop_addr(Address addr, const char * s = "broken oop addr"); |
duke@435 | 2286 | |
coleenp@4052 | 2287 | // TODO: verify method and klass metadata (compare against vptr?) |
coleenp@4052 | 2287 | // Currently no-ops; the macros below keep call sites uniform with verify_oop. |
coleenp@4052 | 2288 | void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {} |
coleenp@4052 | 2289 | void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){} |
coleenp@4052 | 2290 | |
coleenp@4052 | 2291 | #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__) |
coleenp@4052 | 2292 | #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__) |
coleenp@4052 | 2293 | |
never@739 | 2294 | // only if +VerifyFPU |
never@739 | 2295 | void verify_FPU(int stack_depth, const char* s = "illegal FPU state"); |
never@739 | 2296 | |
never@739 | 2297 | // prints msg, dumps registers and stops execution |
never@739 | 2298 | void stop(const char* msg); |
never@739 | 2299 | |
never@739 | 2300 | // prints msg and continues |
never@739 | 2301 | void warn(const char* msg); |
never@739 | 2302 | |
twisti@3969 | 2303 | // dumps registers and other state |
twisti@3969 | 2304 | void print_state(); |
twisti@3969 | 2305 | |
never@739 | 2305 | // Runtime backends for the helpers above; presumably invoked from |
never@739 | 2305 | // generated code — see the .cpp for the calling sequence. |
never@739 | 2306 | static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg); |
never@739 | 2307 | static void debug64(char* msg, int64_t pc, int64_t regs[]); |
twisti@3969 | 2308 | static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip); |
twisti@3969 | 2309 | static void print_state64(int64_t pc, int64_t regs[]); |
never@739 | 2310 | |
duke@435 | 2311 | void os_breakpoint(); |
never@739 | 2312 | |
duke@435 | 2313 | void untested() { stop("untested"); } |
never@739 | 2314 | |
twisti@2201 | 2315 | void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); } |
never@739 | 2316 | |
duke@435 | 2317 | void should_not_reach_here() { stop("should not reach here"); } |
never@739 | 2318 | |
duke@435 | 2319 | void print_CPU_state(); |
duke@435 | 2320 | |
duke@435 | 2321 | // Stack overflow checking |
duke@435 | 2322 | // Touch the stack one page at a time for stack-overflow detection. |
duke@435 | 2322 | // The caller passes a positive byte offset; the store itself lands |
duke@435 | 2322 | // below rsp, since the stack grows down. |
duke@435 | 2322 | void bang_stack_with_offset(int offset) { |
duke@435 | 2323 |   assert(offset > 0, "must bang with negative offset"); |
duke@435 | 2324 |   const int displacement = -offset; |
duke@435 | 2325 |   movl(Address(rsp, displacement), rax); |
duke@435 | 2326 | } |
duke@435 | 2327 | |
duke@435 | 2328 | // Writes to stack successive pages until offset reached to check for |
duke@435 | 2329 | // stack overflow + shadow pages. Also, clobbers tmp |
duke@435 | 2330 | void bang_stack_size(Register size, Register tmp); |
duke@435 | 2331 | |
jrose@1100 | 2332 | virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, |
jrose@1100 | 2333 | Register tmp, |
jrose@1100 | 2334 | int offset); |
jrose@1057 | 2335 | |
duke@435 | 2336 | // Support for serializing memory accesses between threads |
duke@435 | 2337 | void serialize_memory(Register thread, Register tmp); |
duke@435 | 2338 | |
duke@435 | 2339 | void verify_tlab(); |
duke@435 | 2340 | |
duke@435 | 2341 | // Biased locking support |
duke@435 | 2342 | // lock_reg and obj_reg must be loaded up with the appropriate values. |
duke@435 | 2343 | // swap_reg must be rax, and is killed. |
duke@435 | 2344 | // tmp_reg is optional. If it is supplied (i.e., != noreg) it will |
duke@435 | 2345 | // be killed; if not supplied, push/pop will be used internally to |
duke@435 | 2346 | // allocate a temporary (inefficient, avoid if possible). |
duke@435 | 2347 | // Optional slow case is for implementations (interpreter and C1) which branch to |
duke@435 | 2348 | // slow case directly. Leaves condition codes set for C2's Fast_Lock node. |
duke@435 | 2349 | // Returns offset of first potentially-faulting instruction for null |
duke@435 | 2350 | // check info (currently consumed only by C1). If |
duke@435 | 2351 | // swap_reg_contains_mark is true then returns -1 as it is assumed |
duke@435 | 2352 | // the calling code has already passed any potential faults. |
kvn@855 | 2353 | int biased_locking_enter(Register lock_reg, Register obj_reg, |
kvn@855 | 2354 | Register swap_reg, Register tmp_reg, |
duke@435 | 2355 | bool swap_reg_contains_mark, |
duke@435 | 2356 | Label& done, Label* slow_case = NULL, |
duke@435 | 2357 | BiasedLockingCounters* counters = NULL); |
duke@435 | 2358 | void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done); |
duke@435 | 2359 | |
duke@435 | 2360 | |
duke@435 | 2360 | // Returns the opposite branch condition (e.g. equal <-> notEqual). |
duke@435 | 2361 | Condition negate_condition(Condition cond); |
duke@435 | 2362 | |
duke@435 | 2363 | // Instructions that use AddressLiteral operands. These instruction can handle 32bit/64bit |
duke@435 | 2364 | // operands. In general the names are modified to avoid hiding the instruction in Assembler |
duke@435 | 2365 | // so that we don't need to implement all the varieties in the Assembler with trivial wrappers |
duke@435 | 2366 | // here in MacroAssembler. The major exception to this rule is call |
duke@435 | 2367 | |
duke@435 | 2368 | // Arithmetics |
duke@435 | 2369 | |
never@739 | 2370 | |
never@739 | 2370 | // Pointer-width add: addq on LP64, addl on 32-bit. |
never@739 | 2371 | void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; } |
never@739 | 2372 | void addptr(Address dst, Register src); |
never@739 | 2373 | |
never@739 | 2374 | void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); } |
never@739 | 2375 | void addptr(Register dst, int32_t src); |
never@739 | 2376 | void addptr(Register dst, Register src); |
never@2895 | 2377 | // Add either an immediate or a register, depending on which variant |
never@2895 | 2377 | // the RegisterOrConstant operand carries. |
never@2895 | 2377 | void addptr(Register dst, RegisterOrConstant src) { |
never@2895 | 2378 |   if (src.is_constant()) { |
never@2895 | 2378 |     addptr(dst, (int) src.as_constant()); |
never@2895 | 2379 |   } else { |
never@2895 | 2379 |     addptr(dst, src.as_register()); |
never@2895 | 2379 |   } |
never@2895 | 2380 | } |
never@739 | 2381 | |
never@739 | 2382 | void andptr(Register dst, int32_t src); |
never@739 | 2383 | void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; } |
never@739 | 2384 | |
never@739 | 2385 | void cmp8(AddressLiteral src1, int imm); |
never@739 | 2386 | |
never@739 | 2387 | // renamed to drag out the casting of address to int32_t/intptr_t |
duke@435 | 2388 | void cmp32(Register src1, int32_t imm); |
duke@435 | 2389 | |
duke@435 | 2390 | void cmp32(AddressLiteral src1, int32_t imm); |
duke@435 | 2391 | // compare reg - mem, or reg - &mem |
duke@435 | 2392 | void cmp32(Register src1, AddressLiteral src2); |
duke@435 | 2393 | |
duke@435 | 2394 | void cmp32(Register src1, Address src2); |
duke@435 | 2395 | |
never@739 | 2396 | #ifndef _LP64 |
coleenp@4037 | 2397 | void cmpklass(Address dst, Metadata* obj); |
coleenp@4037 | 2398 | void cmpklass(Register dst, Metadata* obj); |
never@739 | 2399 | void cmpoop(Address dst, jobject obj); |
never@739 | 2400 | void cmpoop(Register dst, jobject obj); |
never@739 | 2401 | #endif // _LP64 |
never@739 | 2402 | |
duke@435 | 2403 | // NOTE src2 must be the lval. This is NOT an mem-mem compare |
duke@435 | 2404 | void cmpptr(Address src1, AddressLiteral src2); |
duke@435 | 2405 | |
duke@435 | 2406 | void cmpptr(Register src1, AddressLiteral src2); |
duke@435 | 2407 | |
never@739 | 2408 | void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } |
never@739 | 2409 | void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } |
never@739 | 2410 | // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } |
never@739 | 2411 | |
never@739 | 2412 | void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } |
never@739 | 2413 | void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } |
never@739 | 2414 | |
  // cmp64 to avoid hiding cmpq
never@739 | 2416 | void cmp64(Register src1, AddressLiteral src); |
never@739 | 2417 | |
never@739 | 2418 | void cmpxchgptr(Register reg, Address adr); |
never@739 | 2419 | |
never@739 | 2420 | void locked_cmpxchgptr(Register reg, AddressLiteral adr); |
never@739 | 2421 | |
never@739 | 2422 | |
never@739 | 2423 | void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); } |
never@739 | 2424 | |
never@739 | 2425 | |
never@739 | 2426 | void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); } |
never@739 | 2427 | |
never@739 | 2428 | void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); } |
never@739 | 2429 | |
never@739 | 2430 | void shlptr(Register dst, int32_t shift); |
never@739 | 2431 | void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); } |
never@739 | 2432 | |
never@739 | 2433 | void shrptr(Register dst, int32_t shift); |
never@739 | 2434 | void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); } |
never@739 | 2435 | |
never@739 | 2436 | void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); } |
never@739 | 2437 | void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); } |
never@739 | 2438 | |
never@739 | 2439 | void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); } |
never@739 | 2440 | |
never@739 | 2441 | void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); } |
never@739 | 2442 | void subptr(Register dst, int32_t src); |
kvn@3574 | 2443 | // Force generation of a 4 byte immediate value even if it fits into 8bit |
kvn@3574 | 2444 | void subptr_imm32(Register dst, int32_t src); |
never@739 | 2445 | void subptr(Register dst, Register src); |
  // Subtract a register-or-constant operand from dst (pointer-sized subtract).
  // NOTE(review): like addptr(Register, RegisterOrConstant), the constant
  // arm narrows to int; a 64-bit constant would be truncated on LP64 --
  // callers presumably pass only int-sized constants. TODO confirm.
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else subptr(dst, src.as_register());
  }
never@739 | 2450 | |
never@739 | 2451 | void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); } |
never@739 | 2452 | void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); } |
never@739 | 2453 | |
never@739 | 2454 | void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; } |
never@739 | 2455 | void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; } |
never@739 | 2456 | |
never@739 | 2457 | void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; } |
never@739 | 2458 | |
never@739 | 2459 | |
duke@435 | 2460 | |
duke@435 | 2461 | // Helper functions for statistics gathering. |
duke@435 | 2462 | // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes. |
duke@435 | 2463 | void cond_inc32(Condition cond, AddressLiteral counter_addr); |
duke@435 | 2464 | // Unconditional atomic increment. |
duke@435 | 2465 | void atomic_incl(AddressLiteral counter_addr); |
duke@435 | 2466 | |
duke@435 | 2467 | void lea(Register dst, AddressLiteral adr); |
duke@435 | 2468 | void lea(Address dst, AddressLiteral adr); |
never@739 | 2469 | void lea(Register dst, Address adr) { Assembler::lea(dst, adr); } |
never@739 | 2470 | |
never@739 | 2471 | void leal32(Register dst, Address src) { leal(dst, src); } |
never@739 | 2472 | |
iveresov@2686 | 2473 | // Import other testl() methods from the parent class or else |
iveresov@2686 | 2474 | // they will be hidden by the following overriding declaration. |
iveresov@2686 | 2475 | using Assembler::testl; |
iveresov@2686 | 2476 | void testl(Register dst, AddressLiteral src); |
never@739 | 2477 | |
never@739 | 2478 | void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } |
never@739 | 2479 | void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } |
never@739 | 2480 | void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } |
never@739 | 2481 | |
never@739 | 2482 | void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); } |
never@739 | 2483 | void testptr(Register src1, Register src2); |
never@739 | 2484 | |
never@739 | 2485 | void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); } |
never@739 | 2486 | void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); } |
duke@435 | 2487 | |
duke@435 | 2488 | // Calls |
duke@435 | 2489 | |
duke@435 | 2490 | void call(Label& L, relocInfo::relocType rtype); |
duke@435 | 2491 | void call(Register entry); |
duke@435 | 2492 | |
  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
duke@435 | 2496 | void call(AddressLiteral entry); |
duke@435 | 2497 | |
coleenp@4037 | 2498 | // Emit the CompiledIC call idiom |
coleenp@4037 | 2499 | void ic_call(address entry); |
coleenp@4037 | 2500 | |
duke@435 | 2501 | // Jumps |
duke@435 | 2502 | |
  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
duke@435 | 2506 | void jump(AddressLiteral dst); |
duke@435 | 2507 | void jump_cc(Condition cc, AddressLiteral dst); |
duke@435 | 2508 | |
  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump transfers to the address
  // contained in the location described by entry (not the address of entry).
duke@435 | 2512 | void jump(ArrayAddress entry); |
duke@435 | 2513 | |
duke@435 | 2514 | // Floating |
duke@435 | 2515 | |
duke@435 | 2516 | void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); } |
duke@435 | 2517 | void andpd(XMMRegister dst, AddressLiteral src); |
duke@435 | 2518 | |
kvn@3388 | 2519 | void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); } |
kvn@3388 | 2520 | void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); } |
kvn@3388 | 2521 | void andps(XMMRegister dst, AddressLiteral src); |
kvn@3388 | 2522 | |
kvn@3388 | 2523 | void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); } |
duke@435 | 2524 | void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); } |
duke@435 | 2525 | void comiss(XMMRegister dst, AddressLiteral src); |
duke@435 | 2526 | |
kvn@3388 | 2527 | void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); } |
duke@435 | 2528 | void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); } |
duke@435 | 2529 | void comisd(XMMRegister dst, AddressLiteral src); |
duke@435 | 2530 | |
twisti@2350 | 2531 | void fadd_s(Address src) { Assembler::fadd_s(src); } |
twisti@2350 | 2532 | void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); } |
twisti@2350 | 2533 | |
duke@435 | 2534 | void fldcw(Address src) { Assembler::fldcw(src); } |
duke@435 | 2535 | void fldcw(AddressLiteral src); |
duke@435 | 2536 | |
duke@435 | 2537 | void fld_s(int index) { Assembler::fld_s(index); } |
duke@435 | 2538 | void fld_s(Address src) { Assembler::fld_s(src); } |
duke@435 | 2539 | void fld_s(AddressLiteral src); |
duke@435 | 2540 | |
duke@435 | 2541 | void fld_d(Address src) { Assembler::fld_d(src); } |
duke@435 | 2542 | void fld_d(AddressLiteral src); |
duke@435 | 2543 | |
duke@435 | 2544 | void fld_x(Address src) { Assembler::fld_x(src); } |
duke@435 | 2545 | void fld_x(AddressLiteral src); |
duke@435 | 2546 | |
twisti@2350 | 2547 | void fmul_s(Address src) { Assembler::fmul_s(src); } |
twisti@2350 | 2548 | void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); } |
twisti@2350 | 2549 | |
duke@435 | 2550 | void ldmxcsr(Address src) { Assembler::ldmxcsr(src); } |
duke@435 | 2551 | void ldmxcsr(AddressLiteral src); |
duke@435 | 2552 | |
roland@3787 | 2553 | // compute pow(x,y) and exp(x) with x86 instructions. Don't cover |
roland@3787 | 2554 | // all corner cases and may result in NaN and require fallback to a |
roland@3787 | 2555 | // runtime call. |
roland@3787 | 2556 | void fast_pow(); |
roland@3787 | 2557 | void fast_exp(); |
roland@3844 | 2558 | void increase_precision(); |
roland@3844 | 2559 | void restore_precision(); |
roland@3787 | 2560 | |
roland@3787 | 2561 | // computes exp(x). Fallback to runtime call included. |
roland@3787 | 2562 | void exp_with_fallback(int num_fpu_regs_in_use) { pow_or_exp(true, num_fpu_regs_in_use); } |
roland@3787 | 2563 | // computes pow(x,y). Fallback to runtime call included. |
roland@3787 | 2564 | void pow_with_fallback(int num_fpu_regs_in_use) { pow_or_exp(false, num_fpu_regs_in_use); } |
roland@3787 | 2565 | |
never@739 | 2566 | private: |
roland@3787 | 2567 | |
roland@3787 | 2568 | // call runtime as a fallback for trig functions and pow/exp. |
roland@3787 | 2569 | void fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use); |
roland@3787 | 2570 | |
roland@3787 | 2571 | // computes 2^(Ylog2X); Ylog2X in ST(0) |
roland@3787 | 2572 | void pow_exp_core_encoding(); |
roland@3787 | 2573 | |
roland@3787 | 2574 | // computes pow(x,y) or exp(x). Fallback to runtime call included. |
roland@3787 | 2575 | void pow_or_exp(bool is_exp, int num_fpu_regs_in_use); |
roland@3787 | 2576 | |
never@739 | 2577 | // these are private because users should be doing movflt/movdbl |
never@739 | 2578 | |
duke@435 | 2579 | void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); } |
duke@435 | 2580 | void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); } |
duke@435 | 2581 | void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); } |
duke@435 | 2582 | void movss(XMMRegister dst, AddressLiteral src); |
duke@435 | 2583 | |
kvn@3388 | 2584 | void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); } |
never@739 | 2585 | void movlpd(XMMRegister dst, AddressLiteral src); |
never@739 | 2586 | |
never@739 | 2587 | public: |
never@739 | 2588 | |
twisti@2350 | 2589 | void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); } |
twisti@2350 | 2590 | void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); } |
kvn@3388 | 2591 | void addsd(XMMRegister dst, AddressLiteral src); |
twisti@2350 | 2592 | |
twisti@2350 | 2593 | void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); } |
twisti@2350 | 2594 | void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); } |
kvn@3388 | 2595 | void addss(XMMRegister dst, AddressLiteral src); |
twisti@2350 | 2596 | |
twisti@2350 | 2597 | void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); } |
twisti@2350 | 2598 | void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); } |
kvn@3388 | 2599 | void divsd(XMMRegister dst, AddressLiteral src); |
twisti@2350 | 2600 | |
twisti@2350 | 2601 | void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); } |
twisti@2350 | 2602 | void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); } |
kvn@3388 | 2603 | void divss(XMMRegister dst, AddressLiteral src); |
twisti@2350 | 2604 | |
phh@2423 | 2605 | void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); } |
phh@2423 | 2606 | void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); } |
phh@2423 | 2607 | void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); } |
kvn@3388 | 2608 | void movsd(XMMRegister dst, AddressLiteral src); |
twisti@2350 | 2609 | |
twisti@2350 | 2610 | void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); } |
twisti@2350 | 2611 | void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); } |
kvn@3388 | 2612 | void mulsd(XMMRegister dst, AddressLiteral src); |
twisti@2350 | 2613 | |
twisti@2350 | 2614 | void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); } |
twisti@2350 | 2615 | void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); } |
kvn@3388 | 2616 | void mulss(XMMRegister dst, AddressLiteral src); |
twisti@2350 | 2617 | |
twisti@2350 | 2618 | void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); } |
twisti@2350 | 2619 | void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); } |
kvn@3388 | 2620 | void sqrtsd(XMMRegister dst, AddressLiteral src); |
twisti@2350 | 2621 | |
twisti@2350 | 2622 | void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); } |
twisti@2350 | 2623 | void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); } |
kvn@3388 | 2624 | void sqrtss(XMMRegister dst, AddressLiteral src); |
twisti@2350 | 2625 | |
twisti@2350 | 2626 | void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); } |
twisti@2350 | 2627 | void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); } |
kvn@3388 | 2628 | void subsd(XMMRegister dst, AddressLiteral src); |
twisti@2350 | 2629 | |
twisti@2350 | 2630 | void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); } |
twisti@2350 | 2631 | void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); } |
kvn@3388 | 2632 | void subss(XMMRegister dst, AddressLiteral src); |
duke@435 | 2633 | |
duke@435 | 2634 | void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); } |
kvn@3388 | 2635 | void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); } |
duke@435 | 2636 | void ucomiss(XMMRegister dst, AddressLiteral src); |
duke@435 | 2637 | |
duke@435 | 2638 | void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); } |
kvn@3388 | 2639 | void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); } |
duke@435 | 2640 | void ucomisd(XMMRegister dst, AddressLiteral src); |
duke@435 | 2641 | |
duke@435 | 2642 | // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values |
duke@435 | 2643 | void xorpd(XMMRegister dst, XMMRegister src) { Assembler::xorpd(dst, src); } |
duke@435 | 2644 | void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); } |
duke@435 | 2645 | void xorpd(XMMRegister dst, AddressLiteral src); |
duke@435 | 2646 | |
duke@435 | 2647 | // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values |
duke@435 | 2648 | void xorps(XMMRegister dst, XMMRegister src) { Assembler::xorps(dst, src); } |
duke@435 | 2649 | void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); } |
duke@435 | 2650 | void xorps(XMMRegister dst, AddressLiteral src); |
duke@435 | 2651 | |
kvn@3390 | 2652 | // AVX 3-operands instructions |
kvn@3390 | 2653 | |
kvn@3390 | 2654 | void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); } |
kvn@3390 | 2655 | void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); } |
kvn@3390 | 2656 | void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); |
kvn@3390 | 2657 | |
kvn@3390 | 2658 | void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); } |
kvn@3390 | 2659 | void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); } |
kvn@3390 | 2660 | void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src); |
kvn@3390 | 2661 | |
kvn@4001 | 2662 | void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandpd(dst, nds, src, vector256); } |
kvn@4001 | 2663 | void vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vandpd(dst, nds, src, vector256); } |
kvn@4001 | 2664 | void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256); |
kvn@4001 | 2665 | |
kvn@4001 | 2666 | void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandps(dst, nds, src, vector256); } |
kvn@4001 | 2667 | void vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vandps(dst, nds, src, vector256); } |
kvn@4001 | 2668 | void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256); |
kvn@3390 | 2669 | |
kvn@3390 | 2670 | void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); } |
kvn@3390 | 2671 | void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); } |
kvn@3390 | 2672 | void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); |
kvn@3390 | 2673 | |
kvn@3390 | 2674 | void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); } |
kvn@3390 | 2675 | void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); } |
kvn@3390 | 2676 | void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src); |
kvn@3390 | 2677 | |
kvn@3390 | 2678 | void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); } |
kvn@3390 | 2679 | void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); } |
kvn@3390 | 2680 | void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); |
kvn@3390 | 2681 | |
kvn@3390 | 2682 | void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); } |
kvn@3390 | 2683 | void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); } |
kvn@3390 | 2684 | void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src); |
kvn@3390 | 2685 | |
kvn@3390 | 2686 | void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); } |
kvn@3390 | 2687 | void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); } |
kvn@3390 | 2688 | void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); |
kvn@3390 | 2689 | |
kvn@3390 | 2690 | void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); } |
kvn@3390 | 2691 | void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); } |
kvn@3390 | 2692 | void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src); |
kvn@3390 | 2693 | |
kvn@3882 | 2694 | // AVX Vector instructions |
kvn@3882 | 2695 | |
kvn@3882 | 2696 | void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); } |
kvn@4001 | 2697 | void vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); } |
kvn@4001 | 2698 | void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256); |
kvn@3390 | 2699 | |
kvn@3882 | 2700 | void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); } |
kvn@4001 | 2701 | void vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); } |
kvn@4001 | 2702 | void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256); |
kvn@3390 | 2703 | |
kvn@3929 | 2704 | void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { |
kvn@3929 | 2705 | if (UseAVX > 1 || !vector256) // vpxor 256 bit is available only in AVX2 |
kvn@3929 | 2706 | Assembler::vpxor(dst, nds, src, vector256); |
kvn@3929 | 2707 | else |
kvn@3929 | 2708 | Assembler::vxorpd(dst, nds, src, vector256); |
kvn@3929 | 2709 | } |
kvn@4001 | 2710 | void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { |
kvn@4001 | 2711 | if (UseAVX > 1 || !vector256) // vpxor 256 bit is available only in AVX2 |
kvn@4001 | 2712 | Assembler::vpxor(dst, nds, src, vector256); |
kvn@4001 | 2713 | else |
kvn@4001 | 2714 | Assembler::vxorpd(dst, nds, src, vector256); |
kvn@4001 | 2715 | } |
kvn@3929 | 2716 | |
  // Move packed integer values from the low 128 bits to the high 128 bits of a 256-bit vector.
kvn@3929 | 2718 | void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) { |
kvn@3929 | 2719 | if (UseAVX > 1) // vinserti128h is available only in AVX2 |
kvn@3929 | 2720 | Assembler::vinserti128h(dst, nds, src); |
kvn@3929 | 2721 | else |
kvn@3929 | 2722 | Assembler::vinsertf128h(dst, nds, src); |
kvn@3929 | 2723 | } |
kvn@3390 | 2724 | |
duke@435 | 2725 | // Data |
duke@435 | 2726 | |
twisti@2697 | 2727 | void cmov32( Condition cc, Register dst, Address src); |
twisti@2697 | 2728 | void cmov32( Condition cc, Register dst, Register src); |
twisti@2697 | 2729 | |
twisti@2697 | 2730 | void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); } |
twisti@2697 | 2731 | |
twisti@2697 | 2732 | void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } |
twisti@2697 | 2733 | void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } |
never@739 | 2734 | |
duke@435 | 2735 | void movoop(Register dst, jobject obj); |
duke@435 | 2736 | void movoop(Address dst, jobject obj); |
duke@435 | 2737 | |
coleenp@4037 | 2738 | void mov_metadata(Register dst, Metadata* obj); |
coleenp@4037 | 2739 | void mov_metadata(Address dst, Metadata* obj); |
coleenp@4037 | 2740 | |
duke@435 | 2741 | void movptr(ArrayAddress dst, Register src); |
duke@435 | 2742 | // can this do an lea? |
duke@435 | 2743 | void movptr(Register dst, ArrayAddress src); |
duke@435 | 2744 | |
never@739 | 2745 | void movptr(Register dst, Address src); |
never@739 | 2746 | |
duke@435 | 2747 | void movptr(Register dst, AddressLiteral src); |
duke@435 | 2748 | |
never@739 | 2749 | void movptr(Register dst, intptr_t src); |
never@739 | 2750 | void movptr(Register dst, Register src); |
never@739 | 2751 | void movptr(Address dst, intptr_t src); |
never@739 | 2752 | |
never@739 | 2753 | void movptr(Address dst, Register src); |
never@739 | 2754 | |
  // Move a register-or-constant operand into dst. Unlike the arithmetic
  // RegisterOrConstant helpers (addptr/subptr), the constant arm keeps
  // the full pointer width -- no narrowing cast.
  void movptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) movptr(dst, src.as_constant());
    else movptr(dst, src.as_register());
  }
never@2895 | 2759 | |
never@739 | 2760 | #ifdef _LP64 |
never@739 | 2761 | // Generally the next two are only used for moving NULL |
never@739 | 2762 | // Although there are situations in initializing the mark word where |
never@739 | 2763 | // they could be used. They are dangerous. |
never@739 | 2764 | |
never@739 | 2765 | // They only exist on LP64 so that int32_t and intptr_t are not the same |
never@739 | 2766 | // and we have ambiguous declarations. |
never@739 | 2767 | |
never@739 | 2768 | void movptr(Address dst, int32_t imm32); |
never@739 | 2769 | void movptr(Register dst, int32_t imm32); |
never@739 | 2770 | #endif // _LP64 |
never@739 | 2771 | |
duke@435 | 2772 | // to avoid hiding movl |
duke@435 | 2773 | void mov32(AddressLiteral dst, Register src); |
duke@435 | 2774 | void mov32(Register dst, AddressLiteral src); |
never@739 | 2775 | |
duke@435 | 2776 | // to avoid hiding movb |
duke@435 | 2777 | void movbyte(ArrayAddress dst, int src); |
duke@435 | 2778 | |
kvn@3929 | 2779 | // Import other mov() methods from the parent class or else |
kvn@3929 | 2780 | // they will be hidden by the following overriding declaration. |
kvn@3929 | 2781 | using Assembler::movdl; |
kvn@3929 | 2782 | using Assembler::movq; |
kvn@3929 | 2783 | void movdl(XMMRegister dst, AddressLiteral src); |
kvn@3929 | 2784 | void movq(XMMRegister dst, AddressLiteral src); |
kvn@3929 | 2785 | |
duke@435 | 2786 | // Can push value or effective address |
duke@435 | 2787 | void pushptr(AddressLiteral src); |
duke@435 | 2788 | |
never@739 | 2789 | void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); } |
never@739 | 2790 | void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); } |
never@739 | 2791 | |
never@739 | 2792 | void pushoop(jobject obj); |
coleenp@4037 | 2793 | void pushklass(Metadata* obj); |
never@739 | 2794 | |
  // sign-extend a 32-bit ("l") value to a pointer-sized element as needed
never@739 | 2796 | void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); } |
never@739 | 2797 | void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); } |
never@739 | 2798 | |
kvn@3574 | 2799 | // C2 compiled method's prolog code. |
kvn@3574 | 2800 | void verified_entry(int framesize, bool stack_bang, bool fp_mode_24b); |
kvn@3574 | 2801 | |
kvn@1421 | 2802 | // IndexOf strings. |
kvn@2602 | 2803 | // Small strings are loaded through stack if they cross page boundary. |
kvn@1421 | 2804 | void string_indexof(Register str1, Register str2, |
kvn@2602 | 2805 | Register cnt1, Register cnt2, |
kvn@2602 | 2806 | int int_cnt2, Register result, |
kvn@1421 | 2807 | XMMRegister vec, Register tmp); |
kvn@1421 | 2808 | |
kvn@2602 | 2809 | // IndexOf for constant substrings with size >= 8 elements |
kvn@2602 | 2810 | // which don't need to be loaded through stack. |
kvn@2602 | 2811 | void string_indexofC8(Register str1, Register str2, |
kvn@2602 | 2812 | Register cnt1, Register cnt2, |
kvn@2602 | 2813 | int int_cnt2, Register result, |
kvn@2602 | 2814 | XMMRegister vec, Register tmp); |
kvn@2602 | 2815 | |
kvn@2602 | 2816 | // Smallest code: we don't need to load through stack, |
kvn@2602 | 2817 | // check string tail. |
kvn@2602 | 2818 | |
kvn@1421 | 2819 | // Compare strings. |
kvn@1421 | 2820 | void string_compare(Register str1, Register str2, |
kvn@1421 | 2821 | Register cnt1, Register cnt2, Register result, |
never@2569 | 2822 | XMMRegister vec1); |
kvn@1421 | 2823 | |
kvn@1421 | 2824 | // Compare char[] arrays. |
kvn@1421 | 2825 | void char_arrays_equals(bool is_array_equ, Register ary1, Register ary2, |
kvn@1421 | 2826 | Register limit, Register result, Register chr, |
kvn@1421 | 2827 | XMMRegister vec1, XMMRegister vec2); |
never@739 | 2828 | |
never@2118 | 2829 | // Fill primitive arrays |
never@2118 | 2830 | void generate_fill(BasicType t, bool aligned, |
never@2118 | 2831 | Register to, Register value, Register count, |
never@2118 | 2832 | Register rtmp, XMMRegister xtmp); |
never@2118 | 2833 | |
duke@435 | 2834 | #undef VIRTUAL |
duke@435 | 2835 | |
duke@435 | 2836 | }; |
duke@435 | 2837 | |
/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
class SkipIfEqual {
 private:
   MacroAssembler* _masm;   // assembler the guard code is emitted into
   Label _label;            // branch target bound by the destructor

 public:
   // Emits a compare of *flag_addr against value and a conditional jump
   // over the scoped code; the destructor binds the jump target.
   SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
   ~SkipIfEqual();
};
duke@435 | 2855 | |
#ifdef ASSERT
// Debug builds enable instruction-mark consistency checking on x86.
inline bool AbstractAssembler::pd_check_instruction_mark() { return true; }
#endif
stefank@2314 | 2859 | |
stefank@2314 | 2860 | #endif // CPU_X86_VM_ASSEMBLER_X86_HPP |