1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/src/cpu/x86/vm/nativeInst_x86.hpp Sat Dec 01 00:00:00 2007 +0000 1.3 @@ -0,0 +1,588 @@ 1.4 +/* 1.5 + * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. 1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 1.7 + * 1.8 + * This code is free software; you can redistribute it and/or modify it 1.9 + * under the terms of the GNU General Public License version 2 only, as 1.10 + * published by the Free Software Foundation. 1.11 + * 1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 1.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 1.15 + * version 2 for more details (a copy is included in the LICENSE file that 1.16 + * accompanied this code). 1.17 + * 1.18 + * You should have received a copy of the GNU General Public License version 1.19 + * 2 along with this work; if not, write to the Free Software Foundation, 1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 1.21 + * 1.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 1.23 + * CA 95054 USA or visit www.sun.com if you need additional information or 1.24 + * have any questions. 1.25 + * 1.26 + */ 1.27 + 1.28 +// We have interfaces for the following instructions: 1.29 +// - NativeInstruction 1.30 +// - - NativeCall 1.31 +// - - NativeMovConstReg 1.32 +// - - NativeMovConstRegPatching 1.33 +// - - NativeMovRegMem 1.34 +// - - NativeMovRegMemPatching 1.35 +// - - NativeJump 1.36 +// - - NativeIllegalOpCode 1.37 +// - - NativeGeneralJump 1.38 +// - - NativeReturn 1.39 +// - - NativeReturnX (return with argument) 1.40 +// - - NativePushConst 1.41 +// - - NativeTstRegMem 1.42 + 1.43 +// The base class for different kinds of native instruction abstractions. 1.44 +// Provides the primitive operations to manipulate code relative to this. 
class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;

 public:
  enum Intel_specific_constants {
    nop_instruction_code        = 0x90,   // single-byte x86 nop
    nop_instruction_size        =    1
  };

  bool is_nop()                        { return ubyte_at(0) == nop_instruction_code; }
  // Opcode predicates; defined inline at the end of this file, after the
  // instruction classes whose opcode constants they reference.
  inline bool is_call();
  inline bool is_illegal();
  inline bool is_return();
  inline bool is_jump();
  inline bool is_cond_jump();
  inline bool is_safepoint_poll();
  inline bool is_mov_literal64();

 protected:
  // The object is overlaid directly on the instruction bytes, so the
  // instruction's own address is 'this'.
  address addr_at(int offset) const    { return address(this) + offset; }

  // Raw readers for the instruction bytes at 'offset' from the opcode.
  s_char sbyte_at(int offset) const    { return *(s_char*) addr_at(offset); }
  u_char ubyte_at(int offset) const    { return *(u_char*) addr_at(offset); }

  jint int_at(int offset) const        { return *(jint*) addr_at(offset); }

  intptr_t ptr_at(int offset) const    { return *(intptr_t*) addr_at(offset); }

  oop  oop_at (int offset) const       { return *(oop*) addr_at(offset); }

  // Raw writers; each one calls wrote() afterwards so a port can hook
  // instruction-cache invalidation in one place.
  void set_char_at(int offset, char c)        { *addr_at(offset) = (u_char)c; wrote(offset); }
  void set_int_at(int offset, jint  i)        { *(jint*)addr_at(offset) = i;  wrote(offset); }
  void set_ptr_at (int offset, intptr_t  ptr) { *(intptr_t*) addr_at(offset) = ptr;  wrote(offset); }
  void set_oop_at (int offset, oop  o)        { *(oop*) addr_at(offset) = o;  wrote(offset); }

  // This doesn't really do anything on Intel, but it is the place where
  // cache invalidation belongs, generically:
  void wrote(int offset);

 public:

  // unit test stuff
  static void test() {}                 // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);
};

// Overlay a NativeInstruction view on the code at 'address'.
inline NativeInstruction* nativeInstruction_at(address address) {
  NativeInstruction* inst = (NativeInstruction*)address;
#ifdef ASSERT
  //inst->verify();
#endif
  return inst;
}

inline NativeCall* nativeCall_at(address address);
// The NativeCall is an abstraction for accessing/manipulating native call imm32/rel32off
// instructions (used to manipulate inline caches, primitive & dll calls, etc.).

class NativeCall: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xE8, // call rel32off
    instruction_size            =    5,
    instruction_offset          =    0,
    displacement_offset         =    1, // rel32 displacement follows the opcode byte
    return_address_offset       =    5  // first byte after the call instruction
  };

  enum { cache_line_size = BytesPerWord }; // conservative estimate!

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(return_address_offset); }
  int   displacement() const                { return (jint) int_at(displacement_offset); }
  address displacement_address() const      { return addr_at(displacement_offset); }
  address return_address() const            { return addr_at(return_address_offset); }
  address destination() const;
  // Re-target the call; the rel32 displacement is encoded relative to the
  // return address (the byte after the instruction).
  void  set_destination(address dest)       {
#ifdef AMD64
    // The new target must be reachable with a signed 32-bit displacement.
    assert((labs((intptr_t) dest - (intptr_t) return_address()) &
            0xFFFFFFFF00000000) == 0,
           "must be 32bit offset");
#endif // AMD64
    set_int_at(displacement_offset, dest - return_address());
  }
  void  set_destination_mt_safe(address dest);

  // The rel32 field must be int-aligned for atomic patching.
  void  verify_alignment() { assert((intptr_t)addr_at(displacement_offset) % BytesPerInt == 0, "must be aligned"); }
  void  verify();
  void  print();

  // Creation
  inline friend NativeCall* nativeCall_at(address address);
  inline friend NativeCall* nativeCall_before(address return_address);

  static bool is_call_at(address instr) {
    return ((*instr) & 0xFF) == NativeCall::instruction_code;
  }

  static bool is_call_before(address return_address) {
    return is_call_at(return_address - NativeCall::return_address_offset);
  }

  static bool is_call_to(address instr, address target) {
    return nativeInstruction_at(instr)->is_call() &&
      nativeCall_at(instr)->destination() == target;
  }

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer);
};

// Overlay a NativeCall on the call instruction starting at 'address'.
inline NativeCall* nativeCall_at(address address) {
  NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

// Overlay a NativeCall on the call instruction whose return address is
// 'return_address' (i.e. the call ends just before that address).
inline NativeCall* nativeCall_before(address return_address) {
  NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

// An interface for accessing/manipulating native mov reg, imm32 instructions.
// (used to manipulate inlined 32bit data dll calls, etc.)
class NativeMovConstReg: public NativeInstruction {
#ifdef AMD64
  // On 64-bit, mov reg, imm64 carries a one-byte REX prefix before the opcode.
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif // AMD64
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xB8,                   // mov reg, imm (register in low opcode bits)
    instruction_size            = 1 + rex_size + wordSize,
    instruction_offset          = 0,
    data_offset                 = 1 + rex_size,           // immediate follows [REX] + opcode
    next_instruction_offset     = instruction_size,
    register_mask               = 0x07                    // low 3 opcode bits select the register
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  // Word-sized immediate payload (e.g. an embedded constant to be patched).
  intptr_t data() const                     { return ptr_at(data_offset); }
  void  set_data(intptr_t x)                { set_ptr_at(data_offset, x); }

  void  verify();
  void  print();

  // unit test stuff
  static void test() {}

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
};

// Overlay on the mov instruction starting at 'address'.
inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

// Overlay on the mov instruction that ends just before 'address'.
inline NativeMovConstReg* nativeMovConstReg_before(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

// Same layout as NativeMovConstReg; distinct type used by patching code.
class NativeMovConstRegPatching: public NativeMovConstReg {
 private:
  friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};

#ifndef AMD64

// An interface for accessing/manipulating native moves of the form:
//      mov[b/w/l] [reg + offset], reg   (instruction_code_reg2mem)
//      mov[b/w/l] reg, [reg+offset]     (instruction_code_mem2reg)
//      mov[s/z]x[w/b] [reg + offset], reg
//      fld_s  [reg+offset]
//      fld_d  [reg+offset]
//      fstp_s [reg + offset]
//      fstp_d [reg + offset]
//
// Warning: These routines must be able to handle any instruction sequences
// that are generated as a result of the load/store byte,word,long
// macros.  For example: The load_unsigned_byte instruction generates
// an xor reg,reg inst prior to generating the movb instruction.  This
// class must skip the xor instruction.

class NativeMovRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code_xor                = 0x33,
    instruction_extended_prefix         = 0x0F,
    instruction_code_mem2reg_movzxb     = 0xB6,
    instruction_code_mem2reg_movsxb     = 0xBE,
    instruction_code_mem2reg_movzxw     = 0xB7,
    instruction_code_mem2reg_movsxw     = 0xBF,
    instruction_operandsize_prefix      = 0x66,
    instruction_code_reg2meml           = 0x89,
    instruction_code_mem2regl           = 0x8b,
    instruction_code_reg2memb           = 0x88,
    instruction_code_mem2regb           = 0x8a,
    instruction_code_float_s            = 0xd9,
    instruction_code_float_d            = 0xdd,
    instruction_code_long_volatile      = 0xdf,
    instruction_code_xmm_ss_prefix      = 0xf3,
    instruction_code_xmm_sd_prefix      = 0xf2,
    instruction_code_xmm_code           = 0x0f,
    instruction_code_xmm_load           = 0x10,
    instruction_code_xmm_store          = 0x11,
    instruction_code_xmm_lpd            = 0x12,

    instruction_size                    = 4,
    instruction_offset                  = 0,
    data_offset                         = 2,  // displacement offset in the un-prefixed form
    next_instruction_offset             = 4
  };

  // Address of the real opcode, skipping any leading prefix byte or
  // the xor reg,reg emitted before a byte load (see warning above).
  address instruction_address() const {
    if (*addr_at(instruction_offset)   == instruction_operandsize_prefix &&
        *addr_at(instruction_offset+1) != instruction_code_xmm_code) {
      return addr_at(instruction_offset+1); // Not SSE instructions
    }
    else if (*addr_at(instruction_offset) == instruction_extended_prefix) {
      return addr_at(instruction_offset+1);
    }
    else if (*addr_at(instruction_offset) == instruction_code_xor) {
      return addr_at(instruction_offset+2);
    }
    else return addr_at(instruction_offset);
  }

  address next_instruction_address() const {
    switch (*addr_at(instruction_offset)) {
    case instruction_operandsize_prefix:
      if (*addr_at(instruction_offset+1) == instruction_code_xmm_code)
        return instruction_address() + instruction_size; // SSE instructions
      // else falls through to the extended-prefix case (extra prefix byte)
    case instruction_extended_prefix:
      return instruction_address() + instruction_size + 1;
    case instruction_code_reg2meml:
    case instruction_code_mem2regl:
    case instruction_code_reg2memb:
    case instruction_code_mem2regb:
    case instruction_code_xor:
      return instruction_address() + instruction_size + 2;
    default:
      return instruction_address() + instruction_size;
    }
  }
  // The 32-bit memory displacement; its position depends on which
  // prefix/xor bytes precede the opcode (mirrors instruction_address()).
  int   offset() const{
    if (*addr_at(instruction_offset)   == instruction_operandsize_prefix &&
        *addr_at(instruction_offset+1) != instruction_code_xmm_code) {
      return int_at(data_offset+1); // Not SSE instructions
    }
    else if (*addr_at(instruction_offset) == instruction_extended_prefix) {
      return int_at(data_offset+1);
    }
    else if (*addr_at(instruction_offset) == instruction_code_xor ||
             *addr_at(instruction_offset) == instruction_code_xmm_ss_prefix ||
             *addr_at(instruction_offset) == instruction_code_xmm_sd_prefix ||
             *addr_at(instruction_offset) == instruction_operandsize_prefix) {
      return int_at(data_offset+2);
    }
    else return int_at(data_offset);
  }

  // Patch the displacement in place; same layout cases as offset().
  void  set_offset(int x) {
    if (*addr_at(instruction_offset)   == instruction_operandsize_prefix &&
        *addr_at(instruction_offset+1) != instruction_code_xmm_code) {
      set_int_at(data_offset+1, x); // Not SSE instructions
    }
    else if (*addr_at(instruction_offset) == instruction_extended_prefix) {
      set_int_at(data_offset+1, x);
    }
    else if (*addr_at(instruction_offset) == instruction_code_xor ||
             *addr_at(instruction_offset) == instruction_code_xmm_ss_prefix ||
             *addr_at(instruction_offset) == instruction_code_xmm_sd_prefix ||
             *addr_at(instruction_offset) == instruction_operandsize_prefix) {
      set_int_at(data_offset+2, x);
    }
    else set_int_at(data_offset, x);
  }

  void  add_offset_in_bytes(int add_offset)     { set_offset ( ( offset() + add_offset ) ); }
  void  copy_instruction_to(address new_instruction_address);

  void verify();
  void print ();

  // unit test stuff
  static void test() {}

 private:
  inline friend NativeMovRegMem* nativeMovRegMem_at (address address);
};

inline NativeMovRegMem* nativeMovRegMem_at (address address) {
  NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

// Same layout as NativeMovRegMem; distinct type used by patching code.
class NativeMovRegMemPatching: public NativeMovRegMem {
 private:
  friend NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) {
    NativeMovRegMemPatching* test = (NativeMovRegMemPatching*)(address - instruction_offset);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};



// An interface for accessing/manipulating native leal instruction of form:
//        leal reg, [reg + offset]

class NativeLoadAddress: public NativeMovRegMem {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0x8D
  };

  void verify();
  void print ();

  // unit test stuff
  static void test() {}

 private:
  friend NativeLoadAddress* nativeLoadAddress_at (address address) {
    NativeLoadAddress* test = (NativeLoadAddress*)(address - instruction_offset);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};

#endif // AMD64

// jump rel32off

class NativeJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xe9, // jmp rel32off
    instruction_size            =    5,
    instruction_offset          =    0,
    data_offset                 =    1,
    next_instruction_offset     =    5
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  address jump_destination() const          {
     address dest = (int_at(data_offset)+next_instruction_address());
#ifdef AMD64 // What is this about?
     // return -1 if jump to self
     dest = (dest == (address) this) ? (address) -1 : dest;
#endif // AMD64
     return dest;
  }

  // Re-target the jump; rel32 is encoded relative to the next instruction.
  void  set_jump_destination(address dest)  {
     intptr_t val = dest - next_instruction_address();
#ifdef AMD64
     if (dest == (address) -1) { // can't encode jump to -1
       val = -5; // jump to self
     } else {
       assert((labs(val)  & 0xFFFFFFFF00000000) == 0,
              "must be 32bit offset");
     }
#endif // AMD64
     set_int_at(data_offset, (jint)val);
  }

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};

// Overlay a NativeJump on the jmp instruction starting at 'address'.
inline NativeJump* nativeJump_at(address address) {
  NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset);
#ifdef ASSERT
  jump->verify();
#endif
  return jump;
}

// Handles all kinds of jump on Intel.
// Long/far, conditional/unconditional
class NativeGeneralJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    // Constants do not apply, since the lengths and offsets depend on the actual jump
    // used.
    // Instruction codes:
    //   Unconditional jumps: 0xE9    (rel32off), 0xEB (rel8off)
    //   Conditional jumps:   0x0F8x  (rel32off), 0x7x (rel8off)
    unconditional_long_jump  = 0xe9,
    unconditional_short_jump = 0xeb,
    instruction_size = 5
  };

  address instruction_address() const       { return addr_at(0); }
  address jump_destination()    const;

  // Creation
  inline friend NativeGeneralJump* nativeGeneralJump_at(address address);

  // Insertion of native general jump instruction
  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify();
};

// Overlay a NativeGeneralJump on the jump instruction at 'address'.
inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  NativeGeneralJump* jump = (NativeGeneralJump*)(address);
  debug_only(jump->verify();)
  return jump;
}

// pop reg32 (0x58 plus the register number)
class NativePopReg : public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0x58,
    instruction_size            =    1,
    instruction_offset          =    0,
    data_offset                 =    1,
    next_instruction_offset     =    1
  };

  // Insert a pop instruction
  static void insert(address code_pos, Register reg);
};


class NativeIllegalInstruction: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0x0B0F, // Real byte order is: 0x0F, 0x0B
    instruction_size            =    2,
    instruction_offset          =    0,
    next_instruction_offset     =    2
  };

  // Insert illegal opcode at specific address
  static void insert(address code_pos);
};

// return instruction that does not pop values off the stack
class NativeReturn: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xC3, // ret
    instruction_size            =    1,
    instruction_offset          =    0,
    next_instruction_offset     =    1
  };
};

// return instruction that does pop values off the stack
class NativeReturnX: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xC2, // ret imm16
    instruction_size            =    2,
    instruction_offset          =    0,
    next_instruction_offset     =    2
  };
};

// Simple test vs memory
class NativeTstRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code_memXregl   = 0x85  // test [mem], reg32
  };
};

// Opcode predicates declared in NativeInstruction.  x86 is little-endian,
// so the multi-byte int_at(0) reads below see the first instruction byte
// in the low-order bits.
inline bool NativeInstruction::is_illegal()      { return (short)int_at(0) == (short)NativeIllegalInstruction::instruction_code; }
inline bool NativeInstruction::is_call()         { return ubyte_at(0) == NativeCall::instruction_code; }
inline bool NativeInstruction::is_return()       { return ubyte_at(0) == NativeReturn::instruction_code ||
                                                          ubyte_at(0) == NativeReturnX::instruction_code; }
inline bool NativeInstruction::is_jump()         { return ubyte_at(0) == NativeJump::instruction_code ||
                                                          ubyte_at(0) == 0xEB; /* short jump */ }
inline bool NativeInstruction::is_cond_jump()    { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
                                                          (ubyte_at(0) & 0xF0) == 0x70;  /* short jump */ }
inline bool NativeInstruction::is_safepoint_poll() {
#ifdef AMD64
  // test rax, [disp32]: the displacement at offset 2 is added to the
  // address of the following instruction (addr_at(6)) and must land on
  // the polling page.
  return ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
         ubyte_at(1) == 0x05 && // 00 rax 101
         ((intptr_t) addr_at(6)) + int_at(2) == (intptr_t) os::get_polling_page();
#else
  // mov reg, [disp32] or test reg, [disp32] against a poll address.
  return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2regl ||
           ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl ) &&
         (ubyte_at(1)&0xC7) == 0x05 && /* Mod R/M == disp32 */
         (os::is_poll_address((address)int_at(2)));
#endif // AMD64
}

inline bool NativeInstruction::is_mov_literal64() {
#ifdef AMD64
  // REX.W (or REX.WB) prefix followed by mov reg, imm64; the low 3 opcode
  // bits carry the register number and are masked off before comparing.
  return ((ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB) &&
          (ubyte_at(1) & (0xff ^ NativeMovConstReg::register_mask)) == 0xB8);
#else
  return false;
#endif // AMD64
}