// src/cpu/x86/vm/nativeInst_x86.hpp
1 /*
2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_VM_NATIVEINST_X86_HPP
26 #define CPU_X86_VM_NATIVEINST_X86_HPP
27
28 #include "asm/assembler.hpp"
29 #include "memory/allocation.hpp"
30 #include "runtime/icache.hpp"
31 #include "runtime/os.hpp"
32 #include "utilities/top.hpp"
33
// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativeCall
// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - NativeMovRegMemPatching
// - - NativeLoadAddress
// - - NativeJump
// - - NativeIllegalInstruction
// - - NativeGeneralJump
// - - NativeReturn
// - - NativeReturnX (return with argument)
// - - NativePopReg
// - - NativeTstRegMem
48
49 // The base class for different kinds of native instruction abstractions.
50 // Provides the primitive operations to manipulate code relative to this.
51
class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;

 public:
  enum Intel_specific_constants {
    nop_instruction_code        = 0x90,   // single-byte x86 'nop'
    nop_instruction_size        =    1
  };

  // Predicates that decode the raw byte(s) located at 'this'.
  bool is_nop()                        { return ubyte_at(0) == nop_instruction_code; }
  bool is_dtrace_trap();
  inline bool is_call();
  inline bool is_illegal();
  inline bool is_return();
  inline bool is_jump();
  inline bool is_cond_jump();
  inline bool is_safepoint_poll();
  inline bool is_mov_literal64();

 protected:
  // Address of the byte at 'offset' from the start of this instruction.
  address addr_at(int offset) const    { return address(this) + offset; }

  // Raw reads of instruction bytes/words at 'offset'.
  s_char sbyte_at(int offset) const    { return *(s_char*) addr_at(offset); }
  u_char ubyte_at(int offset) const    { return *(u_char*) addr_at(offset); }

  jint int_at(int offset) const        { return *(jint*) addr_at(offset); }

  intptr_t ptr_at(int offset) const    { return *(intptr_t*) addr_at(offset); }

  oop  oop_at (int offset) const       { return *(oop*) addr_at(offset); }

  // Raw writes; each one notifies wrote() so the icache can be invalidated
  // on platforms that need it.
  void set_char_at(int offset, char c)        { *addr_at(offset) = (u_char)c; wrote(offset); }
  void set_int_at(int offset, jint  i)        { *(jint*)addr_at(offset) = i;  wrote(offset); }
  void set_ptr_at (int offset, intptr_t  ptr) { *(intptr_t*) addr_at(offset) = ptr;  wrote(offset); }
  void set_oop_at (int offset, oop  o)        { *(oop*) addr_at(offset) = o;  wrote(offset); }

  // This doesn't really do anything on Intel, but it is the place where
  // cache invalidation belongs, generically:
  void wrote(int offset);

 public:

  // unit test stuff
  static void test() {}                 // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);
};
100
101 inline NativeInstruction* nativeInstruction_at(address address) {
102 NativeInstruction* inst = (NativeInstruction*)address;
103 #ifdef ASSERT
104 //inst->verify();
105 #endif
106 return inst;
107 }
108
109 inline NativeCall* nativeCall_at(address address);
110 // The NativeCall is an abstraction for accessing/manipulating native call imm32/rel32off
111 // instructions (used to manipulate inline caches, primitive & dll calls, etc.).
112
class NativeCall: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xE8,  // opcode of 'call rel32'
    instruction_size            =    5,  // one opcode byte + 4-byte displacement
    instruction_offset          =    0,
    displacement_offset         =    1,  // rel32 field starts right after the opcode
    return_address_offset       =    5   // the call pushes the address of the next instruction
  };

  enum { cache_line_size = BytesPerWord }; // conservative estimate!

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(return_address_offset); }
  // Signed rel32 displacement, relative to the return address.
  int   displacement() const                { return (jint) int_at(displacement_offset); }
  address displacement_address() const      { return addr_at(displacement_offset); }
  address return_address() const            { return addr_at(return_address_offset); }
  address destination() const;
  // Rewrite the rel32 field so the call targets 'dest'.  Not MT-safe; use
  // set_destination_mt_safe() if other threads may be executing this code.
  void  set_destination(address dest)       {
#ifdef AMD64
    // On x86_64 the target must still be reachable via a signed 32-bit offset.
    assert((labs((intptr_t) dest - (intptr_t) return_address())  &
            0xFFFFFFFF00000000) == 0,
           "must be 32bit offset");
#endif // AMD64
    set_int_at(displacement_offset, dest - return_address());
  }
  void  set_destination_mt_safe(address dest);

  // The displacement must be int-aligned for atomic (MT-safe) patching.
  void  verify_alignment() { assert((intptr_t)addr_at(displacement_offset) % BytesPerInt == 0, "must be aligned"); }
  void  verify();
  void  print();

  // Creation
  inline friend NativeCall* nativeCall_at(address address);
  inline friend NativeCall* nativeCall_before(address return_address);

  static bool is_call_at(address instr) {
    return ((*instr) & 0xFF) == NativeCall::instruction_code;
  }

  static bool is_call_before(address return_address) {
    return is_call_at(return_address - NativeCall::return_address_offset);
  }

  static bool is_call_to(address instr, address target) {
    return nativeInstruction_at(instr)->is_call() &&
      nativeCall_at(instr)->destination() == target;
  }

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer);
};
167
168 inline NativeCall* nativeCall_at(address address) {
169 NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
170 #ifdef ASSERT
171 call->verify();
172 #endif
173 return call;
174 }
175
176 inline NativeCall* nativeCall_before(address return_address) {
177 NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
178 #ifdef ASSERT
179 call->verify();
180 #endif
181 return call;
182 }
183
184 // An interface for accessing/manipulating native mov reg, imm32 instructions.
185 // (used to manipulate inlined 32bit data dll calls, etc.)
class NativeMovConstReg: public NativeInstruction {
#ifdef AMD64
  // On x86_64 the 'mov reg, imm64' encoding carries a one-byte REX prefix.
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif // AMD64
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xB8,                         // base opcode; low 3 bits select the register
    instruction_size            =    1 + rex_size + wordSize,
    instruction_offset          =    0,
    data_offset                 =    1 + rex_size,              // immediate follows (REX+)opcode
    next_instruction_offset     =    instruction_size,
    register_mask               = 0x07                          // extracts the register from the opcode byte
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  // The word-sized immediate operand.
  intptr_t data() const                     { return ptr_at(data_offset); }
  void  set_data(intptr_t x)                { set_ptr_at(data_offset, x); }

  void  verify();
  void  print();

  // unit test stuff
  static void test() {}

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
};
219
220 inline NativeMovConstReg* nativeMovConstReg_at(address address) {
221 NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset);
222 #ifdef ASSERT
223 test->verify();
224 #endif
225 return test;
226 }
227
228 inline NativeMovConstReg* nativeMovConstReg_before(address address) {
229 NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
230 #ifdef ASSERT
231 test->verify();
232 #endif
233 return test;
234 }
235
// Variant of NativeMovConstReg used by patching code; the distinct type lets
// patching sites assert they are looking at the expected instruction shape.
class NativeMovConstRegPatching: public NativeMovConstReg {
 private:
  friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};
246
247 // An interface for accessing/manipulating native moves of the form:
248 // mov[b/w/l/q] [reg + offset], reg (instruction_code_reg2mem)
249 // mov[b/w/l/q] reg, [reg+offset] (instruction_code_mem2reg
250 // mov[s/z]x[w/b/q] [reg + offset], reg
251 // fld_s [reg+offset]
252 // fld_d [reg+offset]
253 // fstp_s [reg + offset]
254 // fstp_d [reg + offset]
255 // mov_literal64 scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
256 //
257 // Warning: These routines must be able to handle any instruction sequences
258 // that are generated as a result of the load/store byte,word,long
259 // macros. For example: The load_unsigned_byte instruction generates
260 // an xor reg,reg inst prior to generating the movb instruction. This
261 // class must skip the xor instruction.
262
class NativeMovRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    // Optional prefixes that may precede the opcode.
    instruction_prefix_wide_lo          = Assembler::REX,
    instruction_prefix_wide_hi          = Assembler::REX_WRXB,
    instruction_code_xor                = 0x33,
    instruction_extended_prefix         = 0x0F,
    // mem->reg sign/zero-extending move opcodes (follow the 0x0F prefix,
    // except movslq which is a one-byte opcode on its own).
    instruction_code_mem2reg_movslq     = 0x63,
    instruction_code_mem2reg_movzxb     = 0xB6,
    instruction_code_mem2reg_movsxb     = 0xBE,
    instruction_code_mem2reg_movzxw     = 0xB7,
    instruction_code_mem2reg_movsxw     = 0xBF,
    instruction_operandsize_prefix      = 0x66,
    // Plain register<->memory moves.
    instruction_code_reg2mem            = 0x89,
    instruction_code_mem2reg            = 0x8b,
    instruction_code_reg2memb           = 0x88,
    instruction_code_mem2regb           = 0x8a,
    // x87 float loads/stores.
    instruction_code_float_s            = 0xd9,
    instruction_code_float_d            = 0xdd,
    instruction_code_long_volatile      = 0xdf,
    // SSE scalar moves: 0xF3/0xF2 prefix, 0x0F escape, then load/store opcode.
    instruction_code_xmm_ss_prefix      = 0xf3,
    instruction_code_xmm_sd_prefix      = 0xf2,
    instruction_code_xmm_code           = 0x0f,
    instruction_code_xmm_load           = 0x10,
    instruction_code_xmm_store          = 0x11,
    instruction_code_xmm_lpd            = 0x12,

    // AVX encodings (VEX-prefixed forms of the above).
    instruction_VEX_prefix_2bytes       = Assembler::VEX_2bytes,
    instruction_VEX_prefix_3bytes       = Assembler::VEX_3bytes,

    instruction_size                    = 4,
    instruction_offset                  = 0,
    data_offset                         = 2,
    next_instruction_offset             = 4
  };

  // helper: offset of the real opcode, skipping any prefixes (and the
  // xor-reg,reg that load_unsigned_byte emits first).
  int instruction_start() const;

  address instruction_address() const;

  address next_instruction_address() const;

  // The memory-operand displacement of the move.
  int   offset() const;

  void  set_offset(int x);

  void  add_offset_in_bytes(int add_offset)     { set_offset ( ( offset() + add_offset ) ); }

  void verify();
  void print ();

  // unit test stuff
  static void test() {}

 private:
  inline friend NativeMovRegMem* nativeMovRegMem_at (address address);
};
321
322 inline NativeMovRegMem* nativeMovRegMem_at (address address) {
323 NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset);
324 #ifdef ASSERT
325 test->verify();
326 #endif
327 return test;
328 }
329
// Variant of NativeMovRegMem used by patching code; the distinct type lets
// patching sites assert they are looking at the expected instruction shape.
class NativeMovRegMemPatching: public NativeMovRegMem {
 private:
  friend NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) {
    NativeMovRegMemPatching* test = (NativeMovRegMemPatching*)(address - instruction_offset);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};
340
341
342
343 // An interface for accessing/manipulating native leal instruction of form:
344 // leal reg, [reg + offset]
345
class NativeLoadAddress: public NativeMovRegMem {
#ifdef AMD64
  // On x86_64 the encoding carries a one-byte REX prefix.
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif // AMD64
 public:
  enum Intel_specific_constants {
    instruction_prefix_wide             = Assembler::REX_W,
    instruction_prefix_wide_extended    = Assembler::REX_WB,
    lea_instruction_code                = 0x8D,
    mov64_instruction_code              = 0xB8
  };

  void verify();
  void print ();

  // unit test stuff
  static void test() {}

 private:
  // View the lea instruction beginning at 'address' (debug builds verify it).
  friend NativeLoadAddress* nativeLoadAddress_at (address address) {
    NativeLoadAddress* test = (NativeLoadAddress*)(address - instruction_offset);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};
377
378 // jump rel32off
379
class NativeJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xe9,  // opcode of 'jmp rel32'
    instruction_size            =    5,  // one opcode byte + 4-byte displacement
    instruction_offset          =    0,
    data_offset                 =    1,  // rel32 field starts right after the opcode
    next_instruction_offset     =    5
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  // Absolute jump target, or (address)-1 if the jump is still unresolved.
  address jump_destination() const          {
     address dest = (int_at(data_offset)+next_instruction_address());
     // 32bit used to encode unresolved jmp as jmp -1
     // 64bit can't produce this so it used jump to self.
     // Now 32bit and 64bit use jump to self as the unresolved address
     // which the inline cache code (and relocs) know about

     // return -1 if jump to self
    dest = (dest == (address) this) ? (address) -1 : dest;
    return dest;
  }

  // Set the jump target; (address)-1 encodes "unresolved" as a jump to self.
  void  set_jump_destination(address dest)  {
    intptr_t val = dest - next_instruction_address();
    if (dest == (address) -1) {
      val = -5; // jump to self: rel32 of -5 re-executes this 5-byte jump
    }
#ifdef AMD64
    assert((labs(val)  & 0xFFFFFFFF00000000) == 0 || dest == (address)-1, "must be 32bit offset or -1");
#endif // AMD64
    set_int_at(data_offset, (jint)val);
  }

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};
429
430 inline NativeJump* nativeJump_at(address address) {
431 NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset);
432 #ifdef ASSERT
433 jump->verify();
434 #endif
435 return jump;
436 }
437
438 // Handles all kinds of jump on Intel. Long/far, conditional/unconditional
class NativeGeneralJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    // Constants does not apply, since the lengths and offsets depends on the actual jump
    // used
    // Instruction codes:
    //   Unconditional jumps: 0xE9    (rel32off), 0xEB (rel8off)
    //   Conditional jumps:   0x0F8x  (rel32off), 0x7x (rel8off)
    unconditional_long_jump  = 0xe9,
    unconditional_short_jump = 0xeb,
    instruction_size = 5
  };

  address instruction_address() const       { return addr_at(0); }
  // Decodes the target for whichever jump form is present.
  address jump_destination()    const;

  // Creation
  inline friend NativeGeneralJump* nativeGeneralJump_at(address address);

  // Insertion of native general jump instruction
  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify();
};
464
465 inline NativeGeneralJump* nativeGeneralJump_at(address address) {
466 NativeGeneralJump* jump = (NativeGeneralJump*)(address);
467 debug_only(jump->verify();)
468 return jump;
469 }
470
// pop reg — one-byte encoding; the low 3 opcode bits select the register.
class NativePopReg : public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0x58,  // base opcode of 'pop r32/r64'
    instruction_size            =    1,
    instruction_offset          =    0,
    data_offset                 =    1,
    next_instruction_offset     =    1
  };

  // Insert a pop instruction
  static void insert(address code_pos, Register reg);
};
484
485
// ud2 — guaranteed-undefined opcode used to force a trap.
class NativeIllegalInstruction: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0x0B0F, // Real byte order is: 0x0F, 0x0B
    instruction_size            =    2,
    instruction_offset          =    0,
    next_instruction_offset     =    2
  };

  // Insert illegal opcode as specific address
  static void insert(address code_pos);
};
498
499 // return instruction that does not pop values of the stack
// return instruction that does not pop values of the stack ('ret')
class NativeReturn: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xC3,
    instruction_size            =    1,
    instruction_offset          =    0,
    next_instruction_offset     =    1
  };
};
509
510 // return instruction that does pop values of the stack
// return instruction that does pop values of the stack ('ret imm16')
class NativeReturnX: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xC2,
    instruction_size            =    2,
    instruction_offset          =    0,
    next_instruction_offset     =    2
  };
};
520
521 // Simple test vs memory
// Simple test vs memory ('test r/m32, r32') — used for safepoint polls.
class NativeTstRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_rex_prefix_mask = 0xF0,             // any REX prefix is 0x40..0x4F
    instruction_rex_prefix      = Assembler::REX,
    instruction_code_memXregl   = 0x85,             // opcode of 'test r/m32, r32'
    modrm_mask                  = 0x38,             // select reg from the ModRM byte
    modrm_reg                   = 0x00              // rax
  };
};
532
// The predicates below decode the leading byte(s) at 'this'.
inline bool NativeInstruction::is_illegal()      { return (short)int_at(0) == (short)NativeIllegalInstruction::instruction_code; }
inline bool NativeInstruction::is_call()         { return ubyte_at(0) == NativeCall::instruction_code; }
inline bool NativeInstruction::is_return()       { return ubyte_at(0) == NativeReturn::instruction_code ||
                                                          ubyte_at(0) == NativeReturnX::instruction_code; }
inline bool NativeInstruction::is_jump()         { return ubyte_at(0) == NativeJump::instruction_code ||
                                                          ubyte_at(0) == 0xEB; /* short jump */ }
// Little-endian read: (int_at(0) & 0xF0FF) == 0x800F matches bytes 0x0F 0x8x.
inline bool NativeInstruction::is_cond_jump()    { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
                                                          (ubyte_at(0) & 0xF0) == 0x70;  /* short jump */ }
541 inline bool NativeInstruction::is_safepoint_poll() {
542 #ifdef AMD64
543 if (Assembler::is_polling_page_far()) {
544 // two cases, depending on the choice of the base register in the address.
545 if (((ubyte_at(0) & NativeTstRegMem::instruction_rex_prefix_mask) == NativeTstRegMem::instruction_rex_prefix &&
546 ubyte_at(1) == NativeTstRegMem::instruction_code_memXregl &&
547 (ubyte_at(2) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg) ||
548 ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
549 (ubyte_at(1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg) {
550 return true;
551 } else {
552 return false;
553 }
554 } else {
555 if (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
556 ubyte_at(1) == 0x05) { // 00 rax 101
557 address fault = addr_at(6) + int_at(2);
558 return os::is_poll_address(fault);
559 } else {
560 return false;
561 }
562 }
563 #else
564 return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2reg ||
565 ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl ) &&
566 (ubyte_at(1)&0xC7) == 0x05 && /* Mod R/M == disp32 */
567 (os::is_poll_address((address)int_at(2)));
568 #endif // AMD64
569 }
570
inline bool NativeInstruction::is_mov_literal64() {
#ifdef AMD64
  // 'mov reg, imm64': REX.W (REX.WB for r8-r15) followed by opcode 0xB8+reg;
  // mask off the low register bits of the opcode before comparing.
  return ((ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB) &&
          (ubyte_at(1) & (0xff ^ NativeMovConstReg::register_mask)) == 0xB8);
#else
  return false;
#endif // AMD64
}
579
580 #endif // CPU_X86_VM_NATIVEINST_X86_HPP

mercurial