/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

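// Flush the freshly written word at 'offset' from the instruction cache so
// that no processor keeps executing a stale copy of the patched bytes.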
void NativeInstruction::wrote(int offset) {
  ICache::invalidate_word(addr_at(offset));
}
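
// A direct call is five bytes:
//
//   E8 <disp32>        call rel32
//
// The 32-bit displacement is relative to the address of the *next*
// instruction, so destination() == return_address() + displacement().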
void NativeCall::verify() {
  // Make sure code pattern is actually a call imm32 instruction.
  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", instruction_address(),
                  inst);
    fatal("not a call disp32");
  }
}

address NativeCall::destination() const {
  // Getting the destination of a call isn't safe because the call can be
  // patched while you are reading it. It may only be called from a few
  // special places, but that is not automatically verifiable by checking
  // which locks are held. The real fix is true atomic patching on x86,
  // which is not yet implemented.
  return return_address() + displacement();
}

void NativeCall::print() {
  tty->print_cr(PTR_FORMAT ": call " PTR_FORMAT,
                instruction_address(), destination());
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
  *code_pos = instruction_code;
  *((int32_t *)(code_pos+1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a call instruction.
// First patches the first word of the instruction with two jmps that jump
// to themselves (a spinlock). Then patches the last byte, and then
// atomically replaces the jmps with the first 4 bytes of the new
// instruction.
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert(instr_addr != NULL, "illegal address for code patching");

  NativeCall* n_call = nativeCall_at(instr_addr); // checking that it is a call
  if (os::is_MP()) {
    guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");
  }

  // Prepare the dummy jmp-to-self patch
  unsigned char patch[4];
  assert(sizeof(patch)==sizeof(jint), "sanity check");
  patch[0] = 0xEB;       // jmp rel8
  patch[1] = 0xFE;       // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;
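  // (0xEB 0xFE is "jmp rel8 -2": a two-byte jump whose target is its own
  //  first byte, so a thread executing the patched word spins in place.)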

  // First patch dummy jmp in place
  *(jint*)instr_addr = *(jint *)patch;

  // Invalidate. Opteron requires a flush after every write.
  n_call->wrote(0);

  // Patch the 5th byte (index 4)
  instr_addr[4] = code_buffer[4];

  n_call->wrote(4);

  // Patch bytes 0-3
  *(jint*)instr_addr = *(jint *)code_buffer;

  n_call->wrote(0);

#ifdef ASSERT
  // verify patching
  for (int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif
}
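
// (On x86 an aligned 4-byte store is atomic, which is what makes the two
//  jint stores above safe for threads concurrently executing the call.)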

// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times. If the displacement field is aligned
// we can simply rely on atomicity of 32-bit writes to make sure other threads
// will see no intermediate states. Otherwise, the first two bytes of the
// call are guaranteed to be aligned, and can be atomically patched to a
// self-loop to guard the instruction while we change the other bytes.

// We cannot rely on locks here, since the free-running threads must run at
// full speed.
//
// Used in the runtime linkage of calls; see class CompiledIC.
// (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)
void NativeCall::set_destination_mt_safe(address dest) {
  debug_only(verify());
  // Make sure patching code is locked. No two threads can patch at the same
  // time but one may be executing this code.
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // Both C1 and C2 should now be generating code which aligns the patched address
  // to be within a single cache line except that C1 does not do the alignment on
  // uniprocessor systems.
  bool is_aligned = ((uintptr_t)displacement_address() + 0) / cache_line_size ==
                    ((uintptr_t)displacement_address() + 3) / cache_line_size;
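  // (The four displacement bytes span displacement_address() + 0 .. + 3; the
  //  test above checks that they all fall within one cache line, so a single
  //  32-bit store can rewrite them with no visible intermediate state.)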

  guarantee(!os::is_MP() || is_aligned, "destination must be aligned");

  if (is_aligned) {
    // Simple case: The destination lies within a single cache line.
    set_destination(dest);
  } else if ((uintptr_t)instruction_address() / cache_line_size ==
             ((uintptr_t)instruction_address()+1) / cache_line_size) {
    // Tricky case: the first two bytes of the call lie within a single
    // cache line, so they can be patched atomically with a jmp-to-self.
    intptr_t disp = dest - return_address();
#ifdef AMD64
    guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64

    int call_opcode = instruction_address()[0];

    // First patch dummy jump in place:
    {
      u_char patch_jump[2];
      patch_jump[0] = 0xEB;       // jmp rel8
      patch_jump[1] = 0xFE;       // jmp to self

      assert(sizeof(patch_jump)==sizeof(short), "sanity check");
      *(short*)instruction_address() = *(short*)patch_jump;
    }
    // Invalidate. Opteron requires a flush after every write.
    wrote(0);

    // (Note: We assume any reader which has already started to read
    // the unpatched call will completely read the whole unpatched call
    // without seeing the next writes we are about to make.)

    // Next, patch the last three bytes:
    u_char patch_disp[5];
    patch_disp[0] = call_opcode;
    *(int32_t*)&patch_disp[1] = (int32_t)disp;
    assert(sizeof(patch_disp)==instruction_size, "sanity check");
    for (int i = sizeof(short); i < instruction_size; i++)
      instruction_address()[i] = patch_disp[i];

    // Invalidate. Opteron requires a flush after every write.
    wrote(sizeof(short));

    // (Note: We assume that any reader which reads the opcode we are
    // about to repatch will also read the writes we just made.)

    // Finally, overwrite the jump:
    *(short*)instruction_address() = *(short*)patch_disp;
    // Invalidate. Opteron requires a flush after every write.
    wrote(0);

    debug_only(verify());
    guarantee(destination() == dest, "patch succeeded");
  } else {
    // Impossible: One or the other must be atomically writable.
    ShouldNotReachHere();
  }
}
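
// A racing thread can observe only three states during the unaligned repatch
// above: the original call, the two-byte self-loop (on which it spins), or
// the fully repatched call; never a call with a half-written displacement.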
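// mov reg, imm is encoded as opcode B8+reg: the destination register lives in
// the low three bits of the opcode byte, which is why verify() masks them off
// with register_mask before comparing against the opcode.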
void NativeMovConstReg::verify() {
#ifdef AMD64
  // make sure code pattern is actually a mov reg64, imm64 instruction
  if ((ubyte_at(0) != Assembler::REX_W && ubyte_at(0) != Assembler::REX_WB) ||
      (ubyte_at(1) & (0xff ^ register_mask)) != 0xB8) {
    print();
    fatal("not a REX.W[B] mov reg64, imm64");
  }
#else
  // make sure code pattern is actually a mov reg, imm32 instruction
  u_char test_byte = *(u_char*)instruction_address();
  u_char test_byte_2 = test_byte & (0xff ^ register_mask);
  if (test_byte_2 != instruction_code) fatal("not a mov reg, imm32");
#endif // AMD64
}


void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                instruction_address(), data());
}

//-------------------------------------------------------------------

int NativeMovRegMem::instruction_start() const {
  int off = 0;
  u_char instr_0 = ubyte_at(off);

  // See comment in Assembler::locate_operand() about VEX prefixes.
  if (instr_0 == instruction_VEX_prefix_2bytes) {
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
    return 2;
  }
  if (instr_0 == instruction_VEX_prefix_3bytes) {
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
    return 3;
  }

  // First check to see if we have a (prefixed or not) xor
  if (instr_0 >= instruction_prefix_wide_lo && // 0x40
      instr_0 <= instruction_prefix_wide_hi) { // 0x4f
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_code_xor) {
    off += 2;
    instr_0 = ubyte_at(off);
  }

  // Now look for the real instruction and the many prefix/size specifiers.

  if (instr_0 == instruction_operandsize_prefix) {  // 0x66
    off++; // Not SSE instructions
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_code_xmm_ss_prefix || // 0xf3
      instr_0 == instruction_code_xmm_sd_prefix) { // 0xf2
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 >= instruction_prefix_wide_lo && // 0x40
      instr_0 <= instruction_prefix_wide_hi) { // 0x4f
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_extended_prefix) {  // 0x0f
    off++;
  }

  return off;
}
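
// Example: for "movsd xmm, mem", encoded F2 0F 10 /r, instruction_start()
// skips the F2 prefix and the 0F escape and returns 2, so
// instruction_address() points at the 0x10 opcode byte itself.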

address NativeMovRegMem::instruction_address() const {
  return addr_at(instruction_start());
}

address NativeMovRegMem::next_instruction_address() const {
  address ret = instruction_address() + instruction_size;
  u_char instr_0 = *(u_char*) instruction_address();
  switch (instr_0) {
  case instruction_operandsize_prefix:
    fatal("should have skipped instruction_operandsize_prefix");
    break;

  case instruction_extended_prefix:
    fatal("should have skipped instruction_extended_prefix");
    break;

  case instruction_code_mem2reg_movslq: // 0x63
  case instruction_code_mem2reg_movzxb: // 0xB6
  case instruction_code_mem2reg_movsxb: // 0xBE
  case instruction_code_mem2reg_movzxw: // 0xB7
  case instruction_code_mem2reg_movsxw: // 0xBF
  case instruction_code_reg2mem:        // 0x89 (q/l)
  case instruction_code_mem2reg:        // 0x8B (q/l)
  case instruction_code_reg2memb:       // 0x88
  case instruction_code_mem2regb:       // 0x8a

  case instruction_code_float_s:        // 0xd9 fld_s a
  case instruction_code_float_d:        // 0xdd fld_d a

  case instruction_code_xmm_load:       // 0x10
  case instruction_code_xmm_store:      // 0x11
  case instruction_code_xmm_lpd:        // 0x12
    {
      // If there is an SIB then instruction is longer than expected
      u_char mod_rm = *(u_char*)(instruction_address() + 1);
      if ((mod_rm & 7) == 0x4) {
        ret++;
      }
    }
    break;

  case instruction_code_xor:
    fatal("should have skipped xor lead in");
    break;

  default:
    fatal("not a NativeMovRegMem");
  }
  return ret;
}

int NativeMovRegMem::offset() const {
  int off = data_offset + instruction_start();
  u_char mod_rm = *(u_char*)(instruction_address() + 1);
  // An nnnn(r12|rsp) operand isn't encoded with just a mod/rm byte, since
  // r/m == 4 is the escape that selects an SIB byte; the SIB byte pushes
  // the nnnn offset field one byte further out.
  if ((mod_rm & 7) == 0x4) {
    off++;
  }
  return int_at(off);
}
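
// Example: an operand based on rsp or r12 can only be encoded through an SIB
// byte, so its displacement field starts one byte later than in the plain
// mod/rm form handled above.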

void NativeMovRegMem::set_offset(int x) {
  int off = data_offset + instruction_start();
  u_char mod_rm = *(u_char*)(instruction_address() + 1);
  // As in offset(): r/m == 4 means an SIB byte follows, shifting the
  // offset field out by one byte.
  if ((mod_rm & 7) == 0x4) {
    off++;
  }
  set_int_at(off, x);
}

void NativeMovRegMem::verify() {
  // make sure code pattern is actually a mov [reg+offset], reg instruction
  u_char test_byte = *(u_char*)instruction_address();
  switch (test_byte) {
  case instruction_code_reg2memb:  // 0x88 movb a, r
  case instruction_code_reg2mem:   // 0x89 movl a, r (can be movq in 64bit)
  case instruction_code_mem2regb:  // 0x8a movb r, a
  case instruction_code_mem2reg:   // 0x8b movl r, a (can be movq in 64bit)
    break;

  case instruction_code_mem2reg_movslq: // 0x63 movsql r, a
  case instruction_code_mem2reg_movzxb: // 0xb6 movzbl r, a (movzxb)
  case instruction_code_mem2reg_movzxw: // 0xb7 movzwl r, a (movzxw)
  case instruction_code_mem2reg_movsxb: // 0xbe movsbl r, a (movsxb)
  case instruction_code_mem2reg_movsxw: // 0xbf movswl r, a (movsxw)
    break;

  case instruction_code_float_s:   // 0xd9 fld_s a
  case instruction_code_float_d:   // 0xdd fld_d a
  case instruction_code_xmm_load:  // 0x10 movsd xmm, a
  case instruction_code_xmm_store: // 0x11 movsd a, xmm
  case instruction_code_xmm_lpd:   // 0x12 movlpd xmm, a
    break;

  default:
    fatal("not a mov [reg+offs], reg instruction");
  }
}


void NativeMovRegMem::print() {
  tty->print_cr("0x%x: mov reg, [reg + %x]", instruction_address(), offset());
}

//-------------------------------------------------------------------

void NativeLoadAddress::verify() {
  // make sure code pattern is actually a lea reg, [reg+offset] instruction
  // (or, on LP64, a mov reg64, imm64)
  u_char test_byte = *(u_char*)instruction_address();
#ifdef _LP64
  if ( (test_byte == instruction_prefix_wide ||
        test_byte == instruction_prefix_wide_extended) ) {
    test_byte = *(u_char*)(instruction_address() + 1);
  }
#endif // _LP64
  if ( ! ((test_byte == lea_instruction_code)
          LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) {
    fatal("not a lea reg, [reg+offs] instruction");
  }
}


void NativeLoadAddress::print() {
  tty->print_cr("0x%x: lea [reg + %x], reg", instruction_address(), offset());
}

//--------------------------------------------------------------------------------

void NativeJump::verify() {
  if (*(u_char*)instruction_address() != instruction_code) {
    fatal("not a jump instruction");
  }
}


void NativeJump::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = instruction_code;
  *((int32_t*)(code_pos + 1)) = (int32_t)disp;

  ICache::invalidate_range(code_pos, instruction_size);
}

void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
  // Patching to not_entrant can happen while activations of the method are
  // in use. The patching in that instance must happen only when certain
  // alignment restrictions are true. These guarantees check those
  // conditions.
#ifdef AMD64
  const int linesize = 64;
#else
  const int linesize = 32;
#endif // AMD64

  // Must be wordSize aligned
  guarantee(((uintptr_t) verified_entry & (wordSize -1)) == 0,
            "illegal address for code patching 2");
  // First 5 bytes must be within the same cache line - 4827828
  guarantee((uintptr_t) verified_entry / linesize ==
            ((uintptr_t) verified_entry + 4) / linesize,
            "illegal address for code patching 3");
}
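
// These two conditions are exactly what patch_verified_entry() below relies
// on: word alignment makes its 4-byte stores atomic, and keeping all 5 bytes
// in one cache line keeps the partially patched instruction coherent.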


// MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
// The problem: jmp <dest> is a 5-byte instruction, but an atomic write can
// only be 4 bytes wide.
// First patches the first word atomically to be a jump to itself.
// Then patches the last byte, and then atomically patches the first word (4 bytes),
// thus inserting the desired jump.
// This code is mt-safe with the following conditions: entry point is 4 byte aligned,
// entry point is in same cache line as unverified entry point, and the instruction being
// patched is >= 5 bytes (size of patch).
//
// In C2 the 5+ byte sized instruction is enforced by code in MachPrologNode::emit.
// In C1 the restriction is enforced by CodeEmitter::method_entry
//
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // The complete jump instruction (to be inserted) is built in code_buffer.
  unsigned char code_buffer[5];
  code_buffer[0] = instruction_code;
  intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64
  *(int32_t*)(code_buffer + 1) = (int32_t)disp;

  check_verified_entry_alignment(entry, verified_entry);

  // Can't call nativeJump_at() because its asserts require that a jump
  // already exists.
  NativeJump* n_jump = (NativeJump*) verified_entry;

  // Prepare the dummy jmp-to-self patch
  unsigned char patch[4];
  assert(sizeof(patch)==sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;       // jmp rel8
  patch[1] = 0xFE;       // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(int32_t*)verified_entry = *(int32_t *)patch;

  n_jump->wrote(0);

  // Patch 5th byte (from jump instruction)
  verified_entry[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3 (from jump instruction)
  *(int32_t*)verified_entry = *(int32_t *)code_buffer;
  // Invalidate. Opteron requires a flush after every write.
  n_jump->wrote(0);
}

void NativePopReg::insert(address code_pos, Register reg) {
  assert(reg->encoding() < 8, "no space for REX");
  assert(NativePopReg::instruction_size == sizeof(char), "right address unit for update");
  *code_pos = (u_char)(instruction_code | reg->encoding());
  ICache::invalidate_range(code_pos, instruction_size);
}
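
// (pop reg is the one-byte opcode 58+r; encoding registers r8-r15 would need
//  a REX prefix byte, hence the encoding() < 8 assert above.)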


void NativeIllegalInstruction::insert(address code_pos) {
  assert(NativeIllegalInstruction::instruction_size == sizeof(short), "right address unit for update");
  *(short *)code_pos = instruction_code;
  ICache::invalidate_range(code_pos, instruction_size);
}
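
// (The two bytes written above are UD2, 0x0F 0x0B in memory order, which
//  raises an invalid-opcode fault if it is ever executed.)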

void NativeGeneralJump::verify() {
  assert(((NativeInstruction *)this)->is_jump() ||
         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
}


void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = unconditional_long_jump;
  *((int32_t *)(code_pos+1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}


// MT-safe patching of a long jump instruction.
// First patches the first word of the instruction with two jmps that jump
// to themselves (a spinlock). Then patches the last byte, and then
// atomically replaces the jmps with the first 4 bytes of the new
// instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(instr_addr != NULL, "illegal address for code patching (4)");
  NativeGeneralJump* n_jump = nativeGeneralJump_at(instr_addr); // checking that it is a jump

  // Temporary code: a dummy jmp-to-self patch
  unsigned char patch[4];
  assert(sizeof(patch)==sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;       // jmp rel8
  patch[1] = 0xFE;       // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(int32_t*)instr_addr = *(int32_t *)patch;
  n_jump->wrote(0);

  // Patch the 5th byte (index 4)
  instr_addr[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3
  *(jint*)instr_addr = *(jint *)code_buffer;

  n_jump->wrote(0);

#ifdef ASSERT
  // verify patching
  for (int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif
}

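
// Decode the target of a direct jump. Three encodings are handled:
//   EB cb      jmp rel8   (and 7x cb for the short conditional jumps)
//   E9 cd      jmp rel32
//   0F 8x cd   jcc rel32  (two-byte opcode, hence the displacement at offset 2)
// In every case the displacement is relative to the end of the instruction.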
address NativeGeneralJump::jump_destination() const {
  int op_code = ubyte_at(0);
  bool is_rel32off = (op_code == 0xE9 || op_code == 0x0F);
  int offset = (op_code == 0x0F) ? 2 : 1;
  int length = offset + ((is_rel32off) ? 4 : 1);

  if (is_rel32off)
    return addr_at(0) + length + int_at(offset);
  else
    return addr_at(0) + length + sbyte_at(offset);
}

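// A dtrace trap site is recognized by the one-byte INT3 breakpoint opcode
// (0xCC) in the first byte of the instruction.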
bool NativeInstruction::is_dtrace_trap() {
  return (*(int32_t*)this & 0xff) == 0xcc;
}