Thu, 03 Jan 2013 16:30:47 -0800
8005544: Use 256bit YMM registers in arraycopy stubs on x86
Summary: Use YMM registers in arraycopy and array_fill stubs.
Reviewed-by: roland, twisti
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

void NativeInstruction::wrote(int offset) {
  ICache::invalidate_word(addr_at(offset));
}

void NativeCall::verify() {
  // Make sure code pattern is actually a call imm32 instruction.
  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", instruction_address(),
                  inst);
    fatal("not a call disp32");
  }
}

address NativeCall::destination() const {
  // Getting the destination of a call isn't safe because that call can
  // be getting patched while you're calling this. There are only special
  // places where this can be called, but those are not automatically
  // verifiable by checking which locks are held. The real solution is
  // true atomic patching on x86, which is not yet implemented.
  return return_address() + displacement();
}
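
// An illustrative decode, using hypothetical addresses: for a call at
// 0x1000 encoded as E8 FB 0F 00 00,
//   return_address() = 0x1000 + 5 = 0x1005    (address of the next instruction)
//   displacement()   = 0x00000FFB              (little-endian imm32 at offset 1)
//   destination()    = 0x1005 + 0xFFB = 0x2000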

void NativeCall::print() {
  tty->print_cr(PTR_FORMAT ": call " PTR_FORMAT,
                instruction_address(), destination());
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
  *code_pos = instruction_code;
  *((int32_t *)(code_pos+1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}
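
// A minimal sketch of the same encoding step, with hypothetical names; a
// call rel32 is opcode 0xE8 followed by an imm32 that is relative to the
// address of the *next* instruction, hence the "+ 1 + 4" above:
//
//   int64_t disp = (int64_t)entry - ((int64_t)code_pos + 5);
//   assert(disp == (int64_t)(int32_t)disp, "rel32 only reaches +/- 2GB");
//   code_pos[0] = 0xE8;                          // call rel32
//   *(int32_t*)(code_pos + 1) = (int32_t)disp;   // little-endian displacement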

// MT-safe patching of a call instruction.
// First patches the first word of the instruction with two jmps that jump
// to themselves (a spinlock). Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert(instr_addr != NULL, "illegal address for code patching");

  NativeCall* n_call = nativeCall_at(instr_addr); // checking that it is a call
  if (os::is_MP()) {
    guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");
  }

  // Construct the dummy spinlock jmps
  unsigned char patch[4];
  assert(sizeof(patch) == sizeof(jint), "sanity check");
  patch[0] = 0xEB;  // jmp rel8
  patch[1] = 0xFE;  // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(jint*)instr_addr = *(jint*)patch;

  // Invalidate. Opteron requires a flush after every write.
  n_call->wrote(0);

  // Patch byte 4 (the last byte of the call)
  instr_addr[4] = code_buffer[4];

  n_call->wrote(4);

  // Patch bytes 0-3
  *(jint*)instr_addr = *(jint*)code_buffer;

  n_call->wrote(0);

#ifdef ASSERT
  // verify patching
  for (int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif
}
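
// The patch sequence above, illustrated byte by byte (d0..d3 are the old
// displacement bytes, primed values the new ones); this is why a running
// thread never observes a half-written call:
//
//   before:  E8 d0  d1  d2  d3    call <old dest>
//   step 1:  EB FE  EB  FE  d3    first word is now two self-jmps (spinlock);
//                                 a thread entering here spins harmlessly
//   step 2:  EB FE  EB  FE  d3'   last byte rewritten under the spinlock
//   step 3:  E8 d0' d1' d2' d3'   one atomic 4-byte store publishes the new
//                                 call; spinning threads fall through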

// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times. If the displacement field is aligned
// we can simply rely on atomicity of 32-bit writes to make sure other threads
// will see no intermediate states. Otherwise, the first two bytes of the
// call are guaranteed to be aligned, and can be atomically patched to a
// self-loop to guard the instruction while we change the other bytes.
//
// We cannot rely on locks here, since the free-running threads must run at
// full speed.
//
// Used in the runtime linkage of calls; see class CompiledIC.
// (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)
void NativeCall::set_destination_mt_safe(address dest) {
  debug_only(verify());
  // Make sure patching code is locked. No two threads can patch at the same
  // time but one may be executing this code.
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // Both C1 and C2 should now be generating code which aligns the patched address
  // to be within a single cache line except that C1 does not do the alignment on
  // uniprocessor systems.
  bool is_aligned = ((uintptr_t)displacement_address() + 0) / cache_line_size ==
                    ((uintptr_t)displacement_address() + 3) / cache_line_size;

  guarantee(!os::is_MP() || is_aligned, "destination must be aligned");

  if (is_aligned) {
    // Simple case: The destination lies within a single cache line.
    set_destination(dest);
  } else if ((uintptr_t)instruction_address() / cache_line_size ==
             ((uintptr_t)instruction_address()+1) / cache_line_size) {
    // Tricky case: The instruction prefix lies within a single cache line.
    intptr_t disp = dest - return_address();
#ifdef AMD64
    guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64

    int call_opcode = instruction_address()[0];

    // First patch dummy jump in place:
    {
      u_char patch_jump[2];
      patch_jump[0] = 0xEB;  // jmp rel8
      patch_jump[1] = 0xFE;  // jmp to self

      assert(sizeof(patch_jump) == sizeof(short), "sanity check");
      *(short*)instruction_address() = *(short*)patch_jump;
    }
    // Invalidate. Opteron requires a flush after every write.
    wrote(0);

    // (Note: We assume any reader which has already started to read
    // the unpatched call will completely read the whole unpatched call
    // without seeing the next writes we are about to make.)

    // Next, patch the last three bytes:
    u_char patch_disp[5];
    patch_disp[0] = call_opcode;
    *(int32_t*)&patch_disp[1] = (int32_t)disp;
    assert(sizeof(patch_disp) == instruction_size, "sanity check");
    for (int i = sizeof(short); i < instruction_size; i++)
      instruction_address()[i] = patch_disp[i];

    // Invalidate. Opteron requires a flush after every write.
    wrote(sizeof(short));

    // (Note: We assume that any reader which reads the opcode we are
    // about to repatch will also read the writes we just made.)

    // Finally, overwrite the jump:
    *(short*)instruction_address() = *(short*)patch_disp;
    // Invalidate. Opteron requires a flush after every write.
    wrote(0);

    debug_only(verify());
    guarantee(destination() == dest, "patch succeeded");
  } else {
    // Impossible: One or the other must be atomically writable.
    ShouldNotReachHere();
  }
}
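
// Illustrative arithmetic with a hypothetical 64-byte cache line: a call at
// 0x103b keeps its imm32 at 0x103c..0x103f, inside the line 0x1000..0x103f,
// so the simple case applies and one atomic 32-bit store suffices. A call at
// 0x103c puts the imm32 at 0x103d..0x1040, straddling two lines, but its
// first two bytes (0x103c..0x103d) share a line, so the tricky case can park
// a 2-byte self-jmp there while the remaining bytes are rewritten.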

void NativeMovConstReg::verify() {
#ifdef AMD64
  // make sure code pattern is actually a mov reg64, imm64 instruction
  if ((ubyte_at(0) != Assembler::REX_W && ubyte_at(0) != Assembler::REX_WB) ||
      (ubyte_at(1) & (0xff ^ register_mask)) != 0xB8) {
    print();
    fatal("not a REX.W[B] mov reg64, imm64");
  }
#else
  // make sure code pattern is actually a mov reg, imm32 instruction
  u_char test_byte = *(u_char*)instruction_address();
  u_char test_byte_2 = test_byte & (0xff ^ register_mask);
  if (test_byte_2 != instruction_code) fatal("not a mov reg, imm32");
#endif // AMD64
}
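
// Encodings accepted by the AMD64 branch above, with hypothetical immediates:
//   48 B8 <imm64>   mov rax, imm64   (REX.W  = 0x48, opcode B8+rd, rd = 0)
//   49 BF <imm64>   mov r15, imm64   (REX.WB = 0x49, opcode B8+rd, rd = 7)
// Masking the opcode byte with (0xff ^ register_mask) strips the rd bits, so
// both compare equal to 0xB8.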

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                instruction_address(), data());
}

//-------------------------------------------------------------------

int NativeMovRegMem::instruction_start() const {
  int off = 0;
  u_char instr_0 = ubyte_at(off);

  // See comment in Assembler::locate_operand() about VEX prefixes.
  if (instr_0 == instruction_VEX_prefix_2bytes) {
    assert((UseAVX > 0), "shouldn't have VEX prefix when AVX is off");
    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
    return 2;
  }
  if (instr_0 == instruction_VEX_prefix_3bytes) {
    assert((UseAVX > 0), "shouldn't have VEX prefix when AVX is off");
    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
    return 3;
  }

  // First check to see if we have a (prefixed or not) xor
  if (instr_0 >= instruction_prefix_wide_lo &&  // 0x40
      instr_0 <= instruction_prefix_wide_hi) {  // 0x4f
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_code_xor) {
    off += 2;
    instr_0 = ubyte_at(off);
  }

  // Now look for the real instruction and the many prefix/size specifiers.

  if (instr_0 == instruction_operandsize_prefix) {  // 0x66
    off++; // Not SSE instructions
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_code_xmm_ss_prefix ||  // 0xf3
      instr_0 == instruction_code_xmm_sd_prefix) {  // 0xf2
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 >= instruction_prefix_wide_lo &&  // 0x40
      instr_0 <= instruction_prefix_wide_hi) {  // 0x4f
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_extended_prefix) {  // 0x0f
    off++;
  }

  return off;
}
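
// A worked prefix walk over illustrative bytes: for F2 0F 10 44 24 20
// (movsd xmm0, [rsp+0x20]),
//   off = 0: 0xF2 is the sd prefix        -> off = 1
//   off = 1: 0x0F is the extended prefix  -> off = 2
// so instruction_start() returns 2, the offset of the primary opcode 0x10.
// With a REX prefix in between, e.g. F2 48 0F 10 ..., the 0x40..0x4F byte
// is also skipped and the result is 3.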

address NativeMovRegMem::instruction_address() const {
  return addr_at(instruction_start());
}

address NativeMovRegMem::next_instruction_address() const {
  address ret = instruction_address() + instruction_size;
  u_char instr_0 = *(u_char*) instruction_address();
  switch (instr_0) {
  case instruction_operandsize_prefix:
    fatal("should have skipped instruction_operandsize_prefix");
    break;

  case instruction_extended_prefix:
    fatal("should have skipped instruction_extended_prefix");
    break;

  case instruction_code_mem2reg_movslq: // 0x63
  case instruction_code_mem2reg_movzxb: // 0xB6
  case instruction_code_mem2reg_movsxb: // 0xBE
  case instruction_code_mem2reg_movzxw: // 0xB7
  case instruction_code_mem2reg_movsxw: // 0xBF
  case instruction_code_reg2mem:        // 0x89 (q/l)
  case instruction_code_mem2reg:        // 0x8B (q/l)
  case instruction_code_reg2memb:       // 0x88
  case instruction_code_mem2regb:       // 0x8a

  case instruction_code_float_s:        // 0xd9 fld_s a
  case instruction_code_float_d:        // 0xdd fld_d a

  case instruction_code_xmm_load:       // 0x10
  case instruction_code_xmm_store:      // 0x11
  case instruction_code_xmm_lpd:        // 0x12
    {
      // If there is an SIB then instruction is longer than expected
      u_char mod_rm = *(u_char*)(instruction_address() + 1);
      if ((mod_rm & 7) == 0x4) {
        ret++;
      }
    }
    break; // do not fall into the xor case

  case instruction_code_xor:
    fatal("should have skipped xor lead in");
    break;

  default:
    fatal("not a NativeMovRegMem");
  }
  return ret;
}

int NativeMovRegMem::offset() const {
  int off = data_offset + instruction_start();
  u_char mod_rm = *(u_char*)(instruction_address() + 1);
  // nnnn(r12|rsp) isn't coded as simple mod/rm since that is
  // the encoding used to signal an SIB byte, which shifts the nnnn
  // displacement field back by one byte
  if ((mod_rm & 7) == 0x4) {
    off++;
  }
  return int_at(off);
}

void NativeMovRegMem::set_offset(int x) {
  int off = data_offset + instruction_start();
  u_char mod_rm = *(u_char*)(instruction_address() + 1);
  // nnnn(r12|rsp) isn't coded as simple mod/rm since that is
  // the encoding used to signal an SIB byte, which shifts the nnnn
  // displacement field back by one byte
  if ((mod_rm & 7) == 0x4) {
    off++;
  }
  set_int_at(off, x);
}
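
// The SIB adjustment above, on illustrative encodings: in 8B 84 24 <imm32>
// (mov eax, [rsp + imm32]) the mod/rm byte 0x84 has r/m = 100, which signals
// an SIB byte (0x24 here), so the 32-bit displacement starts one byte later
// than in the plain form 8B 80 <imm32> (mov eax, [rax + imm32]).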

void NativeMovRegMem::verify() {
  // make sure code pattern is actually a mov [reg+offset], reg instruction
  u_char test_byte = *(u_char*)instruction_address();
  switch (test_byte) {
  case instruction_code_reg2memb:       // 0x88 movb a, r
  case instruction_code_reg2mem:        // 0x89 movl a, r (can be movq in 64bit)
  case instruction_code_mem2regb:       // 0x8a movb r, a
  case instruction_code_mem2reg:        // 0x8b movl r, a (can be movq in 64bit)
    break;

  case instruction_code_mem2reg_movslq: // 0x63 movsql r, a
  case instruction_code_mem2reg_movzxb: // 0xb6 movzbl r, a (movzxb)
  case instruction_code_mem2reg_movzxw: // 0xb7 movzwl r, a (movzxw)
  case instruction_code_mem2reg_movsxb: // 0xbe movsbl r, a (movsxb)
  case instruction_code_mem2reg_movsxw: // 0xbf movswl r, a (movsxw)
    break;

  case instruction_code_float_s:        // 0xd9 fld_s a
  case instruction_code_float_d:        // 0xdd fld_d a
  case instruction_code_xmm_load:       // 0x10 movsd xmm, a
  case instruction_code_xmm_store:      // 0x11 movsd a, xmm
  case instruction_code_xmm_lpd:        // 0x12 movlpd xmm, a
    break;

  default:
    fatal("not a mov [reg+offs], reg instruction");
  }
}


void NativeMovRegMem::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
}

//-------------------------------------------------------------------

void NativeLoadAddress::verify() {
  // make sure code pattern is actually a lea reg, [reg+offset] instruction
  u_char test_byte = *(u_char*)instruction_address();
#ifdef _LP64
  if ( (test_byte == instruction_prefix_wide ||
        test_byte == instruction_prefix_wide_extended) ) {
    test_byte = *(u_char*)(instruction_address() + 1);
  }
#endif // _LP64
  if ( ! ((test_byte == lea_instruction_code)
          LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) {
    fatal("not a lea reg, [reg+offs] instruction");
  }
}


void NativeLoadAddress::print() {
  tty->print_cr(PTR_FORMAT ": lea [reg + %x], reg", instruction_address(), offset());
}

//--------------------------------------------------------------------------------

void NativeJump::verify() {
  if (*(u_char*)instruction_address() != instruction_code) {
    fatal("not a jump instruction");
  }
}

void NativeJump::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = instruction_code;
  *((int32_t*)(code_pos + 1)) = (int32_t)disp;

  ICache::invalidate_range(code_pos, instruction_size);
}

void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
  // Patching to not_entrant can happen while activations of the method are
  // in use. The patching in that instance must happen only when certain
  // alignment restrictions are true. These guarantees check those
  // conditions.
#ifdef AMD64
  const int linesize = 64;
#else
  const int linesize = 32;
#endif // AMD64

  // Must be wordSize aligned
  guarantee(((uintptr_t) verified_entry & (wordSize -1)) == 0,
            "illegal address for code patching 2");
  // First 5 bytes must be within the same cache line - 4827828
  guarantee((uintptr_t) verified_entry / linesize ==
            ((uintptr_t) verified_entry + 4) / linesize,
            "illegal address for code patching 3");
}
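
// For example, with the 64-byte line used on AMD64, a hypothetical verified
// entry at 0x1f3c fails: bytes 0x1f3c..0x1f40 span two lines (0x1f3c/64 is
// 124 while 0x1f40/64 is 125), so the second guarantee fires. An entry at
// 0x1f40 passes both checks and can safely take the 5-byte jump patched in
// by patch_verified_entry() below.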

// MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
// The problem: jmp <dest> is a 5-byte instruction, but an atomic write can only cover 4 bytes.
// First patches the first word atomically to be a jump to itself.
// Then patches the last byte, and then atomically patches the first word (4 bytes),
// thus inserting the desired jump.
// This code is mt-safe with the following conditions: the entry point is 4-byte aligned,
// the entry point is in the same cache line as the unverified entry point, and the instruction
// being patched is >= 5 bytes (the size of the patch).
//
// In C2 the 5+ byte sized instruction is enforced by code in MachPrologNode::emit.
// In C1 the restriction is enforced by CodeEmitter::method_entry
//
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // complete jump instruction (to be inserted) is in code_buffer;
  unsigned char code_buffer[5];
  code_buffer[0] = instruction_code;
  intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64
  *(int32_t*)(code_buffer + 1) = (int32_t)disp;

  check_verified_entry_alignment(entry, verified_entry);

  // Can't call nativeJump_at() because it asserts that a jump already exists
  NativeJump* n_jump = (NativeJump*) verified_entry;

  // Construct the dummy spinlock jmps

  unsigned char patch[4];
  assert(sizeof(patch) == sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;  // jmp rel8
  patch[1] = 0xFE;  // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(int32_t*)verified_entry = *(int32_t *)patch;

  n_jump->wrote(0);

  // Patch byte 4 (the last byte of the jump instruction)
  verified_entry[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3 (from jump instruction)
  *(int32_t*)verified_entry = *(int32_t *)code_buffer;
  // Invalidate. Opteron requires a flush after every write.
  n_jump->wrote(0);
}
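
// The same spinlock trick as NativeCall::replace_mt_safe, illustrated (??
// is whatever byte the verified entry originally had at index 4):
//
//   before:  <first five bytes of the verified entry>
//   step 1:  EB FE EB FE ??    threads entering the method spin in place
//   step 2:  EB FE EB FE d3    jump's last displacement byte written safely
//   step 3:  E9 d0 d1 d2 d3    atomic 4-byte store completes jmp <dest>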

void NativePopReg::insert(address code_pos, Register reg) {
  assert(reg->encoding() < 8, "no space for REX");
  assert(NativePopReg::instruction_size == sizeof(char), "right address unit for update");
  *code_pos = (u_char)(instruction_code | reg->encoding());
  ICache::invalidate_range(code_pos, instruction_size);
}


void NativeIllegalInstruction::insert(address code_pos) {
  assert(NativeIllegalInstruction::instruction_size == sizeof(short), "right address unit for update");
  *(short *)code_pos = instruction_code;
  ICache::invalidate_range(code_pos, instruction_size);
}

void NativeGeneralJump::verify() {
  assert(((NativeInstruction *)this)->is_jump() ||
         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
}

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = unconditional_long_jump;
  *((int32_t *)(code_pos+1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a long jump instruction.
// First patches the first word of the instruction with two jmps that jump
// to themselves (a spinlock). Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(instr_addr != NULL, "illegal address for code patching (4)");
  NativeGeneralJump* n_jump = nativeGeneralJump_at(instr_addr); // checking that it is a jump

  // Temporary code: two self-jmps used as a spinlock
  unsigned char patch[4];
  assert(sizeof(patch) == sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;  // jmp rel8
  patch[1] = 0xFE;  // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(int32_t*)instr_addr = *(int32_t *)patch;
  n_jump->wrote(0);

  // Patch byte 4 (the last byte of the jump)
  instr_addr[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3
  *(jint*)instr_addr = *(jint *)code_buffer;

  n_jump->wrote(0);

#ifdef ASSERT
  // verify patching
  for (int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif
}

address NativeGeneralJump::jump_destination() const {
  int op_code = ubyte_at(0);
  bool is_rel32off = (op_code == 0xE9 || op_code == 0x0F);
  int offset  = (op_code == 0x0F) ? 2 : 1;
  int length  = offset + ((is_rel32off) ? 4 : 1);

  if (is_rel32off)
    return addr_at(0) + length + int_at(offset);
  else
    return addr_at(0) + length + sbyte_at(offset);
}
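
// Illustrative decodes for the three shapes handled above, at a hypothetical
// address 0x1000:
//   E9 FB 0F 00 00      jmp rel32  -> 0x1000 + 5 + 0xFFB = 0x2000
//   0F 84 FA 0F 00 00   je  rel32  -> 0x1000 + 6 + 0xFFA = 0x2000
//   EB 3E               jmp rel8   -> 0x1000 + 2 + 0x3E  = 0x1040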

bool NativeInstruction::is_dtrace_trap() {
  return (*(int32_t*)this & 0xff) == 0xcc;  // 0xCC is the int3 breakpoint opcode
}