Fri, 11 Mar 2011 22:34:57 -0800
7012648: move JSR 292 to package java.lang.invoke and adjust names
Summary: package and class renaming only; delete unused methods and classes
Reviewed-by: twisti
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "assembler_sparc.inline.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif


bool NativeInstruction::is_dtrace_trap() {
  return !is_nop();
}

void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
  ResourceMark rm;
  CodeBuffer buf(instaddr, 10 * BytesPerInstWord );
  MacroAssembler* _masm = new MacroAssembler(&buf);
  Register destreg;

  destreg = inv_rd(*(unsigned int *)instaddr);
  // Generate the new sequence
  _masm->patchable_sethi(x, destreg);
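  // Note (an assumption drawn from the flush below, not from patchable_sethi's
  // contract): the 64-bit sethi materialization can span several instruction
  // words (sethi/or/shift combinations), so the invalidate covers a multi-word
  // range rather than a single word.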
  ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
}

void NativeInstruction::verify() {
  // make sure code pattern is actually an instruction address
  address addr = addr_at(0);
  if (addr == 0 || ((intptr_t)addr & 3) != 0) {
    fatal("not an instruction address");
  }
}

void NativeInstruction::print() {
  tty->print_cr(INTPTR_FORMAT ": 0x%x", addr_at(0), long_at(0));
}

void NativeInstruction::set_long_at(int offset, int i) {
  address addr = addr_at(offset);
  *(int*)addr = i;
  ICache::invalidate_word(addr);
}

void NativeInstruction::set_jlong_at(int offset, jlong i) {
  address addr = addr_at(offset);
  *(jlong*)addr = i;
  // Don't need to invalidate 2 words here, because
  // the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
}

void NativeInstruction::set_addr_at(int offset, address x) {
  address addr = addr_at(offset);
  assert( ((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment");
  *(uintptr_t*)addr = (uintptr_t)x;
  // Don't need to invalidate 2 words here in the 64-bit case,
  // because the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
  // The Intel code has this assertion for NativeCall::set_destination,
  // NativeMovConstReg::set_data, NativeMovRegMem::set_offset,
  // NativeJump::set_jump_destination, and NativePushImm32::set_data
  //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction")
}

bool NativeInstruction::is_zero_test(Register &reg) {
  int x = long_at(0);
  Assembler::op3s temp = (Assembler::op3s) (Assembler::sub_op3 | Assembler::cc_bit_op3);
  if (is_op3(x, temp, Assembler::arith_op) &&
      inv_immed(x) && inv_rd(x) == G0) {
    if (inv_rs1(x) == G0) {
      reg = inv_rs2(x);
      return true;
    } else if (inv_rs2(x) == G0) {
      reg = inv_rs1(x);
      return true;
    }
  }
  return false;
}

bool NativeInstruction::is_load_store_with_small_offset(Register reg) {
  int x = long_at(0);
  if (is_op(x, Assembler::ldst_op) &&
      inv_rs1(x) == reg && inv_immed(x)) {
    return true;
  }
  return false;
}

void NativeCall::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a call instruction
  if (!is_op(long_at(0), Assembler::call_op)) {
    fatal("not a call");
  }
}

void NativeCall::print() {
  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
}


// MT-safe patching of a call instruction (and following word).
// First patches the second word, and then atomically replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word. This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the call.
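// An illustrative timeline of the patch (a sketch; w0/w1 are the two
// instruction words at the patch site):
//   t0: w0 = old call   w1 = old nop    (original state)
//   t1: w0 = old call   w1 = new word   (second word patched first)
//   t2: w0 = new word   w1 = new word   (first word swapped atomically)
// A thread fetching between t1 and t2 sees "old call; new delay slot",
// which is harmless for the reasons in the NOTE inside the function body.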
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert (instr_addr != NULL, "illegal address for code patching");
  NativeCall* n_call = nativeCall_at (instr_addr); // checking that it is a call
  assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8");
  int i0 = ((int*)code_buffer)[0];
  int i1 = ((int*)code_buffer)[1];
  int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original call");
  // The set_long_at calls do the ICache invalidation, so we just need to do them in reverse order
  n_call->set_long_at(1*BytesPerInstWord, i1);
  n_call->set_long_at(0*BytesPerInstWord, i0);
  // NOTE: It is possible that another thread T will execute
  // only the second patched word.
  // In other words, since the original instruction is this
  //   call patching_stub; nop                   (NativeCall)
  // and the new sequence from the buffer is this:
  //   sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
  // what T will execute is this:
  //   call patching_stub; add %r, %lo(K), %r
  // thereby putting garbage into %r before calling the patching stub.
  // This is OK, because the patching stub ignores the value of %r.

  // Make sure the first-patched instruction, which may co-exist
  // briefly with the call, will do something harmless.
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original call");
}

// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times. Thus, the displacement field must be
// instruction-word-aligned. This is always true on SPARC.
//
// Used in the runtime linkage of calls; see class CompiledIC.
void NativeCall::set_destination_mt_safe(address dest) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // set_destination uses set_long_at which does the ICache::invalidate
  set_destination(dest);
}

// Code for unit testing implementation of NativeCall class
void NativeCall::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeCall *nc;
  uint idx;
  int offsets[] = {
    0x0,
    0xfffffff0,
    0x7ffffff0,
    0x80000000,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  a->call( a->pc(), relocInfo::none );
  a->delayed()->nop();
  nc = nativeCall_at( cb.insts_begin() );
  nc->print();

  nc = nativeCall_overwriting_at( nc->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nc->set_destination( cb.insts_begin() + offsets[idx] );
    assert(nc->destination() == (cb.insts_begin() + offsets[idx]), "check unit test");
    nc->print();
  }

  nc = nativeCall_before( cb.insts_begin() + 8 );
  nc->print();

  VM_Version::revert();
#endif
}
// End code for unit testing implementation of NativeCall class

//-------------------------------------------------------------------

#ifdef _LP64

void NativeFarCall::set_destination(address dest) {
  // Address materialized in the instruction stream, so nothing to do.
  return;
#if 0 // What we'd do if we really did want to change the destination
  if (destination() == dest) {
    return;
  }
  ResourceMark rm;
  CodeBuffer buf(addr_at(0), instruction_size + 1);
  MacroAssembler* _masm = new MacroAssembler(&buf);
  // Generate the new sequence
  AddressLiteral(dest);
  _masm->jumpl_to(dest, O7, O7);
  ICache::invalidate_range(addr_at(0), instruction_size );
#endif
}

void NativeFarCall::verify() {
  // make sure code pattern is actually a jumpl_to instruction
  assert((int)instruction_size == (int)NativeJump::instruction_size, "same as jump_to");
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  nativeJump_at(addr_at(0))->verify();
}

bool NativeFarCall::is_call_at(address instr) {
  return nativeInstruction_at(instr)->is_sethi();
}

void NativeFarCall::print() {
  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
}

bool NativeFarCall::destination_is_compiled_verified_entry_point() {
  nmethod* callee = CodeCache::find_nmethod(destination());
  if (callee == NULL) {
    return false;
  } else {
    return destination() == callee->verified_entry_point();
  }
}

// MT-safe patching of a far call.
void NativeFarCall::replace_mt_safe(address instr_addr, address code_buffer) {
  Unimplemented();
}

// Code for unit testing implementation of NativeFarCall class
void NativeFarCall::test() {
  Unimplemented();
}
// End code for unit testing implementation of NativeFarCall class

#endif // _LP64

//-------------------------------------------------------------------


void NativeMovConstReg::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "set_oop" synthetic instruction
  // see MacroAssembler::set_oop()
  int i0 = long_at(sethi_offset);
  int i1 = long_at(add_offset);

  // verify the pattern "sethi %hi22(imm), reg ; add reg, %lo10(imm), reg"
  Register rd = inv_rd(i0);
#ifndef _LP64
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
        is_op3(i1, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
        rd == inv_rs1(i1) && rd == inv_rd(i1))) {
    fatal("not a set_oop");
  }
#else
  if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
    fatal("not a set_oop");
  }
#endif
}


void NativeMovConstReg::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
}


#ifdef _LP64
intptr_t NativeMovConstReg::data() const {
  return data64(addr_at(sethi_offset), long_at(add_offset));
}
#else
intptr_t NativeMovConstReg::data() const {
  return data32(long_at(sethi_offset), long_at(add_offset));
}
#endif
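
// Worked example of the 32-bit sethi/add split (illustrative only):
//   for x = 0x12345678,
//     sethi %hi(x), reg    leaves reg = 0x12345400  (bits 31..10 of x)
//     add   reg, %lo(x), reg  yields  0x12345678    (%lo(x) = 0x278, the low 10 bits)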

void NativeMovConstReg::set_data(intptr_t x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi( long_at(sethi_offset), x));
#endif
  set_long_at(add_offset, set_data32_simm13( long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = (oop)x;
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
    }
  }
}


// Code for unit testing implementation of NativeMovConstReg class
void NativeMovConstReg::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstReg* nm;
  uint idx;
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstReg_at( cb.insts_begin() );
  nm->print();

  nm = nativeMovConstReg_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif
}
// End code for unit testing implementation of NativeMovConstReg class

//-------------------------------------------------------------------

void NativeMovConstRegPatching::verify() {
  NativeInstruction::verify();
  // Make sure code pattern is sethi/nop/add.
  int i0 = long_at(sethi_offset);
  int i1 = long_at(nop_offset);
  int i2 = long_at(add_offset);
  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  // Verify the pattern "sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg"
  // The casual reader should note that on Sparc a nop is a special case of sethi
  // in which the destination register is %g0.
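  // Concretely, "nop" assembles to 0x01000000, i.e. "sethi 0, %g0".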
  Register rd0 = inv_rd(i0);
  Register rd1 = inv_rd(i1);
  if (!(is_op2(i0, Assembler::sethi_op2) && rd0 != G0 &&
        is_op2(i1, Assembler::sethi_op2) && rd1 == G0 &&  // nop is a special case of sethi
        is_op3(i2, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i2) && (unsigned)get_simm13(i2) < (1 << 10) &&
        rd0 == inv_rs1(i2) && rd0 == inv_rd(i2))) {
    fatal("not a set_oop");
  }
}


void NativeMovConstRegPatching::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
}


int NativeMovConstRegPatching::data() const {
#ifdef _LP64
  return data64(addr_at(sethi_offset), long_at(add_offset));
#else
  return data32(long_at(sethi_offset), long_at(add_offset));
#endif
}


void NativeMovConstRegPatching::set_data(int x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
#endif
  set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = (oop)x;
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
    }
  }
}


// Code for unit testing implementation of NativeMovConstRegPatching class
void NativeMovConstRegPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstRegPatching* nm;
  uint idx;
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->nop();
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->nop();
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstRegPatching_at( cb.insts_begin() );
  nm->print();

  nm = nativeMovConstRegPatching_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeMovConstRegPatching class


//-------------------------------------------------------------------


void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
  Untested("copy_instruction_to");
  int instruction_size = next_instruction_address() - instruction_address();
  for (int i = 0; i < instruction_size; i += BytesPerInstWord) {
    *(int*)(new_instruction_address + i) = *(int*)(address(this) + i);
  }
}


void NativeMovRegMem::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)add_offset == NativeMovConstReg::add_offset, "sethi size ok");

  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))))
  {
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
        0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
      fatal("not a ld* or st* op");
    }
  }
}


void NativeMovRegMem::print() {
  if (is_immediate()) {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
  } else {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
  }
}


// Code for unit testing implementation of NativeMovRegMem class
void NativeMovRegMem::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMem* nm;
  uint idx = 0;
  uint idx1;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xffffffff, relocInfo::external_word_type);
  AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type);
  a->ldsw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  a->stw( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMem_at( cb.insts_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  while (--idx) {
    nm = nativeMovRegMem_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}

// End code for unit testing implementation of NativeMovRegMem class

//--------------------------------------------------------------------------------


void NativeMovRegMemPatching::copy_instruction_to(address new_instruction_address) {
  Untested("copy_instruction_to");
  int instruction_size = next_instruction_address() - instruction_address();
  for (int i = 0; i < instruction_size; i += wordSize) {
    *(long*)(new_instruction_address + i) = *(long*)(address(this) + i);
  }
}


void NativeMovRegMemPatching::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
        0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
      fatal("not a ld* or st* op");
    }
  }
}


void NativeMovRegMemPatching::print() {
  if (is_immediate()) {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
  } else {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
  }
}


// Code for unit testing implementation of NativeMovRegMemPatching class
void NativeMovRegMemPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMemPatching* nm;
  uint idx = 0;
  uint idx1;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al(0xffffffff, relocInfo::external_word_type);
  a->ldsw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  a->stw( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMemPatching_at( cb.insts_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  while (--idx) {
    nm = nativeMovRegMemPatching_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeMovRegMemPatching class


//--------------------------------------------------------------------------------


void NativeJump::verify() {
  NativeInstruction::verify();
  int i0 = long_at(sethi_offset);
  int i1 = long_at(jmpl_offset);
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  // verify the pattern "sethi %hi22(imm), treg ; jmpl treg, %lo10(imm), lreg"
  Register rd = inv_rd(i0);
#ifndef _LP64
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
        (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
        (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
        rd == inv_rs1(i1))) {
    fatal("not a jump_to instruction");
  }
#else
  // In LP64, the jump instruction location varies for non-relocatable
  // jumps; for example, it could be sethi, xor, jmp instead of the
  // 7 instructions for sethi. So let's check sethi only.
  if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
    fatal("not a jump_to instruction");
  }
#endif
}


void NativeJump::print() {
  tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, instruction_address(), jump_destination());
}


// Code for unit testing implementation of NativeJump class
void NativeJump::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeJump* nj;
  uint idx;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type);
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), G0, RelocationHolder::none);
  a->delayed()->nop();
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
  a->delayed()->nop();

  nj = nativeJump_at( cb.insts_begin() );
  nj->print();

  nj = nativeJump_at( nj->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nj->set_jump_destination( nj->instruction_address() + offsets[idx] );
    assert(nj->jump_destination() == (nj->instruction_address() + offsets[idx]), "check unit test");
    nj->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeJump class


void NativeJump::insert(address code_pos, address entry) {
  Unimplemented();
}

// MT-safe insertion of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
// The problem: jump_to <dest> is a 3-word instruction (including its delay slot).
// Atomic write can be only with 1 word.
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // Here's one way to do it: Pre-allocate a three-word jump sequence somewhere
  // in the header of the nmethod, within a short branch's span of the patch point.
  // Set up the jump sequence using NativeJump::insert, and then use an annulled
  // unconditional branch at the target site (an atomic 1-word update).
  // Limitations: You can only patch nmethods, with any given nmethod patched at
  // most once, and the patch must be in the nmethod's header.
  // It's messy, but you can ask the CodeCache for the nmethod containing the
  // target address.
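  // A sketch of that scheme (hypothetical; not what the code below does):
  //   nmethod header:  sethi %hi(dest), treg; jmpl treg, %lo(dest), %g0; nop
  //   patch site:      ba,a <header>   -- a single word, stored atomically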

  // %%%%% For now, do something MT-stupid:
  ResourceMark rm;
  int code_size = 1 * BytesPerInstWord;
  CodeBuffer cb(verified_entry, code_size + 1);
  MacroAssembler* a = new MacroAssembler(&cb);
  if (VM_Version::v9_instructions_work()) {
    a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
  } else {
    a->lduw(G0, 0, O7); // "ld" must agree with code in the signal handler
  }
  ICache::invalidate_range(verified_entry, code_size);
}


void NativeIllegalInstruction::insert(address code_pos) {
  NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos);
  nii->set_long_at(0, illegal_instruction());
}

static int illegal_instruction_bits = 0;

int NativeInstruction::illegal_instruction() {
  if (illegal_instruction_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(ST_RESERVED_FOR_USER_0 + 1);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    illegal_instruction_bits = bits;
    assert(illegal_instruction_bits != 0, "oops");
  }
  return illegal_instruction_bits;
}

static int ic_miss_trap_bits = 0;

bool NativeInstruction::is_ic_miss_trap() {
  if (ic_miss_trap_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    ic_miss_trap_bits = bits;
    assert(ic_miss_trap_bits != 0, "oops");
  }
  return long_at(0) == ic_miss_trap_bits;
}


bool NativeInstruction::is_illegal() {
  if (illegal_instruction_bits == 0) {
    return false;
  }
  return long_at(0) == illegal_instruction_bits;
}


void NativeGeneralJump::verify() {
  assert(((NativeInstruction *)this)->is_jump() ||
         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
}


void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
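  // Build a "ba <disp22>" word: br_op2 selects a branch, cond = always,
  // the annul bit is clear, and wdisp packs the signed word displacement
  // (entry - code_pos) >> 2 into the low 22 bits.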
  Assembler::Condition condition = Assembler::always;
  int x = Assembler::op2(Assembler::br_op2) | Assembler::annul(false) |
    Assembler::cond(condition) | Assembler::wdisp((intptr_t)entry, (intptr_t)code_pos, 22);
  NativeGeneralJump* ni = (NativeGeneralJump*) nativeInstruction_at(code_pos);
  ni->set_long_at(0, x);
}


// MT-safe patching of a jmp instruction (and following word).
// First patches the second word, and then atomically replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word. This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the call.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert (instr_addr != NULL, "illegal address for code patching");
  NativeGeneralJump* h_jump = nativeGeneralJump_at (instr_addr); // checking that it is a jump
  assert(NativeGeneralJump::instruction_size == 8, "wrong instruction size; must be 8");
  int i0 = ((int*)code_buffer)[0];
  int i1 = ((int*)code_buffer)[1];
  int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original call");
  // The set_long_at calls do the ICache invalidation, so we just need to do them in reverse order
  h_jump->set_long_at(1*BytesPerInstWord, i1);
  h_jump->set_long_at(0*BytesPerInstWord, i0);
  // NOTE: It is possible that another thread T will execute
  // only the second patched word.
  // In other words, since the original instruction is this
  //   jmp patching_stub; nop                    (NativeGeneralJump)
  // and the new sequence from the buffer is this:
  //   sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
  // what T will execute is this:
  //   jmp patching_stub; add %r, %lo(K), %r
  // thereby putting garbage into %r before calling the patching stub.
  // This is OK, because the patching stub ignores the value of %r.

  // Make sure the first-patched instruction, which may co-exist
  // briefly with the call, will do something harmless.
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original call");
}