src/cpu/sparc/vm/nativeInst_sparc.cpp

Thu, 07 Apr 2011 09:53:20 -0700

author
johnc
date
Thu, 07 Apr 2011 09:53:20 -0700
changeset 2781
e1162778c1c8
parent 2657
d673ef06fe96
child 2708
1d1603768966
permissions
-rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes

duke@435 1 /*
jrose@1934 2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "assembler_sparc.inline.hpp"
stefank@2314 27 #include "memory/resourceArea.hpp"
stefank@2314 28 #include "nativeInst_sparc.hpp"
stefank@2314 29 #include "oops/oop.inline.hpp"
stefank@2314 30 #include "runtime/handles.hpp"
stefank@2314 31 #include "runtime/sharedRuntime.hpp"
stefank@2314 32 #include "runtime/stubRoutines.hpp"
stefank@2314 33 #include "utilities/ostream.hpp"
stefank@2314 34 #ifdef COMPILER1
stefank@2314 35 #include "c1/c1_Runtime1.hpp"
stefank@2314 36 #endif
duke@435 37
duke@435 38
kamg@551 39 bool NativeInstruction::is_dtrace_trap() {
kamg@551 40 return !is_nop();
kamg@551 41 }
kamg@551 42
// Overwrite the patchable_sethi sequence at instaddr so it materializes
// the 64-bit constant x into the same destination register the existing
// sequence targeted, then flush the patched range from the I-cache.
void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
  ResourceMark rm;
  CodeBuffer buf(instaddr, 10 * BytesPerInstWord );
  MacroAssembler* _masm = new MacroAssembler(&buf);
  Register destreg;

  // Recover the destination register from the first (sethi) instruction.
  destreg = inv_rd(*(unsigned int *)instaddr);
  // Generate the new sequence
  _masm->patchable_sethi(x, destreg);
  ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
}
duke@435 54
never@2657 55 void NativeInstruction::verify_data64_sethi(address instaddr, intptr_t x) {
never@2657 56 ResourceMark rm;
never@2657 57 unsigned char buffer[10 * BytesPerInstWord];
never@2657 58 CodeBuffer buf(buffer, 10 * BytesPerInstWord);
never@2657 59 MacroAssembler masm(&buf);
never@2657 60
never@2657 61 Register destreg = inv_rd(*(unsigned int *)instaddr);
never@2657 62 // Generate the proper sequence into a temporary buffer and compare
never@2657 63 // it with the original sequence.
never@2657 64 masm.patchable_sethi(x, destreg);
never@2657 65 int len = buffer - masm.pc();
never@2657 66 for (int i = 0; i < len; i++) {
never@2657 67 assert(instaddr[i] == buffer[i], "instructions must match");
never@2657 68 }
never@2657 69 }
never@2657 70
duke@435 71 void NativeInstruction::verify() {
duke@435 72 // make sure code pattern is actually an instruction address
duke@435 73 address addr = addr_at(0);
duke@435 74 if (addr == 0 || ((intptr_t)addr & 3) != 0) {
duke@435 75 fatal("not an instruction address");
duke@435 76 }
duke@435 77 }
duke@435 78
duke@435 79 void NativeInstruction::print() {
duke@435 80 tty->print_cr(INTPTR_FORMAT ": 0x%x", addr_at(0), long_at(0));
duke@435 81 }
duke@435 82
duke@435 83 void NativeInstruction::set_long_at(int offset, int i) {
duke@435 84 address addr = addr_at(offset);
duke@435 85 *(int*)addr = i;
duke@435 86 ICache::invalidate_word(addr);
duke@435 87 }
duke@435 88
duke@435 89 void NativeInstruction::set_jlong_at(int offset, jlong i) {
duke@435 90 address addr = addr_at(offset);
duke@435 91 *(jlong*)addr = i;
duke@435 92 // Don't need to invalidate 2 words here, because
duke@435 93 // the flush instruction operates on doublewords.
duke@435 94 ICache::invalidate_word(addr);
duke@435 95 }
duke@435 96
duke@435 97 void NativeInstruction::set_addr_at(int offset, address x) {
duke@435 98 address addr = addr_at(offset);
duke@435 99 assert( ((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment");
duke@435 100 *(uintptr_t*)addr = (uintptr_t)x;
duke@435 101 // Don't need to invalidate 2 words here in the 64-bit case,
duke@435 102 // because the flush instruction operates on doublewords.
duke@435 103 ICache::invalidate_word(addr);
duke@435 104 // The Intel code has this assertion for NativeCall::set_destination,
duke@435 105 // NativeMovConstReg::set_data, NativeMovRegMem::set_offset,
duke@435 106 // NativeJump::set_jump_destination, and NativePushImm32::set_data
duke@435 107 //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction")
duke@435 108 }
duke@435 109
duke@435 110 bool NativeInstruction::is_zero_test(Register &reg) {
duke@435 111 int x = long_at(0);
duke@435 112 Assembler::op3s temp = (Assembler::op3s) (Assembler::sub_op3 | Assembler::cc_bit_op3);
duke@435 113 if (is_op3(x, temp, Assembler::arith_op) &&
duke@435 114 inv_immed(x) && inv_rd(x) == G0) {
duke@435 115 if (inv_rs1(x) == G0) {
duke@435 116 reg = inv_rs2(x);
duke@435 117 return true;
duke@435 118 } else if (inv_rs2(x) == G0) {
duke@435 119 reg = inv_rs1(x);
duke@435 120 return true;
duke@435 121 }
duke@435 122 }
duke@435 123 return false;
duke@435 124 }
duke@435 125
duke@435 126 bool NativeInstruction::is_load_store_with_small_offset(Register reg) {
duke@435 127 int x = long_at(0);
duke@435 128 if (is_op(x, Assembler::ldst_op) &&
duke@435 129 inv_rs1(x) == reg && inv_immed(x)) {
duke@435 130 return true;
duke@435 131 }
duke@435 132 return false;
duke@435 133 }
duke@435 134
duke@435 135 void NativeCall::verify() {
duke@435 136 NativeInstruction::verify();
duke@435 137 // make sure code pattern is actually a call instruction
duke@435 138 if (!is_op(long_at(0), Assembler::call_op)) {
duke@435 139 fatal("not a call");
duke@435 140 }
duke@435 141 }
duke@435 142
duke@435 143 void NativeCall::print() {
duke@435 144 tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
duke@435 145 }
duke@435 146
duke@435 147
// MT-safe patching of a call instruction (and following word).
// First patches the second word, and then atomicly replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word. This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the call.
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  // Only safe under the patching lock or when all threads are stopped.
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert (instr_addr != NULL, "illegal address for code patching");
  NativeCall* n_call =  nativeCall_at (instr_addr); // checking that it is a call
  assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8");
  int i0 = ((int*)code_buffer)[0];
  int i1 = ((int*)code_buffer)[1];
  int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
  // The delay-slot word must already be harmless before we start.
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original call");
  // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
  n_call->set_long_at(1*BytesPerInstWord, i1);
  n_call->set_long_at(0*BytesPerInstWord, i0);
  // NOTE:  It is possible that another thread T will execute
  // only the second patched word.
  // In other words, since the original instruction is this
  //    call patching_stub; nop                   (NativeCall)
  // and the new sequence from the buffer is this:
  //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
  // what T will execute is this:
  //    call patching_stub; add %r, %lo(K), %r
  // thereby putting garbage into %r before calling the patching stub.
  // This is OK, because the patching stub ignores the value of %r.

  // Make sure the first-patched instruction, which may co-exist
  // briefly with the call, will do something harmless.
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original call");
}
duke@435 187
duke@435 188 // Similar to replace_mt_safe, but just changes the destination. The
duke@435 189 // important thing is that free-running threads are able to execute this
duke@435 190 // call instruction at all times. Thus, the displacement field must be
duke@435 191 // instruction-word-aligned. This is always true on SPARC.
duke@435 192 //
duke@435 193 // Used in the runtime linkage of calls; see class CompiledIC.
duke@435 194 void NativeCall::set_destination_mt_safe(address dest) {
duke@435 195 assert(Patching_lock->is_locked() ||
duke@435 196 SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
duke@435 197 // set_destination uses set_long_at which does the ICache::invalidate
duke@435 198 set_destination(dest);
duke@435 199 }
duke@435 200
// Code for unit testing implementation of NativeCall class
void NativeCall::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeCall *nc;
  uint idx;
  // Call displacements to exercise, including negative and boundary
  // 32-bit values.
  int offsets[] = {
    0x0,
    0xfffffff0,
    0x7ffffff0,
    0x80000000,
    0x20,
    0x4000,
  };

  // Allow all instruction forms regardless of the current CPU features.
  VM_Version::allow_all();

  // Emit a self-call with a nop in its delay slot.
  a->call( a->pc(), relocInfo::none );
  a->delayed()->nop();
  nc = nativeCall_at( cb.insts_begin() );
  nc->print();

  // Rewrite the call's destination with each offset and check that it
  // reads back unchanged.
  nc = nativeCall_overwriting_at( nc->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nc->set_destination( cb.insts_begin() + offsets[idx] );
    assert(nc->destination() == (cb.insts_begin() + offsets[idx]), "check unit test");
    nc->print();
  }

  nc = nativeCall_before( cb.insts_begin() + 8 );
  nc->print();

  // Restore the originally allowed instruction forms.
  VM_Version::revert();
#endif
}
duke@435 238 // End code for unit testing implementation of NativeCall class
duke@435 239
duke@435 240 //-------------------------------------------------------------------
duke@435 241
duke@435 242 #ifdef _LP64
duke@435 243
// Intentionally a no-op: the far-call destination is materialized in
// the instruction stream itself, so there is nothing to patch here.
// The #if 0 block sketches what a real implementation would look like.
void NativeFarCall::set_destination(address dest) {
  // Address materialized in the instruction stream, so nothing to do.
  return;
#if 0 // What we'd do if we really did want to change the destination
  if (destination() == dest) {
    return;
  }
  ResourceMark rm;
  CodeBuffer buf(addr_at(0), instruction_size + 1);
  MacroAssembler* _masm = new MacroAssembler(&buf);
  // Generate the new sequence
  AddressLiteral(dest);
  _masm->jumpl_to(dest, O7, O7);
  ICache::invalidate_range(addr_at(0), instruction_size );
#endif
}
duke@435 260
duke@435 261 void NativeFarCall::verify() {
duke@435 262 // make sure code pattern is actually a jumpl_to instruction
duke@435 263 assert((int)instruction_size == (int)NativeJump::instruction_size, "same as jump_to");
duke@435 264 assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
duke@435 265 nativeJump_at(addr_at(0))->verify();
duke@435 266 }
duke@435 267
duke@435 268 bool NativeFarCall::is_call_at(address instr) {
duke@435 269 return nativeInstruction_at(instr)->is_sethi();
duke@435 270 }
duke@435 271
duke@435 272 void NativeFarCall::print() {
duke@435 273 tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
duke@435 274 }
duke@435 275
duke@435 276 bool NativeFarCall::destination_is_compiled_verified_entry_point() {
duke@435 277 nmethod* callee = CodeCache::find_nmethod(destination());
duke@435 278 if (callee == NULL) {
duke@435 279 return false;
duke@435 280 } else {
duke@435 281 return destination() == callee->verified_entry_point();
duke@435 282 }
duke@435 283 }
duke@435 284
// MT-safe patching of a far call.
void NativeFarCall::replace_mt_safe(address instr_addr, address code_buffer) {
  // No far-call patching protocol is implemented; guaranteed VM stop.
  Unimplemented();
}
duke@435 289
// Code for unit testing implementation of NativeFarCall class
void NativeFarCall::test() {
  // No unit test exists for far calls; guaranteed VM stop if called.
  Unimplemented();
}
duke@435 294 // End code for unit testing implementation of NativeFarCall class
duke@435 295
duke@435 296 #endif // _LP64
duke@435 297
duke@435 298 //-------------------------------------------------------------------
duke@435 299
duke@435 300
duke@435 301 void NativeMovConstReg::verify() {
duke@435 302 NativeInstruction::verify();
duke@435 303 // make sure code pattern is actually a "set_oop" synthetic instruction
duke@435 304 // see MacroAssembler::set_oop()
duke@435 305 int i0 = long_at(sethi_offset);
duke@435 306 int i1 = long_at(add_offset);
duke@435 307
duke@435 308 // verify the pattern "sethi %hi22(imm), reg ; add reg, %lo10(imm), reg"
duke@435 309 Register rd = inv_rd(i0);
duke@435 310 #ifndef _LP64
duke@435 311 if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
duke@435 312 is_op3(i1, Assembler::add_op3, Assembler::arith_op) &&
duke@435 313 inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
duke@435 314 rd == inv_rs1(i1) && rd == inv_rd(i1))) {
duke@435 315 fatal("not a set_oop");
duke@435 316 }
duke@435 317 #else
duke@435 318 if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
duke@435 319 fatal("not a set_oop");
duke@435 320 }
duke@435 321 #endif
duke@435 322 }
duke@435 323
duke@435 324
duke@435 325 void NativeMovConstReg::print() {
duke@435 326 tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
duke@435 327 }
duke@435 328
duke@435 329
duke@435 330 #ifdef _LP64
duke@435 331 intptr_t NativeMovConstReg::data() const {
duke@435 332 return data64(addr_at(sethi_offset), long_at(add_offset));
duke@435 333 }
duke@435 334 #else
duke@435 335 intptr_t NativeMovConstReg::data() const {
duke@435 336 return data32(long_at(sethi_offset), long_at(add_offset));
duke@435 337 }
duke@435 338 #endif
duke@435 339
duke@435 340
// Patch the materialized constant to x and, if this site carries an oop
// relocation, keep the owning nmethod's oop cell in sync with it.
void NativeMovConstReg::set_data(intptr_t x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi( long_at(sethi_offset), x));
#endif
  set_long_at(add_offset, set_data32_simm13( long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod* nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          // First oop reloc in range: update its cell with the new value.
          oop_addr = r->oop_addr();
          *oop_addr = (oop)x;
        } else {
          // Any further oop relocs in range must refer to the same cell.
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
    }
  }
}
duke@435 368
duke@435 369
// Code for unit testing implementation of NativeMovConstReg class
void NativeMovConstReg::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstReg* nm;
  uint idx;
  // Constants to round-trip through set_data()/data().
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  // Emit two sethi/add patterns to decode below.
  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstReg_at( cb.insts_begin() );
  nm->print();

  // Patch the second pattern with each constant and read it back.
  nm = nativeMovConstReg_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif
}
duke@435 410 // End code for unit testing implementation of NativeMovConstReg class
duke@435 411
duke@435 412 //-------------------------------------------------------------------
duke@435 413
// Check that this site holds the patchable sethi/nop/add variant of
// the set_oop pattern (the nop leaves room for later patching).
void NativeMovConstRegPatching::verify() {
  NativeInstruction::verify();
  // Make sure code pattern is sethi/nop/add.
  int i0 = long_at(sethi_offset);
  int i1 = long_at(nop_offset);
  int i2 = long_at(add_offset);
  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  // Verify the pattern "sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg"
  // The casual reader should note that on Sparc a nop is a special case of
  // sethi in which the destination register is %g0.
  Register rd0 = inv_rd(i0);
  Register rd1 = inv_rd(i1);
  if (!(is_op2(i0, Assembler::sethi_op2) && rd0 != G0 &&
        is_op2(i1, Assembler::sethi_op2) && rd1 == G0 && // nop is a special case of sethi
        is_op3(i2, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i2) && (unsigned)get_simm13(i2) < (1 << 10) &&
        rd0 == inv_rs1(i2) && rd0 == inv_rd(i2))) {
    fatal("not a set_oop");
  }
}
duke@435 435
duke@435 436
duke@435 437 void NativeMovConstRegPatching::print() {
duke@435 438 tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
duke@435 439 }
duke@435 440
duke@435 441
duke@435 442 int NativeMovConstRegPatching::data() const {
duke@435 443 #ifdef _LP64
duke@435 444 return data64(addr_at(sethi_offset), long_at(add_offset));
duke@435 445 #else
duke@435 446 return data32(long_at(sethi_offset), long_at(add_offset));
duke@435 447 #endif
duke@435 448 }
duke@435 449
duke@435 450
// Patch the materialized constant to x and, if this site carries an oop
// relocation, keep the owning nmethod's oop cell in sync with it.
void NativeMovConstRegPatching::set_data(int x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
#endif
  set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod* nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          // First oop reloc in range: update its cell with the new value.
          oop_addr = r->oop_addr();
          *oop_addr = (oop)x;
        } else {
          // Any further oop relocs in range must refer to the same cell.
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
    }
  }
}
duke@435 478
duke@435 479
// Code for unit testing implementation of NativeMovConstRegPatching class
void NativeMovConstRegPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstRegPatching* nm;
  uint idx;
  // Constants to round-trip through set_data()/data().
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  // Emit two sethi/nop/add patterns to decode below.
  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->nop();
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->nop();
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstRegPatching_at( cb.insts_begin() );
  nm->print();

  // Patch the second pattern with each constant and read it back.
  nm = nativeMovConstRegPatching_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif // ASSERT
}
duke@435 522 // End code for unit testing implementation of NativeMovConstRegPatching class
duke@435 523
duke@435 524
duke@435 525 //-------------------------------------------------------------------
duke@435 526
duke@435 527
duke@435 528 void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
duke@435 529 Untested("copy_instruction_to");
duke@435 530 int instruction_size = next_instruction_address() - instruction_address();
duke@435 531 for (int i = 0; i < instruction_size; i += BytesPerInstWord) {
duke@435 532 *(int*)(new_instruction_address + i) = *(int*)(address(this) + i);
duke@435 533 }
duke@435 534 }
duke@435 535
duke@435 536
void NativeMovRegMem::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)add_offset == NativeMovConstReg::add_offset, "sethi size ok");

  // First try the immediate-offset form: a ldst-class instruction whose
  // op3 falls within the integer or float load/store masks.
  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))))
  {
    // Otherwise expect the register-offset form: sethi/add materializes
    // the offset, followed by the actual ld/st at ldst_offset.
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    // NOTE(review): "!is_op(i1, ...) &&" looks inverted — as written the
    // whole condition can only hold when i1 is NOT a ldst-class op, which
    // makes this fatal path nearly unreachable; presumably
    // "!(is_op(...) && ...)" was intended. Left as-is; confirm before
    // changing.
    if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
        0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
      fatal("not a ld* or st* op");
    }
  }
}
duke@435 563
duke@435 564
duke@435 565 void NativeMovRegMem::print() {
duke@435 566 if (is_immediate()) {
duke@435 567 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
duke@435 568 } else {
duke@435 569 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
duke@435 570 }
duke@435 571 }
duke@435 572
duke@435 573
// Code for unit testing implementation of NativeMovRegMem class
void NativeMovRegMem::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMem* nm;
  uint idx = 0;
  uint idx1;
  // Offsets to round-trip through set_offset()/offset().
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  // Emit every load flavor, alternating the immediate-offset form with
  // the register-offset (sethi/add) form; idx counts emitted patterns.
  AddressLiteral al1(0xffffffff, relocInfo::external_word_type);
  AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type);
  a->ldsw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  // Store flavors, with the same immediate/register alternation.
  a->stw( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMem_at( cb.insts_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  // Walk the remaining emitted patterns, round-tripping each offset.
  while (--idx) {
    nm = nativeMovRegMem_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
duke@435 668
duke@435 669 // End code for unit testing implementation of NativeMovRegMem class
duke@435 670
duke@435 671 //--------------------------------------------------------------------------------
duke@435 672
duke@435 673
// Copy this load/store pattern to a new location.
void NativeMovRegMemPatching::copy_instruction_to(address new_instruction_address) {
  Untested("copy_instruction_to");
  // NOTE(review): copies in wordSize (8-byte on LP64) chunks via long,
  // unlike NativeMovRegMem::copy_instruction_to which copies
  // BytesPerInstWord (4-byte) ints — confirm instruction_size is always
  // a multiple of wordSize here.
  int instruction_size = next_instruction_address() - instruction_address();
  for (int i = 0; i < instruction_size; i += wordSize) {
    *(long*)(new_instruction_address + i) = *(long*)(address(this) + i);
  }
}
duke@435 681
duke@435 682
void NativeMovRegMemPatching::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  // First try the immediate-offset form: a ldst-class instruction whose
  // op3 falls within the integer or float load/store masks.
  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
    // Otherwise expect the register-offset form: the ld/st follows the
    // sethi/nop/add at ldst_offset.
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    // NOTE(review): "!is_op(i1, ...) &&" looks inverted (compare the
    // immediate-form test above); presumably "!(is_op(...) && ...)" was
    // intended, which makes this fatal path nearly unreachable. Left
    // as-is; confirm before changing.
    if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
        0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
      fatal("not a ld* or st* op");
    }
  }
}
duke@435 708
duke@435 709
duke@435 710 void NativeMovRegMemPatching::print() {
duke@435 711 if (is_immediate()) {
duke@435 712 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
duke@435 713 } else {
duke@435 714 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
duke@435 715 }
duke@435 716 }
duke@435 717
duke@435 718
// Code for unit testing implementation of NativeMovRegMemPatching class
//
// Assembles every supported load/store shape twice -- once in the immediate
// form and once in the sethi/register form -- then walks the buffer with
// nativeMovRegMemPatching_at() and exercises offset get/set round-trips.
// idx counts the number of patterns emitted; each "a->...; idx++;" pair
// below must stay in lock-step with the walking loop at the bottom.
void NativeMovRegMemPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMemPatching* nm;
  uint idx = 0;    // number of testable patterns emitted so far
  uint idx1;
  // Offsets chosen to cover zero, all-ones, signed extremes, and values
  // around the 12/13-bit immediate boundaries.
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  // Allow the assembler to emit any instruction regardless of CPU features.
  VM_Version::allow_all();

  AddressLiteral al(0xffffffff, relocInfo::external_word_type);
  // Integer loads: immediate form, then sethi+nop+add followed by the
  // register form.
  a->ldsw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  // Floating-point loads.
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  // Integer stores, in the same immediate/register pairing.
  a->stw( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  // Floating-point stores.
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  // Exercise the first pattern directly.
  nm = nativeMovRegMemPatching_at( cb.insts_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  // Walk the remaining idx-1 patterns; for each, round-trip every test
  // offset through set_offset()/offset().  Immediate forms can only hold
  // a low10() value, register forms take the full offset.
  while (--idx) {
    nm = nativeMovRegMemPatching_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  // Restore the real CPU feature set.
  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeMovRegMemPatching class
duke@435 813
duke@435 814
duke@435 815 //--------------------------------------------------------------------------------
duke@435 816
duke@435 817
duke@435 818 void NativeJump::verify() {
duke@435 819 NativeInstruction::verify();
duke@435 820 int i0 = long_at(sethi_offset);
duke@435 821 int i1 = long_at(jmpl_offset);
duke@435 822 assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
duke@435 823 // verify the pattern "sethi %hi22(imm), treg ; jmpl treg, %lo10(imm), lreg"
duke@435 824 Register rd = inv_rd(i0);
duke@435 825 #ifndef _LP64
duke@435 826 if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
duke@435 827 (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
duke@435 828 (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
duke@435 829 inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
duke@435 830 rd == inv_rs1(i1))) {
duke@435 831 fatal("not a jump_to instruction");
duke@435 832 }
duke@435 833 #else
duke@435 834 // In LP64, the jump instruction location varies for non relocatable
duke@435 835 // jumps, for example is could be sethi, xor, jmp instead of the
duke@435 836 // 7 instructions for sethi. So let's check sethi only.
duke@435 837 if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
duke@435 838 fatal("not a jump_to instruction");
duke@435 839 }
duke@435 840 #endif
duke@435 841 }
duke@435 842
duke@435 843
duke@435 844 void NativeJump::print() {
duke@435 845 tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, instruction_address(), jump_destination());
duke@435 846 }
duke@435 847
duke@435 848
// Code for unit testing implementation of NativeJump class
//
// Assembles two sethi/jmpl jump patterns (one discarding the return address
// into G0, one linking into L3), then round-trips a set of destinations
// through set_jump_destination()/jump_destination() on the second one.
void NativeJump::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeJump* nj;
  uint idx;
  // Destination deltas covering zero, all-ones, signed extremes, and
  // values around the small-immediate boundaries.
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  // Allow the assembler to emit any instruction regardless of CPU features.
  VM_Version::allow_all();

  AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type);
  // Pattern 1: jump with the return address discarded (G0 link register).
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), G0, RelocationHolder::none);
  a->delayed()->nop();
  // Pattern 2: jump linking the return address into L3.
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
  a->delayed()->nop();

  nj = nativeJump_at( cb.insts_begin() );
  nj->print();

  // Round-trip each test offset through the destination accessors on the
  // second pattern.
  nj = nativeJump_at( nj->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nj->set_jump_destination( nj->instruction_address() + offsets[idx] );
    assert(nj->jump_destination() == (nj->instruction_address() + offsets[idx]), "check unit test");
    nj->print();
  }

  // Restore the real CPU feature set.
  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeJump class
duke@435 892
duke@435 893
// Not implemented on SPARC; aborts if called.  Entry patching on this
// platform goes through NativeJump::patch_verified_entry() below instead.
void NativeJump::insert(address code_pos, address entry) {
  Unimplemented();
}
duke@435 897
// MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
// The problem: jump_to <dest> is a 3-word instruction (including its delay slot).
// Atomic write can be only with 1 word.
//
// Instead of a jump, a single load-from-null instruction is planted at the
// verified entry; executing it faults, and the signal handler redirects the
// thread (the 'entry' and 'dest' parameters are unused in this scheme).
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // Here's one way to do it: Pre-allocate a three-word jump sequence somewhere
  // in the header of the nmethod, within a short branch's span of the patch point.
  // Set up the jump sequence using NativeJump::insert, and then use an annulled
  // unconditional branch at the target site (an atomic 1-word update).
  // Limitations: You can only patch nmethods, with any given nmethod patched at
  // most once, and the patch must be in the nmethod's header.
  // It's messy, but you can ask the CodeCache for the nmethod containing the
  // target address.

  // %%%%% For now, do something MT-stupid:
  ResourceMark rm;
  int code_size = 1 * BytesPerInstWord;  // single word: the patch is atomic
  CodeBuffer cb(verified_entry, code_size + 1);
  MacroAssembler* a = new MacroAssembler(&cb);
  if (VM_Version::v9_instructions_work()) {
    a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
  } else {
    a->lduw(G0, 0, O7); // "ld" must agree with code in the signal handler
  }
  // Flush the patched word so other CPUs see the new instruction.
  ICache::invalidate_range(verified_entry, code_size);
}
duke@435 923
duke@435 924
duke@435 925 void NativeIllegalInstruction::insert(address code_pos) {
duke@435 926 NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos);
duke@435 927 nii->set_long_at(0, illegal_instruction());
duke@435 928 }
duke@435 929
// Cached encoding of the canonical illegal instruction; 0 until first
// computed by NativeInstruction::illegal_instruction().
static int illegal_instruction_bits = 0;
duke@435 931
// Returns the 32-bit encoding used as the canonical "illegal" instruction:
// a trap on ST_RESERVED_FOR_USER_0 + 1.  The encoding is produced once by
// assembling the trap into a stack scratch buffer, then cached in
// illegal_instruction_bits for all later calls.
int NativeInstruction::illegal_instruction() {
  if (illegal_instruction_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();                    // remember where the word lands
    a->trap(ST_RESERVED_FOR_USER_0 + 1);
    int bits = *(int*)ia;                    // read back the emitted word
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    illegal_instruction_bits = bits;
    assert(illegal_instruction_bits != 0, "oops");
  }
  return illegal_instruction_bits;
}
duke@435 947
// Cached encoding of the inline-cache-miss trap; 0 until first computed
// by NativeInstruction::is_ic_miss_trap().
static int ic_miss_trap_bits = 0;
duke@435 949
// Returns true if this instruction word is the conditional trap used for
// inline-cache misses (trap-on-not-equal with ST_RESERVED_FOR_USER_0 + 2).
// The reference encoding is assembled once into a stack scratch buffer and
// cached in ic_miss_trap_bits; afterwards the check is a single compare.
bool NativeInstruction::is_ic_miss_trap() {
  if (ic_miss_trap_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();                    // remember where the word lands
    a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
    int bits = *(int*)ia;                    // read back the emitted word
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    ic_miss_trap_bits = bits;
    assert(ic_miss_trap_bits != 0, "oops");
  }
  return long_at(0) == ic_miss_trap_bits;
}
duke@435 965
duke@435 966
duke@435 967 bool NativeInstruction::is_illegal() {
duke@435 968 if (illegal_instruction_bits == 0) {
duke@435 969 return false;
duke@435 970 }
duke@435 971 return long_at(0) == illegal_instruction_bits;
duke@435 972 }
duke@435 973
duke@435 974
duke@435 975 void NativeGeneralJump::verify() {
duke@435 976 assert(((NativeInstruction *)this)->is_jump() ||
duke@435 977 ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
duke@435 978 }
duke@435 979
duke@435 980
// Plant an unconditional, non-annulled branch-always at code_pos whose
// 22-bit word displacement targets entry (a single-word, atomic write).
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  Assembler::Condition condition = Assembler::always;
  // Assemble the branch word field by field: op2 selector, annul bit clear,
  // "always" condition, and the 22-bit displacement from code_pos to entry.
  int x = Assembler::op2(Assembler::br_op2) | Assembler::annul(false) |
    Assembler::cond(condition) | Assembler::wdisp((intptr_t)entry, (intptr_t)code_pos, 22);
  NativeGeneralJump* ni = (NativeGeneralJump*) nativeInstruction_at(code_pos);
  ni->set_long_at(0, x);
}
duke@435 988
duke@435 989
// MT-safe patching of a jmp instruction (and following word).
// First patches the second word, and then atomicly replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word.  This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the call.
//
// code_buffer must hold the two replacement instruction words;
// instr_addr is the jump site being replaced.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
   // Patching is only legal when no thread can be executing the old code
   // concurrently in an uncontrolled way.
   assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
   assert (instr_addr != NULL, "illegal address for code patching");
   NativeGeneralJump* h_jump =  nativeGeneralJump_at (instr_addr); // checking that it is a call
   assert(NativeGeneralJump::instruction_size == 8, "wrong instruction size; must be 8");
   int i0 = ((int*)code_buffer)[0];
   int i1 = ((int*)code_buffer)[1];
   int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   h_jump->set_long_at(1*BytesPerInstWord, i1);  // second word first...
   h_jump->set_long_at(0*BytesPerInstWord, i0);  // ...then the atomic first word
   // NOTE:  It is possible that another thread T will execute
   // only the second patched word.
   // In other words, since the original instruction is this
   //    jmp patching_stub; nop                    (NativeGeneralJump)
   // and the new sequence from the buffer is this:
   //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
   // what T will execute is this:
   //    jmp patching_stub; add %r, %lo(K), %r
   // thereby putting garbage into %r before calling the patching stub.
   // This is OK, because the patching stub ignores the value of %r.

   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
          "must not interfere with original call");
}

mercurial