src/cpu/sparc/vm/nativeInst_sparc.cpp

author:      hseigel
date:        Thu, 26 Sep 2013 10:25:02 -0400
changeset:   5784:190899198332
parent:      5283:46c544b8fbfc
child:       6876:710a3c8b516e
child:       8647:0b611970fa8b
permissions: -rw-r--r--

7195622: CheckUnhandledOops has limited usefulness now
Summary: Enable CHECK_UNHANDLED_OOPS in fastdebug builds across all supported platforms.
Reviewed-by: coleenp, hseigel, dholmes, stefank, twisti, ihse, rdurbin
Contributed-by: lois.foltan@oracle.com

duke@435 1 /*
hseigel@5784 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
twisti@4323 26 #include "asm/macroAssembler.hpp"
stefank@2314 27 #include "memory/resourceArea.hpp"
stefank@2314 28 #include "nativeInst_sparc.hpp"
stefank@2314 29 #include "oops/oop.inline.hpp"
stefank@2314 30 #include "runtime/handles.hpp"
stefank@2314 31 #include "runtime/sharedRuntime.hpp"
stefank@2314 32 #include "runtime/stubRoutines.hpp"
stefank@2314 33 #include "utilities/ostream.hpp"
stefank@2314 34 #ifdef COMPILER1
stefank@2314 35 #include "c1/c1_Runtime1.hpp"
stefank@2314 36 #endif
duke@435 37
duke@435 38
kamg@551 39 bool NativeInstruction::is_dtrace_trap() {
kamg@551 40 return !is_nop();
kamg@551 41 }
kamg@551 42
duke@435 43 void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
duke@435 44 ResourceMark rm;
duke@435 45 CodeBuffer buf(instaddr, 10 * BytesPerInstWord );
duke@435 46 MacroAssembler* _masm = new MacroAssembler(&buf);
duke@435 47 Register destreg;
duke@435 48
duke@435 49 destreg = inv_rd(*(unsigned int *)instaddr);
duke@435 50 // Generate the new sequence
twisti@1162 51 _masm->patchable_sethi(x, destreg);
duke@435 52 ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
duke@435 53 }
duke@435 54
never@2657 55 void NativeInstruction::verify_data64_sethi(address instaddr, intptr_t x) {
never@2657 56 ResourceMark rm;
never@2657 57 unsigned char buffer[10 * BytesPerInstWord];
never@2657 58 CodeBuffer buf(buffer, 10 * BytesPerInstWord);
never@2657 59 MacroAssembler masm(&buf);
never@2657 60
never@2657 61 Register destreg = inv_rd(*(unsigned int *)instaddr);
never@2657 62 // Generate the proper sequence into a temporary buffer and compare
never@2657 63 // it with the original sequence.
never@2657 64 masm.patchable_sethi(x, destreg);
never@2657 65 int len = masm.pc() - buffer;
never@2657 66 for (int i = 0; i < len; i++) {
never@2657 67 assert(instaddr[i] == buffer[i], "instructions must match");
never@2657 68 }
never@2657 69 }
never@2657 70
duke@435 71 void NativeInstruction::verify() {
duke@435 72 // make sure code pattern is actually an instruction address
duke@435 73 address addr = addr_at(0);
duke@435 74 if (addr == 0 || ((intptr_t)addr & 3) != 0) {
duke@435 75 fatal("not an instruction address");
duke@435 76 }
duke@435 77 }
duke@435 78
duke@435 79 void NativeInstruction::print() {
duke@435 80 tty->print_cr(INTPTR_FORMAT ": 0x%x", addr_at(0), long_at(0));
duke@435 81 }
duke@435 82
duke@435 83 void NativeInstruction::set_long_at(int offset, int i) {
duke@435 84 address addr = addr_at(offset);
duke@435 85 *(int*)addr = i;
duke@435 86 ICache::invalidate_word(addr);
duke@435 87 }
duke@435 88
duke@435 89 void NativeInstruction::set_jlong_at(int offset, jlong i) {
duke@435 90 address addr = addr_at(offset);
duke@435 91 *(jlong*)addr = i;
duke@435 92 // Don't need to invalidate 2 words here, because
duke@435 93 // the flush instruction operates on doublewords.
duke@435 94 ICache::invalidate_word(addr);
duke@435 95 }
duke@435 96
duke@435 97 void NativeInstruction::set_addr_at(int offset, address x) {
duke@435 98 address addr = addr_at(offset);
duke@435 99 assert( ((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment");
duke@435 100 *(uintptr_t*)addr = (uintptr_t)x;
duke@435 101 // Don't need to invalidate 2 words here in the 64-bit case,
duke@435 102 // because the flush instruction operates on doublewords.
duke@435 103 ICache::invalidate_word(addr);
duke@435 104 // The Intel code has this assertion for NativeCall::set_destination,
duke@435 105 // NativeMovConstReg::set_data, NativeMovRegMem::set_offset,
duke@435 106 // NativeJump::set_jump_destination, and NativePushImm32::set_data
duke@435 107 //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction")
duke@435 108 }
duke@435 109
duke@435 110 bool NativeInstruction::is_zero_test(Register &reg) {
duke@435 111 int x = long_at(0);
duke@435 112 Assembler::op3s temp = (Assembler::op3s) (Assembler::sub_op3 | Assembler::cc_bit_op3);
duke@435 113 if (is_op3(x, temp, Assembler::arith_op) &&
duke@435 114 inv_immed(x) && inv_rd(x) == G0) {
duke@435 115 if (inv_rs1(x) == G0) {
duke@435 116 reg = inv_rs2(x);
duke@435 117 return true;
duke@435 118 } else if (inv_rs2(x) == G0) {
duke@435 119 reg = inv_rs1(x);
duke@435 120 return true;
duke@435 121 }
duke@435 122 }
duke@435 123 return false;
duke@435 124 }
duke@435 125
duke@435 126 bool NativeInstruction::is_load_store_with_small_offset(Register reg) {
duke@435 127 int x = long_at(0);
duke@435 128 if (is_op(x, Assembler::ldst_op) &&
duke@435 129 inv_rs1(x) == reg && inv_immed(x)) {
duke@435 130 return true;
duke@435 131 }
duke@435 132 return false;
duke@435 133 }
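
The two predicates above are pattern tests over a single instruction word. A minimal caller sketch, assuming `pc` and `base` are supplied by the caller (hypothetical, not code from this file):

    // Hypothetical caller sketch: classifying the instruction word at 'pc'.
    Register tested;
    NativeInstruction* insn = nativeInstruction_at(pc);
    if (insn->is_zero_test(tested)) {
      // a subcc with %g0 as destination, i.e. a compare of 'tested' against zero
    } else if (insn->is_load_store_with_small_offset(base)) {
      // a load or store addressed as [base + simm13]
    }
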
duke@435 134
duke@435 135 void NativeCall::verify() {
duke@435 136 NativeInstruction::verify();
duke@435 137 // make sure code pattern is actually a call instruction
duke@435 138 if (!is_op(long_at(0), Assembler::call_op)) {
duke@435 139 fatal("not a call");
duke@435 140 }
duke@435 141 }
duke@435 142
duke@435 143 void NativeCall::print() {
duke@435 144 tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
duke@435 145 }
duke@435 146
duke@435 147
duke@435 148 // MT-safe patching of a call instruction (and following word).
duke@435 149 // First patches the second word, and then atomically replaces
duke@435 150 // the first word with the first new instruction word.
duke@435 151 // Other processors might briefly see the old first word
duke@435 152 // followed by the new second word. This is OK if the old
duke@435 153 // second word is harmless, and the new second word may be
duke@435 154 // harmlessly executed in the delay slot of the call.
duke@435 155 void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
duke@435 156 assert(Patching_lock->is_locked() ||
duke@435 157 SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
duke@435 158 assert (instr_addr != NULL, "illegal address for code patching");
duke@435 159 NativeCall* n_call = nativeCall_at (instr_addr); // checking that it is a call
duke@435 160 assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8");
duke@435 161 int i0 = ((int*)code_buffer)[0];
duke@435 162 int i1 = ((int*)code_buffer)[1];
duke@435 163 int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
duke@435 164 assert(inv_op(*contention_addr) == Assembler::arith_op ||
morris@5283 165 *contention_addr == nop_instruction(),
duke@435 166 "must not interfere with original call");
duke@435 167 // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
duke@435 168 n_call->set_long_at(1*BytesPerInstWord, i1);
duke@435 169 n_call->set_long_at(0*BytesPerInstWord, i0);
duke@435 170 // NOTE: It is possible that another thread T will execute
duke@435 171 // only the second patched word.
duke@435 172 // In other words, since the original instruction is this
duke@435 173 // call patching_stub; nop (NativeCall)
duke@435 174 // and the new sequence from the buffer is this:
duke@435 175 // sethi %hi(K), %r; add %r, %lo(K), %r (NativeMovConstReg)
duke@435 176 // what T will execute is this:
duke@435 177 // call patching_stub; add %r, %lo(K), %r
duke@435 178 // thereby putting garbage into %r before calling the patching stub.
duke@435 179 // This is OK, because the patching stub ignores the value of %r.
duke@435 180
duke@435 181 // Make sure the first-patched instruction, which may co-exist
duke@435 182 // briefly with the call, will do something harmless.
duke@435 183 assert(inv_op(*contention_addr) == Assembler::arith_op ||
morris@5283 184 *contention_addr == nop_instruction(),
duke@435 185 "must not interfere with original call");
duke@435 186 }
duke@435 187
duke@435 188 // Similar to replace_mt_safe, but just changes the destination. The
duke@435 189 // important thing is that free-running threads are able to execute this
duke@435 190 // call instruction at all times. Thus, the displacement field must be
duke@435 191 // instruction-word-aligned. This is always true on SPARC.
duke@435 192 //
duke@435 193 // Used in the runtime linkage of calls; see class CompiledIC.
duke@435 194 void NativeCall::set_destination_mt_safe(address dest) {
duke@435 195 assert(Patching_lock->is_locked() ||
duke@435 196 SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
duke@435 197 // set_destination uses set_long_at which does the ICache::invalidate
duke@435 198 set_destination(dest);
duke@435 199 }
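
A minimal usage sketch for the routine above, assuming `call_site` points at a SPARC call instruction in generated code and `new_entry` is the new target (hypothetical names, not code from this file):

    // Hypothetical usage sketch: retargeting a live call site.
    NativeCall* call = nativeCall_at(call_site);   // also checks the call opcode
    call->set_destination_mt_safe(new_entry);      // single-word displacement update
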
duke@435 200
duke@435 201 // Code for unit testing implementation of NativeCall class
duke@435 202 void NativeCall::test() {
duke@435 203 #ifdef ASSERT
duke@435 204 ResourceMark rm;
duke@435 205 CodeBuffer cb("test", 100, 100);
duke@435 206 MacroAssembler* a = new MacroAssembler(&cb);
duke@435 207 NativeCall *nc;
duke@435 208 uint idx;
duke@435 209 int offsets[] = {
duke@435 210 0x0,
duke@435 211 0xfffffff0,
duke@435 212 0x7ffffff0,
duke@435 213 0x80000000,
duke@435 214 0x20,
duke@435 215 0x4000,
duke@435 216 };
duke@435 217
duke@435 218 VM_Version::allow_all();
duke@435 219
duke@435 220 a->call( a->pc(), relocInfo::none );
duke@435 221 a->delayed()->nop();
twisti@2103 222 nc = nativeCall_at( cb.insts_begin() );
duke@435 223 nc->print();
duke@435 224
duke@435 225 nc = nativeCall_overwriting_at( nc->next_instruction_address() );
duke@435 226 for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
twisti@2103 227 nc->set_destination( cb.insts_begin() + offsets[idx] );
twisti@2103 228 assert(nc->destination() == (cb.insts_begin() + offsets[idx]), "check unit test");
duke@435 229 nc->print();
duke@435 230 }
duke@435 231
twisti@2103 232 nc = nativeCall_before( cb.insts_begin() + 8 );
duke@435 233 nc->print();
duke@435 234
duke@435 235 VM_Version::revert();
duke@435 236 #endif
duke@435 237 }
duke@435 238 // End code for unit testing implementation of NativeCall class
duke@435 239
duke@435 240 //-------------------------------------------------------------------
duke@435 241
duke@435 242 #ifdef _LP64
duke@435 243
duke@435 244 void NativeFarCall::set_destination(address dest) {
duke@435 245 // Address materialized in the instruction stream, so nothing to do.
duke@435 246 return;
duke@435 247 #if 0 // What we'd do if we really did want to change the destination
duke@435 248 if (destination() == dest) {
duke@435 249 return;
duke@435 250 }
duke@435 251 ResourceMark rm;
duke@435 252 CodeBuffer buf(addr_at(0), instruction_size + 1);
duke@435 253 MacroAssembler* _masm = new MacroAssembler(&buf);
duke@435 254 // Generate the new sequence
twisti@1162 255 AddressLiteral(dest);
twisti@1162 256 _masm->jumpl_to(dest, O7, O7);
duke@435 257 ICache::invalidate_range(addr_at(0), instruction_size );
duke@435 258 #endif
duke@435 259 }
duke@435 260
duke@435 261 void NativeFarCall::verify() {
duke@435 262 // make sure code pattern is actually a jumpl_to instruction
duke@435 263 assert((int)instruction_size == (int)NativeJump::instruction_size, "same as jump_to");
duke@435 264 assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
duke@435 265 nativeJump_at(addr_at(0))->verify();
duke@435 266 }
duke@435 267
duke@435 268 bool NativeFarCall::is_call_at(address instr) {
duke@435 269 return nativeInstruction_at(instr)->is_sethi();
duke@435 270 }
duke@435 271
duke@435 272 void NativeFarCall::print() {
duke@435 273 tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
duke@435 274 }
duke@435 275
duke@435 276 bool NativeFarCall::destination_is_compiled_verified_entry_point() {
duke@435 277 nmethod* callee = CodeCache::find_nmethod(destination());
duke@435 278 if (callee == NULL) {
duke@435 279 return false;
duke@435 280 } else {
duke@435 281 return destination() == callee->verified_entry_point();
duke@435 282 }
duke@435 283 }
duke@435 284
duke@435 285 // MT-safe patching of a far call.
duke@435 286 void NativeFarCall::replace_mt_safe(address instr_addr, address code_buffer) {
duke@435 287 Unimplemented();
duke@435 288 }
duke@435 289
duke@435 290 // Code for unit testing implementation of NativeFarCall class
duke@435 291 void NativeFarCall::test() {
duke@435 292 Unimplemented();
duke@435 293 }
duke@435 294 // End code for unit testing implementation of NativeFarCall class
duke@435 295
duke@435 296 #endif // _LP64
duke@435 297
duke@435 298 //-------------------------------------------------------------------
duke@435 299
duke@435 300
duke@435 301 void NativeMovConstReg::verify() {
duke@435 302 NativeInstruction::verify();
coleenp@4037 303 // make sure code pattern is actually a "set_metadata" synthetic instruction
duke@435 304 // see MacroAssembler::set_oop()
duke@435 305 int i0 = long_at(sethi_offset);
duke@435 306 int i1 = long_at(add_offset);
duke@435 307
duke@435 308 // verify the pattern "sethi %hi22(imm), reg ; add reg, %lo10(imm), reg"
duke@435 309 Register rd = inv_rd(i0);
duke@435 310 #ifndef _LP64
duke@435 311 if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
duke@435 312 is_op3(i1, Assembler::add_op3, Assembler::arith_op) &&
duke@435 313 inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
duke@435 314 rd == inv_rs1(i1) && rd == inv_rd(i1))) {
coleenp@4037 315 fatal("not a set_metadata");
duke@435 316 }
duke@435 317 #else
duke@435 318 if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
coleenp@4037 319 fatal("not a set_metadata");
duke@435 320 }
duke@435 321 #endif
duke@435 322 }
duke@435 323
duke@435 324
duke@435 325 void NativeMovConstReg::print() {
duke@435 326 tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
duke@435 327 }
duke@435 328
duke@435 329
duke@435 330 #ifdef _LP64
duke@435 331 intptr_t NativeMovConstReg::data() const {
duke@435 332 return data64(addr_at(sethi_offset), long_at(add_offset));
duke@435 333 }
duke@435 334 #else
duke@435 335 intptr_t NativeMovConstReg::data() const {
duke@435 336 return data32(long_at(sethi_offset), long_at(add_offset));
duke@435 337 }
duke@435 338 #endif
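
The data() accessors above reconstruct the constant from the sethi and add instruction words. As a rough illustration of the 32-bit bit layout (hypothetical helper, not the VM's data32/data64):

    // Rough illustration only: sethi supplies bits 31..10 of the constant via
    // its imm22 field; the following add supplies the low 10 bits via %lo.
    static unsigned int decode_sethi_add_example(unsigned int sethi_insn, unsigned int add_insn) {
      unsigned int hi22 = sethi_insn & 0x3fffff;   // imm22 field of sethi
      unsigned int lo10 = add_insn   & 0x3ff;      // %lo(value) in the simm13 field
      return (hi22 << 10) | lo10;
    }
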
duke@435 339
duke@435 340
duke@435 341 void NativeMovConstReg::set_data(intptr_t x) {
duke@435 342 #ifdef _LP64
duke@435 343 set_data64_sethi(addr_at(sethi_offset), x);
duke@435 344 #else
duke@435 345 set_long_at(sethi_offset, set_data32_sethi( long_at(sethi_offset), x));
duke@435 346 #endif
duke@435 347 set_long_at(add_offset, set_data32_simm13( long_at(add_offset), x));
duke@435 348
duke@435 349 // also store the value into an oop_Relocation cell, if any
twisti@1918 350 CodeBlob* cb = CodeCache::find_blob(instruction_address());
twisti@1918 351 nmethod* nm = cb ? cb->as_nmethod_or_null() : NULL;
duke@435 352 if (nm != NULL) {
duke@435 353 RelocIterator iter(nm, instruction_address(), next_instruction_address());
duke@435 354 oop* oop_addr = NULL;
coleenp@4037 355 Metadata** metadata_addr = NULL;
duke@435 356 while (iter.next()) {
duke@435 357 if (iter.type() == relocInfo::oop_type) {
duke@435 358 oop_Relocation *r = iter.oop_reloc();
duke@435 359 if (oop_addr == NULL) {
duke@435 360 oop_addr = r->oop_addr();
hseigel@5784 361 *oop_addr = cast_to_oop(x);
duke@435 362 } else {
duke@435 363 assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
duke@435 364 }
duke@435 365 }
coleenp@4037 366 if (iter.type() == relocInfo::metadata_type) {
coleenp@4037 367 metadata_Relocation *r = iter.metadata_reloc();
coleenp@4037 368 if (metadata_addr == NULL) {
coleenp@4037 369 metadata_addr = r->metadata_addr();
coleenp@4037 370 *metadata_addr = (Metadata*)x;
coleenp@4037 371 } else {
coleenp@4037 372 assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
coleenp@4037 373 }
coleenp@4037 374 }
duke@435 375 }
duke@435 376 }
duke@435 377 }
duke@435 378
duke@435 379
duke@435 380 // Code for unit testing implementation of NativeMovConstReg class
duke@435 381 void NativeMovConstReg::test() {
duke@435 382 #ifdef ASSERT
duke@435 383 ResourceMark rm;
duke@435 384 CodeBuffer cb("test", 100, 100);
duke@435 385 MacroAssembler* a = new MacroAssembler(&cb);
duke@435 386 NativeMovConstReg* nm;
duke@435 387 uint idx;
duke@435 388 int offsets[] = {
duke@435 389 0x0,
duke@435 390 0x7fffffff,
duke@435 391 0x80000000,
duke@435 392 0xffffffff,
duke@435 393 0x20,
duke@435 394 4096,
duke@435 395 4097,
duke@435 396 };
duke@435 397
duke@435 398 VM_Version::allow_all();
duke@435 399
twisti@1162 400 AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
twisti@1162 401 a->sethi(al1, I3);
twisti@1162 402 a->add(I3, al1.low10(), I3);
twisti@1162 403 AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
twisti@1162 404 a->sethi(al2, O2);
twisti@1162 405 a->add(O2, al2.low10(), O2);
duke@435 406
twisti@2103 407 nm = nativeMovConstReg_at( cb.insts_begin() );
duke@435 408 nm->print();
duke@435 409
duke@435 410 nm = nativeMovConstReg_at( nm->next_instruction_address() );
duke@435 411 for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
duke@435 412 nm->set_data( offsets[idx] );
duke@435 413 assert(nm->data() == offsets[idx], "check unit test");
duke@435 414 }
duke@435 415 nm->print();
duke@435 416
duke@435 417 VM_Version::revert();
duke@435 418 #endif
duke@435 419 }
duke@435 420 // End code for unit testing implementation of NativeMovConstReg class
duke@435 421
duke@435 422 //-------------------------------------------------------------------
duke@435 423
duke@435 424 void NativeMovConstRegPatching::verify() {
duke@435 425 NativeInstruction::verify();
duke@435 426 // Make sure code pattern is sethi/nop/add.
duke@435 427 int i0 = long_at(sethi_offset);
duke@435 428 int i1 = long_at(nop_offset);
duke@435 429 int i2 = long_at(add_offset);
duke@435 430 assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
duke@435 431
duke@435 432 // Verify the pattern "sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg"
duke@435 433 // The casual reader should note that on Sparc a nop is a special case of sethi
duke@435 434 // in which the destination register is %g0.
duke@435 435 Register rd0 = inv_rd(i0);
duke@435 436 Register rd1 = inv_rd(i1);
duke@435 437 if (!(is_op2(i0, Assembler::sethi_op2) && rd0 != G0 &&
duke@435 438 is_op2(i1, Assembler::sethi_op2) && rd1 == G0 && // nop is a special case of sethi
duke@435 439 is_op3(i2, Assembler::add_op3, Assembler::arith_op) &&
duke@435 440 inv_immed(i2) && (unsigned)get_simm13(i2) < (1 << 10) &&
duke@435 441 rd0 == inv_rs1(i2) && rd0 == inv_rd(i2))) {
coleenp@4037 442 fatal("not a set_metadata");
duke@435 443 }
duke@435 444 }
duke@435 445
duke@435 446
duke@435 447 void NativeMovConstRegPatching::print() {
duke@435 448 tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
duke@435 449 }
duke@435 450
duke@435 451
duke@435 452 int NativeMovConstRegPatching::data() const {
duke@435 453 #ifdef _LP64
duke@435 454 return data64(addr_at(sethi_offset), long_at(add_offset));
duke@435 455 #else
duke@435 456 return data32(long_at(sethi_offset), long_at(add_offset));
duke@435 457 #endif
duke@435 458 }
duke@435 459
duke@435 460
duke@435 461 void NativeMovConstRegPatching::set_data(int x) {
duke@435 462 #ifdef _LP64
duke@435 463 set_data64_sethi(addr_at(sethi_offset), x);
duke@435 464 #else
duke@435 465 set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
duke@435 466 #endif
duke@435 467 set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));
duke@435 468
duke@435 469 // also store the value into an oop_Relocation cell, if any
twisti@1918 470 CodeBlob* cb = CodeCache::find_blob(instruction_address());
twisti@1918 471 nmethod* nm = cb ? cb->as_nmethod_or_null() : NULL;
duke@435 472 if (nm != NULL) {
duke@435 473 RelocIterator iter(nm, instruction_address(), next_instruction_address());
duke@435 474 oop* oop_addr = NULL;
coleenp@4037 475 Metadata** metadata_addr = NULL;
duke@435 476 while (iter.next()) {
duke@435 477 if (iter.type() == relocInfo::oop_type) {
duke@435 478 oop_Relocation *r = iter.oop_reloc();
duke@435 479 if (oop_addr == NULL) {
duke@435 480 oop_addr = r->oop_addr();
hseigel@5784 481 *oop_addr = cast_to_oop(x);
duke@435 482 } else {
duke@435 483 assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
duke@435 484 }
duke@435 485 }
coleenp@4037 486 if (iter.type() == relocInfo::metadata_type) {
coleenp@4037 487 metadata_Relocation *r = iter.metadata_reloc();
coleenp@4037 488 if (metadata_addr == NULL) {
coleenp@4037 489 metadata_addr = r->metadata_addr();
coleenp@4037 490 *metadata_addr = (Metadata*)x;
coleenp@4037 491 } else {
coleenp@4037 492 assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
coleenp@4037 493 }
coleenp@4037 494 }
duke@435 495 }
duke@435 496 }
duke@435 497 }
duke@435 498
duke@435 499
duke@435 500 // Code for unit testing implementation of NativeMovConstRegPatching class
duke@435 501 void NativeMovConstRegPatching::test() {
duke@435 502 #ifdef ASSERT
duke@435 503 ResourceMark rm;
duke@435 504 CodeBuffer cb("test", 100, 100);
duke@435 505 MacroAssembler* a = new MacroAssembler(&cb);
duke@435 506 NativeMovConstRegPatching* nm;
duke@435 507 uint idx;
duke@435 508 int offsets[] = {
duke@435 509 0x0,
duke@435 510 0x7fffffff,
duke@435 511 0x80000000,
duke@435 512 0xffffffff,
duke@435 513 0x20,
duke@435 514 4096,
duke@435 515 4097,
duke@435 516 };
duke@435 517
duke@435 518 VM_Version::allow_all();
duke@435 519
twisti@1162 520 AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
twisti@1162 521 a->sethi(al1, I3);
duke@435 522 a->nop();
twisti@1162 523 a->add(I3, al1.low10(), I3);
twisti@1162 524 AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
twisti@1162 525 a->sethi(al2, O2);
duke@435 526 a->nop();
twisti@1162 527 a->add(O2, al2.low10(), O2);
duke@435 528
twisti@2103 529 nm = nativeMovConstRegPatching_at( cb.insts_begin() );
duke@435 530 nm->print();
duke@435 531
duke@435 532 nm = nativeMovConstRegPatching_at( nm->next_instruction_address() );
duke@435 533 for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
duke@435 534 nm->set_data( offsets[idx] );
duke@435 535 assert(nm->data() == offsets[idx], "check unit test");
duke@435 536 }
duke@435 537 nm->print();
duke@435 538
duke@435 539 VM_Version::revert();
duke@435 540 #endif // ASSERT
duke@435 541 }
duke@435 542 // End code for unit testing implementation of NativeMovConstRegPatching class
duke@435 543
duke@435 544
duke@435 545 //-------------------------------------------------------------------
duke@435 546
duke@435 547
duke@435 548 void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
duke@435 549 Untested("copy_instruction_to");
duke@435 550 int instruction_size = next_instruction_address() - instruction_address();
duke@435 551 for (int i = 0; i < instruction_size; i += BytesPerInstWord) {
duke@435 552 *(int*)(new_instruction_address + i) = *(int*)(address(this) + i);
duke@435 553 }
duke@435 554 }
duke@435 555
duke@435 556
duke@435 557 void NativeMovRegMem::verify() {
duke@435 558 NativeInstruction::verify();
duke@435 559 // make sure code pattern is actually a "ld" or "st" of some sort.
duke@435 560 int i0 = long_at(0);
duke@435 561 int op3 = inv_op3(i0);
duke@435 562
duke@435 563 assert((int)add_offset == NativeMovConstReg::add_offset, "sethi size ok");
duke@435 564
duke@435 565 if (!(is_op(i0, Assembler::ldst_op) &&
duke@435 566 inv_immed(i0) &&
duke@435 567 0 != (op3 < op3_ldst_int_limit
duke@435 568 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st)
duke@435 569 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))))
duke@435 570 {
duke@435 571 int i1 = long_at(ldst_offset);
duke@435 572 Register rd = inv_rd(i0);
duke@435 573
duke@435 574 op3 = inv_op3(i1);
duke@435 575 if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
duke@435 576 0 != (op3 < op3_ldst_int_limit
duke@435 577 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st)
duke@435 578 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
duke@435 579 fatal("not a ld* or st* op");
duke@435 580 }
duke@435 581 }
duke@435 582 }
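
The opcode check above can be read as a mask lookup: integer load/store op3 codes are tested against one bit mask, and floating-point op3 codes against a second mask indexed relative to the integer limit. A standalone restatement with illustrative parameter names (a sketch, not a VM helper):

    // Rough restatement of the classification test in verify() above.
    static bool is_interesting_ldst_example(int op3, int int_limit, int int_mask, int fp_mask) {
      return (op3 < int_limit)
           ? (((1 << op3)               & int_mask) != 0)
           : (((1 << (op3 - int_limit)) & fp_mask ) != 0);
    }
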
duke@435 583
duke@435 584
duke@435 585 void NativeMovRegMem::print() {
duke@435 586 if (is_immediate()) {
duke@435 587 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
duke@435 588 } else {
duke@435 589 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
duke@435 590 }
duke@435 591 }
duke@435 592
duke@435 593
duke@435 594 // Code for unit testing implementation of NativeMovRegMem class
duke@435 595 void NativeMovRegMem::test() {
duke@435 596 #ifdef ASSERT
duke@435 597 ResourceMark rm;
duke@435 598 CodeBuffer cb("test", 1000, 1000);
duke@435 599 MacroAssembler* a = new MacroAssembler(&cb);
duke@435 600 NativeMovRegMem* nm;
duke@435 601 uint idx = 0;
duke@435 602 uint idx1;
duke@435 603 int offsets[] = {
duke@435 604 0x0,
duke@435 605 0xffffffff,
duke@435 606 0x7fffffff,
duke@435 607 0x80000000,
duke@435 608 4096,
duke@435 609 4097,
duke@435 610 0x20,
duke@435 611 0x4000,
duke@435 612 };
duke@435 613
duke@435 614 VM_Version::allow_all();
duke@435 615
twisti@1162 616 AddressLiteral al1(0xffffffff, relocInfo::external_word_type);
twisti@1162 617 AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type);
twisti@1162 618 a->ldsw( G5, al1.low10(), G4 ); idx++;
twisti@1162 619 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
duke@435 620 a->ldsw( G5, I3, G4 ); idx++;
twisti@1162 621 a->ldsb( G5, al1.low10(), G4 ); idx++;
twisti@1162 622 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
duke@435 623 a->ldsb( G5, I3, G4 ); idx++;
twisti@1162 624 a->ldsh( G5, al1.low10(), G4 ); idx++;
twisti@1162 625 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
duke@435 626 a->ldsh( G5, I3, G4 ); idx++;
twisti@1162 627 a->lduw( G5, al1.low10(), G4 ); idx++;
twisti@1162 628 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
duke@435 629 a->lduw( G5, I3, G4 ); idx++;
twisti@1162 630 a->ldub( G5, al1.low10(), G4 ); idx++;
twisti@1162 631 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
duke@435 632 a->ldub( G5, I3, G4 ); idx++;
twisti@1162 633 a->lduh( G5, al1.low10(), G4 ); idx++;
twisti@1162 634 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
duke@435 635 a->lduh( G5, I3, G4 ); idx++;
twisti@1162 636 a->ldx( G5, al1.low10(), G4 ); idx++;
twisti@1162 637 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
duke@435 638 a->ldx( G5, I3, G4 ); idx++;
twisti@1162 639 a->ldd( G5, al1.low10(), G4 ); idx++;
twisti@1162 640 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
duke@435 641 a->ldd( G5, I3, G4 ); idx++;
duke@435 642 a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
twisti@1162 643 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
duke@435 644 a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;
duke@435 645
twisti@1162 646 a->stw( G5, G4, al1.low10() ); idx++;
twisti@1162 647 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
duke@435 648 a->stw( G5, G4, I3 ); idx++;
twisti@1162 649 a->stb( G5, G4, al1.low10() ); idx++;
twisti@1162 650 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
duke@435 651 a->stb( G5, G4, I3 ); idx++;
twisti@1162 652 a->sth( G5, G4, al1.low10() ); idx++;
twisti@1162 653 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
duke@435 654 a->sth( G5, G4, I3 ); idx++;
twisti@1162 655 a->stx( G5, G4, al1.low10() ); idx++;
twisti@1162 656 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
duke@435 657 a->stx( G5, G4, I3 ); idx++;
twisti@1162 658 a->std( G5, G4, al1.low10() ); idx++;
twisti@1162 659 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
duke@435 660 a->std( G5, G4, I3 ); idx++;
duke@435 661 a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
twisti@1162 662 a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
duke@435 663 a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;
duke@435 664
twisti@2103 665 nm = nativeMovRegMem_at( cb.insts_begin() );
duke@435 666 nm->print();
duke@435 667 nm->set_offset( low10(0) );
duke@435 668 nm->print();
duke@435 669 nm->add_offset_in_bytes( low10(0xbb) * wordSize );
duke@435 670 nm->print();
duke@435 671
duke@435 672 while (--idx) {
duke@435 673 nm = nativeMovRegMem_at( nm->next_instruction_address() );
duke@435 674 nm->print();
duke@435 675 for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
duke@435 676 nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
duke@435 677 assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
duke@435 678 "check unit test");
duke@435 679 nm->print();
duke@435 680 }
duke@435 681 nm->add_offset_in_bytes( low10(0xbb) * wordSize );
duke@435 682 nm->print();
duke@435 683 }
duke@435 684
duke@435 685 VM_Version::revert();
duke@435 686 #endif // ASSERT
duke@435 687 }
duke@435 688
duke@435 689 // End code for unit testing implementation of NativeMovRegMem class
duke@435 690
duke@435 691 //--------------------------------------------------------------------------------
duke@435 692
duke@435 693
duke@435 694 void NativeMovRegMemPatching::copy_instruction_to(address new_instruction_address) {
duke@435 695 Untested("copy_instruction_to");
duke@435 696 int instruction_size = next_instruction_address() - instruction_address();
duke@435 697 for (int i = 0; i < instruction_size; i += wordSize) {
duke@435 698 *(long*)(new_instruction_address + i) = *(long*)(address(this) + i);
duke@435 699 }
duke@435 700 }
duke@435 701
duke@435 702
duke@435 703 void NativeMovRegMemPatching::verify() {
duke@435 704 NativeInstruction::verify();
duke@435 705 // make sure code pattern is actually a "ld" or "st" of some sort.
duke@435 706 int i0 = long_at(0);
duke@435 707 int op3 = inv_op3(i0);
duke@435 708
duke@435 709 assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
duke@435 710
duke@435 711 if (!(is_op(i0, Assembler::ldst_op) &&
duke@435 712 inv_immed(i0) &&
duke@435 713 0 != (op3 < op3_ldst_int_limit
duke@435 714 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st)
duke@435 715 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
duke@435 716 int i1 = long_at(ldst_offset);
duke@435 717 Register rd = inv_rd(i0);
duke@435 718
duke@435 719 op3 = inv_op3(i1);
duke@435 720 if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
duke@435 721 0 != (op3 < op3_ldst_int_limit
duke@435 722 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st)
duke@435 723 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
duke@435 724 fatal("not a ld* or st* op");
duke@435 725 }
duke@435 726 }
duke@435 727 }
duke@435 728
duke@435 729
duke@435 730 void NativeMovRegMemPatching::print() {
duke@435 731 if (is_immediate()) {
duke@435 732 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
duke@435 733 } else {
duke@435 734 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
duke@435 735 }
duke@435 736 }
duke@435 737
duke@435 738
duke@435 739 // Code for unit testing implementation of NativeMovRegMemPatching class
duke@435 740 void NativeMovRegMemPatching::test() {
duke@435 741 #ifdef ASSERT
duke@435 742 ResourceMark rm;
duke@435 743 CodeBuffer cb("test", 1000, 1000);
duke@435 744 MacroAssembler* a = new MacroAssembler(&cb);
duke@435 745 NativeMovRegMemPatching* nm;
duke@435 746 uint idx = 0;
duke@435 747 uint idx1;
duke@435 748 int offsets[] = {
duke@435 749 0x0,
duke@435 750 0xffffffff,
duke@435 751 0x7fffffff,
duke@435 752 0x80000000,
duke@435 753 4096,
duke@435 754 4097,
duke@435 755 0x20,
duke@435 756 0x4000,
duke@435 757 };
duke@435 758
duke@435 759 VM_Version::allow_all();
duke@435 760
twisti@1162 761 AddressLiteral al(0xffffffff, relocInfo::external_word_type);
twisti@1162 762 a->ldsw( G5, al.low10(), G4); idx++;
twisti@1162 763 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
duke@435 764 a->ldsw( G5, I3, G4 ); idx++;
twisti@1162 765 a->ldsb( G5, al.low10(), G4); idx++;
twisti@1162 766 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
duke@435 767 a->ldsb( G5, I3, G4 ); idx++;
twisti@1162 768 a->ldsh( G5, al.low10(), G4); idx++;
twisti@1162 769 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
duke@435 770 a->ldsh( G5, I3, G4 ); idx++;
twisti@1162 771 a->lduw( G5, al.low10(), G4); idx++;
twisti@1162 772 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
duke@435 773 a->lduw( G5, I3, G4 ); idx++;
twisti@1162 774 a->ldub( G5, al.low10(), G4); idx++;
twisti@1162 775 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
duke@435 776 a->ldub( G5, I3, G4 ); idx++;
twisti@1162 777 a->lduh( G5, al.low10(), G4); idx++;
twisti@1162 778 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
duke@435 779 a->lduh( G5, I3, G4 ); idx++;
twisti@1162 780 a->ldx( G5, al.low10(), G4); idx++;
twisti@1162 781 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
twisti@1162 782 a->ldx( G5, I3, G4 ); idx++;
twisti@1162 783 a->ldd( G5, al.low10(), G4); idx++;
twisti@1162 784 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
twisti@1162 785 a->ldd( G5, I3, G4 ); idx++;
twisti@1162 786 a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
twisti@1162 787 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
twisti@1162 788 a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;
duke@435 789
twisti@1162 790 a->stw( G5, G4, al.low10()); idx++;
twisti@1162 791 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
duke@435 792 a->stw( G5, G4, I3 ); idx++;
twisti@1162 793 a->stb( G5, G4, al.low10()); idx++;
twisti@1162 794 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
duke@435 795 a->stb( G5, G4, I3 ); idx++;
twisti@1162 796 a->sth( G5, G4, al.low10()); idx++;
twisti@1162 797 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
duke@435 798 a->sth( G5, G4, I3 ); idx++;
twisti@1162 799 a->stx( G5, G4, al.low10()); idx++;
twisti@1162 800 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
duke@435 801 a->stx( G5, G4, I3 ); idx++;
twisti@1162 802 a->std( G5, G4, al.low10()); idx++;
twisti@1162 803 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
duke@435 804 a->std( G5, G4, I3 ); idx++;
duke@435 805 a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
twisti@1162 806 a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
duke@435 807 a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;
duke@435 808
twisti@2103 809 nm = nativeMovRegMemPatching_at( cb.insts_begin() );
duke@435 810 nm->print();
duke@435 811 nm->set_offset( low10(0) );
duke@435 812 nm->print();
duke@435 813 nm->add_offset_in_bytes( low10(0xbb) * wordSize );
duke@435 814 nm->print();
duke@435 815
duke@435 816 while (--idx) {
duke@435 817 nm = nativeMovRegMemPatching_at( nm->next_instruction_address() );
duke@435 818 nm->print();
duke@435 819 for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
duke@435 820 nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
duke@435 821 assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
duke@435 822 "check unit test");
duke@435 823 nm->print();
duke@435 824 }
duke@435 825 nm->add_offset_in_bytes( low10(0xbb) * wordSize );
duke@435 826 nm->print();
duke@435 827 }
duke@435 828
duke@435 829 VM_Version::revert();
duke@435 830 #endif // ASSERT
duke@435 831 }
duke@435 832 // End code for unit testing implementation of NativeMovRegMemPatching class
duke@435 833
duke@435 834
duke@435 835 //--------------------------------------------------------------------------------
duke@435 836
duke@435 837
duke@435 838 void NativeJump::verify() {
duke@435 839 NativeInstruction::verify();
duke@435 840 int i0 = long_at(sethi_offset);
duke@435 841 int i1 = long_at(jmpl_offset);
duke@435 842 assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
duke@435 843 // verify the pattern "sethi %hi22(imm), treg ; jmpl treg, %lo10(imm), lreg"
duke@435 844 Register rd = inv_rd(i0);
duke@435 845 #ifndef _LP64
duke@435 846 if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
duke@435 847 (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
duke@435 848 (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
duke@435 849 inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
duke@435 850 rd == inv_rs1(i1))) {
duke@435 851 fatal("not a jump_to instruction");
duke@435 852 }
duke@435 853 #else
duke@435 854 // In LP64, the jump instruction location varies for non-relocatable
duke@435 855 // jumps; for example, it could be sethi, xor, jmp instead of the
duke@435 856 // 7 instructions for sethi. So let's check sethi only.
duke@435 857 if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
duke@435 858 fatal("not a jump_to instruction");
duke@435 859 }
duke@435 860 #endif
duke@435 861 }
duke@435 862
duke@435 863
duke@435 864 void NativeJump::print() {
duke@435 865 tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, instruction_address(), jump_destination());
duke@435 866 }
duke@435 867
duke@435 868
duke@435 869 // Code for unit testing implementation of NativeJump class
duke@435 870 void NativeJump::test() {
duke@435 871 #ifdef ASSERT
duke@435 872 ResourceMark rm;
duke@435 873 CodeBuffer cb("test", 100, 100);
duke@435 874 MacroAssembler* a = new MacroAssembler(&cb);
duke@435 875 NativeJump* nj;
duke@435 876 uint idx;
duke@435 877 int offsets[] = {
duke@435 878 0x0,
duke@435 879 0xffffffff,
duke@435 880 0x7fffffff,
duke@435 881 0x80000000,
duke@435 882 4096,
duke@435 883 4097,
duke@435 884 0x20,
duke@435 885 0x4000,
duke@435 886 };
duke@435 887
duke@435 888 VM_Version::allow_all();
duke@435 889
twisti@1162 890 AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type);
twisti@1162 891 a->sethi(al, I3);
twisti@1162 892 a->jmpl(I3, al.low10(), G0, RelocationHolder::none);
duke@435 893 a->delayed()->nop();
twisti@1162 894 a->sethi(al, I3);
twisti@1162 895 a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
duke@435 896 a->delayed()->nop();
duke@435 897
twisti@2103 898 nj = nativeJump_at( cb.insts_begin() );
duke@435 899 nj->print();
duke@435 900
duke@435 901 nj = nativeJump_at( nj->next_instruction_address() );
duke@435 902 for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
duke@435 903 nj->set_jump_destination( nj->instruction_address() + offsets[idx] );
duke@435 904 assert(nj->jump_destination() == (nj->instruction_address() + offsets[idx]), "check unit test");
duke@435 905 nj->print();
duke@435 906 }
duke@435 907
duke@435 908 VM_Version::revert();
duke@435 909 #endif // ASSERT
duke@435 910 }
duke@435 911 // End code for unit testing implementation of NativeJump class
duke@435 912
duke@435 913
duke@435 914 void NativeJump::insert(address code_pos, address entry) {
duke@435 915 Unimplemented();
duke@435 916 }
duke@435 917
duke@435 918 // MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
duke@435 919 // The problem: jump_to <dest> is a 3-word instruction (including its delay slot).
duke@435 920 // Atomic write can be only with 1 word.
duke@435 921 void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
duke@435 922 // Here's one way to do it: Pre-allocate a three-word jump sequence somewhere
duke@435 923 // in the header of the nmethod, within a short branch's span of the patch point.
duke@435 924 // Set up the jump sequence using NativeJump::insert, and then use an annulled
duke@435 925 // unconditional branch at the target site (an atomic 1-word update).
duke@435 926 // Limitations: You can only patch nmethods, with any given nmethod patched at
duke@435 927 // most once, and the patch must be in the nmethod's header.
duke@435 928 // It's messy, but you can ask the CodeCache for the nmethod containing the
duke@435 929 // target address.
duke@435 930
duke@435 931 // %%%%% For now, do something MT-stupid:
duke@435 932 ResourceMark rm;
duke@435 933 int code_size = 1 * BytesPerInstWord;
duke@435 934 CodeBuffer cb(verified_entry, code_size + 1);
duke@435 935 MacroAssembler* a = new MacroAssembler(&cb);
morris@5283 936 a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
duke@435 937 ICache::invalidate_range(verified_entry, code_size);
duke@435 938 }
duke@435 939
duke@435 940
duke@435 941 void NativeIllegalInstruction::insert(address code_pos) {
duke@435 942 NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos);
duke@435 943 nii->set_long_at(0, illegal_instruction());
duke@435 944 }
duke@435 945
duke@435 946 static int illegal_instruction_bits = 0;
duke@435 947
duke@435 948 int NativeInstruction::illegal_instruction() {
duke@435 949 if (illegal_instruction_bits == 0) {
duke@435 950 ResourceMark rm;
duke@435 951 char buf[40];
duke@435 952 CodeBuffer cbuf((address)&buf[0], 20);
duke@435 953 MacroAssembler* a = new MacroAssembler(&cbuf);
duke@435 954 address ia = a->pc();
duke@435 955 a->trap(ST_RESERVED_FOR_USER_0 + 1);
duke@435 956 int bits = *(int*)ia;
duke@435 957 assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
duke@435 958 illegal_instruction_bits = bits;
duke@435 959 assert(illegal_instruction_bits != 0, "oops");
duke@435 960 }
duke@435 961 return illegal_instruction_bits;
duke@435 962 }
duke@435 963
duke@435 964 static int ic_miss_trap_bits = 0;
duke@435 965
duke@435 966 bool NativeInstruction::is_ic_miss_trap() {
duke@435 967 if (ic_miss_trap_bits == 0) {
duke@435 968 ResourceMark rm;
duke@435 969 char buf[40];
duke@435 970 CodeBuffer cbuf((address)&buf[0], 20);
duke@435 971 MacroAssembler* a = new MacroAssembler(&cbuf);
duke@435 972 address ia = a->pc();
duke@435 973 a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
duke@435 974 int bits = *(int*)ia;
duke@435 975 assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
duke@435 976 ic_miss_trap_bits = bits;
duke@435 977 assert(ic_miss_trap_bits != 0, "oops");
duke@435 978 }
duke@435 979 return long_at(0) == ic_miss_trap_bits;
duke@435 980 }
duke@435 981
duke@435 982
duke@435 983 bool NativeInstruction::is_illegal() {
duke@435 984 if (illegal_instruction_bits == 0) {
duke@435 985 return false;
duke@435 986 }
duke@435 987 return long_at(0) == illegal_instruction_bits;
duke@435 988 }
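
A brief sketch tying the two helpers together, assuming `code_pos` points at a patchable instruction word (hypothetical usage, not code from this file):

    // Hypothetical usage sketch: make the word at 'code_pos' trap on execution,
    // then recognize it afterwards via the cached illegal-instruction bits.
    NativeIllegalInstruction::insert(code_pos);            // writes illegal_instruction()
    assert(nativeInstruction_at(code_pos)->is_illegal(), "should now be the illegal word");
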
duke@435 989
duke@435 990
duke@435 991 void NativeGeneralJump::verify() {
duke@435 992 assert(((NativeInstruction *)this)->is_jump() ||
duke@435 993 ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
duke@435 994 }
duke@435 995
duke@435 996
duke@435 997 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
duke@435 998 Assembler::Condition condition = Assembler::always;
duke@435 999 int x = Assembler::op2(Assembler::br_op2) | Assembler::annul(false) |
duke@435 1000 Assembler::cond(condition) | Assembler::wdisp((intptr_t)entry, (intptr_t)code_pos, 22);
duke@435 1001 NativeGeneralJump* ni = (NativeGeneralJump*) nativeInstruction_at(code_pos);
duke@435 1002 ni->set_long_at(0, x);
duke@435 1003 }
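
A minimal usage sketch for the routine above, assuming `stub_pos` and `target` are supplied by the caller (hypothetical, not code from this file):

    // Hypothetical usage sketch: divert execution at 'stub_pos' to 'target'
    // with a single-word (and therefore atomic) branch update.
    NativeGeneralJump::insert_unconditional(stub_pos, target);
    nativeGeneralJump_at(stub_pos)->verify();   // checks that a jump pattern is now present
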
duke@435 1004
duke@435 1005
duke@435 1006 // MT-safe patching of a jmp instruction (and following word).
duke@435 1007 // First patches the second word, and then atomically replaces
duke@435 1008 // the first word with the first new instruction word.
duke@435 1009 // Other processors might briefly see the old first word
duke@435 1010 // followed by the new second word. This is OK if the old
duke@435 1011 // second word is harmless, and the new second word may be
duke@435 1012 // harmlessly executed in the delay slot of the call.
duke@435 1013 void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
duke@435 1014 assert(Patching_lock->is_locked() ||
duke@435 1015 SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
duke@435 1016 assert (instr_addr != NULL, "illegal address for code patching");
duke@435 1017 NativeGeneralJump* h_jump = nativeGeneralJump_at (instr_addr); // checking that it is a jump
duke@435 1018 assert(NativeGeneralJump::instruction_size == 8, "wrong instruction size; must be 8");
duke@435 1019 int i0 = ((int*)code_buffer)[0];
duke@435 1020 int i1 = ((int*)code_buffer)[1];
duke@435 1021 int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
duke@435 1022 assert(inv_op(*contention_addr) == Assembler::arith_op ||
morris@5283 1023 *contention_addr == nop_instruction(),
duke@435 1024 "must not interfere with original call");
duke@435 1025 // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
duke@435 1026 h_jump->set_long_at(1*BytesPerInstWord, i1);
duke@435 1027 h_jump->set_long_at(0*BytesPerInstWord, i0);
duke@435 1028 // NOTE: It is possible that another thread T will execute
duke@435 1029 // only the second patched word.
duke@435 1030 // In other words, since the original instruction is this
duke@435 1031 // jmp patching_stub; nop (NativeGeneralJump)
duke@435 1032 // and the new sequence from the buffer is this:
duke@435 1033 // sethi %hi(K), %r; add %r, %lo(K), %r (NativeMovConstReg)
duke@435 1034 // what T will execute is this:
duke@435 1035 // jmp patching_stub; add %r, %lo(K), %r
duke@435 1036 // thereby putting garbage into %r before calling the patching stub.
duke@435 1037 // This is OK, because the patching stub ignores the value of %r.
duke@435 1038
duke@435 1039 // Make sure the first-patched instruction, which may co-exist
duke@435 1040 // briefly with the call, will do something harmless.
duke@435 1041 assert(inv_op(*contention_addr) == Assembler::arith_op ||
morris@5283 1042 *contention_addr == nop_instruction(),
duke@435 1043 "must not interfere with original call");
duke@435 1044 }
