src/cpu/sparc/vm/nativeInst_sparc.cpp

author       bobv
date         Tue, 03 Aug 2010 08:13:38 -0400
changeset    2036   126ea7725993
parent       1934   e9ff18c4ace7
child        2103   3e8fbc61cee8
permissions  -rw-r--r--
6953477: Increase portability and flexibility of building Hotspot
Summary: A collection of portability improvements including shared code support for PPC, ARM platforms, software floating point, cross compilation support and improvements in error crash detail.
Reviewed-by: phh, never, coleenp, dholmes

/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_nativeInst_sparc.cpp.incl"
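
// The dtrace pid provider patches the probed instruction (originally a
// nop) with a trap, so anything that is no longer a nop here is
// conservatively treated as a dtrace trap.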
bool NativeInstruction::is_dtrace_trap() {
  return !is_nop();
}
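
// Rewrite the patchable sethi sequence at instaddr (see
// MacroAssembler::patchable_sethi) so that it materializes the value x
// in its original destination register.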
void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
  ResourceMark rm;
  CodeBuffer buf(instaddr, 10 * BytesPerInstWord );
  MacroAssembler* _masm = new MacroAssembler(&buf);
  Register destreg;

  destreg = inv_rd(*(unsigned int *)instaddr);
  // Generate the new sequence
  _masm->patchable_sethi(x, destreg);
  ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
}

void NativeInstruction::verify() {
  // make sure code pattern is actually an instruction address
  address addr = addr_at(0);
  if (addr == 0 || ((intptr_t)addr & 3) != 0) {
    fatal("not an instruction address");
  }
}

void NativeInstruction::print() {
  tty->print_cr(INTPTR_FORMAT ": 0x%x", addr_at(0), long_at(0));
}

void NativeInstruction::set_long_at(int offset, int i) {
  address addr = addr_at(offset);
  *(int*)addr = i;
  ICache::invalidate_word(addr);
}

void NativeInstruction::set_jlong_at(int offset, jlong i) {
  address addr = addr_at(offset);
  *(jlong*)addr = i;
  // Don't need to invalidate 2 words here, because
  // the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
}

void NativeInstruction::set_addr_at(int offset, address x) {
  address addr = addr_at(offset);
  assert( ((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment");
  *(uintptr_t*)addr = (uintptr_t)x;
  // Don't need to invalidate 2 words here in the 64-bit case,
  // because the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
  // The Intel code has this assertion for NativeCall::set_destination,
  // NativeMovConstReg::set_data, NativeMovRegMem::set_offset,
  // NativeJump::set_jump_destination, and NativePushImm32::set_data
  //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction")
}
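
// Matches the idiom used for a zero test: a subcc (sub_op3 with the cc
// bit set) whose destination is %g0 and which has %g0 as one operand --
// in effect a compare of a register with zero.  On a match, reg is set
// to the register being tested.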
bool NativeInstruction::is_zero_test(Register &reg) {
  int x = long_at(0);
  Assembler::op3s temp = (Assembler::op3s) (Assembler::sub_op3 | Assembler::cc_bit_op3);
  if (is_op3(x, temp, Assembler::arith_op) &&
      inv_immed(x) && inv_rd(x) == G0) {
      if (inv_rs1(x) == G0) {
        reg = inv_rs2(x);
        return true;
      } else if (inv_rs2(x) == G0) {
        reg = inv_rs1(x);
        return true;
      }
  }
  return false;
}

bool NativeInstruction::is_load_store_with_small_offset(Register reg) {
  int x = long_at(0);
  if (is_op(x, Assembler::ldst_op) &&
      inv_rs1(x) == reg && inv_immed(x)) {
    return true;
  }
  return false;
}

void NativeCall::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a call instruction
  if (!is_op(long_at(0), Assembler::call_op)) {
    fatal("not a call");
  }
}

void NativeCall::print() {
  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
}

// MT-safe patching of a call instruction (and following word).
// First patches the second word, and then atomically replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word.  This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the call.
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
   assert (instr_addr != NULL, "illegal address for code patching");
   NativeCall* n_call =  nativeCall_at (instr_addr); // checking that it is a call
   assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8");
   int i0 = ((int*)code_buffer)[0];
   int i1 = ((int*)code_buffer)[1];
   int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   n_call->set_long_at(1*BytesPerInstWord, i1);
   n_call->set_long_at(0*BytesPerInstWord, i0);
   // NOTE:  It is possible that another thread T will execute
   // only the second patched word.
   // In other words, since the original instruction is this
   //    call patching_stub; nop                   (NativeCall)
   // and the new sequence from the buffer is this:
   //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
   // what T will execute is this:
   //    call patching_stub; add %r, %lo(K), %r
   // thereby putting garbage into %r before calling the patching stub.
   // This is OK, because the patching stub ignores the value of %r.

   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
          "must not interfere with original call");
}

// Similar to replace_mt_safe, but just changes the destination.  The
// important thing is that free-running threads are able to execute this
// call instruction at all times.  Thus, the displacement field must be
// instruction-word-aligned.  This is always true on SPARC.
//
// Used in the runtime linkage of calls; see class CompiledIC.
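//
// For reference: a SPARC call is a single word -- opcode 01 in bits 31:30
// and a signed 30-bit word displacement in bits 29:0 -- so for
// dest = pc + 0x40 the encoding is 0x40000000 | ((dest - pc) >> 2) =
// 0x40000010, and rewriting the destination is one aligned 32-bit store.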
void NativeCall::set_destination_mt_safe(address dest) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // set_destination uses set_long_at which does the ICache::invalidate
  set_destination(dest);
}

// Code for unit testing implementation of NativeCall class
void NativeCall::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeCall  *nc;
  uint idx;
  int offsets[] = {
    0x0,
    0xfffffff0,
    0x7ffffff0,
    0x80000000,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  a->call( a->pc(), relocInfo::none );
  a->delayed()->nop();
  nc = nativeCall_at( cb.code_begin() );
  nc->print();

  nc = nativeCall_overwriting_at( nc->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nc->set_destination( cb.code_begin() + offsets[idx] );
    assert(nc->destination() == (cb.code_begin() + offsets[idx]), "check unit test");
    nc->print();
  }

  nc = nativeCall_before( cb.code_begin() + 8 );
  nc->print();

  VM_Version::revert();
#endif
}
// End code for unit testing implementation of NativeCall class

//-------------------------------------------------------------------

#ifdef _LP64

void NativeFarCall::set_destination(address dest) {
  // Address materialized in the instruction stream, so nothing to do.
  return;
#if 0 // What we'd do if we really did want to change the destination
  if (destination() == dest) {
    return;
  }
  ResourceMark rm;
  CodeBuffer buf(addr_at(0), instruction_size + 1);
  MacroAssembler* _masm = new MacroAssembler(&buf);
  // Generate the new sequence
  AddressLiteral(dest);
  _masm->jumpl_to(dest, O7, O7);
  ICache::invalidate_range(addr_at(0), instruction_size );
#endif
}

void NativeFarCall::verify() {
  // make sure code pattern is actually a jumpl_to instruction
  assert((int)instruction_size == (int)NativeJump::instruction_size, "same as jump_to");
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  nativeJump_at(addr_at(0))->verify();
}

bool NativeFarCall::is_call_at(address instr) {
  return nativeInstruction_at(instr)->is_sethi();
}

void NativeFarCall::print() {
  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
}

bool NativeFarCall::destination_is_compiled_verified_entry_point() {
  nmethod* callee = CodeCache::find_nmethod(destination());
  if (callee == NULL) {
    return false;
  } else {
    return destination() == callee->verified_entry_point();
  }
}

// MT-safe patching of a far call.
void NativeFarCall::replace_mt_safe(address instr_addr, address code_buffer) {
  Unimplemented();
}

// Code for unit testing implementation of NativeFarCall class
void NativeFarCall::test() {
  Unimplemented();
}
// End code for unit testing implementation of NativeFarCall class

#endif // _LP64

//-------------------------------------------------------------------
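
// The synthetic "set" of a 32-bit constant expands to a sethi of the
// high 22 bits followed by an add of the low 10 bits.  For example,
// set(0x12345678, reg) becomes:
//   sethi %hi(0x12345678), reg       ! reg = 0x12345400
//   add   reg, %lo(0x12345678), reg  ! reg = 0x12345678  (%lo = 0x278)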
void NativeMovConstReg::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "set_oop" synthetic instruction
  // see MacroAssembler::set_oop()
  int i0 = long_at(sethi_offset);
  int i1 = long_at(add_offset);

  // verify the pattern "sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg"
  Register rd = inv_rd(i0);
#ifndef _LP64
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
        is_op3(i1, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
        rd == inv_rs1(i1) && rd == inv_rd(i1))) {
    fatal("not a set_oop");
  }
#else
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0)) {
    fatal("not a set_oop");
  }
#endif
}

void NativeMovConstReg::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
}

#ifdef _LP64
intptr_t NativeMovConstReg::data() const {
  return data64(addr_at(sethi_offset), long_at(add_offset));
}
#else
intptr_t NativeMovConstReg::data() const {
  return data32(long_at(sethi_offset), long_at(add_offset));
}
#endif

void NativeMovConstReg::set_data(intptr_t x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), x));
#endif
  set_long_at(add_offset,   set_data32_simm13( long_at(add_offset),   x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = (oop)x;
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
    }
  }
}

// Code for unit testing implementation of NativeMovConstReg class
void NativeMovConstReg::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstReg* nm;
  uint idx;
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstReg_at( cb.code_begin() );
  nm->print();

  nm = nativeMovConstReg_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif
}
// End code for unit testing implementation of NativeMovConstReg class

//-------------------------------------------------------------------
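
// NativeMovConstRegPatching differs from NativeMovConstReg only in the
// reserved nop between the sethi and the add (nop_offset below), which
// appears to exist so that patching code can count on a fixed
// three-word layout for the sequence.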
void NativeMovConstRegPatching::verify() {
  NativeInstruction::verify();
  // Make sure code pattern is sethi/nop/add.
  int i0 = long_at(sethi_offset);
  int i1 = long_at(nop_offset);
  int i2 = long_at(add_offset);
  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  // Verify the pattern "sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg"
  // The casual reader should note that on SPARC a nop is a special case of sethi
  // in which the destination register is %g0.
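  // (The canonical nop, 'sethi 0, %g0', encodes as 0x01000000.)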
  Register rd0 = inv_rd(i0);
  Register rd1 = inv_rd(i1);
  if (!(is_op2(i0, Assembler::sethi_op2) && rd0 != G0 &&
        is_op2(i1, Assembler::sethi_op2) && rd1 == G0 &&        // nop is a special case of sethi
        is_op3(i2, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i2) && (unsigned)get_simm13(i2) < (1 << 10) &&
        rd0 == inv_rs1(i2) && rd0 == inv_rd(i2))) {
    fatal("not a set_oop");
  }
}

void NativeMovConstRegPatching::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
}

int NativeMovConstRegPatching::data() const {
#ifdef _LP64
  return data64(addr_at(sethi_offset), long_at(add_offset));
#else
  return data32(long_at(sethi_offset), long_at(add_offset));
#endif
}

void NativeMovConstRegPatching::set_data(int x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
#endif
  set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = (oop)x;
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
    }
  }
}

// Code for unit testing implementation of NativeMovConstRegPatching class
void NativeMovConstRegPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstRegPatching* nm;
  uint idx;
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->nop();
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->nop();
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstRegPatching_at( cb.code_begin() );
  nm->print();

  nm = nativeMovConstRegPatching_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeMovConstRegPatching class

//-------------------------------------------------------------------
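
// NativeMovRegMem matches a load or store in one of two shapes: an
// immediate form, 'ld [reg + simm13], rd' (a single instruction), or a
// register form, 'sethi %hi(K), treg; add treg, %lo(K), treg;
// ld [reg + treg], rd', used when the offset does not fit in 13 bits.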
void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
  Untested("copy_instruction_to");
  int instruction_size = next_instruction_address() - instruction_address();
  for (int i = 0; i < instruction_size; i += BytesPerInstWord) {
    *(int*)(new_instruction_address + i) = *(int*)(address(this) + i);
  }
}

void NativeMovRegMem::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)add_offset == NativeMovConstReg::add_offset, "sethi size ok");

  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
         ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
         : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))))
  {
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    if (!(is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
          0 != (op3 < op3_ldst_int_limit
               ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
               : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
      fatal("not a ld* or st* op");
    }
  }
}

void NativeMovRegMem::print() {
  if (is_immediate()) {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
  } else {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
  }
}

// Code for unit testing implementation of NativeMovRegMem class
void NativeMovRegMem::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMem* nm;
  uint idx = 0;
  uint idx1;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xffffffff, relocInfo::external_word_type);
  AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type);
  a->ldsw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  a->stw( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMem_at( cb.code_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  while (--idx) {
    nm = nativeMovRegMem_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}

// End code for unit testing implementation of NativeMovRegMem class

//--------------------------------------------------------------------------------

void NativeMovRegMemPatching::copy_instruction_to(address new_instruction_address) {
  Untested("copy_instruction_to");
  int instruction_size = next_instruction_address() - instruction_address();
  for (int i = 0; i < instruction_size; i += wordSize) {
    *(long*)(new_instruction_address + i) = *(long*)(address(this) + i);
  }
}

void NativeMovRegMemPatching::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
         ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
         : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    if (!(is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
          0 != (op3 < op3_ldst_int_limit
               ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
               : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
      fatal("not a ld* or st* op");
    }
  }
}

void NativeMovRegMemPatching::print() {
  if (is_immediate()) {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
  } else {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
  }
}

// Code for unit testing implementation of NativeMovRegMemPatching class
void NativeMovRegMemPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMemPatching* nm;
  uint idx = 0;
  uint idx1;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al(0xffffffff, relocInfo::external_word_type);
  a->ldsw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx(  G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldx(  G5, I3, G4 ); idx++;
  a->ldd(  G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldd(  G5, I3, G4 ); idx++;
  a->ldf(  FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldf(  FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  a->stw( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMemPatching_at( cb.code_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  while (--idx) {
    nm = nativeMovRegMemPatching_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeMovRegMemPatching class

//--------------------------------------------------------------------------------

void NativeJump::verify() {
  NativeInstruction::verify();
  int i0 = long_at(sethi_offset);
  int i1 = long_at(jmpl_offset);
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  // verify the pattern "sethi %hi22(imm), treg ;  jmpl treg, %lo10(imm), lreg"
  Register rd = inv_rd(i0);
#ifndef _LP64
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
        (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
        (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
        rd == inv_rs1(i1))) {
    fatal("not a jump_to instruction");
  }
#else
  // In LP64, the jump instruction location varies for non-relocatable
  // jumps; for example, it could be sethi, xor, jmp instead of the
  // 7 instructions for sethi.  So let's check sethi only.
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0)) {
    fatal("not a jump_to instruction");
  }
#endif
}

void NativeJump::print() {
  tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, instruction_address(), jump_destination());
}

// Code for unit testing implementation of NativeJump class
void NativeJump::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeJump* nj;
  uint idx;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type);
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), G0, RelocationHolder::none);
  a->delayed()->nop();
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
  a->delayed()->nop();

  nj = nativeJump_at( cb.code_begin() );
  nj->print();

  nj = nativeJump_at( nj->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nj->set_jump_destination( nj->instruction_address() + offsets[idx] );
    assert(nj->jump_destination() == (nj->instruction_address() + offsets[idx]), "check unit test");
    nj->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeJump class

void NativeJump::insert(address code_pos, address entry) {
  Unimplemented();
}

// MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
// The problem: jump_to <dest> is a 3-word instruction (including its delay slot).
// Atomic write can be only with 1 word.
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // Here's one way to do it:  Pre-allocate a three-word jump sequence somewhere
  // in the header of the nmethod, within a short branch's span of the patch point.
  // Set up the jump sequence using NativeJump::insert, and then use an annulled
  // unconditional branch at the target site (an atomic 1-word update).
  // Limitations:  You can only patch nmethods, with any given nmethod patched at
  // most once, and the patch must be in the nmethod's header.
  // It's messy, but you can ask the CodeCache for the nmethod containing the
  // target address.

  // %%%%% For now, do something MT-stupid:
  ResourceMark rm;
  int code_size = 1 * BytesPerInstWord;
  CodeBuffer cb(verified_entry, code_size + 1);
  MacroAssembler* a = new MacroAssembler(&cb);
  if (VM_Version::v9_instructions_work()) {
    a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
  } else {
    a->lduw(G0, 0, O7); // "ld" must agree with code in the signal handler
  }
  ICache::invalidate_range(verified_entry, code_size);
}
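
// Note: the patched entry point now performs a one-word load from address
// zero, which raises SIGSEGV; the platform signal handler recognizes this
// exact instruction (hence "must agree" above) and redirects the thread,
// presumably to the handle-wrong-method stub that re-resolves the call.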

void NativeIllegalInstruction::insert(address code_pos) {
  NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos);
  nii->set_long_at(0, illegal_instruction());
}

static int illegal_instruction_bits = 0;

int NativeInstruction::illegal_instruction() {
  if (illegal_instruction_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(ST_RESERVED_FOR_USER_0 + 1);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    illegal_instruction_bits = bits;
    assert(illegal_instruction_bits != 0, "oops");
  }
  return illegal_instruction_bits;
}
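
// The IC-miss check below uses the same lazy scheme: materialize the
// expected trap instruction once -- a conditional trap on the pointer
// condition codes, 'tne ST_RESERVED_FOR_USER_0 + 2' -- cache its bits,
// and compare the word at this instruction against them.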
static int ic_miss_trap_bits = 0;

bool NativeInstruction::is_ic_miss_trap() {
  if (ic_miss_trap_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    ic_miss_trap_bits = bits;
    assert(ic_miss_trap_bits != 0, "oops");
  }
  return long_at(0) == ic_miss_trap_bits;
}

bool NativeInstruction::is_illegal() {
  if (illegal_instruction_bits == 0) {
    return false;
  }
  return long_at(0) == illegal_instruction_bits;
}

void NativeGeneralJump::verify() {
  assert(((NativeInstruction *)this)->is_jump() ||
         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
}

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  Assembler::Condition condition = Assembler::always;
  int x = Assembler::op2(Assembler::br_op2) | Assembler::annul(false) |
    Assembler::cond(condition) | Assembler::wdisp((intptr_t)entry, (intptr_t)code_pos, 22);
  NativeGeneralJump* ni = (NativeGeneralJump*) nativeInstruction_at(code_pos);
  ni->set_long_at(0, x);
}
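
// The word assembled above is a 'ba' (branch always) with a 22-bit word
// displacement from code_pos to entry; being a single instruction, the
// set_long_at store publishes it atomically.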

// MT-safe patching of a jmp instruction (and following word).
// First patches the second word, and then atomically replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word.  This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the call.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
   assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
   assert (instr_addr != NULL, "illegal address for code patching");
   NativeGeneralJump* h_jump =  nativeGeneralJump_at (instr_addr); // checking that it is a call
   assert(NativeGeneralJump::instruction_size == 8, "wrong instruction size; must be 8");
   int i0 = ((int*)code_buffer)[0];
   int i1 = ((int*)code_buffer)[1];
   int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   h_jump->set_long_at(1*BytesPerInstWord, i1);
   h_jump->set_long_at(0*BytesPerInstWord, i0);
   // NOTE:  It is possible that another thread T will execute
   // only the second patched word.
   // In other words, since the original instruction is this
   //    jmp patching_stub; nop                    (NativeGeneralJump)
   // and the new sequence from the buffer is this:
   //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
   // what T will execute is this:
   //    jmp patching_stub; add %r, %lo(K), %r
   // thereby putting garbage into %r before calling the patching stub.
   // This is OK, because the patching stub ignores the value of %r.

   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
          "must not interfere with original call");
}