src/cpu/sparc/vm/nativeInst_sparc.hpp

author       johnc
date         Thu, 07 Apr 2011 09:53:20 -0700
changeset    2781:e1162778c1c8
parent       2657:d673ef06fe96
child        2708:1d1603768966
permissions  -rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
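
A minimal sketch of the barrier described above (illustrative only: marking_is_active() and
satb_enqueue() are hypothetical helper names, not the actual HotSpot entry points used by this
changeset):

    // Conceptual shape of the Reference.referent read barrier: whenever a non-null
    // referent is read while concurrent marking is in progress, log it in the
    // thread-local SATB buffer so concurrent marking treats it as live.
    oop load_referent_with_barrier(oop reference, Thread* thread) {
      oop referent = reference->obj_field(java_lang_ref_Reference::referent_offset);
      if (marking_is_active() && referent != NULL) {
        satb_enqueue(thread, referent);   // hypothetical SATB logging call
      }
      return referent;
    }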

     1 /*
     2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #ifndef CPU_SPARC_VM_NATIVEINST_SPARC_HPP
    26 #define CPU_SPARC_VM_NATIVEINST_SPARC_HPP
    28 #include "asm/assembler.hpp"
    29 #include "memory/allocation.hpp"
    30 #include "runtime/icache.hpp"
    31 #include "runtime/os.hpp"
    32 #include "utilities/top.hpp"
    34 // We have interfaces for the following instructions:
    35 // - NativeInstruction
    36 // - - NativeCall
    37 // - - NativeFarCall
    38 // - - NativeMovConstReg
    39 // - - NativeMovConstRegPatching
    40 // - - NativeMovRegMem
    41 // - - NativeMovRegMemPatching
    42 // - - NativeJump
    43 // - - NativeGeneralJump
    44 // - - NativeIllegalInstruction
    45 // The base class for different kinds of native instruction abstractions.
    46 // Provides the primitive operations to manipulate code relative to this.
    47 class NativeInstruction VALUE_OBJ_CLASS_SPEC {
    48   friend class Relocation;
    50  public:
    51   enum Sparc_specific_constants {
    52     nop_instruction_size        =    4
    53   };
    55   bool is_dtrace_trap();
    56   bool is_nop()                        { return long_at(0) == nop_instruction(); }
    57   bool is_call()                       { return is_op(long_at(0), Assembler::call_op); }
    58   bool is_sethi()                      { return (is_op2(long_at(0), Assembler::sethi_op2)
    59                                           && inv_rd(long_at(0)) != G0); }
    61   bool sets_cc() {
    62     // Conservative: returns true for some instructions that do not set the
    63     // condition code, such as "save".
    64     // Does not return true for the deprecated tagged instructions, such as TADDcc.
    65     int x = long_at(0);
    66     return (is_op(x, Assembler::arith_op) &&
    67             (inv_op3(x) & Assembler::cc_bit_op3) == Assembler::cc_bit_op3);
    68   }
    69   bool is_illegal();
    70   bool is_zombie() {
    71     int x = long_at(0);
    72     return is_op3(x,
    73                   VM_Version::v9_instructions_work() ?
    74                     Assembler::ldsw_op3 : Assembler::lduw_op3,
    75                   Assembler::ldst_op)
    76         && Assembler::inv_rs1(x) == G0
    77         && Assembler::inv_rd(x) == O7;
    78   }
    79   bool is_ic_miss_trap();       // Inline-cache uses a trap to detect a miss
    80   bool is_return() {
    81     // is it the output of MacroAssembler::ret or MacroAssembler::retl?
    82     int x = long_at(0);
    83     const int pc_return_offset = 8; // see frame_sparc.hpp
    84     return is_op3(x, Assembler::jmpl_op3, Assembler::arith_op)
    85         && (inv_rs1(x) == I7 || inv_rs1(x) == O7)
    86         && inv_immed(x) && inv_simm(x, 13) == pc_return_offset
    87         && inv_rd(x) == G0;
    88   }
    89   bool is_int_jump() {
    90     // is it the output of MacroAssembler::b?
    91     int x = long_at(0);
    92     return is_op2(x, Assembler::bp_op2) || is_op2(x, Assembler::br_op2);
    93   }
    94   bool is_float_jump() {
    95     // is it the output of MacroAssembler::fb?
    96     int x = long_at(0);
    97     return is_op2(x, Assembler::fbp_op2) || is_op2(x, Assembler::fb_op2);
    98   }
    99   bool is_jump() {
   100     return is_int_jump() || is_float_jump();
   101   }
   102   bool is_cond_jump() {
   103     int x = long_at(0);
   104     return (is_int_jump() && Assembler::inv_cond(x) != Assembler::always) ||
   105            (is_float_jump() && Assembler::inv_cond(x) != Assembler::f_always);
   106   }
   108   bool is_stack_bang() {
   109     int x = long_at(0);
   110     return is_op3(x, Assembler::stw_op3, Assembler::ldst_op) &&
   111       (inv_rd(x) == G0) && (inv_rs1(x) == SP) && (inv_rs2(x) == G3_scratch);
   112   }
   114   bool is_prefetch() {
   115     int x = long_at(0);
   116     return is_op3(x, Assembler::prefetch_op3, Assembler::ldst_op);
   117   }
   119   bool is_membar() {
   120     int x = long_at(0);
   121     return is_op3(x, Assembler::membar_op3, Assembler::arith_op) &&
   122       (inv_rd(x) == G0) && (inv_rs1(x) == O7);
   123   }
   125   bool is_safepoint_poll() {
   126     int x = long_at(0);
   127 #ifdef _LP64
   128     return is_op3(x, Assembler::ldx_op3,  Assembler::ldst_op) &&
   129 #else
   130     return is_op3(x, Assembler::lduw_op3, Assembler::ldst_op) &&
   131 #endif
   132       (inv_rd(x) == G0) && (inv_immed(x) ? Assembler::inv_simm13(x) == 0 : inv_rs2(x) == G0);
   133   }
   135   bool is_zero_test(Register &reg);
   136   bool is_load_store_with_small_offset(Register reg);
   138  public:
   139 #ifdef ASSERT
   140   static int rdpc_instruction()        { return Assembler::op(Assembler::arith_op ) | Assembler::op3(Assembler::rdreg_op3) | Assembler::u_field(5, 18, 14) | Assembler::rd(O7); }
   141 #else
   142   // Temporary fix: in optimized mode, u_field is a macro for efficiency reasons (see Assembler::u_field) - needs to be fixed
   143   static int rdpc_instruction()        { return Assembler::op(Assembler::arith_op ) | Assembler::op3(Assembler::rdreg_op3) |            u_field(5, 18, 14) | Assembler::rd(O7); }
   144 #endif
   145   static int nop_instruction()         { return Assembler::op(Assembler::branch_op) | Assembler::op2(Assembler::sethi_op2); }
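       // (note: the encoding above is "sethi 0, %g0", the canonical SPARC nop;
       //  is_sethi() deliberately excludes rd == G0 so nops are not misread as sethi)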
   146   static int illegal_instruction();    // the output of __ breakpoint_trap()
   147   static int call_instruction(address destination, address pc) { return Assembler::op(Assembler::call_op) | Assembler::wdisp((intptr_t)destination, (intptr_t)pc, 30); }
   149   static int branch_instruction(Assembler::op2s op2val, Assembler::Condition c, bool a) {
   150     return Assembler::op(Assembler::branch_op) | Assembler::op2(op2val) | Assembler::annul(a) | Assembler::cond(c);
   151   }
   153   static int op3_instruction(Assembler::ops opval, Register rd, Assembler::op3s op3val, Register rs1, int simm13a) {
   154     return Assembler::op(opval) | Assembler::rd(rd) | Assembler::op3(op3val) | Assembler::rs1(rs1) | Assembler::immed(true) | Assembler::simm(simm13a, 13);
   155   }
   157   static int sethi_instruction(Register rd, int imm22a) {
   158     return Assembler::op(Assembler::branch_op) | Assembler::rd(rd) | Assembler::op2(Assembler::sethi_op2) | Assembler::hi22(imm22a);
   159   }
   161  protected:
   162   address  addr_at(int offset) const    { return address(this) + offset; }
   163   int      long_at(int offset) const    { return *(int*)addr_at(offset); }
   164   void set_long_at(int offset, int i);      /* deals with I-cache */
   165   void set_jlong_at(int offset, jlong i);   /* deals with I-cache */
   166   void set_addr_at(int offset, address x);  /* deals with I-cache */
   168   address instruction_address() const       { return addr_at(0); }
   169   address next_instruction_address() const  { return addr_at(BytesPerInstWord); }
   171   static bool is_op( int x, Assembler::ops opval)  {
   172     return Assembler::inv_op(x) == opval;
   173   }
   174   static bool is_op2(int x, Assembler::op2s op2val) {
   175     return Assembler::inv_op(x) == Assembler::branch_op && Assembler::inv_op2(x) == op2val;
   176   }
   177   static bool is_op3(int x, Assembler::op3s op3val, Assembler::ops opval) {
   178     return Assembler::inv_op(x) == opval && Assembler::inv_op3(x) == op3val;
   179   }
   181   // utilities to help subclasses decode:
   182   static Register inv_rd(  int x ) { return Assembler::inv_rd( x); }
   183   static Register inv_rs1( int x ) { return Assembler::inv_rs1(x); }
   184   static Register inv_rs2( int x ) { return Assembler::inv_rs2(x); }
   186   static bool inv_immed( int x ) { return Assembler::inv_immed(x); }
   187   static bool inv_annul( int x ) { return (Assembler::annul(true) & x) != 0; }
   188   static int  inv_cond(  int x ) { return Assembler::inv_cond(x); }
   190   static int inv_op(  int x ) { return Assembler::inv_op( x); }
   191   static int inv_op2( int x ) { return Assembler::inv_op2(x); }
   192   static int inv_op3( int x ) { return Assembler::inv_op3(x); }
   194   static int inv_simm(    int x, int nbits ) { return Assembler::inv_simm(x, nbits); }
   195   static intptr_t inv_wdisp(   int x, int nbits ) { return Assembler::inv_wdisp(  x, 0, nbits); }
   196   static intptr_t inv_wdisp16( int x )            { return Assembler::inv_wdisp16(x, 0); }
   197   static int branch_destination_offset(int x) { return Assembler::branch_destination(x, 0); }
   198   static int patch_branch_destination_offset(int dest_offset, int x) {
   199     return Assembler::patched_branch(dest_offset, x, 0);
   200   }
   201   void set_annul_bit() { set_long_at(0, long_at(0) | Assembler::annul(true)); }
   203   // utility for checking if x is either of 2 small constants
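       // (the bit trick below assumes x, k1 and k2 all lie in 0..31, since they are used as shift counts)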
   204   static bool is_either(int x, int k1, int k2) {
   205     // return x == k1 || x == k2;
   206     return (1 << x) & (1 << k1 | 1 << k2);
   207   }
   209   // utility for checking overflow of signed instruction fields
   210   static bool fits_in_simm(int x, int nbits) {
   211     // cf. Assembler::assert_signed_range()
   212     // return -(1 << nbits-1) <= x  &&  x < ( 1 << nbits-1),
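           // e.g. for nbits == 13 this accepts the simm13 range -4096 <= x && x <= 4095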
   213     return (unsigned)(x + (1 << nbits-1)) < (unsigned)(1 << nbits);
   214   }
   216   // set a signed immediate field
   217   static int set_simm(int insn, int imm, int nbits) {
   218     return (insn &~ Assembler::simm(-1, nbits)) | Assembler::simm(imm, nbits);
   219   }
   221   // set a wdisp field (disp should be the difference of two addresses)
   222   static int set_wdisp(int insn, intptr_t disp, int nbits) {
   223     return (insn &~ Assembler::wdisp((intptr_t)-4, (intptr_t)0, nbits)) | Assembler::wdisp(disp, 0, nbits);
   224   }
   226   static int set_wdisp16(int insn, intptr_t disp) {
   227     return (insn &~ Assembler::wdisp16((intptr_t)-4, 0)) | Assembler::wdisp16(disp, 0);
   228   }
   230   // get a simm13 field from an arithmetic or memory instruction
   231   static int get_simm13(int insn) {
   232     assert(is_either(Assembler::inv_op(insn),
   233                      Assembler::arith_op, Assembler::ldst_op) &&
   234             (insn & Assembler::immed(true)), "must have a simm13 field");
   235     return Assembler::inv_simm(insn, 13);
   236   }
   238   // set the simm13 field of an arithmetic or memory instruction
   239   static bool set_simm13(int insn, int imm) {
   240     get_simm13(insn);           // tickle the assertion check
   241     return set_simm(insn, imm, 13);
   242   }
   244   // combine the fields of a sethi stream (7 instructions) and an add, jmp or ld/st
   245   static intptr_t data64( address pc, int arith_insn ) {
   246     assert(is_op2(*(unsigned int *)pc, Assembler::sethi_op2), "must be sethi");
   247     intptr_t hi = (intptr_t)gethi( (unsigned int *)pc );
   248     intptr_t lo = (intptr_t)get_simm13(arith_insn);
   249     assert((unsigned)lo < (1 << 10), "offset field of set_oop must be 10 bits");
   250     return hi | lo;
   251   }
   253   // Regenerate the instruction sequence that performs the 64 bit
   254   // sethi.  This only does the sethi.  The disp field (bottom 10 bits)
   255   // must be handled separately.
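       // (Callers patch the low 10 bits separately, e.g. with set_data32_simm13 on the
       //  trailing add/jmpl/ld/st instruction; see NativeJump::set_jump_destination below.)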
   256   static void set_data64_sethi(address instaddr, intptr_t x);
   257   static void verify_data64_sethi(address instaddr, intptr_t x);
   259   // combine the fields of a sethi/simm13 pair (simm13 = or, add, jmpl, ld/st)
   260   static int data32(int sethi_insn, int arith_insn) {
   261     assert(is_op2(sethi_insn, Assembler::sethi_op2), "must be sethi");
   262     int hi = Assembler::inv_hi22(sethi_insn);
   263     int lo = get_simm13(arith_insn);
   264     assert((unsigned)lo < (1 << 10), "offset field of set_oop must be 10 bits");
   265     return hi | lo;
   266   }
   268   static int set_data32_sethi(int sethi_insn, int imm) {
   269     // note that Assembler::hi22 clips the low 10 bits for us
   270     assert(is_op2(sethi_insn, Assembler::sethi_op2), "must be sethi");
   271     return (sethi_insn &~ Assembler::hi22(-1)) | Assembler::hi22(imm);
   272   }
   274   static int set_data32_simm13(int arith_insn, int imm) {
   275     get_simm13(arith_insn);             // tickle the assertion check
   276     int imm10 = Assembler::low10(imm);
   277     return (arith_insn &~ Assembler::simm(-1, 13)) | Assembler::simm(imm10, 13);
   278   }
   280   static int low10(int imm) {
   281     return Assembler::low10(imm);
   282   }
   284   // Perform the inverse of the LP64 MacroAssembler::sethi
   285   // routine.  Extracts the 54 bits of address from the instruction
   286   // stream. This routine must agree with the sethi routine in
   287   // assembler_sparc.inline.hpp
   288   static address gethi( unsigned int *pc ) {
   289     int i = 0;
   290     uintptr_t adr;
   291     // We first start out with the real sethi instruction
   292     assert(is_op2(*pc, Assembler::sethi_op2), "in gethi - must be sethi");
   293     adr = (unsigned int)Assembler::inv_hi22( *(pc++) );
   294     i++;
   295     while ( i < 7 ) {
   296        // We're done if we hit a nop
   297        if ( (int)*pc == nop_instruction() ) break;
   298        assert ( Assembler::inv_op(*pc) == Assembler::arith_op, "in gethi - must be arith_op" );
   299        switch  ( Assembler::inv_op3(*pc) ) {
   300          case Assembler::xor_op3:
   301            adr ^= (intptr_t)get_simm13( *pc );
   302            return ( (address)adr );
   303            break;
   304          case Assembler::sll_op3:
   305            adr <<= ( *pc & 0x3f );
   306            break;
   307          case Assembler::or_op3:
   308            adr |= (intptr_t)get_simm13( *pc );
   309            break;
   310          default:
   311            assert ( 0, "in gethi - Should not reach here" );
   312            break;
   313        }
   314        pc++;
   315        i++;
   316     }
   317     return ( (address)adr );
   318   }
   320  public:
   321   void  verify();
   322   void  print();
   324   // unit test stuff
   325   static void test() {}                 // override for testing
   327   inline friend NativeInstruction* nativeInstruction_at(address address);
   328 };
   330 inline NativeInstruction* nativeInstruction_at(address address) {
   331     NativeInstruction* inst = (NativeInstruction*)address;
   332 #ifdef ASSERT
   333       inst->verify();
   334 #endif
   335     return inst;
   336 }
   340 //-----------------------------------------------------------------------------
   342 // The NativeCall is an abstraction for accessing/manipulating native call instructions (30-bit pc-relative word displacement).
   343 // (used to manipulate inline caches, primitive & dll calls, etc.)
   344 inline NativeCall* nativeCall_at(address instr);
   345 inline NativeCall* nativeCall_overwriting_at(address instr,
   346                                              address destination);
   347 inline NativeCall* nativeCall_before(address return_address);
   348 class NativeCall: public NativeInstruction {
   349  public:
   350   enum Sparc_specific_constants {
   351     instruction_size                   = 8,
   352     return_address_offset              = 8,
   353     call_displacement_width            = 30,
   354     displacement_offset                = 0,
   355     instruction_offset                 = 0
   356   };
   357   address instruction_address() const       { return addr_at(0); }
   358   address next_instruction_address() const  { return addr_at(instruction_size); }
   359   address return_address() const            { return addr_at(return_address_offset); }
   361   address destination() const               { return inv_wdisp(long_at(0), call_displacement_width) + instruction_address(); }
   362   address displacement_address() const      { return addr_at(displacement_offset); }
   363   void  set_destination(address dest)       { set_long_at(0, set_wdisp(long_at(0), dest - instruction_address(), call_displacement_width)); }
   364   void  set_destination_mt_safe(address dest);
   366   void  verify_alignment() {} // do nothing on sparc
   367   void  verify();
   368   void  print();
   370   // unit test stuff
   371   static void  test();
   373   // Creation
   374   friend inline NativeCall* nativeCall_at(address instr);
   375   friend NativeCall* nativeCall_overwriting_at(address instr, address destination = NULL) {
   376     // insert a "blank" call:
   377     NativeCall* call = (NativeCall*)instr;
   378     call->set_long_at(0 * BytesPerInstWord, call_instruction(destination, instr));
   379     call->set_long_at(1 * BytesPerInstWord, nop_instruction());
   380     assert(call->addr_at(2 * BytesPerInstWord) - instr == instruction_size, "instruction size");
   381     // check its structure now:
   382     assert(nativeCall_at(instr)->destination() == destination, "correct call destination");
   383     return call;
   384   }
   386   friend inline NativeCall* nativeCall_before(address return_address) {
   387     NativeCall* call = (NativeCall*)(return_address - return_address_offset);
   388     #ifdef ASSERT
   389       call->verify();
   390     #endif
   391     return call;
   392   }
   394   static bool is_call_at(address instr) {
   395     return nativeInstruction_at(instr)->is_call();
   396   }
   398   static bool is_call_before(address instr) {
   399     return nativeInstruction_at(instr - return_address_offset)->is_call();
   400   }
   402   static bool is_call_to(address instr, address target) {
   403     return nativeInstruction_at(instr)->is_call() &&
   404       nativeCall_at(instr)->destination() == target;
   405   }
   407   // MT-safe patching of a call instruction.
   408   static void insert(address code_pos, address entry) {
   409     (void)nativeCall_overwriting_at(code_pos, entry);
   410   }
   412   static void replace_mt_safe(address instr_addr, address code_buffer);
   413 };
   414 inline NativeCall* nativeCall_at(address instr) {
   415   NativeCall* call = (NativeCall*)instr;
   416 #ifdef ASSERT
   417   call->verify();
   418 #endif
   419   return call;
   420 }
   422 // The NativeFarCall is an abstraction for accessing/manipulating native call-anywhere
   423 // instructions in the sparcv9 vm.  Used to call native methods which may be loaded
   424 // anywhere in the address space, possibly out of reach of a call instruction.
   426 #ifndef _LP64
   428 // On 32-bit systems, a far call is the same as a near one.
   429 class NativeFarCall;
   430 inline NativeFarCall* nativeFarCall_at(address instr);
   431 class NativeFarCall : public NativeCall {
   432 public:
   433   friend inline NativeFarCall* nativeFarCall_at(address instr) { return (NativeFarCall*)nativeCall_at(instr); }
   434   friend NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL)
   435                                                         { return (NativeFarCall*)nativeCall_overwriting_at(instr, destination); }
   436   friend NativeFarCall* nativeFarCall_before(address return_address)
   437                                                         { return (NativeFarCall*)nativeCall_before(return_address); }
   438 };
   440 #else
   442 // The format of this extended-range call is:
   443 //      jumpl_to addr, lreg
   444 //      == sethi %hi54(addr), O7 ;  jumpl O7, %lo10(addr), O7 ;  <delay>
   445 // That is, it is essentially the same as a NativeJump.
   446 class NativeFarCall;
   447 inline NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination);
   448 inline NativeFarCall* nativeFarCall_at(address instr);
   449 class NativeFarCall: public NativeInstruction {
   450  public:
   451   enum Sparc_specific_constants {
   452     // instruction_size includes the delay slot instruction.
   453     instruction_size                   = 9 * BytesPerInstWord,
   454     return_address_offset              = 9 * BytesPerInstWord,
   455     jmpl_offset                        = 7 * BytesPerInstWord,
   456     displacement_offset                = 0,
   457     instruction_offset                 = 0
   458   };
   459   address instruction_address() const       { return addr_at(0); }
   460   address next_instruction_address() const  { return addr_at(instruction_size); }
   461   address return_address() const            { return addr_at(return_address_offset); }
   463   address destination() const {
   464     return (address) data64(addr_at(0), long_at(jmpl_offset));
   465   }
   466   address displacement_address() const      { return addr_at(displacement_offset); }
   467   void set_destination(address dest);
   469   bool destination_is_compiled_verified_entry_point();
   471   void  verify();
   472   void  print();
   474   // unit test stuff
   475   static void  test();
   477   // Creation
   478   friend inline NativeFarCall* nativeFarCall_at(address instr) {
   479     NativeFarCall* call = (NativeFarCall*)instr;
   480     #ifdef ASSERT
   481       call->verify();
   482     #endif
   483     return call;
   484   }
   486   friend inline NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL) {
   487     Unimplemented();
   488     NativeFarCall* call = (NativeFarCall*)instr;
   489     return call;
   490   }
   492   friend NativeFarCall* nativeFarCall_before(address return_address) {
   493     NativeFarCall* call = (NativeFarCall*)(return_address - return_address_offset);
   494     #ifdef ASSERT
   495       call->verify();
   496     #endif
   497     return call;
   498   }
   500   static bool is_call_at(address instr);
   502   // MT-safe patching of a call instruction.
   503   static void insert(address code_pos, address entry) {
   504     (void)nativeFarCall_overwriting_at(code_pos, entry);
   505   }
   506   static void replace_mt_safe(address instr_addr, address code_buffer);
   507 };
   509 #endif // _LP64
   511 // An interface for accessing/manipulating native set_oop imm, reg instructions.
   512 // (used to manipulate inlined data references, etc.)
   513 //      set_oop imm, reg
   514 //      == sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg
   515 class NativeMovConstReg;
   516 inline NativeMovConstReg* nativeMovConstReg_at(address address);
   517 class NativeMovConstReg: public NativeInstruction {
   518  public:
   519   enum Sparc_specific_constants {
   520     sethi_offset           = 0,
   521 #ifdef _LP64
   522     add_offset             = 7 * BytesPerInstWord,
   523     instruction_size       = 8 * BytesPerInstWord
   524 #else
   525     add_offset             = 4,
   526     instruction_size       = 8
   527 #endif
   528   };
   530   address instruction_address() const       { return addr_at(0); }
   531   address next_instruction_address() const  { return addr_at(instruction_size); }
   533   // (The [set_]data accessor respects oop_type relocs also.)
   534   intptr_t data() const;
   535   void set_data(intptr_t x);
   537   // report the destination register
   538   Register destination() { return inv_rd(long_at(sethi_offset)); }
   540   void  verify();
   541   void  print();
   543   // unit test stuff
   544   static void test();
   546   // Creation
   547   friend inline NativeMovConstReg* nativeMovConstReg_at(address address) {
   548     NativeMovConstReg* test = (NativeMovConstReg*)address;
   549     #ifdef ASSERT
   550       test->verify();
   551     #endif
   552     return test;
   553   }
   556   friend NativeMovConstReg* nativeMovConstReg_before(address address) {
   557     NativeMovConstReg* test = (NativeMovConstReg*)(address - instruction_size);
   558     #ifdef ASSERT
   559       test->verify();
   560     #endif
   561     return test;
   562   }
   564 };
   567 // An interface for accessing/manipulating native set_oop imm, reg instructions.
   568 // (used to manipulate inlined data references, etc.)
   569 //      set_oop imm, reg
   570 //      == sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg
   571 //
   572 // Note that it is identical to NativeMovConstReg with the exception of a nop between the
   573 // sethi and the add.  The nop is required to be in the delay slot of the call instruction
   574 // which overwrites the sethi during patching.
   575 class NativeMovConstRegPatching;
   576 inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address);
       class NativeMovConstRegPatching: public NativeInstruction {
   577  public:
   578   enum Sparc_specific_constants {
   579     sethi_offset           = 0,
   580 #ifdef _LP64
   581     nop_offset             = 7 * BytesPerInstWord,
   582 #else
   583     nop_offset             = sethi_offset + BytesPerInstWord,
   584 #endif
   585     add_offset             = nop_offset   + BytesPerInstWord,
   586     instruction_size       = add_offset   + BytesPerInstWord
   587   };
   589   address instruction_address() const       { return addr_at(0); }
   590   address next_instruction_address() const  { return addr_at(instruction_size); }
   592   // (The [set_]data accessor respects oop_type relocs also.)
   593   int data() const;
   594   void  set_data(int x);
   596   // report the destination register
   597   Register destination() { return inv_rd(long_at(sethi_offset)); }
   599   void  verify();
   600   void  print();
   602   // unit test stuff
   603   static void test();
   605   // Creation
   606   friend inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
   607     NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)address;
   608     #ifdef ASSERT
   609       test->verify();
   610     #endif
   611     return test;
   612   }
   615   friend NativeMovConstRegPatching* nativeMovConstRegPatching_before(address address) {
   616     NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_size);
   617     #ifdef ASSERT
   618       test->verify();
   619     #endif
   620     return test;
   621   }
   623 };
   626 // An interface for accessing/manipulating native memory ops
   627 //      ld* [reg + offset], reg
   628 //      st* reg, [reg + offset]
   629 //      sethi %hi(imm), reg; add reg, %lo(imm), reg; ld* [reg1 + reg], reg2
   630 //      sethi %hi(imm), reg; add reg, %lo(imm), reg; st* reg2, [reg1 + reg]
   631 // Ops covered: {lds,ldu,st}{w,b,h}, {ld,st}{d,x}
   632 //
   633 class NativeMovRegMem;
   634 inline NativeMovRegMem* nativeMovRegMem_at (address address);
   635 class NativeMovRegMem: public NativeInstruction {
   636  public:
   637   enum Sparc_specific_constants {
   638     op3_mask_ld = 1 << Assembler::lduw_op3 |
   639                   1 << Assembler::ldub_op3 |
   640                   1 << Assembler::lduh_op3 |
   641                   1 << Assembler::ldd_op3 |
   642                   1 << Assembler::ldsw_op3 |
   643                   1 << Assembler::ldsb_op3 |
   644                   1 << Assembler::ldsh_op3 |
   645                   1 << Assembler::ldx_op3,
   646     op3_mask_st = 1 << Assembler::stw_op3 |
   647                   1 << Assembler::stb_op3 |
   648                   1 << Assembler::sth_op3 |
   649                   1 << Assembler::std_op3 |
   650                   1 << Assembler::stx_op3,
   651     op3_ldst_int_limit = Assembler::ldf_op3,
   652     op3_mask_ldf = 1 << (Assembler::ldf_op3  - op3_ldst_int_limit) |
   653                    1 << (Assembler::lddf_op3 - op3_ldst_int_limit),
   654     op3_mask_stf = 1 << (Assembler::stf_op3  - op3_ldst_int_limit) |
   655                    1 << (Assembler::stdf_op3 - op3_ldst_int_limit),
   657     offset_width    = 13,
   658     sethi_offset    = 0,
   659 #ifdef _LP64
   660     add_offset      = 7 * BytesPerInstWord,
   661 #else
   662     add_offset      = 4,
   663 #endif
   664     ldst_offset     = add_offset + BytesPerInstWord
   665   };
   666   bool is_immediate() const {
   667     // check if instruction is ld* [reg + offset], reg or st* reg, [reg + offset]
   668     int i0 = long_at(0);
   669     return (is_op(i0, Assembler::ldst_op));
   670   }
   672   address instruction_address() const           { return addr_at(0); }
   673   address next_instruction_address() const      {
   674 #ifdef _LP64
   675     return addr_at(is_immediate() ? 4 : (7 * BytesPerInstWord));
   676 #else
   677     return addr_at(is_immediate() ? 4 : 12);
   678 #endif
   679   }
   680   intptr_t   offset() const                             {
   681      return is_immediate()? inv_simm(long_at(0), offset_width) :
   682                             nativeMovConstReg_at(addr_at(0))->data();
   683   }
   684   void  set_offset(intptr_t x) {
   685     if (is_immediate()) {
   686       guarantee(fits_in_simm(x, offset_width), "data block offset overflow");
   687       set_long_at(0, set_simm(long_at(0), x, offset_width));
   688     } else
   689       nativeMovConstReg_at(addr_at(0))->set_data(x);
   690   }
   692   void  add_offset_in_bytes(intptr_t radd_offset)     {
   693       set_offset (offset() + radd_offset);
   694   }
   696   void  copy_instruction_to(address new_instruction_address);
   698   void verify();
   699   void print ();
   701   // unit test stuff
   702   static void test();
   704  private:
   705   friend inline NativeMovRegMem* nativeMovRegMem_at (address address) {
   706     NativeMovRegMem* test = (NativeMovRegMem*)address;
   707     #ifdef ASSERT
   708       test->verify();
   709     #endif
   710     return test;
   711   }
   712 };
   715 // An interface for accessing/manipulating native memory ops
   716 //      ld* [reg + offset], reg
   717 //      st* reg, [reg + offset]
   718 //      sethi %hi(imm), reg; nop; add reg, %lo(imm), reg; ld* [reg1 + reg], reg2
   719 //      sethi %hi(imm), reg; nop; add reg, %lo(imm), reg; st* reg2, [reg1 + reg]
   720 // Ops covered: {lds,ldu,st}{w,b,h}, {ld,st}{d,x}
   721 //
   722 // Note that it is identical to NativeMovRegMem with the exception of a nop between the
   723 // sethi and the add.  The nop is required to be in the delay slot of the call instruction
   724 // which overwrites the sethi during patching.
   725 class NativeMovRegMemPatching;
   726 inline NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address);
   727 class NativeMovRegMemPatching: public NativeInstruction {
   728  public:
   729   enum Sparc_specific_constants {
   730     op3_mask_ld = 1 << Assembler::lduw_op3 |
   731                   1 << Assembler::ldub_op3 |
   732                   1 << Assembler::lduh_op3 |
   733                   1 << Assembler::ldd_op3 |
   734                   1 << Assembler::ldsw_op3 |
   735                   1 << Assembler::ldsb_op3 |
   736                   1 << Assembler::ldsh_op3 |
   737                   1 << Assembler::ldx_op3,
   738     op3_mask_st = 1 << Assembler::stw_op3 |
   739                   1 << Assembler::stb_op3 |
   740                   1 << Assembler::sth_op3 |
   741                   1 << Assembler::std_op3 |
   742                   1 << Assembler::stx_op3,
   743     op3_ldst_int_limit = Assembler::ldf_op3,
   744     op3_mask_ldf = 1 << (Assembler::ldf_op3  - op3_ldst_int_limit) |
   745                    1 << (Assembler::lddf_op3 - op3_ldst_int_limit),
   746     op3_mask_stf = 1 << (Assembler::stf_op3  - op3_ldst_int_limit) |
   747                    1 << (Assembler::stdf_op3 - op3_ldst_int_limit),
   749     offset_width    = 13,
   750     sethi_offset    = 0,
   751 #ifdef _LP64
   752     nop_offset      = 7 * BytesPerInstWord,
   753 #else
   754     nop_offset      = 4,
   755 #endif
   756     add_offset      = nop_offset + BytesPerInstWord,
   757     ldst_offset     = add_offset + BytesPerInstWord
   758   };
   759   bool is_immediate() const {
   760     // check if instruction is ld* [reg + offset], reg or st* reg, [reg + offset]
   761     int i0 = long_at(0);
   762     return (is_op(i0, Assembler::ldst_op));
   763   }
   765   address instruction_address() const           { return addr_at(0); }
   766   address next_instruction_address() const      {
   767     return addr_at(is_immediate()? 4 : 16);
   768   }
   769   int   offset() const                          {
   770      return is_immediate()? inv_simm(long_at(0), offset_width) :
   771                             nativeMovConstRegPatching_at(addr_at(0))->data();
   772   }
   773   void  set_offset(int x) {
   774     if (is_immediate()) {
   775       guarantee(fits_in_simm(x, offset_width), "data block offset overflow");
   776       set_long_at(0, set_simm(long_at(0), x, offset_width));
   777     }
   778     else
   779       nativeMovConstRegPatching_at(addr_at(0))->set_data(x);
   780   }
   782   void  add_offset_in_bytes(intptr_t radd_offset)     {
   783       set_offset (offset() + radd_offset);
   784   }
   786   void  copy_instruction_to(address new_instruction_address);
   788   void verify();
   789   void print ();
   791   // unit test stuff
   792   static void test();
   794  private:
   795   friend inline NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) {
   796     NativeMovRegMemPatching* test = (NativeMovRegMemPatching*)address;
   797     #ifdef ASSERT
   798       test->verify();
   799     #endif
   800     return test;
   801   }
   802 };
   805 // An interface for accessing/manipulating native jumps
   806 //      jump_to addr
   807 //      == sethi %hi22(addr), temp ;  jumpl reg, %lo10(addr), G0 ;  <delay>
   808 //      jumpl_to addr, lreg
   809 //      == sethi %hi22(addr), temp ;  jumpl reg, %lo10(addr), lreg ;  <delay>
   810 class NativeJump;
   811 inline NativeJump* nativeJump_at(address address);
   812 class NativeJump: public NativeInstruction {
   813  private:
   814   void guarantee_displacement(int disp, int width) {
   815     guarantee(fits_in_simm(disp, width + 2), "branch displacement overflow");
   816   }
   818  public:
   819   enum Sparc_specific_constants {
   820     sethi_offset           = 0,
   821 #ifdef _LP64
   822     jmpl_offset            = 7 * BytesPerInstWord,
   823     instruction_size       = 9 * BytesPerInstWord  // includes delay slot
   824 #else
   825     jmpl_offset            = 1 * BytesPerInstWord,
   826     instruction_size       = 3 * BytesPerInstWord  // includes delay slot
   827 #endif
   828   };
   830   address instruction_address() const       { return addr_at(0); }
   831   address next_instruction_address() const  { return addr_at(instruction_size); }
   833 #ifdef _LP64
   834   address jump_destination() const {
   835     return (address) data64(instruction_address(), long_at(jmpl_offset));
   836   }
   837   void set_jump_destination(address dest) {
   838     set_data64_sethi( instruction_address(), (intptr_t)dest);
   839     set_long_at(jmpl_offset,  set_data32_simm13( long_at(jmpl_offset),  (intptr_t)dest));
   840   }
   841 #else
   842   address jump_destination() const {
   843     return (address) data32(long_at(sethi_offset), long_at(jmpl_offset));
   844   }
   845   void set_jump_destination(address dest) {
   846     set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), (intptr_t)dest));
   847     set_long_at(jmpl_offset,  set_data32_simm13( long_at(jmpl_offset),  (intptr_t)dest));
   848   }
   849 #endif
   851   // Creation
   852   friend inline NativeJump* nativeJump_at(address address) {
   853     NativeJump* jump = (NativeJump*)address;
   854     #ifdef ASSERT
   855       jump->verify();
   856     #endif
   857     return jump;
   858   }
   860   void verify();
   861   void print();
   863   // Unit testing stuff
   864   static void test();
   866   // Insertion of native jump instruction
   867   static void insert(address code_pos, address entry);
   868   // MT-safe insertion of native jump at verified method entry
   869   static void check_verified_entry_alignment(address entry, address verified_entry) {
   870     // nothing to do for sparc.
   871   }
   872   static void patch_verified_entry(address entry, address verified_entry, address dest);
   873 };
   877 // Despite the name, handles only simple branches.
   878 class NativeGeneralJump;
   879 inline NativeGeneralJump* nativeGeneralJump_at(address address);
   880 class NativeGeneralJump: public NativeInstruction {
   881  public:
   882   enum Sparc_specific_constants {
   883     instruction_size                   = 8
   884   };
   886   address instruction_address() const       { return addr_at(0); }
   887   address jump_destination()    const       { return addr_at(0) + branch_destination_offset(long_at(0)); }
   888   void set_jump_destination(address dest) {
   889     int patched_instr = patch_branch_destination_offset(dest - addr_at(0), long_at(0));
   890     set_long_at(0, patched_instr);
   891   }
   892   void set_annul() { set_annul_bit(); }
   893   NativeInstruction *delay_slot_instr() { return nativeInstruction_at(addr_at(4));}
   894   void fill_delay_slot(int instr) { set_long_at(4, instr);}
   895   Assembler::Condition condition() {
   896     int x = long_at(0);
   897     return (Assembler::Condition) Assembler::inv_cond(x);
   898   }
   900   // Creation
   901   friend inline NativeGeneralJump* nativeGeneralJump_at(address address) {
   902     NativeGeneralJump* jump = (NativeGeneralJump*)(address);
   903 #ifdef ASSERT
   904       jump->verify();
   905 #endif
   906     return jump;
   907   }
   909   // Insertion of native general jump instruction
   910   static void insert_unconditional(address code_pos, address entry);
   911   static void replace_mt_safe(address instr_addr, address code_buffer);
   913   void verify();
   914 };
   917 class NativeIllegalInstruction: public NativeInstruction {
   918  public:
   919   enum Sparc_specific_constants {
   920     instruction_size            =    4
   921   };
   923   // Insert illegal opcode at the specified address
   924   static void insert(address code_pos);
   925 };
   927 #endif // CPU_SPARC_VM_NATIVEINST_SPARC_HPP
