src/cpu/sparc/vm/assembler_sparc.inline.hpp

author:      johnc
date:        Thu, 07 Apr 2011 09:53:20 -0700
changeset:   2781:e1162778c1c8
parent:      2441:c17b998c5926
child:       2950:cba7b5c2d53f
permissions: -rw-r--r--
7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking, but is re-attached to the strongly reachable object graph during marking, may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method and by intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
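
The effect of the new barrier can be illustrated with a short, hedged sketch (the helpers satb_marking_active() and satb_enqueue() are illustrative names, not the actual HotSpot API): on every read of the referent field, a non-null referent is logged while concurrent marking is active, so the marker treats it as live even if it is concurrently re-attached to the strongly reachable graph.

// Hypothetical sketch of the referent read barrier (illustrative names only):
oop reference_get(oop reference) {
  oop referent = java_lang_ref_Reference::referent(reference);
  if (referent != NULL && satb_marking_active()) {
    satb_enqueue(referent);  // record the referent in the thread's SATB buffer
  }
  return referent;
}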

/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP
#define CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP

#include "asm/assembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"
#include "runtime/handles.inline.hpp"

inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
  jint& stub_inst = *(jint*) branch;
  stub_inst = patched_branch(target - branch, stub_inst, 0);
}

#ifndef PRODUCT
inline void MacroAssembler::pd_print_patched_instruction(address branch) {
  jint stub_inst = *(jint*) branch;
  print_instruction(stub_inst);
  ::tty->print("%s", " (unresolved)");
}
#endif // PRODUCT

inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); }

inline int AddressLiteral::low10() const {
  return Assembler::low10(value());
}

// inlines for SPARC assembler -- dmu 5/97

inline void Assembler::check_delay() {
# ifdef CHECK_DELAY
  guarantee( delay_state != at_delay_slot, "must say delayed() when filling delay slot");
  delay_state = no_delay;
# endif
}

inline void Assembler::emit_long(int x) {
  check_delay();
  AbstractAssembler::emit_long(x);
}

inline void Assembler::emit_data(int x, relocInfo::relocType rtype) {
  relocate(rtype);
  emit_long(x);
}

inline void Assembler::emit_data(int x, RelocationHolder const& rspec) {
  relocate(rspec);
  emit_long(x);
}

inline void Assembler::add(Register s1, Register s2, Register d )                             { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rtype ); }
inline void Assembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec ); }

inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt);  has_delay_slot(); }
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { bpr( c, a, p, s1, target(L)); }

inline void Assembler::fb( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(fb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt);  has_delay_slot(); }
inline void Assembler::fb( Condition c, bool a, Label& L ) { fb(c, a, target(L)); }

inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt);  has_delay_slot(); }
inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { fbp(c, a, cc, p, target(L)); }

inline void Assembler::cb( Condition c, bool a, address d, relocInfo::relocType rt ) { v8_only();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(cb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt);  has_delay_slot(); }
inline void Assembler::cb( Condition c, bool a, Label& L ) { cb(c, a, target(L)); }

inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep();   emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt);  has_delay_slot(); }
inline void Assembler::br( Condition c, bool a, Label& L ) { br(c, a, target(L)); }

inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(bp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt);  has_delay_slot(); }
inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) { bp(c, a, cc, p, target(L)); }

inline void Assembler::call( address d,  relocInfo::relocType rt ) { emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rt);  has_delay_slot(); assert(rt != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
inline void Assembler::call( Label& L,   relocInfo::relocType rt ) { call( target(L), rt); }
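
// Usage sketch (illustrative, not part of this file): each branch and call
// above ends with has_delay_slot(), so callers must fill the SPARC delay
// slot explicitly via delayed(), e.g.
//
//   __ br(Assembler::equal, false, Assembler::pt, L_done);
//   __ delayed()->nop();   // architectural delay slot
//
// check_delay() then guarantees that no instruction is emitted into a delay
// slot without saying delayed() first.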
inline void Assembler::flush( Register s1, Register s2) { emit_long( op(arith_op) | op3(flush_op3) | rs1(s1) | rs2(s2)); }
inline void Assembler::flush( Register s1, int simm13a) { emit_data( op(arith_op) | op3(flush_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::jmpl( Register s1, Register s2, Register d                          ) { emit_long( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2));  has_delay_slot(); }
inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec);  has_delay_slot(); }

inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
  if (s2.is_register()) ldf(w, s1, s2.as_register(), d);
  else                  ldf(w, s1, s2.as_constant(), d);
}

inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }

inline void Assembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) { relocate(a.rspec(offset)); ldf( w, a.base(), a.disp() + offset, d); }

inline void Assembler::ldfsr(  Register s1, Register s2) { v9_dep();   emit_long( op(ldst_op) |             op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldfsr(  Register s1, int simm13a) { v9_dep();   emit_data( op(ldst_op) |             op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only();  emit_long( op(ldst_op) | rd(G1)    | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldxfsr( Register s1, int simm13a) { v9_only();  emit_data( op(ldst_op) | rd(G1)    | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldc(   Register s1, Register s2, int crd) { v8_only();  emit_long( op(ldst_op) | fcn(crd) | op3(ldc_op3  ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldc(   Register s1, int simm13a, int crd) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(ldc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::lddc(  Register s1, Register s2, int crd) { v8_only();  emit_long( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::lddc(  Register s1, int simm13a, int crd) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldcsr( Register s1, Register s2, int crd) { v8_only();  emit_long( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldcsr( Register s1, int simm13a, int crd) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldsb(  Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldsb(  Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldsh(  Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldsh_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldsh(  Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsh_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldsw(  Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldsw_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldsw(  Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldub(  Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldub_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldub(  Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::lduh(  Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(lduh_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::lduh(  Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(lduh_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::lduw(  Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(lduw_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::lduw(  Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(lduw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldx(   Register s1, Register s2, Register d) { v9_only();  emit_long( op(ldst_op) | rd(d) | op3(ldx_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldx(   Register s1, int simm13a, Register d) { v9_only();  emit_data( op(ldst_op) | rd(d) | op3(ldx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldd(   Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldd(   Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

#ifdef _LP64
// Make all 32 bit loads signed so 64 bit registers maintain proper sign
inline void Assembler::ld(  Register s1, Register s2, Register d)      { ldsw( s1, s2, d); }
inline void Assembler::ld(  Register s1, int simm13a, Register d)      { ldsw( s1, simm13a, d); }
#else
inline void Assembler::ld(  Register s1, Register s2, Register d)      { lduw( s1, s2, d); }
inline void Assembler::ld(  Register s1, int simm13a, Register d)      { lduw( s1, simm13a, d); }
#endif
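// Example: 32-bit 0xFFFFFFFF (-1) loaded with lduw leaves 0x00000000FFFFFFFF
// in a 64-bit register, while ldsw sign-extends it to 0xFFFFFFFFFFFFFFFF,
// preserving the value as a signed int.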
#ifdef ASSERT
  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
# ifdef _LP64
inline void Assembler::ld(  Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
# else
inline void Assembler::ld(  Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
# endif
#endif

inline void Assembler::ld(  const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ld(  a.base(), a.index(),         d); }
  else               {                          ld(  a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsb(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(),         d); }
  else               {                          ldsb(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(),         d); }
  else               {                          ldsh(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(),         d); }
  else               {                          ldsw(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldub(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(),         d); }
  else               {                          ldub(a.base(), a.disp() + offset, d); }
}
inline void Assembler::lduh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(),         d); }
  else               {                          lduh(a.base(), a.disp() + offset, d); }
}
inline void Assembler::lduw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(),         d); }
  else               {                          lduw(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldd( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(),         d); }
  else               {                          ldd( a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldx( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(),         d); }
  else               {                          ldx( a.base(), a.disp() + offset, d); }
}

inline void Assembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
inline void Assembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); }
inline void Assembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); }
inline void Assembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
inline void Assembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
inline void Assembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
inline void Assembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
inline void Assembler::ld(  Register s1, RegisterOrConstant s2, Register d) { ld(  Address(s1, s2), d); }
inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }

// form effective addresses this way:
inline void Assembler::add(const Address& a, Register d, int offset) {
  if (a.has_index())   add(a.base(), a.index(),         d);
  else               { add(a.base(), a.disp() + offset, d, a.rspec(offset)); offset = 0; }
  if (offset != 0)     add(d,        offset,            d);
}
inline void Assembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register())  add(s1, s2.as_register(),          d);
  else                 { add(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0)       add(d,  offset,                    d);
}

inline void Assembler::andn(Register s1, RegisterOrConstant s2, Register d) {
  if (s2.is_register())  andn(s1, s2.as_register(), d);
  else                   andn(s1, s2.as_constant(), d);
}

inline void Assembler::ldstub(  Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldstub(  Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::prefetch(Register s1, Register s2, PrefetchFcn f) { v9_only();  emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::prefetch(Register s1, int simm13a, PrefetchFcn f) { v9_only();  emit_data( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::prefetch(const Address& a, PrefetchFcn f, int offset) { v9_only(); relocate(a.rspec(offset)); prefetch(a.base(), a.disp() + offset, f); }

inline void Assembler::rett( Register s1, Register s2                         ) { emit_long( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2));  has_delay_slot(); }
inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt);  has_delay_slot(); }

inline void Assembler::sethi( int imm22a, Register d, RelocationHolder const& rspec ) { emit_data( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(imm22a), rspec); }

  // pp 222

inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2) {
  if (s2.is_register()) stf(w, d, s1, s2.as_register());
  else                  stf(w, d, s1, s2.as_constant());
}

inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) { relocate(a.rspec(offset)); stf(w, d, a.base(), a.disp() + offset); }

inline void Assembler::stfsr(  Register s1, Register s2) { v9_dep();   emit_long( op(ldst_op) |             op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stfsr(  Register s1, int simm13a) { v9_dep();   emit_data( op(ldst_op) |             op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stxfsr( Register s1, Register s2) { v9_only();  emit_long( op(ldst_op) | rd(G1)    | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stxfsr( Register s1, int simm13a) { v9_only();  emit_data( op(ldst_op) | rd(G1)    | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

  // p 226

inline void Assembler::stb(  Register d, Register s1, Register s2) { emit_long( op(ldst_op) | rd(d) | op3(stb_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stb(  Register d, Register s1, int simm13a) { emit_data( op(ldst_op) | rd(d) | op3(stb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::sth(  Register d, Register s1, Register s2) { emit_long( op(ldst_op) | rd(d) | op3(sth_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::sth(  Register d, Register s1, int simm13a) { emit_data( op(ldst_op) | rd(d) | op3(sth_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stw(  Register d, Register s1, Register s2) { emit_long( op(ldst_op) | rd(d) | op3(stw_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stw(  Register d, Register s1, int simm13a) { emit_data( op(ldst_op) | rd(d) | op3(stw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::stx(  Register d, Register s1, Register s2) { v9_only();  emit_long( op(ldst_op) | rd(d) | op3(stx_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stx(  Register d, Register s1, int simm13a) { v9_only();  emit_data( op(ldst_op) | rd(d) | op3(stx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::std(  Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::std(  Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::st( Register d, Register s1, Register s2)      { stw(d, s1, s2); }
inline void Assembler::st( Register d, Register s1, int simm13a)      { stw(d, s1, simm13a); }

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void Assembler::st( Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
#endif

inline void Assembler::stb(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index()        ); }
  else               {                          stb(d, a.base(), a.disp() + offset); }
}
inline void Assembler::sth(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index()        ); }
  else               {                          sth(d, a.base(), a.disp() + offset); }
}
inline void Assembler::stw(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index()        ); }
  else               {                          stw(d, a.base(), a.disp() + offset); }
}
inline void Assembler::st( Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index()        ); }
  else               {                          st( d, a.base(), a.disp() + offset); }
}
inline void Assembler::std(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index()        ); }
  else               {                          std(d, a.base(), a.disp() + offset); }
}
inline void Assembler::stx(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index()        ); }
  else               {                          stx(d, a.base(), a.disp() + offset); }
}

inline void Assembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
inline void Assembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
inline void Assembler::stw(Register d, Register s1, RegisterOrConstant s2) { stw(d, Address(s1, s2)); }
inline void Assembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
inline void Assembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
inline void Assembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }

// v8 p 99

inline void Assembler::stc(    int crd, Register s1, Register s2) { v8_only();  emit_long( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stc(    int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stdc(   int crd, Register s1, Register s2) { v8_only();  emit_long( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stdc(   int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stcsr(  int crd, Register s1, Register s2) { v8_only();  emit_long( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stcsr(  int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stdcq(  int crd, Register s1, Register s2) { v8_only();  emit_long( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stdcq(  int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register())  sub(s1, s2.as_register(),          d);
  else                 { sub(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0)       sub(d,  offset,                    d);
}

// pp 231

inline void Assembler::swap(    Register s1, Register s2, Register d) { v9_dep();  emit_long( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::swap(    Register s1, int simm13a, Register d) { v9_dep();  emit_data( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::swap(    Address& a, Register d, int offset ) { relocate(a.rspec(offset)); swap(  a.base(), a.disp() + offset, d ); }

// Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  Assembler::ld( s1, simm13a, d);
#endif
}

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
  ld_ptr(s1, in_bytes(simm13a), d);
}
#endif

inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
#ifdef _LP64
  Assembler::ldx(a, d, offset);
#else
  Assembler::ld( a, d, offset);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  Assembler::st( d, s1, simm13a);
#endif
}

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
  st_ptr(d, s1, in_bytes(simm13a));
}
#endif

inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
#ifdef _LP64
  Assembler::stx(d, a, offset);
#else
  Assembler::st( d, a, offset);
#endif
}

// Use the right loads/stores for the platform
inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  Assembler::ldd(s1, simm13a, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
#ifdef _LP64
  Assembler::ldx(a, d, offset);
#else
  Assembler::ldd(a, d, offset);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  Assembler::std(d, s1, simm13a);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
#ifdef _LP64
  Assembler::stx(d, a, offset);
#else
  Assembler::std(d, a, offset);
#endif
}

// Functions for isolating 64 bit shifts for LP64

inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, s2, d);
#else
  Assembler::sll( s1, s2, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, int imm6a,   Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, imm6a, d);
#else
  Assembler::sll( s1, imm6a, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, s2, d);
#else
  Assembler::srl( s1, s2, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, int imm6a,   Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, imm6a, d);
#else
  Assembler::srl( s1, imm6a, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
  if (s2.is_register())  sll_ptr(s1, s2.as_register(), d);
  else                   sll_ptr(s1, s2.as_constant(), d);
}

// Use the right branch for the platform

inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    Assembler::bp(c, a, icc, p, d, rt);
  else
    Assembler::br(c, a, d, rt);
}

inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
  br(c, a, p, target(L));
}

// Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
#ifdef _LP64
    Assembler::bp(c, a, xcc, p, d, rt);
#else
    MacroAssembler::br(c, a, p, d, rt);
#endif
}

inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
  brx(c, a, p, target(L));
}

inline void MacroAssembler::ba( bool a, Label& L ) {
  br(always, a, pt, L);
}

// Warning: V9 only functions
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::bp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::bp(c, a, cc, p, L);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    fbp(c, a, fcc0, p, d, rt);
  else
    Assembler::fb(c, a, d, rt);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
  fb(c, a, p, target(L));
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::fbp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::fbp(c, a, cc, p, L);
}

inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }

inline bool MacroAssembler::is_far_target(address d) {
  return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound());
}

// Call with a check to see if we need to deal with the added
// expense of relocation and if we overflow the displacement
// of the quick call instruction.
inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
#ifdef _LP64
  intptr_t disp;
  // NULL is ok because it will be relocated later.
  // Must change NULL to a reachable address in order to
  // pass asserts here and in wdisp.
  if ( d == NULL )
    d = pc();

  // Is this address within range of the call instruction?
  // If not, use the expensive instruction sequence
  if (is_far_target(d)) {
    relocate(rt);
    AddressLiteral dest(d);
    jumpl_to(dest, O7, O7);
  } else {
    Assembler::call(d, rt);
  }
#else
  Assembler::call( d, rt );
#endif
}

inline void MacroAssembler::call( Label& L,   relocInfo::relocType rt ) {
  MacroAssembler::call( target(L), rt);
}

inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }

// prefetch instruction
inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    Assembler::bp( never, true, xcc, pt, d, rt );
}
inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }

// clobbers O7 on V8!!
// returns the delta from the pc that was read to the address immediately after
inline int MacroAssembler::get_pc( Register d ) {
  int x = offset();
  if (VM_Version::v9_instructions_work())
    rdpc(d);
  else {
    Label lbl;
    Assembler::call(lbl, relocInfo::none);  // No relocation as this is call to pc+0x8
    if (d == O7)  delayed()->nop();
    else          delayed()->mov(O7, d);
    bind(lbl);
  }
  return offset() - x;
}

// Note:  All MacroAssembler::set_foo functions are defined out-of-line.

// Loads the current PC of the following instruction as an immediate value in
// 2 instructions.  All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
  intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
  Unimplemented();
#else
  Assembler::sethi(  thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
  Assembler::add(reg,thepc &  0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
  return thepc;
}

inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  sethi(addrlit, d);
  ld(d, addrlit.low10() + offset, d);
}

inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  sethi(addrlit, d);
  ld_ptr(d, addrlit.low10() + offset, d);
}

inline void MacroAssembler::store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  sethi(addrlit, temp);
  st(s, temp, addrlit.low10() + offset);
}

inline void MacroAssembler::store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  sethi(addrlit, temp);
  st_ptr(s, temp, addrlit.low10() + offset);
}

// This code sequence is relocatable to any address, even on LP64.
inline void MacroAssembler::jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  jmpl(temp, addrlit.low10() + offset, d);
}

inline void MacroAssembler::jump_to(const AddressLiteral& addrlit, Register temp, int offset) {
  jumpl_to(addrlit, temp, G0, offset);
}

inline void MacroAssembler::jump_indirect_to(Address& a, Register temp,
                                             int ld_offset, int jmp_offset) {
  assert_not_delayed();
  //sethi(al);                   // sethi is caller responsibility for this one
  ld_ptr(a, temp, ld_offset);
  jmp(temp, jmp_offset);
}

inline void MacroAssembler::set_oop(jobject obj, Register d) {
  set_oop(allocate_oop_address(obj), d);
}

inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
  set_oop(constant_oop_address(obj), d);
}

inline void MacroAssembler::set_oop(const AddressLiteral& obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  set(obj_addr, d);
}

inline void MacroAssembler::load_argument( Argument& a, Register  d ) {
  if (a.is_register())
    mov(a.as_register(), d);
  else
    ld (a.as_address(),  d);
}

inline void MacroAssembler::store_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());         // ABI says everything is right justified.
}

inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());
}

#ifdef _LP64
inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // In the V9 ABI, F1, F3, F5 are used to pass float arguments instead of O0, O1, O2
    fmov(FloatRegisterImpl::S, s, a.as_float_register() );
  else
    // Floats are stored in the high half of the stack entry
    // The low half is undefined per the ABI.
    stf(FloatRegisterImpl::S, s, a.as_address(), sizeof(jfloat));
}

inline void MacroAssembler::store_double_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // In the V9 ABI, D0, D2, D4 are used to pass double arguments instead of O0, O1, O2
    fmov(FloatRegisterImpl::D, s, a.as_double_register() );
  else
    stf(FloatRegisterImpl::D, s, a.as_address());
}

inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    stx(s, a.as_address());
}
#endif

inline void MacroAssembler::clrb( Register s1, Register s2) {  stb( G0, s1, s2 ); }
inline void MacroAssembler::clrh( Register s1, Register s2) {  sth( G0, s1, s2 ); }
inline void MacroAssembler::clr(  Register s1, Register s2) {  stw( G0, s1, s2 ); }
inline void MacroAssembler::clrx( Register s1, Register s2) {  stx( G0, s1, s2 ); }

inline void MacroAssembler::clrb( Register s1, int simm13a) { stb( G0, s1, simm13a); }
inline void MacroAssembler::clrh( Register s1, int simm13a) { sth( G0, s1, simm13a); }
inline void MacroAssembler::clr(  Register s1, int simm13a) { stw( G0, s1, simm13a); }
inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm13a); }

// returns whether membar would emit anything; this code must mirror
// membar below.
inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
  if( !os::is_MP() ) return false;  // Not needed on single CPU
  if( VM_Version::v9_instructions_work() ) {
    const Membar_mask_bits effective_mask =
        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
    return (effective_mask != 0);
  } else {
    return true;
  }
}

inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
  // Uniprocessors do not need memory barriers
  if (!os::is_MP()) return;
  // Weakened for current Sparcs and TSO.  See the v9 manual, sections 8.4.3,
  // 8.4.4.3, a.31 and a.50.
  if( VM_Version::v9_instructions_work() ) {
    // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
    // of the mmask subfield of const7a that does anything that isn't done
    // implicitly is StoreLoad.
    const Membar_mask_bits effective_mask =
        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
    if ( effective_mask != 0 ) {
      Assembler::membar( effective_mask );
    }
  } else {
    // stbar is the closest there is on v8.  Equivalent to membar(StoreStore).  We
    // do not issue the stbar because to my knowledge all v8 machines implement TSO,
    // which guarantees that all stores behave as if an stbar were issued just after
    // each one of them.  On these machines, stbar ought to be a nop.  There doesn't
    // appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
    // it can't be specified by stbar, nor have I come up with a way to simulate it.
    //
    // Addendum.  Dave says that ldstub guarantees a write buffer flush to coherent
    // space.  Put one here to be on the safe side.
    Assembler::ldstub(SP, 0, G0);
  }
}
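
// Usage sketch (illustrative): under TSO the only mmask bit with an effect
// is StoreLoad, e.g. after a volatile field store one might emit
//
//   __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
//
// membar_has_effect() lets callers skip dependent code entirely when the
// requested mask reduces to a no-op.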
#endif // CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP
