Tue, 06 Dec 2011 18:28:51 -0500
7117052: instanceKlass::_init_state can be u1 type
Summary: Change instanceKlass::_init_state field to u1 type.
Reviewed-by: bdelsart, coleenp, dholmes, phh, never
Contributed-by: Jiangli Zhou <jiangli.zhou@oracle.com>

/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP
#define CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP

#include "asm/assembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"
#include "runtime/handles.inline.hpp"

inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
  jint& stub_inst = *(jint*) branch;
  stub_inst = patched_branch(target - branch, stub_inst, 0);
}

#ifndef PRODUCT
inline void MacroAssembler::pd_print_patched_instruction(address branch) {
  jint stub_inst = *(jint*) branch;
  print_instruction(stub_inst);
  ::tty->print("%s", " (unresolved)");
}
#endif // PRODUCT

inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); }

inline int AddressLiteral::low10() const {
  return Assembler::low10(value());
}
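
// How the sethi/low10 pair is used (standard SPARC constant materialization;
// the register below is only an example): sethi supplies bits 31..10 of a
// 32-bit value and low10() the remaining low 10 bits, e.g. for 0x12345678:
//
//   sethi %hi(0x12345678), %g1   ! g1 = 0x12345400
//   or    %g1, 0x278, %g1        ! g1 = 0x12345678  (0x278 == low10(0x12345678))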

// inlines for SPARC assembler -- dmu 5/97

inline void Assembler::check_delay() {
# ifdef CHECK_DELAY
  guarantee( delay_state != at_delay_slot, "must say delayed() when filling delay slot");
  delay_state = no_delay;
# endif
}

inline void Assembler::emit_long(int x) {
  check_delay();
  AbstractAssembler::emit_long(x);
}

inline void Assembler::emit_data(int x, relocInfo::relocType rtype) {
  relocate(rtype);
  emit_long(x);
}

inline void Assembler::emit_data(int x, RelocationHolder const& rspec) {
  relocate(rspec);
  emit_long(x);
}

inline void Assembler::add(Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rtype ); }
inline void Assembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec ); }

inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); }
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { bpr( c, a, p, s1, target(L)); }

inline void Assembler::fb( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::fb( Condition c, bool a, Label& L ) { fb(c, a, target(L)); }

inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { fbp(c, a, cc, p, target(L)); }

inline void Assembler::cb( Condition c, bool a, address d, relocInfo::relocType rt ) { v8_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(cb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::cb( Condition c, bool a, Label& L ) { cb(c, a, target(L)); }

inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::br( Condition c, bool a, Label& L ) { br(c, a, target(L)); }

inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) { bp(c, a, cc, p, target(L)); }

// compare and branch
inline void Assembler::cbcond(Condition c, CC cc, Register s1, Register s2, Label& L) { cti(); no_cbcond_before(); emit_data(op(branch_op) | cond_cbcond(c) | op2(bpr_op2) | branchcc(cc) | wdisp10(intptr_t(target(L)), intptr_t(pc())) | rs1(s1) | rs2(s2)); }
inline void Assembler::cbcond(Condition c, CC cc, Register s1, int simm5, Label& L) { cti(); no_cbcond_before(); emit_data(op(branch_op) | cond_cbcond(c) | op2(bpr_op2) | branchcc(cc) | wdisp10(intptr_t(target(L)), intptr_t(pc())) | rs1(s1) | immed(true) | simm(simm5, 5)); }
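
// Note on cbcond: it fuses a compare with a short branch and, unlike the
// branches above, has no delay slot (there is no has_delay_slot() call here).
// Its reach is only a signed 10-bit word displacement (wdisp10), so the
// target label must stay within roughly +/-2 KB of the instruction.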

inline void Assembler::call( address d, relocInfo::relocType rt ) { cti(); emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rt); has_delay_slot(); assert(rt != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
inline void Assembler::call( Label& L, relocInfo::relocType rt ) { call( target(L), rt); }

inline void Assembler::flush( Register s1, Register s2) { emit_long( op(arith_op) | op3(flush_op3) | rs1(s1) | rs2(s2)); }
inline void Assembler::flush( Register s1, int simm13a) { emit_data( op(arith_op) | op3(flush_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::jmpl( Register s1, Register s2, Register d ) { cti(); emit_long( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { cti(); emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); }

inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
  if (s2.is_register()) ldf(w, s1, s2.as_register(), d);
  else                  ldf(w, s1, s2.as_constant(), d);
}

inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }

inline void Assembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) { relocate(a.rspec(offset)); ldf( w, a.base(), a.disp() + offset, d); }

inline void Assembler::ldfsr( Register s1, Register s2) { v9_dep(); emit_long( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldc( Register s1, Register s2, int crd) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(ldc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldc( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(ldc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::lddc( Register s1, Register s2, int crd) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::lddc( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldcsr( Register s1, Register s2, int crd) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldcsr( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldsb( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldsb( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldsh( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldsh_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldsh( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsh_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldsw( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldsw_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldsw( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldub( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldub_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::lduh( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(lduh_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::lduh( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(lduh_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::lduw( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(lduw_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::lduw( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(lduw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldx( Register s1, Register s2, Register d) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(ldx_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldx( Register s1, int simm13a, Register d) { v9_only(); emit_data( op(ldst_op) | rd(d) | op3(ldx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldd( Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldd( Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

#ifdef _LP64
// Make all 32 bit loads signed so 64 bit registers maintain proper sign
inline void Assembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
inline void Assembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
#else
inline void Assembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
inline void Assembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
#endif
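
// Why the signedness matters on LP64: ldsw replicates bit 31 of the loaded
// word into the upper half of the 64-bit destination, so loading 0x80000000
// with ld yields 0xFFFFFFFF80000000, whereas lduw would zero-extend to
// 0x0000000080000000.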

#ifdef ASSERT
  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
# ifdef _LP64
inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
# else
inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
# endif
#endif

inline void Assembler::ld( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ld( a.base(), a.index(), d); }
  else { ld( a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsb(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(), d); }
  else { ldsb(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(), d); }
  else { ldsh(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(), d); }
  else { ldsw(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldub(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(), d); }
  else { ldub(a.base(), a.disp() + offset, d); }
}
inline void Assembler::lduh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(), d); }
  else { lduh(a.base(), a.disp() + offset, d); }
}
inline void Assembler::lduw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(), d); }
  else { lduw(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldd( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(), d); }
  else { ldd( a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldx( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(), d); }
  else { ldx( a.base(), a.disp() + offset, d); }
}

inline void Assembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
inline void Assembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); }
inline void Assembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); }
inline void Assembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
inline void Assembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
inline void Assembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
inline void Assembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
inline void Assembler::ld(  Register s1, RegisterOrConstant s2, Register d) { ld(  Address(s1, s2), d); }
inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }

// form effective addresses this way:
inline void Assembler::add(const Address& a, Register d, int offset) {
  if (a.has_index()) add(a.base(), a.index(), d);
  else { add(a.base(), a.disp() + offset, d, a.rspec(offset)); offset = 0; }
  if (offset != 0) add(d, offset, d);
}

inline void Assembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register()) add(s1, s2.as_register(), d);
  else { add(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0) add(d, offset, d);
}
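
// Example of the expansion above (register names are illustrative only):
// add(Address(Rbase, Rindex), Rdest, 8) cannot fold the extra offset into the
// index form, so it emits add(Rbase, Rindex, Rdest) followed by
// add(Rdest, 8, Rdest); with a constant displacement the offset is folded
// into the simm13 and a single add suffices.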

inline void Assembler::andn(Register s1, RegisterOrConstant s2, Register d) {
  if (s2.is_register()) andn(s1, s2.as_register(), d);
  else                  andn(s1, s2.as_constant(), d);
}

inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::prefetch(Register s1, Register s2, PrefetchFcn f) { v9_only(); emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::prefetch(Register s1, int simm13a, PrefetchFcn f) { v9_only(); emit_data( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::prefetch(const Address& a, PrefetchFcn f, int offset) { v9_only(); relocate(a.rspec(offset)); prefetch(a.base(), a.disp() + offset, f); }

inline void Assembler::rett( Register s1, Register s2 ) { cti(); emit_long( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { cti(); emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt); has_delay_slot(); }

inline void Assembler::sethi( int imm22a, Register d, RelocationHolder const& rspec ) { emit_data( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(imm22a), rspec); }

// pp 222

inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2) {
  if (s2.is_register()) stf(w, d, s1, s2.as_register());
  else                  stf(w, d, s1, s2.as_constant());
}

inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) {
  relocate(a.rspec(offset));
  if (a.has_index()) { assert(offset == 0, ""); stf(w, d, a.base(), a.index() ); }
  else { stf(w, d, a.base(), a.disp() + offset); }
}

inline void Assembler::stfsr( Register s1, Register s2) { v9_dep(); emit_long( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stxfsr( Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

// p 226

inline void Assembler::stb( Register d, Register s1, Register s2) { emit_long( op(ldst_op) | rd(d) | op3(stb_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stb( Register d, Register s1, int simm13a) { emit_data( op(ldst_op) | rd(d) | op3(stb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::sth( Register d, Register s1, Register s2) { emit_long( op(ldst_op) | rd(d) | op3(sth_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::sth( Register d, Register s1, int simm13a) { emit_data( op(ldst_op) | rd(d) | op3(sth_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stw( Register d, Register s1, Register s2) { emit_long( op(ldst_op) | rd(d) | op3(stw_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stw( Register d, Register s1, int simm13a) { emit_data( op(ldst_op) | rd(d) | op3(stw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::stx( Register d, Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(stx_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stx( Register d, Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(d) | op3(stx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::std( Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::std( Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::st( Register d, Register s1, Register s2) { stw(d, s1, s2); }
inline void Assembler::st( Register d, Register s1, int simm13a) { stw(d, s1, simm13a); }

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void Assembler::st( Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
#endif

inline void Assembler::stb(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index() ); }
  else { stb(d, a.base(), a.disp() + offset); }
}
inline void Assembler::sth(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index() ); }
  else { sth(d, a.base(), a.disp() + offset); }
}
inline void Assembler::stw(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index() ); }
  else { stw(d, a.base(), a.disp() + offset); }
}
inline void Assembler::st( Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index() ); }
  else { st( d, a.base(), a.disp() + offset); }
}
inline void Assembler::std(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index() ); }
  else { std(d, a.base(), a.disp() + offset); }
}
inline void Assembler::stx(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index() ); }
  else { stx(d, a.base(), a.disp() + offset); }
}

inline void Assembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
inline void Assembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
inline void Assembler::stw(Register d, Register s1, RegisterOrConstant s2) { stw(d, Address(s1, s2)); }
inline void Assembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
inline void Assembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
inline void Assembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }

// v8 p 99

inline void Assembler::stc( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stc( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stdc( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stdc( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stcsr( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stcsr( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stdcq( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stdcq( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register()) sub(s1, s2.as_register(), d);
  else { sub(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0) sub(d, offset, d);
}

// pp 231

inline void Assembler::swap( Register s1, Register s2, Register d) { v9_dep(); emit_long( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::swap( Register s1, int simm13a, Register d) { v9_dep(); emit_data( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::swap( Address& a, Register d, int offset ) { relocate(a.rspec(offset)); swap( a.base(), a.disp() + offset, d ); }

// Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  Assembler::ld( s1, simm13a, d);
#endif
}

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
  ld_ptr(s1, in_bytes(simm13a), d);
}
#endif

inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
#ifdef _LP64
  Assembler::ldx(a, d, offset);
#else
  Assembler::ld( a, d, offset);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  Assembler::st( d, s1, simm13a);
#endif
}

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
  st_ptr(d, s1, in_bytes(simm13a));
}
#endif

inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
#ifdef _LP64
  Assembler::stx(d, a, offset);
#else
  Assembler::st( d, a, offset);
#endif
}

// Use the right loads/stores for the platform
inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  Assembler::ldd(s1, simm13a, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
#ifdef _LP64
  Assembler::ldx(a, d, offset);
#else
  Assembler::ldd(a, d, offset);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  Assembler::std(d, s1, simm13a);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
#ifdef _LP64
  Assembler::stx(d, a, offset);
#else
  Assembler::std(d, a, offset);
#endif
}

// Functions for isolating 64 bit shifts for LP64
inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, s2, d);
#else
  Assembler::sll( s1, s2, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, imm6a, d);
#else
  Assembler::sll( s1, imm6a, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, s2, d);
#else
  Assembler::srl( s1, s2, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, imm6a, d);
#else
  Assembler::srl( s1, imm6a, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
  if (s2.is_register()) sll_ptr(s1, s2.as_register(), d);
  else                  sll_ptr(s1, s2.as_constant(), d);
}

// Use the right branch for the platform

inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    Assembler::bp(c, a, icc, p, d, rt);
  else
    Assembler::br(c, a, d, rt);
}

inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
  br(c, a, p, target(L));
}

// Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
#ifdef _LP64
  Assembler::bp(c, a, xcc, p, d, rt);
#else
  MacroAssembler::br(c, a, p, d, rt);
#endif
}

inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
  brx(c, a, p, target(L));
}

inline void MacroAssembler::ba( Label& L ) {
  br(always, false, pt, L);
}

// Warning: V9 only functions
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::bp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::bp(c, a, cc, p, L);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    fbp(c, a, fcc0, p, d, rt);
  else
    Assembler::fb(c, a, d, rt);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
  fb(c, a, p, target(L));
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::fbp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::fbp(c, a, cc, p, L);
}

inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }

inline bool MacroAssembler::is_far_target(address d) {
  if (ForceUnreachable) {
    // References outside the code cache should be treated as far
    return d < CodeCache::low_bound() || d > CodeCache::high_bound();
  }
  return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound());
}
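
// The quick call instruction encodes a signed 30-bit word displacement
// (wdisp30), giving a reach of +/-2^29 instructions, i.e. +/-2 GB. Testing
// the displacement against both code cache bounds ensures the target stays
// reachable wherever in the code cache this call is finally placed.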

// Call with a check to see if we need to deal with the added
// expense of relocation and if we overflow the displacement
// of the quick call instruction.
inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
#ifdef _LP64
  intptr_t disp;
  // NULL is ok because it will be relocated later.
  // Must change NULL to a reachable address in order to
  // pass asserts here and in wdisp.
  if ( d == NULL )
    d = pc();

  // Is this address within range of the call instruction?
  // If not, use the expensive instruction sequence
  if (is_far_target(d)) {
    relocate(rt);
    AddressLiteral dest(d);
    jumpl_to(dest, O7, O7);
  } else {
    Assembler::call(d, rt);
  }
#else
  Assembler::call( d, rt );
#endif
}

inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
  MacroAssembler::call( target(L), rt);
}

inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }

// prefetch instruction
inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    Assembler::bp( never, true, xcc, pt, d, rt );
}
inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }

// clobbers o7 on V8!!
// Returns the delta from the PC value obtained to the address just after this sequence.
inline int MacroAssembler::get_pc( Register d ) {
  int x = offset();
  if (VM_Version::v9_instructions_work())
    rdpc(d);
  else {
    Label lbl;
    Assembler::call(lbl, relocInfo::none);  // No relocation as this is call to pc+0x8
    if (d == O7) delayed()->nop();
    else         delayed()->mov(O7, d);
    bind(lbl);
  }
  return offset() - x;
}
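
// The V8 fallback works because a SPARC call writes its own PC into O7: the
// call targets the instruction right after its delay slot (pc+0x8), and the
// delay slot copies O7 into the requested register -- hence the
// "clobbers o7 on V8" warning above.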

// Note: All MacroAssembler::set_foo functions are defined out-of-line.

// Loads the current PC of the following instruction as an immediate value in
// 2 instructions.  All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
  intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
  Unimplemented();
#else
  Assembler::sethi( thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
  Assembler::add(reg, thepc & 0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
  return thepc;
}
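
// Worked example of the split above: for thepc == 0x12345678, the sethi
// materializes thepc & ~0x3ff == 0x12345400 and the add supplies the low
// ten bits (thepc & 0x3ff == 0x278), reassembling the full PC in exactly
// the two instructions the comment promises.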

inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ld(d, addrlit.low10() + offset, d);
}

inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ld_ptr(d, addrlit.low10() + offset, d);
}

inline void MacroAssembler::store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, temp);
  } else {
    sethi(addrlit, temp);
  }
  st(s, temp, addrlit.low10() + offset);
}

inline void MacroAssembler::store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, temp);
  } else {
    sethi(addrlit, temp);
  }
  st_ptr(s, temp, addrlit.low10() + offset);
}

// This code sequence is relocatable to any address, even on LP64.
inline void MacroAssembler::jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  jmpl(temp, addrlit.low10() + offset, d);
}

inline void MacroAssembler::jump_to(const AddressLiteral& addrlit, Register temp, int offset) {
  jumpl_to(addrlit, temp, G0, offset);
}

inline void MacroAssembler::jump_indirect_to(Address& a, Register temp,
                                             int ld_offset, int jmp_offset) {
  assert_not_delayed();
  //sethi(al);  // sethi is caller responsibility for this one
  ld_ptr(a, temp, ld_offset);
  jmp(temp, jmp_offset);
}

inline void MacroAssembler::set_oop(jobject obj, Register d) {
  set_oop(allocate_oop_address(obj), d);
}

inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
  set_oop(constant_oop_address(obj), d);
}

inline void MacroAssembler::set_oop(const AddressLiteral& obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  set(obj_addr, d);
}

inline void MacroAssembler::load_argument( Argument& a, Register d ) {
  if (a.is_register())
    mov(a.as_register(), d);
  else
    ld (a.as_address(), d);
}

inline void MacroAssembler::store_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());  // ABI says everything is right justified.
}

inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());
}

#ifdef _LP64
inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // In the V9 ABI, F1, F3, F5 are used for passing instead of O0, O1, O2.
    fmov(FloatRegisterImpl::S, s, a.as_float_register() );
  else
    // Floats are stored in the high half of the stack entry;
    // the low half is undefined per the ABI.
    stf(FloatRegisterImpl::S, s, a.as_address(), sizeof(jfloat));
}

inline void MacroAssembler::store_double_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // In the V9 ABI, D0, D2, D4 are used for passing instead of O0, O1, O2.
    fmov(FloatRegisterImpl::D, s, a.as_double_register() );
  else
    stf(FloatRegisterImpl::D, s, a.as_address());
}

inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    stx(s, a.as_address());
}
#endif

inline void MacroAssembler::clrb( Register s1, Register s2) { stb( G0, s1, s2 ); }
inline void MacroAssembler::clrh( Register s1, Register s2) { sth( G0, s1, s2 ); }
inline void MacroAssembler::clr(  Register s1, Register s2) { stw( G0, s1, s2 ); }
inline void MacroAssembler::clrx( Register s1, Register s2) { stx( G0, s1, s2 ); }

inline void MacroAssembler::clrb( Register s1, int simm13a) { stb( G0, s1, simm13a); }
inline void MacroAssembler::clrh( Register s1, int simm13a) { sth( G0, s1, simm13a); }
inline void MacroAssembler::clr(  Register s1, int simm13a) { stw( G0, s1, simm13a); }
inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm13a); }

// Returns whether membar would generate anything; this should mirror
// the logic in membar() below.
inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
  if (!os::is_MP()) return false;  // Not needed on single CPU
  if (VM_Version::v9_instructions_work()) {
    const Membar_mask_bits effective_mask =
        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
    return (effective_mask != 0);
  } else {
    return true;
  }
}

inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
  // Uniprocessors do not need memory barriers
  if (!os::is_MP()) return;
  // Weakened for current Sparcs and TSO.  See the v9 manual, sections 8.4.3,
  // 8.4.4.3, a.31 and a.50.
  if (VM_Version::v9_instructions_work()) {
    // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
    // of the mmask subfield of const7a that does anything that isn't done
    // implicitly is StoreLoad.
    const Membar_mask_bits effective_mask =
        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
    if (effective_mask != 0) {
      Assembler::membar(effective_mask);
    }
  } else {
    // stbar is the closest there is on v8.  Equivalent to membar(StoreStore).  We
    // do not issue the stbar because to my knowledge all v8 machines implement TSO,
    // which guarantees that all stores behave as if an stbar were issued just after
    // each one of them.  On these machines, stbar ought to be a nop.  There doesn't
    // appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
    // it can't be specified by stbar, nor have I come up with a way to simulate it.
    //
    // Addendum.  Dave says that ldstub guarantees a write buffer flush to coherent
    // space.  Put one here to be on the safe side.
    Assembler::ldstub(SP, 0, G0);
  }
}
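
// Usage sketch (on an MP V9 system): the masking above means that
// membar(Membar_mask_bits(LoadLoad | StoreLoad)) collapses to a single
// "membar #StoreLoad", while membar(StoreStore) emits no instruction at all,
// since TSO already orders stores against stores.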

#endif // CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP