Tue, 06 Oct 2009 02:11:49 -0700
Summary: For signatures with a large number of arguments the offset for the float store becomes too big and does not fit in 13-bit.
Reviewed-by: kvn, never
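
For context: a SPARC simm13 field is a 13-bit signed immediate, so it covers [-4096, 4095]. A minimal sketch of that range constraint (illustrative only; `fits_in_simm13` is a hypothetical name — the real check in this code base is Assembler::is_simm13, used below by Address::is_simm13):

    // Illustrative restatement of the 13-bit signed-immediate constraint.
    inline bool fits_in_simm13(intptr_t x) {
      return -4096 <= x && x <= 4095;   // [-2^12, 2^12 - 1]
    }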
/*
 * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
  jint& stub_inst = *(jint*) branch;
  stub_inst = patched_branch(target - branch, stub_inst, 0);
}

#ifndef PRODUCT
inline void MacroAssembler::pd_print_patched_instruction(address branch) {
  jint stub_inst = *(jint*) branch;
  print_instruction(stub_inst);
  ::tty->print("%s", " (unresolved)");
}
#endif // PRODUCT

inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); }

inline int AddressLiteral::low10() const {
  return Assembler::low10(value());
}

// inlines for SPARC assembler -- dmu 5/97
inline void Assembler::check_delay() {
# ifdef CHECK_DELAY
  guarantee( delay_state != at_delay_slot, "must say delayed() when filling delay slot");
  delay_state = no_delay;
# endif
}
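
// Usage sketch (illustrative; this is the standard HotSpot SPARC idiom,
// not code from this file): every instruction that calls has_delay_slot()
// must be followed by an explicit delayed() so the CHECK_DELAY guarantee
// above passes, e.g.
//   br(equal, false, pt, L);
//   delayed()->nop();    // explicitly fill the branch delay slot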
inline void Assembler::emit_long(int x) {
  check_delay();
  AbstractAssembler::emit_long(x);
}

inline void Assembler::emit_data(int x, relocInfo::relocType rtype) {
  relocate(rtype);
  emit_long(x);
}

inline void Assembler::emit_data(int x, RelocationHolder const& rspec) {
  relocate(rspec);
  emit_long(x);
}
inline void Assembler::add(Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rtype ); }
inline void Assembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec ); }

inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); }
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { bpr( c, a, p, s1, target(L)); }

inline void Assembler::fb( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::fb( Condition c, bool a, Label& L ) { fb(c, a, target(L)); }

inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { fbp(c, a, cc, p, target(L)); }

inline void Assembler::cb( Condition c, bool a, address d, relocInfo::relocType rt ) { v8_only(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(cb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::cb( Condition c, bool a, Label& L ) { cb(c, a, target(L)); }

inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::br( Condition c, bool a, Label& L ) { br(c, a, target(L)); }

inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) { bp(c, a, cc, p, target(L)); }

inline void Assembler::call( address d, relocInfo::relocType rt ) { emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rt); has_delay_slot(); assert(rt != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
inline void Assembler::call( Label& L, relocInfo::relocType rt ) { call( target(L), rt); }

inline void Assembler::flush( Register s1, Register s2) { emit_long( op(arith_op) | op3(flush_op3) | rs1(s1) | rs2(s2)); }
inline void Assembler::flush( Register s1, int simm13a) { emit_data( op(arith_op) | op3(flush_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::jmpl( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); }

inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
  if (s2.is_register()) ldf(w, s1, s2.as_register(), d);
  else                  ldf(w, s1, s2.as_constant(), d);
}

inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }

inline void Assembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) { relocate(a.rspec(offset)); ldf( w, a.base(), a.disp() + offset, d); }

inline void Assembler::ldfsr(  Register s1, Register s2) { v9_dep();  emit_long( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldfsr(  Register s1, int simm13a) { v9_dep();  emit_data( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldc(   Register s1, Register s2, int crd) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(ldc_op3  ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldc(   Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(ldc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::lddc(  Register s1, Register s2, int crd) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::lddc(  Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldcsr( Register s1, Register s2, int crd) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldcsr( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldsb( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldsb( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldsh( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldsh_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldsh( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsh_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldsw( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldsw_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldsw( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldub( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldub_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::lduh( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(lduh_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::lduh( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(lduh_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::lduw( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(lduw_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::lduw( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(lduw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldx( Register s1, Register s2, Register d) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(ldx_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldx( Register s1, int simm13a, Register d) { v9_only(); emit_data( op(ldst_op) | rd(d) | op3(ldx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldd( Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldd( Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

#ifdef _LP64
// Make all 32 bit loads signed so 64 bit registers maintain proper sign
inline void Assembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
inline void Assembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
#else
inline void Assembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
inline void Assembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
#endif
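
// Worked example (illustrative): with the 32-bit pattern 0xFFFFFFFF in
// memory, ldsw yields -1 in the 64-bit destination, whereas lduw would
// yield 0x00000000FFFFFFFF; the signed form keeps Java ints correct when
// used in 64-bit register arithmetic.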
#ifdef ASSERT
  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
# ifdef _LP64
inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
# else
inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
# endif
#endif
inline void Assembler::ld( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ld( a.base(), a.index(), d); }
  else               { ld( a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsb(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(), d); }
  else               { ldsb(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(), d); }
  else               { ldsh(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(), d); }
  else               { ldsw(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldub(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(), d); }
  else               { ldub(a.base(), a.disp() + offset, d); }
}
inline void Assembler::lduh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(), d); }
  else               { lduh(a.base(), a.disp() + offset, d); }
}
inline void Assembler::lduw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(), d); }
  else               { lduw(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldd( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(), d); }
  else               { ldd( a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldx( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(), d); }
  else               { ldx( a.base(), a.disp() + offset, d); }
}

inline void Assembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
inline void Assembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); }
inline void Assembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); }
inline void Assembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
inline void Assembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
inline void Assembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
inline void Assembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
inline void Assembler::ld(  Register s1, RegisterOrConstant s2, Register d) { ld(  Address(s1, s2), d); }
inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }
// form effective addresses this way:
inline void Assembler::add( Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register())  add(s1, s2.as_register(), d);
  else                 { add(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0)       add(d, offset, d);
}
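
// Usage sketch (hypothetical operands): with s2 = RegisterOrConstant(8)
// and offset = 4, the constant path folds both into a single
// add(s1, 12, d); with a register s2, the leftover offset costs a
// second add(d, 4, d).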
inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::prefetch(Register s1, Register s2, PrefetchFcn f) { v9_only(); emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::prefetch(Register s1, int simm13a, PrefetchFcn f) { v9_only(); emit_data( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::prefetch(const Address& a, PrefetchFcn f, int offset) { v9_only(); relocate(a.rspec(offset)); prefetch(a.base(), a.disp() + offset, f); }

inline void Assembler::rett( Register s1, Register s2                         ) { emit_long( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt); has_delay_slot(); }

inline void Assembler::sethi( int imm22a, Register d, RelocationHolder const& rspec ) { emit_data( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(imm22a), rspec); }

// pp 222

inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2) {
  if (s2.is_register()) stf(w, d, s1, s2.as_register());
  else                  stf(w, d, s1, s2.as_constant());
}

inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) { relocate(a.rspec(offset)); stf(w, d, a.base(), a.disp() + offset); }

inline void Assembler::stfsr(  Register s1, Register s2) { v9_dep();  emit_long( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stfsr(  Register s1, int simm13a) { v9_dep();  emit_data( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stxfsr( Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

// p 226

inline void Assembler::stb( Register d, Register s1, Register s2) { emit_long( op(ldst_op) | rd(d) | op3(stb_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stb( Register d, Register s1, int simm13a) { emit_data( op(ldst_op) | rd(d) | op3(stb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::sth( Register d, Register s1, Register s2) { emit_long( op(ldst_op) | rd(d) | op3(sth_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::sth( Register d, Register s1, int simm13a) { emit_data( op(ldst_op) | rd(d) | op3(sth_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stw( Register d, Register s1, Register s2) { emit_long( op(ldst_op) | rd(d) | op3(stw_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stw( Register d, Register s1, int simm13a) { emit_data( op(ldst_op) | rd(d) | op3(stw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::stx( Register d, Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(stx_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stx( Register d, Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(d) | op3(stx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::std( Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::std( Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::st( Register d, Register s1, Register s2) { stw(d, s1, s2); }
inline void Assembler::st( Register d, Register s1, int simm13a) { stw(d, s1, simm13a); }

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void Assembler::st( Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
#endif

inline void Assembler::stb(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index()); }
  else               { stb(d, a.base(), a.disp() + offset); }
}
inline void Assembler::sth(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index()); }
  else               { sth(d, a.base(), a.disp() + offset); }
}
inline void Assembler::stw(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index()); }
  else               { stw(d, a.base(), a.disp() + offset); }
}
inline void Assembler::st( Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index()); }
  else               { st( d, a.base(), a.disp() + offset); }
}
inline void Assembler::std(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index()); }
  else               { std(d, a.base(), a.disp() + offset); }
}
inline void Assembler::stx(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index()); }
  else               { stx(d, a.base(), a.disp() + offset); }
}

inline void Assembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
inline void Assembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
inline void Assembler::stw(Register d, Register s1, RegisterOrConstant s2) { stw(d, Address(s1, s2)); }
inline void Assembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
inline void Assembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
inline void Assembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }

// v8 p 99

inline void Assembler::stc(   int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stc_op3  ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stc(   int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stdc(  int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stdc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stdc(  int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stcsr( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stcsr( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stdcq( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stdcq( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

// pp 231

inline void Assembler::swap( Register s1, Register s2, Register d) { v9_dep(); emit_long( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::swap( Register s1, int simm13a, Register d) { v9_dep(); emit_data( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::swap( Address& a, Register d, int offset ) { relocate(a.rspec(offset)); swap( a.base(), a.disp() + offset, d ); }
// Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  Assembler::ld( s1, simm13a, d);
#endif
}

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
  ld_ptr(s1, in_bytes(simm13a), d);
}
#endif

inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
#ifdef _LP64
  Assembler::ldx(a, d, offset);
#else
  Assembler::ld( a, d, offset);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  Assembler::st( d, s1, simm13a);
#endif
}

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
  st_ptr(d, s1, in_bytes(simm13a));
}
#endif

inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
#ifdef _LP64
  Assembler::stx(d, a, offset);
#else
  Assembler::st( d, a, offset);
#endif
}
// Use the right loads/stores for the platform
inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  Assembler::ldd(s1, simm13a, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
#ifdef _LP64
  Assembler::ldx(a, d, offset);
#else
  Assembler::ldd(a, d, offset);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  Assembler::std(d, s1, simm13a);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
#ifdef _LP64
  Assembler::stx(d, a, offset);
#else
  Assembler::std(d, a, offset);
#endif
}
// Functions for isolating 64 bit shifts for LP64
inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, s2, d);
#else
  Assembler::sll( s1, s2, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, imm6a, d);
#else
  Assembler::sll( s1, imm6a, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, s2, d);
#else
  Assembler::srl( s1, s2, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, imm6a, d);
#else
  Assembler::srl( s1, imm6a, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
  if (s2.is_register()) sll_ptr(s1, s2.as_register(), d);
  else                  sll_ptr(s1, s2.as_constant(), d);
}
// Use the right branch for the platform

inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    Assembler::bp(c, a, icc, p, d, rt);
  else
    Assembler::br(c, a, d, rt);
}

inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
  br(c, a, p, target(L));
}

// Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
#ifdef _LP64
  Assembler::bp(c, a, xcc, p, d, rt);
#else
  MacroAssembler::br(c, a, p, d, rt);
#endif
}

inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
  brx(c, a, p, target(L));
}

inline void MacroAssembler::ba( bool a, Label& L ) {
  br(always, a, pt, L);
}

// Warning: V9 only functions
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::bp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::bp(c, a, cc, p, L);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    fbp(c, a, fcc0, p, d, rt);
  else
    Assembler::fb(c, a, d, rt);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
  fb(c, a, p, target(L));
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::fbp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::fbp(c, a, cc, p, L);
}

inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }
// Call with a check to see if we need to deal with the added
// expense of relocation, and with a check to see if we overflow
// the displacement of the quick call instruction.
inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
#ifdef _LP64
  intptr_t disp;
  // NULL is ok because it will be relocated later.
  // Must change NULL to a reachable address in order to
  // pass asserts here and in wdisp.
  if ( d == NULL )
    d = pc();

  // Is this address within range of the call instruction?
  // If not, use the expensive instruction sequence
  disp = (intptr_t)d - (intptr_t)pc();
  if ( disp != (intptr_t)(int32_t)disp ) {
    relocate(rt);
    AddressLiteral dest(d);
    jumpl_to(dest, O7, O7);
  }
  else {
    Assembler::call( d, rt );
  }
#else
  Assembler::call( d, rt );
#endif
}
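
// Worked restatement (illustrative): disp != (intptr_t)(int32_t)disp is
// true exactly when the byte displacement does not survive truncation to
// a signed 32-bit value; the call instruction's 30-bit word displacement
// covers that same +/-2GB span, so any farther target must go through
// the sethi/jmpl sequence emitted by jumpl_to.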
inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
  MacroAssembler::call( target(L), rt);
}

inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }
// prefetch instruction
inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    Assembler::bp( never, true, xcc, pt, d, rt );
}
inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
// Clobbers O7 on V8!!
// Returns the delta from the pc just obtained to the address after the
// pc-reading sequence.
inline int MacroAssembler::get_pc( Register d ) {
  int x = offset();
  if (VM_Version::v9_instructions_work())
    rdpc(d);
  else {
    Label lbl;
    Assembler::call(lbl, relocInfo::none);  // No relocation as this is call to pc+0x8
    if (d == O7)  delayed()->nop();
    else          delayed()->mov(O7, d);
    bind(lbl);
  }
  return offset() - x;
}
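
// Descriptive note on the V8 path: the call targets the label bound right
// after the delay slot (pc+0x8), so control simply falls through, but the
// call writes its own address into O7 as a side effect; the delay-slot mov
// (or nop when d == O7) is what actually delivers the pc into d.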
// Note: All MacroAssembler::set_foo functions are defined out-of-line.

// Loads the PC of the following instruction as an immediate value in
// 2 instructions. All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
  intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
  Unimplemented();
#else
  Assembler::sethi(   thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
  Assembler::add(reg, thepc &  0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
  return thepc;
}
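
// Worked example (illustrative numbers): for thepc == 0x12345678 the pair
// emits sethi with 0x12345400 (bits 31..10) and add with 0x278 (the low
// 10 bits), reconstructing the full 32-bit address in reg.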
inline void MacroAssembler::load_contents(AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  sethi(addrlit, d);
  ld(d, addrlit.low10() + offset, d);
}

inline void MacroAssembler::load_ptr_contents(AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  sethi(addrlit, d);
  ld_ptr(d, addrlit.low10() + offset, d);
}

inline void MacroAssembler::store_contents(Register s, AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  sethi(addrlit, temp);
  st(s, temp, addrlit.low10() + offset);
}

inline void MacroAssembler::store_ptr_contents(Register s, AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  sethi(addrlit, temp);
  st_ptr(s, temp, addrlit.low10() + offset);
}
// This code sequence is relocatable to any address, even on LP64.
inline void MacroAssembler::jumpl_to(AddressLiteral& addrlit, Register temp, Register d, int offset) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  jmpl(temp, addrlit.low10() + offset, d);
}

inline void MacroAssembler::jump_to(AddressLiteral& addrlit, Register temp, int offset) {
  jumpl_to(addrlit, temp, G0, offset);
}

inline void MacroAssembler::jump_indirect_to(Address& a, Register temp,
                                             int ld_offset, int jmp_offset) {
  assert_not_delayed();
  // Note: no sethi here; materializing the base address is the caller's responsibility.
  ld_ptr(a, temp, ld_offset);
  jmp(temp, jmp_offset);
}
inline void MacroAssembler::set_oop(jobject obj, Register d) {
  set_oop(allocate_oop_address(obj), d);
}

inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
  set_oop(constant_oop_address(obj), d);
}

inline void MacroAssembler::set_oop(AddressLiteral& obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  set(obj_addr, d);
}
inline void MacroAssembler::load_argument( Argument& a, Register d ) {
  if (a.is_register())
    mov(a.as_register(), d);
  else
    ld (a.as_address(), d);
}

inline void MacroAssembler::store_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());  // ABI says everything is right justified.
}

inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());
}
#ifdef _LP64
inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // In the V9 ABI, F1, F3, F5, ... are used to pass float arguments instead of O0, O1, O2, ...
    fmov(FloatRegisterImpl::S, s, a.as_float_register() );
  else
    // Floats are stored in the high half of the stack entry;
    // the low half is undefined per the ABI.
    stf(FloatRegisterImpl::S, s, a.as_address(), sizeof(jfloat));
}
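
// Tie-in to the summary (illustrative): the displacement here is the
// argument's stack-slot offset plus sizeof(jfloat); with enough arguments
// that sum can leave the simm13 range [-4096, 4095], which is the
// overflow the summary describes.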
inline void MacroAssembler::store_double_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // In the V9 ABI, D0, D2, D4, ... are used to pass double arguments instead of O0, O1, O2, ...
    fmov(FloatRegisterImpl::D, s, a.as_double_register() );
  else
    stf(FloatRegisterImpl::D, s, a.as_address());
}

inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    stx(s, a.as_address());
}
#endif
inline void MacroAssembler::clrb( Register s1, Register s2) { stb( G0, s1, s2 ); }
inline void MacroAssembler::clrh( Register s1, Register s2) { sth( G0, s1, s2 ); }
inline void MacroAssembler::clr(  Register s1, Register s2) { stw( G0, s1, s2 ); }
inline void MacroAssembler::clrx( Register s1, Register s2) { stx( G0, s1, s2 ); }

inline void MacroAssembler::clrb( Register s1, int simm13a) { stb( G0, s1, simm13a); }
inline void MacroAssembler::clrh( Register s1, int simm13a) { sth( G0, s1, simm13a); }
inline void MacroAssembler::clr(  Register s1, int simm13a) { stw( G0, s1, simm13a); }
inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm13a); }
// Returns whether membar generates anything; this code should mirror
// membar below.
inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
  if( !os::is_MP() ) return false;  // Not needed on single CPU
  if( VM_Version::v9_instructions_work() ) {
    const Membar_mask_bits effective_mask =
        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
    return (effective_mask != 0);
  } else {
    return true;
  }
}
inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
  // Uniprocessors do not need memory barriers
  if (!os::is_MP()) return;
  // Weakened for current Sparcs and TSO.  See the v9 manual, sections 8.4.3,
  // 8.4.4.3, a.31 and a.50.
  if( VM_Version::v9_instructions_work() ) {
    // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
    // of the mmask subfield of const7a that does anything that isn't done
    // implicitly is StoreLoad.
    const Membar_mask_bits effective_mask =
        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
    if ( effective_mask != 0 ) {
      Assembler::membar( effective_mask );
    }
  } else {
    // stbar is the closest there is on v8.  Equivalent to membar(StoreStore).  We
    // do not issue the stbar because to my knowledge all v8 machines implement TSO,
    // which guarantees that all stores behave as if an stbar were issued just after
    // each one of them.  On these machines, stbar ought to be a nop.  There doesn't
    // appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
    // it can't be specified by stbar, nor have I come up with a way to simulate it.
    //
    // Addendum.  Dave says that ldstub guarantees a write buffer flush to coherent
    // space.  Put one here to be on the safe side.
    Assembler::ldstub(SP, 0, G0);
  }
}
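
// Worked example (illustrative): on V9 under this weakening,
// membar(LoadLoad | StoreLoad) reduces to an effective mask of StoreLoad
// and emits a single membar, while membar(StoreStore) reduces to 0 and
// emits nothing; the V8 path always falls back to the ldstub flush.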