Tue, 30 Nov 2010 23:23:40 -0800
6985015: C1 needs to support compressed oops
Summary: This change implements compressed oops for C1 for x64 and sparc. The changes are mostly on the codegen level, with a few exceptions when we do access things outside of the heap that are uncompressed from the IR. Compressed oops are now also enabled with tiered.
Reviewed-by: twisti, kvn, never, phh
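
For context on the summary above, a minimal sketch of the encode/decode arithmetic that compressed oops impose on the code generator; the helper names, the heap_base/shift parameters, and the null handling here are illustrative assumptions, not the exact HotSpot routines this change wires into C1:

    // Illustrative only: how a 64-bit oop maps to a 32-bit narrowOop and back,
    // assuming a heap base and a 3-bit shift (8-byte object alignment).
    typedef unsigned int narrowOop;

    static inline narrowOop encode_heap_oop(char* heap_base, char* o, int shift) {
      if (o == 0) return 0;                          // a null oop stays null
      return (narrowOop)((o - heap_base) >> shift);  // scaled offset from base
    }

    static inline char* decode_heap_oop(char* heap_base, narrowOop v, int shift) {
      if (v == 0) return 0;                          // zero decodes back to null
      return heap_base + ((unsigned long long)v << shift);
    }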
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP
#define CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP

#include "asm/assembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"
#include "runtime/handles.inline.hpp"

inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
  jint& stub_inst = *(jint*) branch;
  stub_inst = patched_branch(target - branch, stub_inst, 0);
}

#ifndef PRODUCT
inline void MacroAssembler::pd_print_patched_instruction(address branch) {
  jint stub_inst = *(jint*) branch;
  print_instruction(stub_inst);
  ::tty->print("%s", " (unresolved)");
}
#endif // PRODUCT

inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); }

inline int AddressLiteral::low10() const {
  return Assembler::low10(value());
}

// inlines for SPARC assembler -- dmu 5/97

inline void Assembler::check_delay() {
# ifdef CHECK_DELAY
  guarantee( delay_state != at_delay_slot, "must say delayed() when filling delay slot");
  delay_state = no_delay;
# endif
}

inline void Assembler::emit_long(int x) {
  check_delay();
  AbstractAssembler::emit_long(x);
}

inline void Assembler::emit_data(int x, relocInfo::relocType rtype) {
  relocate(rtype);
  emit_long(x);
}

inline void Assembler::emit_data(int x, RelocationHolder const& rspec) {
  relocate(rspec);
  emit_long(x);
}

inline void Assembler::add(Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rtype ); }
inline void Assembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec ); }

inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); }
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { bpr( c, a, p, s1, target(L)); }

inline void Assembler::fb( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::fb( Condition c, bool a, Label& L ) { fb(c, a, target(L)); }

inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { fbp(c, a, cc, p, target(L)); }

inline void Assembler::cb( Condition c, bool a, address d, relocInfo::relocType rt ) { v8_only(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(cb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::cb( Condition c, bool a, Label& L ) { cb(c, a, target(L)); }

inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::br( Condition c, bool a, Label& L ) { br(c, a, target(L)); }

inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) { bp(c, a, cc, p, target(L)); }

inline void Assembler::call( address d, relocInfo::relocType rt ) { emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rt); has_delay_slot(); assert(rt != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
inline void Assembler::call( Label& L, relocInfo::relocType rt ) { call( target(L), rt); }

inline void Assembler::flush( Register s1, Register s2) { emit_long( op(arith_op) | op3(flush_op3) | rs1(s1) | rs2(s2)); }
inline void Assembler::flush( Register s1, int simm13a) { emit_data( op(arith_op) | op3(flush_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::jmpl( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); }

inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
  if (s2.is_register()) ldf(w, s1, s2.as_register(), d);
  else                  ldf(w, s1, s2.as_constant(), d);
}

inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }

inline void Assembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) { relocate(a.rspec(offset)); ldf( w, a.base(), a.disp() + offset, d); }

inline void Assembler::ldfsr(  Register s1, Register s2) { v9_dep();  emit_long( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldfsr(  Register s1, int simm13a) { v9_dep();  emit_data( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldc(   Register s1, Register s2, int crd) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(ldc_op3  ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldc(   Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(ldc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::lddc(  Register s1, Register s2, int crd) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::lddc(  Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldcsr( Register s1, Register s2, int crd) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldcsr( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldsb( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldsb( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldsh( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldsh_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldsh( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsh_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldsw( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldsw_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldsw( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldub( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldub_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::lduh( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(lduh_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::lduh( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(lduh_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::lduw( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(lduw_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::lduw( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(lduw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldx( Register s1, Register s2, Register d) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(ldx_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldx( Register s1, int simm13a, Register d) { v9_only(); emit_data( op(ldst_op) | rd(d) | op3(ldx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldd( Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldd( Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

#ifdef _LP64
// Make all 32 bit loads signed so 64 bit registers maintain proper sign
inline void Assembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
inline void Assembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
#else
inline void Assembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
inline void Assembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
#endif
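
// Illustrative note (added commentary, not from the original change): ldsw
// sign-extends into the full 64-bit register, so later 64-bit arithmetic
// sees the right value for negative ints, e.g.
//
//   int32_t v32 = -1;                      // in memory: 0xFFFFFFFF
//   int64_t s   = (int64_t)v32;            // ldsw-style: -1
//   int64_t z   = (int64_t)(uint32_t)v32;  // lduw-style: 0x00000000FFFFFFFF
//
// An unsigned load here would zero-extend and silently corrupt negative ints.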

#ifdef ASSERT
  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
# ifdef _LP64
inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
# else
inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
# endif
#endif

inline void Assembler::ld( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ld( a.base(), a.index(), d); }
  else               { ld( a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsb(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(), d); }
  else               { ldsb(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(), d); }
  else               { ldsh(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(), d); }
  else               { ldsw(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldub(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(), d); }
  else               { ldub(a.base(), a.disp() + offset, d); }
}
inline void Assembler::lduh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(), d); }
  else               { lduh(a.base(), a.disp() + offset, d); }
}
inline void Assembler::lduw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(), d); }
  else               { lduw(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldd( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(), d); }
  else               { ldd( a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldx( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(), d); }
  else               { ldx( a.base(), a.disp() + offset, d); }
}
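
// Usage sketch (added commentary; the calls are assumptions for illustration):
// the Address forms above pick the register-register or register-immediate
// encoding automatically, e.g.
//
//   ld(Address(G3, 8),  O0);   // emits: ld [G3 + 8], O0
//   ld(Address(G3, G4), O0);   // emits: ld [G3 + G4], O0  (offset must be 0)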

inline void Assembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
inline void Assembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); }
inline void Assembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); }
inline void Assembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
inline void Assembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
inline void Assembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
inline void Assembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
inline void Assembler::ld(  Register s1, RegisterOrConstant s2, Register d) { ld(  Address(s1, s2), d); }
inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }

// form effective addresses this way:
inline void Assembler::add(const Address& a, Register d, int offset) {
  if (a.has_index()) add(a.base(), a.index(), d);
  else               { add(a.base(), a.disp() + offset, d, a.rspec(offset)); offset = 0; }
  if (offset != 0)   add(d, offset, d);
}
inline void Assembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register()) add(s1, s2.as_register(), d);
  else                  { add(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0)      add(d, offset, d);
}
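
// A small usage sketch (added commentary; the calls below are illustrative
// assumptions): add() on an Address acts like a "load effective address",
// folding the displacement plus any extra offset into the destination:
//
//   add(Address(I0, 24), L1);              // L1 = I0 + 24
//   add(I0, RegisterOrConstant(I1), L1);   // L1 = I0 + I1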

inline void Assembler::andn(Register s1, RegisterOrConstant s2, Register d) {
  if (s2.is_register()) andn(s1, s2.as_register(), d);
  else                  andn(s1, s2.as_constant(), d);
}

inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::prefetch(Register s1, Register s2, PrefetchFcn f) { v9_only(); emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::prefetch(Register s1, int simm13a, PrefetchFcn f) { v9_only(); emit_data( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::prefetch(const Address& a, PrefetchFcn f, int offset) { v9_only(); relocate(a.rspec(offset)); prefetch(a.base(), a.disp() + offset, f); }

inline void Assembler::rett( Register s1, Register s2 ) { emit_long( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt); has_delay_slot(); }

inline void Assembler::sethi( int imm22a, Register d, RelocationHolder const& rspec ) { emit_data( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(imm22a), rspec); }

// pp 222

inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2) {
  if (s2.is_register()) stf(w, d, s1, s2.as_register());
  else                  stf(w, d, s1, s2.as_constant());
}

inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) { relocate(a.rspec(offset)); stf(w, d, a.base(), a.disp() + offset); }

inline void Assembler::stfsr(  Register s1, Register s2) { v9_dep();  emit_long( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stfsr(  Register s1, int simm13a) { v9_dep();  emit_data( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stxfsr( Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

// p 226

inline void Assembler::stb( Register d, Register s1, Register s2) { emit_long( op(ldst_op) | rd(d) | op3(stb_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stb( Register d, Register s1, int simm13a) { emit_data( op(ldst_op) | rd(d) | op3(stb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::sth( Register d, Register s1, Register s2) { emit_long( op(ldst_op) | rd(d) | op3(sth_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::sth( Register d, Register s1, int simm13a) { emit_data( op(ldst_op) | rd(d) | op3(sth_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stw( Register d, Register s1, Register s2) { emit_long( op(ldst_op) | rd(d) | op3(stw_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stw( Register d, Register s1, int simm13a) { emit_data( op(ldst_op) | rd(d) | op3(stw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::stx( Register d, Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(stx_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stx( Register d, Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(d) | op3(stx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::std( Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::std( Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::st( Register d, Register s1, Register s2) { stw(d, s1, s2); }
inline void Assembler::st( Register d, Register s1, int simm13a) { stw(d, s1, simm13a); }

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void Assembler::st( Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
#endif

inline void Assembler::stb(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index() ); }
  else               { stb(d, a.base(), a.disp() + offset); }
}
inline void Assembler::sth(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index() ); }
  else               { sth(d, a.base(), a.disp() + offset); }
}
inline void Assembler::stw(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index() ); }
  else               { stw(d, a.base(), a.disp() + offset); }
}
inline void Assembler::st( Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index() ); }
  else               { st( d, a.base(), a.disp() + offset); }
}
inline void Assembler::std(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index() ); }
  else               { std(d, a.base(), a.disp() + offset); }
}
inline void Assembler::stx(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index() ); }
  else               { stx(d, a.base(), a.disp() + offset); }
}

inline void Assembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
inline void Assembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
inline void Assembler::stw(Register d, Register s1, RegisterOrConstant s2) { stw(d, Address(s1, s2)); }
inline void Assembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
inline void Assembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
inline void Assembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }

// v8 p 99

inline void Assembler::stc(   int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stc_op3  ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stc(   int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stdc(  int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stdc(  int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stcsr( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stcsr( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stdcq( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stdcq( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

// pp 231

inline void Assembler::swap( Register s1, Register s2, Register d) { v9_dep(); emit_long( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::swap( Register s1, int simm13a, Register d) { v9_dep(); emit_data( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::swap( Address& a, Register d, int offset ) { relocate(a.rspec(offset)); swap( a.base(), a.disp() + offset, d ); }

// Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  Assembler::ld( s1, simm13a, d);
#endif
}
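
// Illustrative note (added commentary): the same ld_ptr/st_ptr call does the
// right thing on both word sizes, e.g.
//
//   ld_ptr(I0, 0, O0);   // LP64: ldx [I0 + 0], O0    32-bit: ld [I0 + 0], O0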

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
  ld_ptr(s1, in_bytes(simm13a), d);
}
#endif

inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
#ifdef _LP64
  Assembler::ldx(a, d, offset);
#else
  Assembler::ld( a, d, offset);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  Assembler::st( d, s1, simm13a);
#endif
}

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
  st_ptr(d, s1, in_bytes(simm13a));
}
#endif

inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
#ifdef _LP64
  Assembler::stx(d, a, offset);
#else
  Assembler::st( d, a, offset);
#endif
}

// Use the right loads/stores for the platform
inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  Assembler::ldd(s1, simm13a, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
#ifdef _LP64
  Assembler::ldx(a, d, offset);
#else
  Assembler::ldd(a, d, offset);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  Assembler::std(d, s1, simm13a);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
#ifdef _LP64
  Assembler::stx(d, a, offset);
#else
  Assembler::std(d, a, offset);
#endif
}

// Functions for isolating 64 bit shifts for LP64

inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, s2, d);
#else
  Assembler::sll( s1, s2, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, imm6a, d);
#else
  Assembler::sll( s1, imm6a, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, s2, d);
#else
  Assembler::srl( s1, s2, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, imm6a, d);
#else
  Assembler::srl( s1, imm6a, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
  if (s2.is_register()) sll_ptr(s1, s2.as_register(), d);
  else                  sll_ptr(s1, s2.as_constant(), d);
}
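
// Illustrative use (added commentary; the registers are hypothetical):
// scaling an array index by the pointer size before forming an element
// address:
//
//   sll_ptr(index, LogBytesPerWord, index);   // index *= sizeof(void*)
//   add(base, index, addr);                   // addr = &base[index]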

// Use the right branch for the platform

inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    Assembler::bp(c, a, icc, p, d, rt);
  else
    Assembler::br(c, a, d, rt);
}

inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
  br(c, a, p, target(L));
}

// Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
#ifdef _LP64
  Assembler::bp(c, a, xcc, p, d, rt);
#else
  MacroAssembler::br(c, a, p, d, rt);
#endif
}

inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
  brx(c, a, p, target(L));
}
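
// Hedged usage example (added commentary; the label is hypothetical):
// pointer-sized comparisons should branch with brx so that the full 64-bit
// result is tested on LP64:
//
//   cmp(O0, O1);
//   brx(Assembler::equal, false, Assembler::pt, L_done);
//   delayed()->nop();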

inline void MacroAssembler::ba( bool a, Label& L ) {
  br(always, a, pt, L);
}

// Warning: V9 only functions
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::bp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::bp(c, a, cc, p, L);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    fbp(c, a, fcc0, p, d, rt);
  else
    Assembler::fb(c, a, d, rt);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
  fb(c, a, p, target(L));
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::fbp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::fbp(c, a, cc, p, L);
}

inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }

// Call with a check to see if we need to deal with the added
// expense of relocation and if we overflow the displacement
// of the quick call instruction.
inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
#ifdef _LP64
  intptr_t disp;
  // NULL is ok because it will be relocated later.
  // Must change NULL to a reachable address in order to
  // pass asserts here and in wdisp.
  if ( d == NULL )
    d = pc();

  // Is this address within range of the call instruction?
  // If not, use the expensive instruction sequence
  disp = (intptr_t)d - (intptr_t)pc();
  if ( disp != (intptr_t)(int32_t)disp ) {
    relocate(rt);
    AddressLiteral dest(d);
    jumpl_to(dest, O7, O7);
  } else {
    Assembler::call( d, rt );
  }
#else
  Assembler::call( d, rt );
#endif
}
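
// For reference, a sketch (added commentary) of the two sequences the LP64
// path above chooses between:
//
//   in range:      call  dest                  ! pc-relative, +/- 2GB
//                  <delay slot>
//   out of range:  sethi %hi(dest), O7         ! via jumpl_to
//                  jmpl  O7 + %lo(dest), O7
//                  <delay slot>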

inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
  MacroAssembler::call( target(L), rt);
}

inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }

// prefetch instruction
inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    Assembler::bp( never, true, xcc, pt, d, rt );
}
inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }

// Clobbers O7 on V8!!
// Returns the delta from the PC read into d to the address following this sequence.
inline int MacroAssembler::get_pc( Register d ) {
  int x = offset();
  if (VM_Version::v9_instructions_work())
    rdpc(d);
  else {
    Label lbl;
    Assembler::call(lbl, relocInfo::none);  // No relocation as this is call to pc+0x8
    if (d == O7) delayed()->nop();
    else         delayed()->mov(O7, d);
    bind(lbl);
  }
  return offset() - x;
}

// Note: All MacroAssembler::set_foo functions are defined out-of-line.

// Loads the PC of the following instruction as an immediate value in
// 2 instructions. All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
  intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
  Unimplemented();
#else
  Assembler::sethi(    thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
  Assembler::add(reg,  thepc &  0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
  return thepc;
}

inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  sethi(addrlit, d);
  ld(d, addrlit.low10() + offset, d);
}

inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  sethi(addrlit, d);
  ld_ptr(d, addrlit.low10() + offset, d);
}

inline void MacroAssembler::store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  sethi(addrlit, temp);
  st(s, temp, addrlit.low10() + offset);
}

inline void MacroAssembler::store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  sethi(addrlit, temp);
  st_ptr(s, temp, addrlit.low10() + offset);
}

// This code sequence is relocatable to any address, even on LP64.
inline void MacroAssembler::jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  jmpl(temp, addrlit.low10() + offset, d);
}

inline void MacroAssembler::jump_to(const AddressLiteral& addrlit, Register temp, int offset) {
  jumpl_to(addrlit, temp, G0, offset);
}

inline void MacroAssembler::jump_indirect_to(Address& a, Register temp,
                                             int ld_offset, int jmp_offset) {
  assert_not_delayed();
  //sethi(al);  // the sethi is the caller's responsibility for this one
  ld_ptr(a, temp, ld_offset);
  jmp(temp, jmp_offset);
}

inline void MacroAssembler::set_oop(jobject obj, Register d) {
  set_oop(allocate_oop_address(obj), d);
}

inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
  set_oop(constant_oop_address(obj), d);
}

inline void MacroAssembler::set_oop(const AddressLiteral& obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  set(obj_addr, d);
}

inline void MacroAssembler::load_argument( Argument& a, Register d ) {
  if (a.is_register())
    mov(a.as_register(), d);
  else
    ld (a.as_address(), d);
}

inline void MacroAssembler::store_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());  // ABI says everything is right justified.
}

inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());
}

#ifdef _LP64
inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // In the V9 ABI, F1, F3, F5 are used for passing instead of O0, O1, O2
    fmov(FloatRegisterImpl::S, s, a.as_float_register() );
  else
    // Floats are stored in the high half of the stack entry;
    // the low half is undefined per the ABI.
    stf(FloatRegisterImpl::S, s, a.as_address(), sizeof(jfloat));
}

inline void MacroAssembler::store_double_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // In the V9 ABI, D0, D2, D4 are used for passing instead of O0, O1, O2
    fmov(FloatRegisterImpl::D, s, a.as_double_register() );
  else
    stf(FloatRegisterImpl::D, s, a.as_address());
}

inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    stx(s, a.as_address());
}
#endif

inline void MacroAssembler::clrb( Register s1, Register s2) { stb( G0, s1, s2 ); }
inline void MacroAssembler::clrh( Register s1, Register s2) { sth( G0, s1, s2 ); }
inline void MacroAssembler::clr(  Register s1, Register s2) { stw( G0, s1, s2 ); }
inline void MacroAssembler::clrx( Register s1, Register s2) { stx( G0, s1, s2 ); }

inline void MacroAssembler::clrb( Register s1, int simm13a) { stb( G0, s1, simm13a); }
inline void MacroAssembler::clrh( Register s1, int simm13a) { sth( G0, s1, simm13a); }
inline void MacroAssembler::clr(  Register s1, int simm13a) { stw( G0, s1, simm13a); }
inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm13a); }

// Returns whether membar will generate anything; this code should
// mirror membar below.
inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
  if( !os::is_MP() ) return false;  // Not needed on single CPU
  if( VM_Version::v9_instructions_work() ) {
    const Membar_mask_bits effective_mask =
        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
    return (effective_mask != 0);
  } else {
    return true;
  }
}

inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
  // Uniprocessors do not need memory barriers
  if (!os::is_MP()) return;
  // Weakened for current Sparcs and TSO. See the v9 manual, sections 8.4.3,
  // 8.4.4.3, a.31 and a.50.
  if( VM_Version::v9_instructions_work() ) {
    // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
    // of the mmask subfield of const7a that does anything that isn't done
    // implicitly is StoreLoad.
    const Membar_mask_bits effective_mask =
        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
    if ( effective_mask != 0 ) {
      Assembler::membar( effective_mask );
    }
  } else {
    // stbar is the closest there is on v8. Equivalent to membar(StoreStore). We
    // do not issue the stbar because to my knowledge all v8 machines implement TSO,
    // which guarantees that all stores behave as if an stbar were issued just after
    // each one of them. On these machines, stbar ought to be a nop. There doesn't
    // appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
    // it can't be specified by stbar, nor have I come up with a way to simulate it.
    //
    // Addendum. Dave says that ldstub guarantees a write buffer flush to coherent
    // space. Put one here to be on the safe side.
    Assembler::ldstub(SP, 0, G0);
  }
}
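
// Illustrative note (added commentary, consistent with the masking above):
// under TSO only StoreLoad can require a real fence, so
//
//   membar(Membar_mask_bits(LoadLoad | LoadStore));  // emits nothing on v9/TSO
//   membar(StoreLoad);                               // emits membar #StoreLoad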

#endif // CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP