Mon, 04 Nov 2013 21:59:54 +0100
8027445: SIGSEGV at TestFloatingDecimal.testAppendToDouble()I
Summary: String.equals() intrinsic shouldn't use integer length input in pointer arithmetic without an i2l.
Reviewed-by: kvn, twisti
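
A minimal standalone sketch of the failure mode the summary names (illustrative C++, not the intrinsic's actual code): if a 32-bit length that can go negative reaches 64-bit pointer arithmetic without first being sign-extended (the i2l), it is effectively zero-extended and becomes a huge positive offset, which is how the wild access and SIGSEGV arise.

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t  len  = -8;              // a negative 32-bit intermediate
      uint64_t base = 0x100000ULL;     // some 64-bit base address

      // Without the i2l: the 32-bit pattern 0xFFFFFFF8 is zero-extended,
      // so the "offset" becomes almost 4 GB -> wild address.
      uint64_t bad  = base + (uint32_t)len;

      // With the i2l (sign-extend first): base - 8, as intended.
      uint64_t good = base + (int64_t)len;

      printf("bad=%#llx good=%#llx\n",
             (unsigned long long)bad, (unsigned long long)good);
      return 0;
    }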
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP
#define CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP

#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"

inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); }

inline int AddressLiteral::low10() const {
  return Assembler::low10(value());
}

inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
  jint& stub_inst = *(jint*) branch;
  stub_inst = patched_branch(target - branch, stub_inst, 0);
}
// Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  ld( s1, simm13a, d);
#endif
}

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
  ld_ptr(s1, in_bytes(simm13a), d);
}
#endif

inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  ldx(s1, s2, d);
#else
  ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
#ifdef _LP64
  ldx(a, d, offset);
#else
  ld( a, d, offset);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  st( d, s1, simm13a);
#endif
}

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
  st_ptr(d, s1, in_bytes(simm13a));
}
#endif

inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  stx(d, s1, s2);
#else
  st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
#ifdef _LP64
  stx(d, a, offset);
#else
  st( d, a, offset);
#endif
}
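
// A hedged usage sketch (illustrative registers and a placeholder offset, not
// taken from this file): ld_ptr/st_ptr select the pointer-sized access for the
// build, so a word copy is written once for both 32-bit and LP64:
//
//   __ ld_ptr(G3, in_bytes(some_field_offset), G1);  // ldx on LP64, ld otherwise
//   __ st_ptr(G1, G4, in_bytes(some_field_offset));  // stx on LP64, st otherwise
//
// 'some_field_offset' stands in for any ByteSize-valued field offset.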

// Use the right loads/stores for the platform
inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  Assembler::ldd(s1, simm13a, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  ldx(s1, s2, d);
#else
  ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
#ifdef _LP64
  ldx(a, d, offset);
#else
  ldd(a, d, offset);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  Assembler::std(d, s1, simm13a);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  stx(d, s1, s2);
#else
  std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
#ifdef _LP64
  stx(d, a, offset);
#else
  std(d, a, offset);
#endif
}

// Functions for isolating 64-bit shifts for LP64
inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, s2, d);
#else
  Assembler::sll( s1, s2, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, imm6a, d);
#else
  Assembler::sll( s1, imm6a, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, s2, d);
#else
  Assembler::srl( s1, s2, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, imm6a, d);
#else
  Assembler::srl( s1, imm6a, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
  if (s2.is_register())  sll_ptr(s1, s2.as_register(), d);
  else                   sll_ptr(s1, s2.as_constant(), d);
}
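
// Hedged example (illustrative registers): scaling an element index to a byte
// offset must shift by the pointer-size log, which sll_ptr selects per build:
//
//   __ sll_ptr(Rindex, LogBytesPerWord, Roffset);  // sllx by 3 on LP64, sll by 2 otherwise
//
// LogBytesPerWord is the usual HotSpot constant for the pointer-size log.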

// Use the right branch for the platform

inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::bp(c, a, icc, p, d, rt);
}

inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
  br(c, a, p, target(L));
}

// Branch that tests either xcc or icc, depending on whether the
// architecture was compiled LP64 or not.
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
#ifdef _LP64
  Assembler::bp(c, a, xcc, p, d, rt);
#else
  MacroAssembler::br(c, a, p, d, rt);
#endif
}

inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
  brx(c, a, p, target(L));
}
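
// Hedged example (illustrative registers and label): a pointer compare sets
// xcc on LP64, so pointer-sized control flow wants brx rather than br:
//
//   __ cmp(O0, O1);
//   __ brx(Assembler::equal, false, Assembler::pt, L_done);
//   __ delayed()->nop();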

inline void MacroAssembler::ba( Label& L ) {
  br(always, false, pt, L);
}

// Warning: V9-only functions
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::bp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::bp(c, a, cc, p, L);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  fbp(c, a, fcc0, p, d, rt);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
  fb(c, a, p, target(L));
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::fbp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::fbp(c, a, cc, p, L);
}

inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }

inline bool MacroAssembler::is_far_target(address d) {
  if (ForceUnreachable) {
    // References outside the code cache should be treated as far
    return d < CodeCache::low_bound() || d > CodeCache::high_bound();
  }
  return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound());
}
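
// For scale: the quick call's wdisp30 field is a signed 30-bit word
// displacement, i.e. +/- 2^29 instructions of 4 bytes each = +/- 2^31 bytes
// (about +/- 2 GB). A target counts as near only if it is within that range
// of both code cache bounds, which is what the two is_in_wdisp30_range()
// checks above establish.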
294 // Call with a check to see if we need to deal with the added
295 // expense of relocation and if we overflow the displacement
296 // of the quick call instruction.
297 inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
298 #ifdef _LP64
299 intptr_t disp;
300 // NULL is ok because it will be relocated later.
301 // Must change NULL to a reachable address in order to
302 // pass asserts here and in wdisp.
303 if ( d == NULL )
304 d = pc();
306 // Is this address within range of the call instruction?
307 // If not, use the expensive instruction sequence
308 if (is_far_target(d)) {
309 relocate(rt);
310 AddressLiteral dest(d);
311 jumpl_to(dest, O7, O7);
312 } else {
313 Assembler::call(d, rt);
314 }
315 #else
316 Assembler::call( d, rt );
317 #endif
318 }
320 inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
321 MacroAssembler::call( target(L), rt);
322 }
326 inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
327 inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }
329 // prefetch instruction
330 inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
331 Assembler::bp( never, true, xcc, pt, d, rt );
332 Assembler::bp( never, true, xcc, pt, d, rt );
333 }
334 inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
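
// Note on the idiom above: a branch-never with the annul bit set executes
// nothing and transfers no control, but V9 processors may treat the branch
// target as an instruction-prefetch hint, warming the I-cache for d. The
// hint is advisory only; correctness never depends on it.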

// Clobbers o7 on V8!!
// Returns the delta from the pc read into d to the address after the rdpc.
inline int MacroAssembler::get_pc( Register d ) {
  int x = offset();
  rdpc(d);
  return offset() - x;
}

// Note: All MacroAssembler::set_foo functions are defined out-of-line.

// Loads the current PC of the following instruction as an immediate value in
// 2 instructions.  All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
  intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
  Unimplemented();
#else
  Assembler::sethi( thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
  add(reg, thepc & 0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
  return thepc;
}
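
// Worked example of the sethi/add split above (32-bit case): for
// thepc = 0x12345678, sethi materializes the upper 22 bits,
// 0x12345678 & ~0x3ff = 0x12345400, and add supplies the low 10 bits,
// 0x12345678 & 0x3ff = 0x278; their sum rebuilds the full 32-bit PC.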

inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ld(d, addrlit.low10() + offset, d);
}

inline void MacroAssembler::load_bool_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ldub(d, addrlit.low10() + offset, d);
}

inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ld_ptr(d, addrlit.low10() + offset, d);
}

inline void MacroAssembler::store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, temp);
  } else {
    sethi(addrlit, temp);
  }
  st(s, temp, addrlit.low10() + offset);
}

inline void MacroAssembler::store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, temp);
  } else {
    sethi(addrlit, temp);
  }
  st_ptr(s, temp, addrlit.low10() + offset);
}
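
// Hedged usage sketch (hypothetical global, illustrative register): the
// load_*_contents/store_*_contents family expands to sethi plus an access
// with a low-10 displacement, e.g.
//
//   AddressLiteral flag_addr((address)&SomeGlobalFlag);  // placeholder global
//   __ load_bool_contents(flag_addr, G1);                // sethi; ldub
//
// Under ForceUnreachable the sethi is emitted in its patchable fixed-length
// form so the literal can later be rebound to any address.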

// This code sequence is relocatable to any address, even on LP64.
inline void MacroAssembler::jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  jmpl(temp, addrlit.low10() + offset, d);
}

inline void MacroAssembler::jump_to(const AddressLiteral& addrlit, Register temp, int offset) {
  jumpl_to(addrlit, temp, G0, offset);
}

inline void MacroAssembler::jump_indirect_to(Address& a, Register temp,
                                             int ld_offset, int jmp_offset) {
  assert_not_delayed();
  // sethi(al);  // emitting the sethi is the caller's responsibility for this one
  ld_ptr(a, temp, ld_offset);
  jmp(temp, jmp_offset);
}

inline void MacroAssembler::set_metadata(Metadata* obj, Register d) {
  set_metadata(allocate_metadata_address(obj), d);
}

inline void MacroAssembler::set_metadata_constant(Metadata* obj, Register d) {
  set_metadata(constant_metadata_address(obj), d);
}

inline void MacroAssembler::set_metadata(const AddressLiteral& obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  set(obj_addr, d);
}

inline void MacroAssembler::set_oop(jobject obj, Register d) {
  set_oop(allocate_oop_address(obj), d);
}

inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
  set_oop(constant_oop_address(obj), d);
}

inline void MacroAssembler::set_oop(const AddressLiteral& obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  set(obj_addr, d);
}

inline void MacroAssembler::load_argument( Argument& a, Register d ) {
  if (a.is_register())
    mov(a.as_register(), d);
  else
    ld (a.as_address(), d);
}

inline void MacroAssembler::store_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());  // ABI says everything is right justified.
}

inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());
}

#ifdef _LP64
inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // In the V9 ABI, F1, F3, F5 are used to pass arguments instead of O0, O1, O2.
    fmov(FloatRegisterImpl::S, s, a.as_float_register() );
  else
    // Floats are stored in the high half of the stack entry;
    // the low half is undefined per the ABI.
    stf(FloatRegisterImpl::S, s, a.as_address(), sizeof(jfloat));
}

inline void MacroAssembler::store_double_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // In the V9 ABI, D0, D2, D4 are used to pass arguments instead of O0, O1, O2.
    fmov(FloatRegisterImpl::D, s, a.as_double_register() );
  else
    stf(FloatRegisterImpl::D, s, a.as_address());
}

inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    stx(s, a.as_address());
}
#endif

inline void MacroAssembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype) {
  relocate(rtype);
  add(s1, simm13a, d);
}

inline void MacroAssembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec) {
  relocate(rspec);
  add(s1, simm13a, d);
}

// Form effective addresses this way:
inline void MacroAssembler::add(const Address& a, Register d, int offset) {
  if (a.has_index())   add(a.base(), a.index(),         d);
  else               { add(a.base(), a.disp() + offset, d, a.rspec(offset)); offset = 0; }
  if (offset != 0)     add(d, offset, d);
}
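
// Hedged example (illustrative names): computing the effective address of a
// displaced field without touching memory:
//
//   Address field(O0, in_bytes(some_field_offset));  // base + disp form
//   __ add(field, G3);                               // G3 = O0 + disp
//
// A nonzero extra offset folds into the displacement when the address has no
// index register; otherwise it is applied with the trailing add(d, offset, d).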

inline void MacroAssembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register())  add(s1, s2.as_register(),          d);
  else                 { add(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0)       add(d, offset, d);
}

inline void MacroAssembler::andn(Register s1, RegisterOrConstant s2, Register d) {
  if (s2.is_register())  andn(s1, s2.as_register(), d);
  else                   andn(s1, s2.as_constant(), d);
}

inline void MacroAssembler::clrb( Register s1, Register s2) { stb( G0, s1, s2 ); }
inline void MacroAssembler::clrh( Register s1, Register s2) { sth( G0, s1, s2 ); }
inline void MacroAssembler::clr(  Register s1, Register s2) { stw( G0, s1, s2 ); }
inline void MacroAssembler::clrx( Register s1, Register s2) { stx( G0, s1, s2 ); }

inline void MacroAssembler::clrb( Register s1, int simm13a) { stb( G0, s1, simm13a); }
inline void MacroAssembler::clrh( Register s1, int simm13a) { sth( G0, s1, simm13a); }
inline void MacroAssembler::clr(  Register s1, int simm13a) { stw( G0, s1, simm13a); }
inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm13a); }

#ifdef _LP64
// Make all 32-bit loads signed so 64-bit registers maintain proper sign
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
#else
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
#endif
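
// Worked example of why ld is ldsw on LP64: loading the 32-bit pattern
// 0xFFFFFFFF (int -1) with lduw leaves 0x00000000FFFFFFFF in the 64-bit
// register, while ldsw leaves 0xFFFFFFFFFFFFFFFF. Only the sign-extended
// form is safe to feed into 64-bit address arithmetic, which is the i2l
// concern named in this change's summary.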

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
# ifdef _LP64
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
# else
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
# endif
#endif

inline void MacroAssembler::ld( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ld( a.base(), a.index(), d); }
  else               { ld( a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::ldsb(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(), d); }
  else               { ldsb(a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::ldsh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(), d); }
  else               { ldsh(a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::ldsw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(), d); }
  else               { ldsw(a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::ldub(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(), d); }
  else               { ldub(a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::lduh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(), d); }
  else               { lduh(a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::lduw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(), d); }
  else               { lduw(a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::ldd( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(), d); }
  else               { ldd( a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::ldx( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(), d); }
  else               { ldx( a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
inline void MacroAssembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); }
inline void MacroAssembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); }
inline void MacroAssembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
inline void MacroAssembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
inline void MacroAssembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
inline void MacroAssembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
inline void MacroAssembler::ld(  Register s1, RegisterOrConstant s2, Register d) { ld(  Address(s1, s2), d); }
inline void MacroAssembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }

inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
  if (s2.is_register())  ldf(w, s1, s2.as_register(), d);
  else                   ldf(w, s1, s2.as_constant(), d);
}

inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) {
  relocate(a.rspec(offset));
  ldf(w, a.base(), a.disp() + offset, d);
}

// Returns whether membar would generate anything; this code must
// mirror membar() below.
inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
  if (!os::is_MP())
    return false;  // Not needed on single CPU
  const Membar_mask_bits effective_mask =
      Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
  return (effective_mask != 0);
}

inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
  // Uniprocessors do not need memory barriers
  if (!os::is_MP())
    return;
  // Weakened for current SPARCs and TSO.  See the V9 manual, sections 8.4.3,
  // 8.4.4.3, A.31 and A.50.
  // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
  // of the mmask subfield of const7a that does anything that isn't done
  // implicitly is StoreLoad.
  const Membar_mask_bits effective_mask =
      Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
  if (effective_mask != 0) {
    Assembler::membar(effective_mask);
  }
}
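
// Hedged example, derived directly from the masking above: under TSO only
// StoreLoad can actually be reordered, so on an MP system
//
//   __ membar(Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore));  // emits nothing
//   __ membar(Assembler::StoreLoad);                                                     // emits membar #StoreLoad
//
// which is also why membar_has_effect() masks everything but StoreLoad away.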

inline void MacroAssembler::prefetch(const Address& a, PrefetchFcn f, int offset) {
  relocate(a.rspec(offset));
  assert(!a.has_index(), "");
  prefetch(a.base(), a.disp() + offset, f);
}

inline void MacroAssembler::st(Register d, Register s1, Register s2) { stw(d, s1, s2); }
inline void MacroAssembler::st(Register d, Register s1, int simm13a) { stw(d, s1, simm13a); }

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st(Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
#endif

inline void MacroAssembler::st(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index()        ); }
  else               { st( d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::stb(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index()        ); }
  else               { stb(d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::sth(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index()        ); }
  else               { sth(d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::stw(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index()        ); }
  else               { stw(d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::std(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index()        ); }
  else               { std(d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::stx(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index()        ); }
  else               { stx(d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
inline void MacroAssembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
inline void MacroAssembler::stw(Register d, Register s1, RegisterOrConstant s2) { stw(d, Address(s1, s2)); }
inline void MacroAssembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
inline void MacroAssembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
inline void MacroAssembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }

inline void MacroAssembler::stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2) {
  if (s2.is_register())  stf(w, d, s1, s2.as_register());
  else                   stf(w, d, s1, s2.as_constant());
}

inline void MacroAssembler::stf(FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) {
  relocate(a.rspec(offset));
  if (a.has_index()) { assert(offset == 0, ""); stf(w, d, a.base(), a.index()        ); }
  else               { stf(w, d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register())  sub(s1, s2.as_register(),          d);
  else                 { sub(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0)       sub(d, offset, d);
}

inline void MacroAssembler::swap(const Address& a, Register d, int offset) {
  relocate(a.rspec(offset));
  if (a.has_index()) { assert(offset == 0, ""); swap(a.base(), a.index(), d        ); }
  else               { swap(a.base(), a.disp() + offset, d); }
}

#endif // CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP