Fri, 20 Mar 2009 23:19:36 -0700
6814659: separable cleanups and subroutines for 6655638
Summary: preparatory but separable changes for method handles
Reviewed-by: kvn, never
1 /*
2 * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 class BiasedLockingCounters;
27 // <sys/trap.h> promises that the system will not use traps 16-31
28 #define ST_RESERVED_FOR_USER_0 0x10
30 /* Written: David Ungar 4/19/97 */
32 // Contains all the definitions needed for sparc assembly code generation.
34 // Register aliases for parts of the system:
36 // 64 bit values can be kept in g1-g5, o1-o5 and o7 and all 64 bits are safe
37 // across context switches in V8+ ABI. Of course, there are no 64 bit regs
38 // in V8 ABI. All 64 bits are preserved in V9 ABI for all registers.
40 // g2-g4 are scratch registers called "application globals". Their
41 // meaning is reserved to the "compilation system"--which means us!
// They are not supposed to be touched by ordinary C code, although
43 // highly-optimized C code might steal them for temps. They are safe
44 // across thread switches, and the ABI requires that they be safe
45 // across function calls.
46 //
47 // g1 and g3 are touched by more modules. V8 allows g1 to be clobbered
48 // across func calls, and V8+ also allows g5 to be clobbered across
49 // func calls. Also, g1 and g5 can get touched while doing shared
50 // library loading.
51 //
52 // We must not touch g7 (it is the thread-self register) and g6 is
53 // reserved for certain tools. g0, of course, is always zero.
54 //
55 // (Sources: SunSoft Compilers Group, thread library engineers.)
57 // %%%% The interpreter should be revisited to reduce global scratch regs.
59 // This global always holds the current JavaThread pointer:
61 REGISTER_DECLARATION(Register, G2_thread , G2);
62 REGISTER_DECLARATION(Register, G6_heapbase , G6);
64 // The following globals are part of the Java calling convention:
66 REGISTER_DECLARATION(Register, G5_method , G5);
67 REGISTER_DECLARATION(Register, G5_megamorphic_method , G5_method);
68 REGISTER_DECLARATION(Register, G5_inline_cache_reg , G5_method);
70 // The following globals are used for the new C1 & interpreter calling convention:
71 REGISTER_DECLARATION(Register, Gargs , G4); // pointing to the last argument
73 // This local is used to preserve G2_thread in the interpreter and in stubs:
74 REGISTER_DECLARATION(Register, L7_thread_cache , L7);
76 // These globals are used as scratch registers in the interpreter:
78 REGISTER_DECLARATION(Register, Gframe_size , G1); // SAME REG as G1_scratch
79 REGISTER_DECLARATION(Register, G1_scratch , G1); // also SAME
80 REGISTER_DECLARATION(Register, G3_scratch , G3);
81 REGISTER_DECLARATION(Register, G4_scratch , G4);
83 // These globals are used as short-lived scratch registers in the compiler:
85 REGISTER_DECLARATION(Register, Gtemp , G5);
87 // The compiler requires that G5_megamorphic_method is G5_inline_cache_klass,
88 // because a single patchable "set" instruction (NativeMovConstReg,
89 // or NativeMovConstPatching for compiler1) instruction
90 // serves to set up either quantity, depending on whether the compiled
91 // call site is an inline cache or is megamorphic. See the function
92 // CompiledIC::set_to_megamorphic.
93 //
94 // On the other hand, G5_inline_cache_klass must differ from G5_method,
95 // because both registers are needed for an inline cache that calls
96 // an interpreted method.
97 //
98 // Note that G5_method is only the method-self for the interpreter,
99 // and is logically unrelated to G5_megamorphic_method.
100 //
101 // Invariants on G2_thread (the JavaThread pointer):
102 // - it should not be used for any other purpose anywhere
103 // - it must be re-initialized by StubRoutines::call_stub()
104 // - it must be preserved around every use of call_VM
106 // We can consider using g2/g3/g4 to cache more values than the
107 // JavaThread, such as the card-marking base or perhaps pointers into
108 // Eden. It's something of a waste to use them as scratch temporaries,
109 // since they are not supposed to be volatile. (Of course, if we find
110 // that Java doesn't benefit from application globals, then we can just
111 // use them as ordinary temporaries.)
112 //
113 // Since g1 and g5 (and/or g6) are the volatile (caller-save) registers,
114 // it makes sense to use them routinely for procedure linkage,
115 // whenever the On registers are not applicable. Examples: G5_method,
116 // G5_inline_cache_klass, and a double handful of miscellaneous compiler
117 // stubs. This means that compiler stubs, etc., should be kept to a
118 // maximum of two or three G-register arguments.
121 // stub frames
123 REGISTER_DECLARATION(Register, Lentry_args , L0); // pointer to args passed to callee (interpreter) not stub itself
125 // Interpreter frames
127 #ifdef CC_INTERP
128 REGISTER_DECLARATION(Register, Lstate , L0); // interpreter state object pointer
129 REGISTER_DECLARATION(Register, L1_scratch , L1); // scratch
130 REGISTER_DECLARATION(Register, Lmirror , L1); // mirror (for native methods only)
131 REGISTER_DECLARATION(Register, L2_scratch , L2);
132 REGISTER_DECLARATION(Register, L3_scratch , L3);
133 REGISTER_DECLARATION(Register, L4_scratch , L4);
134 REGISTER_DECLARATION(Register, Lscratch , L5); // C1 uses
135 REGISTER_DECLARATION(Register, Lscratch2 , L6); // C1 uses
136 REGISTER_DECLARATION(Register, L7_scratch , L7); // constant pool cache
137 REGISTER_DECLARATION(Register, O5_savedSP , O5);
138 REGISTER_DECLARATION(Register, I5_savedSP , I5); // Saved SP before bumping for locals. This is simply
139 // a copy SP, so in 64-bit it's a biased value. The bias
140 // is added and removed as needed in the frame code.
141 // Interface to signature handler
142 REGISTER_DECLARATION(Register, Llocals , L7); // pointer to locals for signature handler
143 REGISTER_DECLARATION(Register, Lmethod , L6); // methodOop when calling signature handler
145 #else
146 REGISTER_DECLARATION(Register, Lesp , L0); // expression stack pointer
147 REGISTER_DECLARATION(Register, Lbcp , L1); // pointer to next bytecode
148 REGISTER_DECLARATION(Register, Lmethod , L2);
149 REGISTER_DECLARATION(Register, Llocals , L3);
150 REGISTER_DECLARATION(Register, Largs , L3); // pointer to locals for signature handler
151 // must match Llocals in asm interpreter
152 REGISTER_DECLARATION(Register, Lmonitors , L4);
153 REGISTER_DECLARATION(Register, Lbyte_code , L5);
154 // When calling out from the interpreter we record SP so that we can remove any extra stack
155 // space allocated during adapter transitions. This register is only live from the point
156 // of the call until we return.
157 REGISTER_DECLARATION(Register, Llast_SP , L5);
158 REGISTER_DECLARATION(Register, Lscratch , L5);
159 REGISTER_DECLARATION(Register, Lscratch2 , L6);
160 REGISTER_DECLARATION(Register, LcpoolCache , L6); // constant pool cache
162 REGISTER_DECLARATION(Register, O5_savedSP , O5);
163 REGISTER_DECLARATION(Register, I5_savedSP , I5); // Saved SP before bumping for locals. This is simply
164 // a copy SP, so in 64-bit it's a biased value. The bias
165 // is added and removed as needed in the frame code.
166 REGISTER_DECLARATION(Register, IdispatchTables , I4); // Base address of the bytecode dispatch tables
167 REGISTER_DECLARATION(Register, IdispatchAddress , I3); // Register which saves the dispatch address for each bytecode
168 REGISTER_DECLARATION(Register, ImethodDataPtr , I2); // Pointer to the current method data
169 #endif /* CC_INTERP */
// NOTE: Lscratch2 and LcpoolCache point to the same registers in
// the interpreter code. If Lscratch2 needs to be used for some
// purpose, then LcpoolCache should be restored after that use for
// the interpreter to work right.
175 // (These assignments must be compatible with L7_thread_cache; see above.)
177 // Since Lbcp points into the middle of the method object,
178 // it is temporarily converted into a "bcx" during GC.
180 // Exception processing
181 // These registers are passed into exception handlers.
182 // All exception handlers require the exception object being thrown.
183 // In addition, an nmethod's exception handler must be passed
184 // the address of the call site within the nmethod, to allow
185 // proper selection of the applicable catch block.
186 // (Interpreter frames use their own bcp() for this purpose.)
187 //
188 // The Oissuing_pc value is not always needed. When jumping to a
189 // handler that is known to be interpreted, the Oissuing_pc value can be
190 // omitted. An actual catch block in compiled code receives (from its
191 // nmethod's exception handler) the thrown exception in the Oexception,
192 // but it doesn't need the Oissuing_pc.
193 //
194 // If an exception handler (either interpreted or compiled)
195 // discovers there is no applicable catch block, it updates
196 // the Oissuing_pc to the continuation PC of its own caller,
197 // pops back to that caller's stack frame, and executes that
198 // caller's exception handler. Obviously, this process will
199 // iterate until the control stack is popped back to a method
200 // containing an applicable catch block. A key invariant is
201 // that the Oissuing_pc value is always a value local to
202 // the method whose exception handler is currently executing.
203 //
204 // Note: The issuing PC value is __not__ a raw return address (I7 value).
205 // It is a "return pc", the address __following__ the call.
206 // Raw return addresses are converted to issuing PCs by frame::pc(),
207 // or by stubs. Issuing PCs can be used directly with PC range tables.
208 //
209 REGISTER_DECLARATION(Register, Oexception , O0); // exception being thrown
210 REGISTER_DECLARATION(Register, Oissuing_pc , O1); // where the exception is coming from
213 // These must occur after the declarations above
214 #ifndef DONT_USE_REGISTER_DEFINES
216 #define Gthread AS_REGISTER(Register, Gthread)
217 #define Gmethod AS_REGISTER(Register, Gmethod)
218 #define Gmegamorphic_method AS_REGISTER(Register, Gmegamorphic_method)
219 #define Ginline_cache_reg AS_REGISTER(Register, Ginline_cache_reg)
220 #define Gargs AS_REGISTER(Register, Gargs)
221 #define Lthread_cache AS_REGISTER(Register, Lthread_cache)
222 #define Gframe_size AS_REGISTER(Register, Gframe_size)
223 #define Gtemp AS_REGISTER(Register, Gtemp)
225 #ifdef CC_INTERP
226 #define Lstate AS_REGISTER(Register, Lstate)
227 #define Lesp AS_REGISTER(Register, Lesp)
228 #define L1_scratch AS_REGISTER(Register, L1_scratch)
229 #define Lmirror AS_REGISTER(Register, Lmirror)
230 #define L2_scratch AS_REGISTER(Register, L2_scratch)
231 #define L3_scratch AS_REGISTER(Register, L3_scratch)
232 #define L4_scratch AS_REGISTER(Register, L4_scratch)
233 #define Lscratch AS_REGISTER(Register, Lscratch)
234 #define Lscratch2 AS_REGISTER(Register, Lscratch2)
235 #define L7_scratch AS_REGISTER(Register, L7_scratch)
236 #define Ostate AS_REGISTER(Register, Ostate)
237 #else
238 #define Lesp AS_REGISTER(Register, Lesp)
239 #define Lbcp AS_REGISTER(Register, Lbcp)
240 #define Lmethod AS_REGISTER(Register, Lmethod)
241 #define Llocals AS_REGISTER(Register, Llocals)
242 #define Lmonitors AS_REGISTER(Register, Lmonitors)
243 #define Lbyte_code AS_REGISTER(Register, Lbyte_code)
244 #define Lscratch AS_REGISTER(Register, Lscratch)
245 #define Lscratch2 AS_REGISTER(Register, Lscratch2)
246 #define LcpoolCache AS_REGISTER(Register, LcpoolCache)
247 #endif /* ! CC_INTERP */
249 #define Lentry_args AS_REGISTER(Register, Lentry_args)
250 #define I5_savedSP AS_REGISTER(Register, I5_savedSP)
251 #define O5_savedSP AS_REGISTER(Register, O5_savedSP)
252 #define IdispatchAddress AS_REGISTER(Register, IdispatchAddress)
253 #define ImethodDataPtr AS_REGISTER(Register, ImethodDataPtr)
254 #define IdispatchTables AS_REGISTER(Register, IdispatchTables)
256 #define Oexception AS_REGISTER(Register, Oexception)
257 #define Oissuing_pc AS_REGISTER(Register, Oissuing_pc)
260 #endif
262 // Address is an abstraction used to represent a memory location.
263 //
264 // Note: A register location is represented via a Register, not
265 // via an address for efficiency & simplicity reasons.
class Address VALUE_OBJ_CLASS_SPEC {
 private:
  Register          _base;   // base register (G0 for an absolute address)
#ifdef _LP64
  int               _hi32;   // bits 63::32
  int               _low32;  // bits 31::0
#endif
  int               _hi;     // upper bits of the low word (sethi part)
  int               _disp;   // low 10 bits, usable as an immediate displacement
  RelocationHolder  _rspec;  // relocation info for the embedded address, if any

  // Map a relocType (plus the target address, where the reloc kind needs one)
  // to the corresponding RelocationHolder.
  RelocationHolder rspec_from_rtype(relocInfo::relocType rt, address a = NULL) {
    switch (rt) {
    case relocInfo::external_word_type:
      return external_word_Relocation::spec(a);
    case relocInfo::internal_word_type:
      return internal_word_Relocation::spec(a);
#ifdef _LP64
    case relocInfo::opt_virtual_call_type:
      return opt_virtual_call_Relocation::spec();
    case relocInfo::static_call_type:
      return static_call_Relocation::spec();
    case relocInfo::runtime_call_type:
      return runtime_call_Relocation::spec();
#endif
    case relocInfo::none:
      return RelocationHolder();
    default:
      ShouldNotReachHere();
      return RelocationHolder();
    }
  }

 public:
  // Construct from base register + absolute address; relocation info is
  // derived from the given relocType via rspec_from_rtype.
  Address(Register b, address a, relocInfo::relocType rt = relocInfo::none)
    : _rspec(rspec_from_rtype(rt, a))
  {
    _base  = b;
#ifdef _LP64
    _hi32  = (intptr_t)a >> 32;    // top 32 bits in 64 bit word
    _low32 = (intptr_t)a & ~0;     // low 32 bits in 64 bit word
#endif
    _hi    = (intptr_t)a & ~0x3ff; // top    22 bits in low word
    _disp  = (intptr_t)a &  0x3ff; // bottom 10 bits
  }

  // Same as above, but with an explicitly supplied RelocationHolder.
  Address(Register b, address a, RelocationHolder const& rspec)
    : _rspec(rspec)
  {
    _base  = b;
#ifdef _LP64
    _hi32  = (intptr_t)a >> 32;    // top 32 bits in 64 bit word
    _low32 = (intptr_t)a & ~0;     // low 32 bits in 64 bit word
#endif
    _hi    = (intptr_t)a & ~0x3ff; // top 22 bits
    _disp  = (intptr_t)a &  0x3ff; // bottom 10 bits
  }

  // Construct from base register + split hi/disp values
  // (register + small-offset form; on 64-bit, h must currently be 0).
  Address(Register b, intptr_t h, intptr_t d, RelocationHolder const& rspec = RelocationHolder())
    : _rspec(rspec)
  {
    _base = b;
#ifdef _LP64
    // [RGV] Put in Assert to force me to check usage of this constructor
    assert( h == 0, "Check usage of this constructor" );
    _hi32  = h;
    _low32 = d;
    _hi    = h;
    _disp  = d;
#else
    _hi   = h;
    _disp = d;
#endif
  }

  // Default address: [G0 + 0].
  Address()
    : _rspec(RelocationHolder())
  {
    _base = G0;
#ifdef _LP64
    _hi32  = 0;
    _low32 = 0;
#endif
    _hi   = 0;
    _disp = 0;
  }

  // fancier constructors

  enum addr_type {
    extra_in_argument,   // in the In registers
    extra_out_argument   // in the Outs
  };

  // Address of the indexed extra (stack-passed) argument slot;
  // defined out-of-line.
  Address( addr_type, int );

  // accessors

  Register               base() const { return _base; }
#ifdef _LP64
  int                    hi32() const { return _hi32; }
  int                   low32() const { return _low32; }
#endif
  int                      hi() const { return _hi; }
  int                    disp() const { return _disp; }
#ifdef _LP64
  // Full 64-bit value reassembled from the two 32-bit halves.
  intptr_t              value() const { return ((intptr_t)_hi32 << 32) |
                                               (intptr_t)(uint32_t)_low32; }
#else
  int                   value() const { return _hi | _disp; }
#endif
  const relocInfo::relocType  rtype() { return _rspec.type(); }
  const RelocationHolder&     rspec() { return _rspec; }

  // Relocation spec shifted by offset (for multi-instruction sequences).
  RelocationHolder rspec(int offset) const {
    return offset == 0 ? _rspec : _rspec.plus(offset);
  }

  inline bool is_simm13(int offset = 0);  // check disp+offset for overflow

  Address plus_disp(int disp) const {     // bump disp by a small amount
    Address a = (*this);
    a._disp += disp;
    return a;
  }

  Address split_disp() const {            // deal with disp overflow:
    Address a = (*this);                  // move any bits above the low 10
    int hi_disp = _disp & ~0x3ff;         // from _disp into _hi
    if (hi_disp != 0) {
      a._disp -= hi_disp;
      a._hi   += hi_disp;
    }
    return a;
  }

  // View this address from the callee's frame: rewrite the base register
  // across a register-window save (O-registers become I-registers).
  Address after_save() const {
    Address a = (*this);
    a._base = a._base->after_save();
    return a;
  }

  // Inverse of after_save: view this address from the caller's frame.
  Address after_restore() const {
    Address a = (*this);
    a._base = a._base->after_restore();
    return a;
  }

  friend class Assembler;
};
// Address of this register's slot in the current frame's register save
// area (SP-relative; STACK_BIAS compensates for the biased 64-bit SP).
inline Address RegisterImpl::address_in_saved_window() const {
   return (Address(SP, 0, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS));
}
425 // Argument is an abstraction used to represent an outgoing
426 // actual argument or an incoming formal parameter, whether
427 // it resides in memory or in a register, in a manner consistent
428 // with the SPARC Application Binary Interface, or ABI. This is
429 // often referred to as the native or C calling convention.
class Argument VALUE_OBJ_CLASS_SPEC {
 private:
  int    _number;  // zero-based argument index
  bool   _is_in;   // true => incoming formal (In side); false => outgoing actual (Out side)

 public:
#ifdef _LP64
  enum {
    n_register_parameters = 6,          // only 6 registers may contain integer parameters
    n_float_register_parameters = 16    // Can have up to 16 floating registers
  };
#else
  enum {
    n_register_parameters = 6           // only 6 registers may contain integer parameters
  };
#endif

  // creation
  Argument(int number, bool is_in) : _number(number), _is_in(is_in) {}

  int  number() const  { return _number;  }
  bool is_in()  const  { return _is_in;   }
  bool is_out() const  { return !is_in(); }

  // the next argument in sequence, on the same (in/out) side
  Argument successor() const  { return Argument(number() + 1, is_in()); }
  Argument as_in()     const  { return Argument(number(), true ); }
  Argument as_out()    const  { return Argument(number(), false); }

  // locating register-based arguments:
  bool is_register() const { return _number < n_register_parameters; }

#ifdef _LP64
  // locating Floating Point register-based arguments:
  bool is_float_register() const { return _number < n_float_register_parameters; }

  // maps float arg n to register 2n+1 (the odd half of a register pair) —
  // presumably per the V9 float-argument convention; verify against the ABI
  FloatRegister as_float_register() const {
    assert(is_float_register(), "must be a register argument");
    return as_FloatRegister(( number() *2 ) + 1);
  }
  // maps double arg n to the even-aligned register 2n
  FloatRegister as_double_register() const {
    assert(is_float_register(), "must be a register argument");
    return as_FloatRegister(( number() *2 ));
  }
#endif

  // integer argument register: I-register for incoming, O-register for outgoing
  Register as_register() const {
    assert(is_register(), "must be a register argument");
    return is_in() ? as_iRegister(number()) : as_oRegister(number());
  }

  // locating memory-based arguments
  Address as_address() const {
    assert(!is_register(), "must be a memory argument");
    return address_in_frame();
  }

  // When applied to a register-based argument, give the corresponding address
  // into the 6-word area "into which callee may store register arguments"
  // (This is a different place than the corresponding register-save area location.)
  Address address_in_frame() const {
    return Address( is_in()   ? Address::extra_in_argument
                              : Address::extra_out_argument,
                    _number );
  }

  // debugging
  const char* name() const;

  friend class Assembler;
};
503 // The SPARC Assembler: Pure assembler doing NO optimizations on the instruction
504 // level; i.e., what you write
505 // is what you get. The Assembler is generating code into a CodeBuffer.
507 class Assembler : public AbstractAssembler {
508 protected:
510 static void print_instruction(int inst);
511 static int patched_branch(int dest_pos, int inst, int inst_pos);
512 static int branch_destination(int inst, int pos);
515 friend class AbstractAssembler;
517 // code patchers need various routines like inv_wdisp()
518 friend class NativeInstruction;
519 friend class NativeGeneralJump;
520 friend class Relocation;
521 friend class Label;
523 public:
524 // op carries format info; see page 62 & 267
526 enum ops {
527 call_op = 1, // fmt 1
528 branch_op = 0, // also sethi (fmt2)
529 arith_op = 2, // fmt 3, arith & misc
530 ldst_op = 3 // fmt 3, load/store
531 };
533 enum op2s {
534 bpr_op2 = 3,
535 fb_op2 = 6,
536 fbp_op2 = 5,
537 br_op2 = 2,
538 bp_op2 = 1,
539 cb_op2 = 7, // V8
540 sethi_op2 = 4
541 };
543 enum op3s {
544 // selected op3s
545 add_op3 = 0x00,
546 and_op3 = 0x01,
547 or_op3 = 0x02,
548 xor_op3 = 0x03,
549 sub_op3 = 0x04,
550 andn_op3 = 0x05,
551 orn_op3 = 0x06,
552 xnor_op3 = 0x07,
553 addc_op3 = 0x08,
554 mulx_op3 = 0x09,
555 umul_op3 = 0x0a,
556 smul_op3 = 0x0b,
557 subc_op3 = 0x0c,
558 udivx_op3 = 0x0d,
559 udiv_op3 = 0x0e,
560 sdiv_op3 = 0x0f,
562 addcc_op3 = 0x10,
563 andcc_op3 = 0x11,
564 orcc_op3 = 0x12,
565 xorcc_op3 = 0x13,
566 subcc_op3 = 0x14,
567 andncc_op3 = 0x15,
568 orncc_op3 = 0x16,
569 xnorcc_op3 = 0x17,
570 addccc_op3 = 0x18,
571 umulcc_op3 = 0x1a,
572 smulcc_op3 = 0x1b,
573 subccc_op3 = 0x1c,
574 udivcc_op3 = 0x1e,
575 sdivcc_op3 = 0x1f,
577 taddcc_op3 = 0x20,
578 tsubcc_op3 = 0x21,
579 taddcctv_op3 = 0x22,
580 tsubcctv_op3 = 0x23,
581 mulscc_op3 = 0x24,
582 sll_op3 = 0x25,
583 sllx_op3 = 0x25,
584 srl_op3 = 0x26,
585 srlx_op3 = 0x26,
586 sra_op3 = 0x27,
587 srax_op3 = 0x27,
588 rdreg_op3 = 0x28,
589 membar_op3 = 0x28,
591 flushw_op3 = 0x2b,
592 movcc_op3 = 0x2c,
593 sdivx_op3 = 0x2d,
594 popc_op3 = 0x2e,
595 movr_op3 = 0x2f,
597 sir_op3 = 0x30,
598 wrreg_op3 = 0x30,
599 saved_op3 = 0x31,
601 fpop1_op3 = 0x34,
602 fpop2_op3 = 0x35,
603 impdep1_op3 = 0x36,
604 impdep2_op3 = 0x37,
605 jmpl_op3 = 0x38,
606 rett_op3 = 0x39,
607 trap_op3 = 0x3a,
608 flush_op3 = 0x3b,
609 save_op3 = 0x3c,
610 restore_op3 = 0x3d,
611 done_op3 = 0x3e,
612 retry_op3 = 0x3e,
614 lduw_op3 = 0x00,
615 ldub_op3 = 0x01,
616 lduh_op3 = 0x02,
617 ldd_op3 = 0x03,
618 stw_op3 = 0x04,
619 stb_op3 = 0x05,
620 sth_op3 = 0x06,
621 std_op3 = 0x07,
622 ldsw_op3 = 0x08,
623 ldsb_op3 = 0x09,
624 ldsh_op3 = 0x0a,
625 ldx_op3 = 0x0b,
627 ldstub_op3 = 0x0d,
628 stx_op3 = 0x0e,
629 swap_op3 = 0x0f,
631 lduwa_op3 = 0x10,
632 ldxa_op3 = 0x1b,
634 stwa_op3 = 0x14,
635 stxa_op3 = 0x1e,
637 ldf_op3 = 0x20,
638 ldfsr_op3 = 0x21,
639 ldqf_op3 = 0x22,
640 lddf_op3 = 0x23,
641 stf_op3 = 0x24,
642 stfsr_op3 = 0x25,
643 stqf_op3 = 0x26,
644 stdf_op3 = 0x27,
646 prefetch_op3 = 0x2d,
649 ldc_op3 = 0x30,
650 ldcsr_op3 = 0x31,
651 lddc_op3 = 0x33,
652 stc_op3 = 0x34,
653 stcsr_op3 = 0x35,
654 stdcq_op3 = 0x36,
655 stdc_op3 = 0x37,
657 casa_op3 = 0x3c,
658 casxa_op3 = 0x3e,
660 alt_bit_op3 = 0x10,
661 cc_bit_op3 = 0x10
662 };
664 enum opfs {
665 // selected opfs
666 fmovs_opf = 0x01,
667 fmovd_opf = 0x02,
669 fnegs_opf = 0x05,
670 fnegd_opf = 0x06,
672 fadds_opf = 0x41,
673 faddd_opf = 0x42,
674 fsubs_opf = 0x45,
675 fsubd_opf = 0x46,
677 fmuls_opf = 0x49,
678 fmuld_opf = 0x4a,
679 fdivs_opf = 0x4d,
680 fdivd_opf = 0x4e,
682 fcmps_opf = 0x51,
683 fcmpd_opf = 0x52,
685 fstox_opf = 0x81,
686 fdtox_opf = 0x82,
687 fxtos_opf = 0x84,
688 fxtod_opf = 0x88,
689 fitos_opf = 0xc4,
690 fdtos_opf = 0xc6,
691 fitod_opf = 0xc8,
692 fstod_opf = 0xc9,
693 fstoi_opf = 0xd1,
694 fdtoi_opf = 0xd2
695 };
697 enum RCondition { rc_z = 1, rc_lez = 2, rc_lz = 3, rc_nz = 5, rc_gz = 6, rc_gez = 7 };
699 enum Condition {
700 // for FBfcc & FBPfcc instruction
701 f_never = 0,
702 f_notEqual = 1,
703 f_notZero = 1,
704 f_lessOrGreater = 2,
705 f_unorderedOrLess = 3,
706 f_less = 4,
707 f_unorderedOrGreater = 5,
708 f_greater = 6,
709 f_unordered = 7,
710 f_always = 8,
711 f_equal = 9,
712 f_zero = 9,
713 f_unorderedOrEqual = 10,
714 f_greaterOrEqual = 11,
715 f_unorderedOrGreaterOrEqual = 12,
716 f_lessOrEqual = 13,
717 f_unorderedOrLessOrEqual = 14,
718 f_ordered = 15,
720 // V8 coproc, pp 123 v8 manual
722 cp_always = 8,
723 cp_never = 0,
724 cp_3 = 7,
725 cp_2 = 6,
726 cp_2or3 = 5,
727 cp_1 = 4,
728 cp_1or3 = 3,
729 cp_1or2 = 2,
730 cp_1or2or3 = 1,
731 cp_0 = 9,
732 cp_0or3 = 10,
733 cp_0or2 = 11,
734 cp_0or2or3 = 12,
735 cp_0or1 = 13,
736 cp_0or1or3 = 14,
737 cp_0or1or2 = 15,
740 // for integers
742 never = 0,
743 equal = 1,
744 zero = 1,
745 lessEqual = 2,
746 less = 3,
747 lessEqualUnsigned = 4,
748 lessUnsigned = 5,
749 carrySet = 5,
750 negative = 6,
751 overflowSet = 7,
752 always = 8,
753 notEqual = 9,
754 notZero = 9,
755 greater = 10,
756 greaterEqual = 11,
757 greaterUnsigned = 12,
758 greaterEqualUnsigned = 13,
759 carryClear = 13,
760 positive = 14,
761 overflowClear = 15
762 };
764 enum CC {
765 icc = 0, xcc = 2,
766 // ptr_cc is the correct condition code for a pointer or intptr_t:
767 ptr_cc = NOT_LP64(icc) LP64_ONLY(xcc),
768 fcc0 = 0, fcc1 = 1, fcc2 = 2, fcc3 = 3
769 };
771 enum PrefetchFcn {
772 severalReads = 0, oneRead = 1, severalWritesAndPossiblyReads = 2, oneWrite = 3, page = 4
773 };
775 public:
776 // Helper functions for groups of instructions
778 enum Predict { pt = 1, pn = 0 }; // pt = predict taken
780 enum Membar_mask_bits { // page 184, v9
781 StoreStore = 1 << 3,
782 LoadStore = 1 << 2,
783 StoreLoad = 1 << 1,
784 LoadLoad = 1 << 0,
786 Sync = 1 << 6,
787 MemIssue = 1 << 5,
788 Lookaside = 1 << 4
789 };
// Test whether x fits in a signed immediate field nbits wide.
static bool is_simm(int x, int nbits) {
  const int limit = 1 << (nbits - 1);   // half-range of an nbits-wide signed field
  return -limit <= x && x < limit;
}
// test if -4096 <= x <= 4095 (the standard SPARC 13-bit signed immediate range)
static bool is_simm13(int x) { return is_simm(x, 13); }
797 enum ASIs { // page 72, v9
798 ASI_PRIMARY = 0x80,
799 ASI_PRIMARY_LITTLE = 0x88
800 // add more from book as needed
801 };
803 protected:
804 // helpers
// x is supposed to fit in a field "nbits" wide
// and be sign-extended. Check the range.
// (nbits == 32 is accepted unconditionally: any int fits a 32-bit field.)
static void assert_signed_range(intptr_t x, int nbits) {
  assert( nbits == 32
      || -(1 << nbits-1) <= x && x < ( 1 << nbits-1),
      "value out of range");
}
// Like assert_signed_range, but for a byte displacement that will be stored
// as a word (4-byte) count: x must be word aligned, and x/4 must fit in nbits.
static void assert_signed_word_disp_range(intptr_t x, int nbits) {
  assert( (x & 3) == 0, "not word aligned");
  assert_signed_range(x, nbits + 2);
}
// Check that x fits as an unsigned value in a field nbits wide.
static void assert_unsigned_const(int x, int nbits) {
  assert( juint(x) < juint(1 << nbits), "unsigned constant out of range");
}
// fields: note bits numbered from LSB = 0,
// fields known by inclusive bit range
// Right-justified mask of (hi_bit - lo_bit + 1) one-bits,
// i.e. the field's mask before shifting it into position.
static int fmask(juint hi_bit, juint lo_bit) {
  assert( hi_bit >= lo_bit && 0 <= lo_bit && hi_bit < 32, "bad bits");
  return (1 << ( hi_bit-lo_bit + 1 )) - 1;
}
// inverse of u_field: extract the unsigned value of the instruction
// field occupying bits [hi_bit..lo_bit] of x.
static int inv_u_field(int x, int hi_bit, int lo_bit) {
  juint r = juint(x) >> lo_bit;
  r &= fmask( hi_bit, lo_bit);
  return int(r);
}
// signed version: extract from field and sign-extend
// (shift the field's sign bit up to bit 31, arithmetic-shift it back
// to smear the sign, then extract as for an unsigned field).
static int inv_s_field(int x, int hi_bit, int lo_bit) {
  int sign_shift = 31 - hi_bit;
  return inv_u_field( ((x << sign_shift) >> sign_shift), hi_bit, lo_bit);
}
// given a field that ranges from hi_bit to lo_bit (inclusive,
// LSB = 0), and an unsigned value for the field,
// shift it into the field
// (debug builds range-check the value and verify round-tripping
//  through inv_u_field; product builds use a bare macro shift)
#ifdef ASSERT
static int u_field(int x, int hi_bit, int lo_bit) {
  assert( ( x & ~fmask(hi_bit, lo_bit)) == 0,
      "value out of range");
  int r = x << lo_bit;
  assert( inv_u_field(r, hi_bit, lo_bit) == x, "just checking");
  return r;
}
#else
// make sure this is inlined as it will reduce code size significantly
#define u_field(x, hi_bit, lo_bit) ((x) << (lo_bit))
#endif
// Decoders: pull individual fields back out of an assembled
// 32-bit instruction word.
static int inv_op( int x ) { return inv_u_field(x, 31, 30); }
static int inv_op2( int x ) { return inv_u_field(x, 24, 22); }
static int inv_op3( int x ) { return inv_u_field(x, 24, 19); }
static int inv_cond( int x ){ return inv_u_field(x, 28, 25); }

// true iff the i-bit is set, i.e. operand2 is an immediate rather than rs2
static bool inv_immed( int x ) { return (x & Assembler::immed(true)) != 0; }

static Register inv_rd( int x ) { return as_Register(inv_u_field(x, 29, 25)); }
static Register inv_rs1( int x ) { return as_Register(inv_u_field(x, 18, 14)); }
static Register inv_rs2( int x ) { return as_Register(inv_u_field(x, 4, 0)); }

// Encoders: place a value into the named instruction field.
// Bit positions follow the SPARC instruction formats (see the "op carries
// format info" note above and the architecture manual pages it cites).
static int op( int x) { return u_field(x, 31, 30); }
static int rd( Register r) { return u_field(r->encoding(), 29, 25); }
static int fcn( int x) { return u_field(x, 29, 25); }
static int op3( int x) { return u_field(x, 24, 19); }
static int rs1( Register r) { return u_field(r->encoding(), 18, 14); }
static int rs2( Register r) { return u_field(r->encoding(), 4, 0); }
static int annul( bool a) { return u_field(a ? 1 : 0, 29, 29); }
static int cond( int x) { return u_field(x, 28, 25); }
static int cond_mov( int x) { return u_field(x, 17, 14); }
static int rcond( RCondition x) { return u_field(x, 12, 10); }
static int op2( int x) { return u_field(x, 24, 22); }
static int predict( bool p) { return u_field(p ? 1 : 0, 19, 19); }
static int branchcc( CC fcca) { return u_field(fcca, 21, 20); }
static int cmpcc( CC fcca) { return u_field(fcca, 26, 25); }
static int imm_asi( int x) { return u_field(x, 12, 5); }
static int immed( bool i) { return u_field(i ? 1 : 0, 13, 13); }
static int opf_low6( int w) { return u_field(w, 10, 5); }
static int opf_low5( int w) { return u_field(w, 9, 5); }
static int trapcc( CC cc) { return u_field(cc, 12, 11); }
static int sx( int i) { return u_field(i, 12, 12); } // shift x=1 means 64-bit
static int opf( int x) { return u_field(x, 13, 5); }

// condition-code field encodings differ between the float and integer forms
static int opf_cc( CC c, bool useFloat ) { return u_field((useFloat ? 0 : 4) + c, 13, 11); }
static int mov_cc( CC c, bool useFloat ) { return u_field(useFloat ? 0 : 1, 18, 18) | u_field(c, 12, 11); }

// float-register fields; encoding depends on the operand width
static int fd( FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 29, 25); };
static int fs1(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 18, 14); };
static int fs2(FloatRegister r, FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 4, 0); };
// some float instructions use this encoding on the op3 field:
// the base op3 is biased by an amount selected by the operand width
// (S: +0, D: +3, Q: +2), then shifted into the op3 field position.
static int alt_op3(int op, FloatRegisterImpl::Width w) {
  int r;
  switch(w) {
   case FloatRegisterImpl::S: r = op + 0; break;
   case FloatRegisterImpl::D: r = op + 3; break;
   case FloatRegisterImpl::Q: r = op + 2; break;
   default: ShouldNotReachHere(); break;
  }
  return op3(r);
}
// compute inverse of simm: recover the signed value held in the low
// nbits of x by shifting the field's sign bit up to bit 31 and then
// arithmetic-shifting back down (sign-extension).
static int inv_simm(int x, int nbits) {
  const int shift = 32 - nbits;
  return (int)(x << shift) >> shift;
}
923 static int inv_simm13( int x ) { return inv_simm(x, 13); }
// signed immediate, in low bits, nbits long
// (asserts the value fits, then masks it into the low nbits)
static int simm(int x, int nbits) {
  assert_signed_range(x, nbits);
  return x & (( 1 << nbits ) - 1);
}
// compute inverse of wdisp16: reassemble the split 16-bit word
// displacement (14 low bits at the LSB, 2 high bits at positions 21:20),
// sign-extend it, scale words to bytes, and rebase it at pos.
static intptr_t inv_wdisp16(int x, intptr_t pos) {
  int lo14 = x & ((1 << 14) - 1);
  int hi2  = (x >> 20) & 3;
  if ((hi2 & 2) != 0)  hi2 |= ~1;     // sign-extend the 2-bit high part
  return (((hi2 << 14) | lo14) << 2) + pos;
}
// word offset, 14 bits at LSend, 2 bits at B21, B20
// Encode the byte displacement (x - off) as the split 16-bit word
// displacement used by the V9 BPr format; self-checks by decoding
// the result with inv_wdisp16.
static int wdisp16(intptr_t x, intptr_t off) {
  intptr_t xx = x - off;
  assert_signed_word_disp_range(xx, 16);
  int r = (xx >> 2) & ((1 << 14) - 1)
        | ( ( (xx>>(2+14)) & 3 ) << 20 );
  assert( inv_wdisp16(r, off) == x, "inverse is not inverse");
  return r;
}
// word displacement in low-order nbits bits: extract the field from x,
// sign-extend it, scale words to bytes, and rebase it at pos
// (the inverse of wdisp).
static intptr_t inv_wdisp( int x, intptr_t pos, int nbits ) {
  const int mask = (1 << nbits) - 1;
  int field = x & mask;
  if (field >= (1 << (nbits - 1))) {
    field |= ~mask;                  // negative: fill all bits above the field
  }
  return (field << 2) + pos;
}
// Encode the byte displacement (x - off) as an nbits-wide word
// displacement in the low-order bits; self-checks via inv_wdisp.
static int wdisp( intptr_t x, intptr_t off, int nbits ) {
  intptr_t xx = x - off;
  assert_signed_word_disp_range(xx, nbits);
  int r = (xx >> 2) & (( 1 << nbits ) - 1);
  assert( inv_wdisp( r, off, nbits ) == x, "inverse not inverse");
  return r;
}
969 // Extract the top 32 bits in a 64 bit word
970 static int32_t hi32( int64_t x ) {
971 int32_t r = int32_t( (uint64_t)x >> 32 );
972 return r;
973 }
975 // given a sethi instruction, extract the constant, left-justified
976 static int inv_hi22( int x ) {
977 return x << 10;
978 }
980 // create an imm22 field, given a 32-bit left-justified constant
981 static int hi22( int x ) {
982 int r = int( juint(x) >> 10 );
983 assert( (r & ~((1 << 22) - 1)) == 0, "just checkin'");
984 return r;
985 }
987 // create a low10 __value__ (not a field) for a given a 32-bit constant
988 static int low10( int x ) {
989 return x & ((1 << 10) - 1);
990 }
  // instruction only in v9
  static void v9_only() { assert( VM_Version::v9_instructions_work(), "This instruction only works on SPARC V9"); }

  // instruction only in v8
  static void v8_only() { assert( VM_Version::v8_instructions_work(), "This instruction only works on SPARC V8"); }

  // instruction deprecated in v9
  static void v9_dep() { } // do nothing for now

  // some float instructions only exist for single prec. on v8
  static void v8_s_only(FloatRegisterImpl::Width w) { if (w != FloatRegisterImpl::S) v9_only(); }

  // v8 has no CC field; any non-default CC requires v9
  static void v8_no_cc(CC cc) { if (cc) v9_only(); }
 protected:
  // Simple delay-slot scheme:
  // In order to check the programmer, the assembler keeps track of delay slots.
  // It forbids CTIs in delay slots (conservative, but should be OK).
  // Also, when putting an instruction into a delay slot, you must say
  // asm->delayed()->add(...), in order to check that you don't omit
  // delay-slot instructions.
  // To implement this, we use a simple FSA

#ifdef ASSERT
  #define CHECK_DELAY
#endif
#ifdef CHECK_DELAY
  // current state of the delay-slot FSA; only tracked in debug builds
  enum Delay_state { no_delay, at_delay_slot, filling_delay_slot } delay_state;
#endif

 public:
  // Tells assembler next instruction must NOT be in delay slot.
  // Use at start of multinstruction macros.
  void assert_not_delayed() {
    // This is a separate overloading to avoid creation of string constants
    // in non-asserted code--with some compilers this pollutes the object code.
#ifdef CHECK_DELAY
    assert_not_delayed("next instruction should not be a delay slot");
#endif
  }
  void assert_not_delayed(const char* msg) {
#ifdef CHECK_DELAY
    assert_msg ( delay_state == no_delay, msg);
#endif
  }
 protected:
  // Delay slot helpers
  // cti is called when emitting control-transfer instruction,
  // BEFORE doing the emitting.
  // Only effective when assertion-checking is enabled.
  void cti() {
#ifdef CHECK_DELAY
    assert_not_delayed("cti should not be in delay slot");
#endif
  }

  // called when emitting cti with a delay slot, AFTER emitting
  void has_delay_slot() {
#ifdef CHECK_DELAY
    assert_not_delayed("just checking");
    delay_state = at_delay_slot;     // next instruction emitted must fill the slot
#endif
  }

 public:
  // Tells assembler you know that next instruction is delayed
  Assembler* delayed() {
#ifdef CHECK_DELAY
    assert ( delay_state == at_delay_slot, "delayed instruction is not in delay slot");
    delay_state = filling_delay_slot;
#endif
    return this;
  }

  // Ensure code does not end with an unfilled delay slot, then flush.
  void flush() {
#ifdef CHECK_DELAY
    assert ( delay_state == no_delay, "ending code with a delay slot");
#endif
    AbstractAssembler::flush();
  }
  // Low-level emission: every instruction/data word goes through emit_long,
  // which also participates in the delay-slot checking above.
  inline void emit_long(int);  // shadows AbstractAssembler::emit_long
  inline void emit_data(int x) { emit_long(x); }
  inline void emit_data(int, RelocationHolder const&);
  inline void emit_data(int, relocInfo::relocType rtype);
  // helper for above fcns
  inline void check_delay();
 public:
  // instructions, refer to page numbers in the SPARC Architecture Manual, V9

  // pp 135 (addc was addx in v8)

  inline void add( Register s1, Register s2, Register d );
  inline void add( Register s1, int simm13a, Register d, relocInfo::relocType rtype = relocInfo::none);
  inline void add( Register s1, int simm13a, Register d, RelocationHolder const& rspec);
  inline void add( Register s1, RegisterOrConstant s2, Register d, int offset = 0);
  inline void add( const Address& a, Register d, int offset = 0);

  // add variants that set the condition codes (cc) and/or use the carry (addc)
  void addcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void addcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void addc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(addc_op3 ) | rs1(s1) | rs2(s2) ); }
  void addc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(addc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void addccc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void addccc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  // pp 136

  // branch on register condition; a = annul bit, p = branch prediction hint
  inline void bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt = relocInfo::none );
  inline void bpr( RCondition c, bool a, Predict p, Register s1, Label& L);

 protected: // use MacroAssembler::br instead

  // pp 138

  // branch on floating-point condition codes
  inline void fb( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
  inline void fb( Condition c, bool a, Label& L );

  // pp 141

  // branch on floating-point condition codes with prediction (v9)
  inline void fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
  inline void fbp( Condition c, bool a, CC cc, Predict p, Label& L );

 public:

  // pp 144

  // branch on integer condition codes
  inline void br( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
  inline void br( Condition c, bool a, Label& L );

  // pp 146

  // branch on integer condition codes with prediction (v9)
  inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
  inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );

  // pp 121 (V8)

  // branch on coprocessor condition codes (v8 only)
  inline void cb( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
  inline void cb( Condition c, bool a, Label& L );

  // pp 149

  inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type );
  inline void call( Label& L, relocInfo::relocType rt = relocInfo::runtime_call_type );
  // pp 150

  // These instructions compare the contents of s2 with the contents of
  // memory at address in s1. If the values are equal, the contents of memory
  // at address s1 is swapped with the data in d. If the values are not equal,
  // the contents of memory at s1 is loaded into d, without the swap.
  // ia = address space identifier; -1 selects the implicit ASI (immediate form).

  void casa( Register s1, Register s2, Register d, int ia = -1 ) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(casa_op3 ) | rs1(s1) | (ia == -1 ? immed(true) : imm_asi(ia)) | rs2(s2)); }
  void casxa( Register s1, Register s2, Register d, int ia = -1 ) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(casxa_op3) | rs1(s1) | (ia == -1 ? immed(true) : imm_asi(ia)) | rs2(s2)); }
  // pp 152

  // 32-bit divides; the cc variants also set the integer condition codes
  void udiv( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(udiv_op3 ) | rs1(s1) | rs2(s2)); }
  void udiv( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(udiv_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void sdiv( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sdiv_op3 ) | rs1(s1) | rs2(s2)); }
  void sdiv( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sdiv_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void udivcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(udiv_op3 | cc_bit_op3) | rs1(s1) | rs2(s2)); }
  void udivcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(udiv_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void sdivcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sdiv_op3 | cc_bit_op3) | rs1(s1) | rs2(s2)); }
  void sdivcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sdiv_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

  // pp 155

  // done/retry are control-transfer instructions (note the cti() call)
  void done() { v9_only(); cti(); emit_long( op(arith_op) | fcn(0) | op3(done_op3) ); }
  void retry() { v9_only(); cti(); emit_long( op(arith_op) | fcn(1) | op3(retry_op3) ); }
  // pp 156

  // floating-point arithmetic; w selects single/double/quad and offsets the opf code
  void fadd( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x40 + w) | fs2(s2, w)); }
  void fsub( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x44 + w) | fs2(s2, w)); }

  // pp 157

  // floating-point compares; fcmpe additionally signals on unordered operands
  void fcmp( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc); emit_long( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
  void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc); emit_long( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }

  // pp 159

  // float -> 64-bit int (v9) and float -> 32-bit int conversions
  void ftox( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only(); emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w) | fs2(s, w)); }
  void ftoi( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xd0 + w) | fs2(s, w)); }

  // pp 160

  // float -> float conversion between widths
  void ftof( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s, FloatRegister d ) { emit_long( op(arith_op) | fd(d, dw) | op3(fpop1_op3) | opf(0xc0 + sw + dw*4) | fs2(s, sw)); }

  // pp 161

  // 64-bit int (v9) and 32-bit int -> float conversions
  void fxtof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only(); emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w*4) | fs2(s, w)); }
  void fitof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xc0 + w*4) | fs2(s, w)); }

  // pp 162

  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }

  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }

  // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fnegs is the only instruction available
  // on v8 to do negation of single, double and quad precision floats.

  void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_long( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(sd, w)); else emit_long( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x05) | fs2(sd, w)); }

  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }

  // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fabss is the only instruction available
  // on v8 to do abs operation on single/double/quad precision floats.

  void fabs( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_long( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(sd, w)); else emit_long( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x09) | fs2(sd, w)); }

  // pp 163

  void fmul( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x48 + w) | fs2(s2, w)); }
  // widening multiply: sw-wide sources, dw-wide destination
  void fmul( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | fd(d, dw) | op3(fpop1_op3) | fs1(s1, sw) | opf(0x60 + sw + dw*4) | fs2(s2, sw)); }
  void fdiv( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x4c + w) | fs2(s2, w)); }

  // pp 164

  void fsqrt( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x28 + w) | fs2(s, w)); }
  // pp 165

  // flush instruction cache line at the given address
  inline void flush( Register s1, Register s2 );
  inline void flush( Register s1, int simm13a);

  // pp 167

  // flush all register windows to memory (v9)
  void flushw() { v9_only(); emit_long( op(arith_op) | op3(flushw_op3) ); }

  // pp 168

  // illegal-instruction trap; any nonzero const22a requires v9
  void illtrap( int const22a) { if (const22a != 0) v9_only(); emit_long( op(branch_op) | u_field(const22a, 21, 0) ); }
  // v8 unimp == illtrap(0)

  // pp 169

  // implementation-dependent instructions (v9)
  void impdep1( int id1, int const19a ) { v9_only(); emit_long( op(arith_op) | fcn(id1) | op3(impdep1_op3) | u_field(const19a, 18, 0)); }
  void impdep2( int id1, int const19a ) { v9_only(); emit_long( op(arith_op) | fcn(id1) | op3(impdep2_op3) | u_field(const19a, 18, 0)); }

  // pp 149 (v8)

  // coprocessor operations (v8 only; same op3 slots as impdep1/2 on v9)
  void cpop1( int opc, int cr1, int cr2, int crd ) { v8_only(); emit_long( op(arith_op) | fcn(crd) | op3(impdep1_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }
  void cpop2( int opc, int cr1, int cr2, int crd ) { v8_only(); emit_long( op(arith_op) | fcn(crd) | op3(impdep2_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }

  // pp 170

  // jump and link: d receives the address of the jmpl instruction
  void jmpl( Register s1, Register s2, Register d );
  void jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec = RelocationHolder() );

  inline void jmpl( Address& a, Register d, int offset = 0);
  // 171

  // load floating-point register of width w
  inline void ldf( FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d );
  inline void ldf( FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d );

  inline void ldf( FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset = 0);

  // load floating-point state register (ldxfsr is the 64-bit v9 form)
  inline void ldfsr( Register s1, Register s2 );
  inline void ldfsr( Register s1, int simm13a);
  inline void ldxfsr( Register s1, Register s2 );
  inline void ldxfsr( Register s1, int simm13a);

  // pp 94 (v8)

  // coprocessor loads (v8 only)
  inline void ldc( Register s1, Register s2, int crd );
  inline void ldc( Register s1, int simm13a, int crd);
  inline void lddc( Register s1, Register s2, int crd );
  inline void lddc( Register s1, int simm13a, int crd);
  inline void ldcsr( Register s1, Register s2, int crd );
  inline void ldcsr( Register s1, int simm13a, int crd);
  // 173

  // load floating-point from alternate space (v9); ia = address space identifier
  void ldfa( FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d ) { v9_only(); emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldfa( FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d ) { v9_only(); emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

  // pp 175, lduw is ld on v8

  // integer loads: signed/unsigned byte, halfword, word; x = 64-bit, d = doubleword
  inline void ldsb( Register s1, Register s2, Register d );
  inline void ldsb( Register s1, int simm13a, Register d);
  inline void ldsh( Register s1, Register s2, Register d );
  inline void ldsh( Register s1, int simm13a, Register d);
  inline void ldsw( Register s1, Register s2, Register d );
  inline void ldsw( Register s1, int simm13a, Register d);
  inline void ldub( Register s1, Register s2, Register d );
  inline void ldub( Register s1, int simm13a, Register d);
  inline void lduh( Register s1, Register s2, Register d );
  inline void lduh( Register s1, int simm13a, Register d);
  inline void lduw( Register s1, Register s2, Register d );
  inline void lduw( Register s1, int simm13a, Register d);
  inline void ldx( Register s1, Register s2, Register d );
  inline void ldx( Register s1, int simm13a, Register d);
  inline void ld( Register s1, Register s2, Register d );
  inline void ld( Register s1, int simm13a, Register d);
  inline void ldd( Register s1, Register s2, Register d );
  inline void ldd( Register s1, int simm13a, Register d);

  // Address-based forms
  inline void ldsb( const Address& a, Register d, int offset = 0 );
  inline void ldsh( const Address& a, Register d, int offset = 0 );
  inline void ldsw( const Address& a, Register d, int offset = 0 );
  inline void ldub( const Address& a, Register d, int offset = 0 );
  inline void lduh( const Address& a, Register d, int offset = 0 );
  inline void lduw( const Address& a, Register d, int offset = 0 );
  inline void ldx( const Address& a, Register d, int offset = 0 );
  inline void ld( const Address& a, Register d, int offset = 0 );
  inline void ldd( const Address& a, Register d, int offset = 0 );

  // RegisterOrConstant forms
  inline void ldub( Register s1, RegisterOrConstant s2, Register d );
  inline void ldsb( Register s1, RegisterOrConstant s2, Register d );
  inline void lduh( Register s1, RegisterOrConstant s2, Register d );
  inline void ldsh( Register s1, RegisterOrConstant s2, Register d );
  inline void lduw( Register s1, RegisterOrConstant s2, Register d );
  inline void ldsw( Register s1, RegisterOrConstant s2, Register d );
  inline void ldx( Register s1, RegisterOrConstant s2, Register d );
  inline void ld( Register s1, RegisterOrConstant s2, Register d );
  inline void ldd( Register s1, RegisterOrConstant s2, Register d );
  // pp 177

  // integer loads from alternate space; ia = address space identifier
  void ldsba( Register s1, Register s2, int ia, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldsb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldsba( Register s1, int simm13a, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldsb_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void ldsha( Register s1, Register s2, int ia, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldsh_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldsha( Register s1, int simm13a, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldsh_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void ldswa( Register s1, Register s2, int ia, Register d ) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(ldsw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldswa( Register s1, int simm13a, Register d ) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(ldsw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void lduba( Register s1, Register s2, int ia, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void lduba( Register s1, int simm13a, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void lduha( Register s1, Register s2, int ia, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(lduh_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void lduha( Register s1, int simm13a, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(lduh_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void lduwa( Register s1, Register s2, int ia, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void lduwa( Register s1, int simm13a, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void ldxa( Register s1, Register s2, int ia, Register d ) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldxa( Register s1, int simm13a, Register d ) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void ldda( Register s1, Register s2, int ia, Register d ) { v9_dep(); emit_long( op(ldst_op) | rd(d) | op3(ldd_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldda( Register s1, int simm13a, Register d ) { v9_dep(); emit_long( op(ldst_op) | rd(d) | op3(ldd_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

  // pp 179

  // atomic load-store unsigned byte
  inline void ldstub( Register s1, Register s2, Register d );
  inline void ldstub( Register s1, int simm13a, Register d);

  // pp 180

  // atomic load-store unsigned byte in alternate space
  void ldstuba( Register s1, Register s2, int ia, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void ldstuba( Register s1, int simm13a, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  // pp 181

  // logical operations; and3/or3/xor3 are suffixed to avoid clashing with C++ keywords
  void and3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | rs2(s2) ); }
  void and3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void andcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void andcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void andn( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 ) | rs1(s1) | rs2(s2) ); }
  void andn( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void andncc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void andncc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void or3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | rs2(s2) ); }
  void or3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void orcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void orcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void orn( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | rs2(s2) ); }
  void orn( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void orncc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void orncc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void xor3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | rs2(s2) ); }
  void xor3( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void xorcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void xorcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void xnor( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3 ) | rs1(s1) | rs2(s2) ); }
  void xnor( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void xnorcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void xnorcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

  // pp 183

  // memory barrier; const7a selects the ordering constraints
  void membar( Membar_mask_bits const7a ) { v9_only(); emit_long( op(arith_op) | op3(membar_op3) | rs1(O7) | immed(true) | u_field( int(const7a), 6, 0)); }
  // pp 185

  // conditional floating-point move on condition codes (v9)
  void fmov( FloatRegisterImpl::Width w, Condition c, bool floatCC, CC cca, FloatRegister s2, FloatRegister d ) { v9_only(); emit_long( op(arith_op) | fd(d, w) | op3(fpop2_op3) | cond_mov(c) | opf_cc(cca, floatCC) | opf_low6(w) | fs2(s2, w)); }

  // pp 189

  // conditional floating-point move on integer register condition (v9)
  void fmov( FloatRegisterImpl::Width w, RCondition c, Register s1, FloatRegister s2, FloatRegister d ) { v9_only(); emit_long( op(arith_op) | fd(d, w) | op3(fpop2_op3) | rs1(s1) | rcond(c) | opf_low5(4 + w) | fs2(s2, w)); }

  // pp 191

  // conditional integer move on condition codes (v9)
  void movcc( Condition c, bool floatCC, CC cca, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(movcc_op3) | mov_cc(cca, floatCC) | cond_mov(c) | rs2(s2) ); }
  void movcc( Condition c, bool floatCC, CC cca, int simm11a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(movcc_op3) | mov_cc(cca, floatCC) | cond_mov(c) | immed(true) | simm(simm11a, 11) ); }

  // pp 195

  // conditional integer move on register condition (v9)
  void movr( RCondition c, Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(movr_op3) | rs1(s1) | rcond(c) | rs2(s2) ); }
  void movr( RCondition c, Register s1, int simm10a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(movr_op3) | rs1(s1) | rcond(c) | immed(true) | simm(simm10a, 10) ); }

  // pp 196

  // 64-bit multiply and divide (v9)
  void mulx( Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(mulx_op3 ) | rs1(s1) | rs2(s2) ); }
  void mulx( Register s1, int simm13a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(mulx_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void sdivx( Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sdivx_op3) | rs1(s1) | rs2(s2) ); }
  void sdivx( Register s1, int simm13a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sdivx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void udivx( Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(udivx_op3) | rs1(s1) | rs2(s2) ); }
  void udivx( Register s1, int simm13a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(udivx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  // pp 197

  // 32-bit multiplies; cc variants also set the integer condition codes
  void umul( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(umul_op3 ) | rs1(s1) | rs2(s2) ); }
  void umul( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(umul_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void smul( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(smul_op3 ) | rs1(s1) | rs2(s2) ); }
  void smul( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(smul_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void umulcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(umul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void umulcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(umul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
  void smulcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
  void smulcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

  // pp 199

  // multiply-step (deprecated in v9)
  void mulscc( Register s1, Register s2, Register d ) { v9_dep(); emit_long( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | rs2(s2) ); }
  void mulscc( Register s1, int simm13a, Register d ) { v9_dep(); emit_long( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

  // pp 201

  // nop is encoded as a sethi with all fields zero
  void nop() { emit_long( op(branch_op) | op2(sethi_op2) ); }
  // pp 202

  // population count (v9)
  void popc( Register s, Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(popc_op3) | rs2(s)); }
  void popc( int simm13a, Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(popc_op3) | immed(true) | simm(simm13a, 13)); }

  // pp 203

  // prefetch data; f selects the prefetch function/variant
  void prefetch( Register s1, Register s2, PrefetchFcn f);
  void prefetch( Register s1, int simm13a, PrefetchFcn f);
  void prefetcha( Register s1, Register s2, int ia, PrefetchFcn f ) { v9_only(); emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
  void prefetcha( Register s1, int simm13a, PrefetchFcn f ) { v9_only(); emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

  inline void prefetch(const Address& a, PrefetchFcn F, int offset = 0);

  // pp 208

  // not implementing read privileged register

  // read state registers; the u_field value selects which register
  inline void rdy( Register d) { v9_dep(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(0, 18, 14)); }
  inline void rdccr( Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(2, 18, 14)); }
  inline void rdasi( Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(3, 18, 14)); }
  inline void rdtick( Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(4, 18, 14)); } // Spoon!
  inline void rdpc( Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(5, 18, 14)); }
  inline void rdfprs( Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(6, 18, 14)); }
  // pp 213

  // return from trap
  inline void rett( Register s1, Register s2);
  inline void rett( Register s1, int simm13a, relocInfo::relocType rt = relocInfo::none);

  // pp 214

  // register-window save/restore
  void save( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(save_op3) | rs1(s1) | rs2(s2) ); }
  void save( Register s1, int simm13a, Register d ) {
    // make sure frame is at least large enough for the register save area
    assert(-simm13a >= 16 * wordSize, "frame too small");
    emit_long( op(arith_op) | rd(d) | op3(save_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) );
  }

  void restore( Register s1 = G0, Register s2 = G0, Register d = G0 ) { emit_long( op(arith_op) | rd(d) | op3(restore_op3) | rs1(s1) | rs2(s2) ); }
  void restore( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(restore_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

  // pp 216

  // window state adjustment (v9); note both use saved_op3 with fcn 0/1
  void saved() { v9_only(); emit_long( op(arith_op) | fcn(0) | op3(saved_op3)); }
  void restored() { v9_only(); emit_long( op(arith_op) | fcn(1) | op3(saved_op3)); }
1473 // pp 217
1475 inline void sethi( int imm22a, Register d, RelocationHolder const& rspec = RelocationHolder() );
// pp 218

// Shifts.  sx(0) selects the 32-bit form with a 5-bit immediate count;
// sx(1) selects the 64-bit form (V9-only) with a 6-bit immediate count.
void sll( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
void sll( Register s1, int imm5a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }
void srl( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
void srl( Register s1, int imm5a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }
void sra( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
void sra( Register s1, int imm5a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }

void sllx( Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
void sllx( Register s1, int imm6a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
void srlx( Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
void srlx( Register s1, int imm6a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
void srax( Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
void srax( Register s1, int imm6a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
// pp 220

// Software-initiated reset.
void sir( int simm13a ) { emit_long( op(arith_op) | fcn(15) | op3(sir_op3) | immed(true) | simm(simm13a, 13)); }

// pp 221

// Store barrier: encoded as the rs1==15 form of the membar op3 (V8 STBAR).
void stbar() { emit_long( op(arith_op) | op3(membar_op3) | u_field(15, 18, 14)); }

// pp 222

// Floating-point stores; definitions live in the .inline.hpp file.
inline void stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2 );
inline void stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a);
inline void stf( FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset = 0);

inline void stfsr( Register s1, Register s2 );
inline void stfsr( Register s1, int simm13a);
inline void stxfsr( Register s1, Register s2 );
inline void stxfsr( Register s1, int simm13a);

// pp 224

// Floating-point store to alternate address space (ASI in ia), V9-only.
void stfa( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2, int ia ) { v9_only(); emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
void stfa( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a ) { v9_only(); emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3 | alt_bit_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
// p 226

// Integer stores (byte/half/word/extended/double); register+register,
// register+simm13, Address, and RegisterOrConstant forms.  Definitions are
// in the .inline.hpp file.
inline void stb( Register d, Register s1, Register s2 );
inline void stb( Register d, Register s1, int simm13a);
inline void sth( Register d, Register s1, Register s2 );
inline void sth( Register d, Register s1, int simm13a);
inline void stw( Register d, Register s1, Register s2 );
inline void stw( Register d, Register s1, int simm13a);
inline void st( Register d, Register s1, Register s2 );
inline void st( Register d, Register s1, int simm13a);
inline void stx( Register d, Register s1, Register s2 );
inline void stx( Register d, Register s1, int simm13a);
inline void std( Register d, Register s1, Register s2 );
inline void std( Register d, Register s1, int simm13a);

inline void stb( Register d, const Address& a, int offset = 0 );
inline void sth( Register d, const Address& a, int offset = 0 );
inline void stw( Register d, const Address& a, int offset = 0 );
inline void stx( Register d, const Address& a, int offset = 0 );
inline void st( Register d, const Address& a, int offset = 0 );
inline void std( Register d, const Address& a, int offset = 0 );

inline void stb( Register d, Register s1, RegisterOrConstant s2 );
inline void sth( Register d, Register s1, RegisterOrConstant s2 );
inline void stw( Register d, Register s1, RegisterOrConstant s2 );
inline void stx( Register d, Register s1, RegisterOrConstant s2 );
inline void std( Register d, Register s1, RegisterOrConstant s2 );
inline void st( Register d, Register s1, RegisterOrConstant s2 );

// pp 177

// Integer stores to alternate address space; the alt bit is OR'ed into the
// normal store op3.  stxa is V9-only.
void stba( Register d, Register s1, Register s2, int ia ) { emit_long( op(ldst_op) | rd(d) | op3(stb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
void stba( Register d, Register s1, int simm13a ) { emit_long( op(ldst_op) | rd(d) | op3(stb_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void stha( Register d, Register s1, Register s2, int ia ) { emit_long( op(ldst_op) | rd(d) | op3(sth_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
void stha( Register d, Register s1, int simm13a ) { emit_long( op(ldst_op) | rd(d) | op3(sth_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void stwa( Register d, Register s1, Register s2, int ia ) { emit_long( op(ldst_op) | rd(d) | op3(stw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
void stwa( Register d, Register s1, int simm13a ) { emit_long( op(ldst_op) | rd(d) | op3(stw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void stxa( Register d, Register s1, Register s2, int ia ) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(stx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
void stxa( Register d, Register s1, int simm13a ) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(stx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void stda( Register d, Register s1, Register s2, int ia ) { emit_long( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
void stda( Register d, Register s1, int simm13a ) { emit_long( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
// pp 97 (v8)

// V8-only coprocessor stores (removed in V9); definitions in .inline.hpp.
inline void stc( int crd, Register s1, Register s2 );
inline void stc( int crd, Register s1, int simm13a);
inline void stdc( int crd, Register s1, Register s2 );
inline void stdc( int crd, Register s1, int simm13a);
inline void stcsr( int crd, Register s1, Register s2 );
inline void stcsr( int crd, Register s1, int simm13a);
inline void stdcq( int crd, Register s1, Register s2 );
inline void stdcq( int crd, Register s1, int simm13a);
// pp 230

// Subtract family: the cc bit makes the instruction set the condition codes;
// subc/subccc subtract with borrow (carry).
void sub( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | rs2(s2) ); }
void sub( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void subcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3 ) | rs1(s1) | rs2(s2) ); }
void subcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void subc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(subc_op3 ) | rs1(s1) | rs2(s2) ); }
void subc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(subc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void subccc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(subc_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
void subccc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(subc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

// pp 231

// Atomic swap (deprecated on V9 in favor of cas).
inline void swap( Register s1, Register s2, Register d );
inline void swap( Register s1, int simm13a, Register d);
inline void swap( Address& a, Register d, int offset = 0 );

// pp 232

// Atomic swap in alternate address space.
void swapa( Register s1, Register s2, int ia, Register d ) { v9_dep(); emit_long( op(ldst_op) | rd(d) | op3(swap_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
void swapa( Register s1, int simm13a, Register d ) { v9_dep(); emit_long( op(ldst_op) | rd(d) | op3(swap_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
// pp 234, note op in book is wrong, see pp 268

// Tagged add/subtract (the ...tv variants trap on overflow and are
// deprecated on V9).
void taddcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | rs2(s2) ); }
void taddcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void taddcctv( Register s1, Register s2, Register d ) { v9_dep(); emit_long( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | rs2(s2) ); }
void taddcctv( Register s1, int simm13a, Register d ) { v9_dep(); emit_long( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

// pp 235

void tsubcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | rs2(s2) ); }
void tsubcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void tsubcctv( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | rs2(s2) ); }
void tsubcctv( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

// pp 237

// Conditional trap on integer condition codes; the immediate trap number
// occupies bits 6:0.  v8_no_cc checks that a V8 target does not name a
// V9-only condition-code register.
void trap( Condition c, CC cc, Register s1, Register s2 ) { v8_no_cc(cc); emit_long( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
void trap( Condition c, CC cc, Register s1, int trapa ) { v8_no_cc(cc); emit_long( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
// simple uncond. trap
void trap( int trapa ) { trap( always, icc, G0, trapa ); }
// pp 239 omit write priv register for now

// Write state / ancillary registers (WRASR family).  The register to write
// is selected by the rd-position field (bits 29:25): 0 = %y, 2 = %ccr,
// 3 = %asi, 6 = %fprs.  The immediate form of wrccr sets the i bit
// (bit 13) explicitly via u_field(1, 13, 13).
inline void wry( Register d) { v9_dep(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(0, 29, 25)); }
inline void wrccr(Register s) { v9_only(); emit_long( op(arith_op) | rs1(s) | op3(wrreg_op3) | u_field(2, 29, 25)); }
inline void wrccr(Register s, int simm13a) { v9_only(); emit_long( op(arith_op) |
rs1(s) |
op3(wrreg_op3) |
u_field(2, 29, 25) |
u_field(1, 13, 13) |
simm(simm13a, 13)); }
inline void wrasi( Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25)); }
inline void wrfprs( Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(6, 29, 25)); }

// For a given register condition, return the appropriate condition code
// Condition (the one you would use to get the same effect after "tst" on
// the target register.)
Assembler::Condition reg_cond_to_cc_cond(RCondition in);
// Creation
Assembler(CodeBuffer* code) : AbstractAssembler(code) {
#ifdef CHECK_DELAY
// start with no pending delay-slot obligation
delay_state = no_delay;
#endif
}

// Testing
#ifndef PRODUCT
// Self-test hooks for debug builds: emit/verify V9 and V8-only sequences.
void test_v9();
void test_v8_onlys();
#endif
1643 };
// Stack-allocated snapshot of the full SPARC register state, used by the
// debugging stubs to dump and restore registers around calls into C code.
class RegistersForDebugging : public StackObj {
public:
intptr_t i[8], l[8], o[8], g[8]; // in, local, out, global integer registers
float f[32]; // single-precision float registers
double d[32]; // double-precision float registers

void print(outputStream* s);

// Byte offsets of individual slots, for use from generated assembly.
static int i_offset(int j) { return offset_of(RegistersForDebugging, i[j]); }
static int l_offset(int j) { return offset_of(RegistersForDebugging, l[j]); }
static int o_offset(int j) { return offset_of(RegistersForDebugging, o[j]); }
static int g_offset(int j) { return offset_of(RegistersForDebugging, g[j]); }
static int f_offset(int j) { return offset_of(RegistersForDebugging, f[j]); }
// j is a double register number; doubles use even numbers, hence j/2.
static int d_offset(int j) { return offset_of(RegistersForDebugging, d[j / 2]); }

// gen asm code to save regs
static void save_registers(MacroAssembler* a);

// restore global registers in case C code disturbed them
static void restore_registers(MacroAssembler* a, Register r);

};
1671 // MacroAssembler extends Assembler by a few frequently used macros.
1672 //
1673 // Most of the standard SPARC synthetic ops are defined here.
1674 // Instructions for which a 'better' code sequence exists depending
1675 // on arguments should also go in here.
1677 #define JMP2(r1, r2) jmp(r1, r2, __FILE__, __LINE__)
1678 #define JMP(r1, off) jmp(r1, off, __FILE__, __LINE__)
1679 #define JUMP(a, off) jump(a, off, __FILE__, __LINE__)
1680 #define JUMPL(a, d, off) jumpl(a, d, off, __FILE__, __LINE__)
1683 class MacroAssembler: public Assembler {
protected:
// Support for VM calls
// This is the base routine called by the different versions of call_VM_leaf. The interpreter
// may customize this version by overriding it for its purposes (e.g., to save/restore
// additional registers when doing a VM call).
#ifdef CC_INTERP
// The C++ interpreter does not override these, so save the vtable entries.
#define VIRTUAL
#else
#define VIRTUAL virtual
#endif

VIRTUAL void call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments);

//
// It is imperative that all calls into the VM are handled via the call_VM macros.
// They make sure that the stack linkage is setup correctly. call_VM's correspond
// to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
//
// This is the base routine called by the different versions of call_VM. The interpreter
// may customize this version by overriding it for its purposes (e.g., to save/restore
// additional registers when doing a VM call).
//
// A non-volatile java_thread_cache register should be specified so
// that the G2_thread value can be preserved across the call.
// (If java_thread_cache is noreg, then a slow get_thread call
// will re-initialize the G2_thread.) call_VM_base returns the register that contains the
// thread.
//
// If no last_java_sp is specified (noreg) than SP will be used instead.

virtual void call_VM_base(
Register oop_result, // where an oop-result ends up if any; use noreg otherwise
Register java_thread_cache, // the thread if computed before ; use noreg otherwise
Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
address entry_point, // the entry point
int number_of_arguments, // the number of arguments (w/o thread) to pop after call
bool check_exception=true // flag which indicates if exception should be checked
);

// This routine should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler,
// as only the interpreter handles PopFrame and ForceEarlyReturn requests.
virtual void check_and_handle_popframe(Register scratch_reg);
virtual void check_and_handle_earlyret(Register scratch_reg);
public:
MacroAssembler(CodeBuffer* code) : Assembler(code) {}

// Support for NULL-checks
//
// Generates code that causes a NULL OS exception if the content of reg is NULL.
// If the accessed location is M[reg + offset] and the offset is known, provide the
// offset. No explicit code generation is needed if the offset is within a certain
// range (0 <= offset <= page_size).
//
// %%%%%% Currently not done for SPARC

void null_check(Register reg, int offset = -1);
static bool needs_explicit_null_check(intptr_t offset);

// support for delayed instructions
// (returns this so calls can be chained: a->delayed()->nop())
MacroAssembler* delayed() { Assembler::delayed(); return this; }

// branches that use right instruction for v8 vs. v9
inline void br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
inline void br( Condition c, bool a, Predict p, Label& L );
inline void fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
inline void fb( Condition c, bool a, Predict p, Label& L );

// compares register with zero and branches (V9 and V8 instructions)
void br_zero( Condition c, bool a, Predict p, Register s1, Label& L);
// Compares a pointer register with zero and branches on (not)null.
// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
void br_null ( Register s1, bool a, Predict p, Label& L );
void br_notnull( Register s1, bool a, Predict p, Label& L );

// These versions will do the most efficient thing on v8 and v9. Perhaps
// this is what the routine above was meant to do, but it didn't (and
// didn't cover both target address kinds.)
void br_on_reg_cond( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt = relocInfo::none );
void br_on_reg_cond( RCondition c, bool a, Predict p, Register s1, Label& L);

inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );

// Branch that tests xcc in LP64 and icc in !LP64
inline void brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
inline void brx( Condition c, bool a, Predict p, Label& L );

// unconditional short branch
inline void ba( bool a, Label& L );

// Branch that tests fp condition codes
inline void fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
inline void fbp( Condition c, bool a, CC cc, Predict p, Label& L );

// get PC the best way
inline int get_pc( Register d );
// Sparc shorthands(pp 85, V8 manual, pp 289 V9 manual)
// cmp is a subtract that discards the result into G0, keeping only the flags.
inline void cmp( Register s1, Register s2 ) { subcc( s1, s2, G0 ); }
inline void cmp( Register s1, int simm13a ) { subcc( s1, simm13a, G0 ); }

inline void jmp( Register s1, Register s2 );
inline void jmp( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );

inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type );
inline void call( Label& L, relocInfo::relocType rt = relocInfo::runtime_call_type );
inline void callr( Register s1, Register s2 );
inline void callr( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );

// Emits nothing on V8
inline void iprefetch( address d, relocInfo::relocType rt = relocInfo::none );
inline void iprefetch( Label& L);

// tst sets the condition codes from s without modifying any register.
inline void tst( Register s ) { orcc( G0, s, G0 ); }
1801 #ifdef PRODUCT
1802 inline void ret( bool trace = TraceJumps ) { if (trace) {
1803 mov(I7, O7); // traceable register
1804 JMP(O7, 2 * BytesPerInstWord);
1805 } else {
1806 jmpl( I7, 2 * BytesPerInstWord, G0 );
1807 }
1808 }
1810 inline void retl( bool trace = TraceJumps ) { if (trace) JMP(O7, 2 * BytesPerInstWord);
1811 else jmpl( O7, 2 * BytesPerInstWord, G0 ); }
1812 #else
1813 void ret( bool trace = TraceJumps );
1814 void retl( bool trace = TraceJumps );
1815 #endif /* PRODUCT */
// Required platform-specific helpers for Label::patch_instructions.
// They _shadow_ the declarations in AbstractAssembler, which are undefined.
void pd_patch_instruction(address branch, address target);
#ifndef PRODUCT
static void pd_print_patched_instruction(address branch);
#endif

// sethi Macro handles optimizations and relocations
void sethi( Address& a, bool ForceRelocatable = false );
void sethi( intptr_t imm22a, Register d, bool ForceRelocatable = false, RelocationHolder const& rspec = RelocationHolder());

// compute the size of a sethi/set
static int size_of_sethi( address a, bool worst_case = false );
static int worst_case_size_of_set();

// set may be either setsw or setuw (high 32 bits may be zero or sign)
void set( intptr_t value, Register d, RelocationHolder const& rspec = RelocationHolder() );
void setsw( int value, Register d, RelocationHolder const& rspec = RelocationHolder() );
void set64( jlong value, Register d, Register tmp);

// sign-extend 32 to 64
// (arithmetic shift by %g0, i.e. zero bits, copies the value and
// sign-extends the low 32 bits on V9)
inline void signx( Register s, Register d ) { sra( s, G0, d); }
inline void signx( Register d ) { sra( d, G0, d); }

// bitwise not via xnor with %g0
inline void not1( Register s, Register d ) { xnor( s, G0, d ); }
inline void not1( Register d ) { xnor( d, G0, d ); }

// arithmetic negate: 0 - s
inline void neg( Register s, Register d ) { sub( G0, s, d ); }
inline void neg( Register d ) { sub( G0, d, d ); }
// Compare-and-swap in the primary address space.
inline void cas( Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY); }
inline void casx( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY); }
// Functions for isolating 64 bit atomic swaps for LP64
// cas_ptr will perform cas for 32 bit VM's and casx for 64 bit VM's
inline void cas_ptr( Register s1, Register s2, Register d) {
#ifdef _LP64
casx( s1, s2, d );
#else
cas( s1, s2, d );
#endif
}

// Functions for isolating 64 bit shifts for LP64
inline void sll_ptr( Register s1, Register s2, Register d );
inline void sll_ptr( Register s1, int imm6a, Register d );
inline void sll_ptr( Register s1, RegisterOrConstant s2, Register d );
inline void srl_ptr( Register s1, Register s2, Register d );
inline void srl_ptr( Register s1, int imm6a, Register d );

// little-endian
inline void casl( Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY_LITTLE); }
inline void casxl( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY_LITTLE); }

// Increment / decrement a register in place by a simm13 constant.
inline void inc( Register d, int const13 = 1 ) { add( d, const13, d); }
inline void inccc( Register d, int const13 = 1 ) { addcc( d, const13, d); }

inline void dec( Register d, int const13 = 1 ) { sub( d, const13, d); }
inline void deccc( Register d, int const13 = 1 ) { subcc( d, const13, d); }

// Bit test / set / clear / toggle; the two-register forms modify s2.
inline void btst( Register s1, Register s2 ) { andcc( s1, s2, G0 ); }
inline void btst( int simm13a, Register s ) { andcc( s, simm13a, G0 ); }

inline void bset( Register s1, Register s2 ) { or3( s1, s2, s2 ); }
inline void bset( int simm13a, Register s ) { or3( s, simm13a, s ); }

inline void bclr( Register s1, Register s2 ) { andn( s1, s2, s2 ); }
inline void bclr( int simm13a, Register s ) { andn( s, simm13a, s ); }

inline void btog( Register s1, Register s2 ) { xor3( s1, s2, s2 ); }
inline void btog( int simm13a, Register s ) { xor3( s, simm13a, s ); }
// Zero a register.
inline void clr( Register d ) { or3( G0, G0, d ); }

// Zero memory (byte/half/word/extended); defined in the .inline.hpp file.
inline void clrb( Register s1, Register s2);
inline void clrh( Register s1, Register s2);
inline void clr( Register s1, Register s2);
inline void clrx( Register s1, Register s2);

inline void clrb( Register s1, int simm13a);
inline void clrh( Register s1, int simm13a);
inline void clr( Register s1, int simm13a);
inline void clrx( Register s1, int simm13a);

// copy & clear upper word
// (logical shift by %g0, i.e. zero bits, zero-extends the low 32 bits)
inline void clruw( Register s, Register d ) { srl( s, G0, d); }
// clear upper word
inline void clruwu( Register d ) { srl( d, G0, d); }

// membar pseudo instruction. takes into account target memory model.
inline void membar( Assembler::Membar_mask_bits const7a );

// returns if membar generates anything.
inline bool membar_has_effect( Assembler::Membar_mask_bits const7a );
// mov pseudo instructions
// Register-to-register move; emits nothing for a self-move, but in that
// case asserts we are not in a delay slot (which would then stay empty).
inline void mov( Register s, Register d) {
if ( s != d ) or3( G0, s, d);
else assert_not_delayed(); // Put something useful in the delay slot!
}
1917 inline void mov_or_nop( Register s, Register d) {
1918 if ( s != d ) or3( G0, s, d);
1919 else nop();
1920 }
// Load a simm13 immediate into d.
inline void mov( int simm13a, Register d) { or3( G0, simm13a, d); }

// address pseudos: make these names unlike instruction names to avoid confusion
inline void split_disp( Address& a, Register temp );
inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
inline void load_address( Address& a, int offset = 0 );
inline void load_contents( Address& a, Register d, int offset = 0 );
inline void load_ptr_contents( Address& a, Register d, int offset = 0 );
inline void store_contents( Register s, Address& a, int offset = 0 );
inline void store_ptr_contents( Register s, Address& a, int offset = 0 );
inline void jumpl_to( Address& a, Register d, int offset = 0 );
inline void jump_to( Address& a, int offset = 0 );

// ring buffer traceable jumps
// (file/line are recorded for the TraceJumps ring buffer; callers normally
// go through the JMP/JMP2/JUMP/JUMPL macros)
void jmp2( Register r1, Register r2, const char* file, int line );
void jmp ( Register r1, int offset, const char* file, int line );

void jumpl( Address& a, Register d, int offset, const char* file, int line );
void jump ( Address& a, int offset, const char* file, int line );

// argument pseudos:

inline void load_argument( Argument& a, Register d );
inline void store_argument( Register s, Argument& a );
inline void store_ptr_argument( Register s, Argument& a );
inline void store_float_argument( FloatRegister s, Argument& a );
inline void store_double_argument( FloatRegister s, Argument& a );
inline void store_long_argument( Register s, Argument& a );
// handy macros:

// Round r up to a multiple of modulus.  The -modulus mask only works when
// modulus is a power of two, so this must be called with such a value.
inline void round_to( Register r, int modulus ) {
assert_not_delayed();
inc( r, modulus - 1 );
and3( r, -modulus, r );
}
// --------------------------------------------------

// Functions for isolating 64 bit loads for LP64
// ld_ptr will perform ld for 32 bit VM's and ldx for 64 bit VM's
// st_ptr will perform st for 32 bit VM's and stx for 64 bit VM's
inline void ld_ptr( Register s1, Register s2, Register d );
inline void ld_ptr( Register s1, int simm13a, Register d);
inline void ld_ptr( Register s1, RegisterOrConstant s2, Register d );
inline void ld_ptr( const Address& a, Register d, int offset = 0 );
inline void st_ptr( Register d, Register s1, Register s2 );
inline void st_ptr( Register d, Register s1, int simm13a);
inline void st_ptr( Register d, Register s1, RegisterOrConstant s2 );
inline void st_ptr( Register d, const Address& a, int offset = 0 );

// ld_long will perform ld for 32 bit VM's and ldx for 64 bit VM's
// st_long will perform st for 32 bit VM's and stx for 64 bit VM's
inline void ld_long( Register s1, Register s2, Register d );
inline void ld_long( Register s1, int simm13a, Register d );
inline void ld_long( Register s1, RegisterOrConstant s2, Register d );
inline void ld_long( const Address& a, Register d, int offset = 0 );
inline void st_long( Register d, Register s1, Register s2 );
inline void st_long( Register d, Register s1, int simm13a );
inline void st_long( Register d, Register s1, RegisterOrConstant s2 );
inline void st_long( Register d, const Address& a, int offset = 0 );

// Loading values by size and signed-ness
void load_sized_value(Register s1, RegisterOrConstant s2, Register d,
int size_in_bytes, bool is_signed);

// Helpers for address formation.
// They update the dest in place, whether it is a register or constant.
// They emit no code at all if src is a constant zero.
// If dest is a constant and src is a register, the temp argument
// is required, and becomes the result.
// If dest is a register and src is a non-simm13 constant,
// the temp argument is required, and is used to materialize the constant.
void regcon_inc_ptr( RegisterOrConstant& dest, RegisterOrConstant src,
Register temp = noreg );
void regcon_sll_ptr( RegisterOrConstant& dest, RegisterOrConstant src,
Register temp = noreg );
2001 RegisterOrConstant ensure_rs2(RegisterOrConstant rs2, Register sethi_temp) {
2002 guarantee(sethi_temp != noreg, "constant offset overflow");
2003 if (is_simm13(rs2.constant_or_zero()))
2004 return rs2; // register or short constant
2005 set(rs2.as_constant(), sethi_temp);
2006 return sethi_temp;
2007 }
// --------------------------------------------------

public:
// traps as per trap.h (SPARC ABI?)

void breakpoint_trap();
void breakpoint_trap(Condition c, CC cc = icc);
void flush_windows_trap();
void clean_windows_trap();
void get_psr_trap();
void set_psr_trap();

// V8/V9 flush_windows
void flush_windows();

// Support for serializing memory accesses between threads
void serialize_memory(Register thread, Register tmp1, Register tmp2);

// Stack frame creation/removal
void enter();
void leave();

// V8/V9 integer multiply
void mult(Register s1, Register s2, Register d);
void mult(Register s1, int simm13a, Register d);

// V8/V9 read and write of condition codes.
void read_ccr(Register d);
void write_ccr(Register s);

// Manipulation of C++ bools
// These are idioms to flag the need for care with accessing bools but on
// this platform we assume byte size

inline void stbool( Register d, const Address& a, int offset = 0 ) { stb(d, a, offset); }
inline void ldbool( const Address& a, Register d, int offset = 0 ) { ldsb( a, d, offset ); }
inline void tstbool( Register s ) { tst(s); }
// Load a bool constant as 0/1 into d.
inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }

// klass oop manipulations if compressed
void load_klass(Register src_oop, Register klass);
void store_klass(Register klass, Register dst_oop);
void store_klass_gap(Register s, Register dst_oop);

// oop manipulations
void load_heap_oop(const Address& s, Register d, int offset = 0);
void load_heap_oop(Register s1, Register s2, Register d);
void load_heap_oop(Register s1, int simm13a, Register d);
void store_heap_oop(Register d, Register s1, Register s2);
void store_heap_oop(Register d, Register s1, int simm13a);
void store_heap_oop(Register d, const Address& a, int offset = 0);

// Compress / decompress oops; the one-register forms work in place.
void encode_heap_oop(Register src, Register dst);
void encode_heap_oop(Register r) {
encode_heap_oop(r, r);
}
void decode_heap_oop(Register src, Register dst);
void decode_heap_oop(Register r) {
decode_heap_oop(r, r);
}
// _not_null variants may skip the null check of the general versions.
void encode_heap_oop_not_null(Register r);
void decode_heap_oop_not_null(Register r);
void encode_heap_oop_not_null(Register src, Register dst);
void decode_heap_oop_not_null(Register src, Register dst);
2074 // Support for managing the JavaThread pointer (i.e.; the reference to
2075 // thread-local information).
2076 void get_thread(); // load G2_thread
2077 void verify_thread(); // verify G2_thread contents
2078 void save_thread (const Register threache); // save to cache
2079 void restore_thread(const Register thread_cache); // restore from cache
2081 // Support for last Java frame (but use call_VM instead where possible)
2082 void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
2083 void reset_last_Java_frame(void);
// Call into the VM.
// Passes the thread pointer (in O0) as a prepended argument.
// Makes sure oop return values are visible to the GC.
void call_VM(Register oop_result, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);

// these overloadings are not presently used on SPARC:
void call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);

// Leaf calls do not set up a Java frame and must not lock or GC.
void call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments = 0);
void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1);
void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2);
void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3);

void get_vm_result (Register oop_result);
void get_vm_result_2(Register oop_result);

// vm result is currently getting hijacked for oop preservation
void set_vm_result(Register oop_result);

// if call_VM_base was called with check_exceptions=false, then call
// check_and_forward_exception to handle exceptions when it is safe
void check_and_forward_exception(Register scratch_reg);
 private:
  // For V8.  Named _trap, so presumably these read/write the condition-code
  // register via a software trap (V8 lacks the V9 rdccr/wrccr instructions)
  // -- TODO confirm against the definitions.
  void read_ccr_trap(Register ccr_save);
  void write_ccr_trap(Register ccr_save1, Register scratch1, Register scratch2);

#ifdef ASSERT
  // For V8 debugging. Uses V8 instruction sequence and checks
  // result with V9 instructions rdccr and wrccr.
  // Uses Gscatch and Gscatch2
  void read_ccr_v8_assert(Register ccr_save);
  void write_ccr_v8_assert(Register ccr_save);
#endif // ASSERT
 public:

  // Write to card table for - register is destroyed afterwards.
  void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);

  // Card-table post-barrier for a reference store (tmp is scratch).
  void card_write_barrier_post(Register store_addr, Register new_val, Register tmp);

#ifndef SERIALGC
  // Array store and offset: obj plus either the index register or the
  // constant offset locates the field/element being overwritten.
  void g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs);

  void g1_write_barrier_post(Register store_addr, Register new_val, Register tmp);

  // May do filtering, depending on the boolean arguments.
  void g1_card_table_write(jbyte* byte_map_base,
                           Register tmp, Register obj, Register new_val,
                           bool region_filter, bool null_filter);
#endif // SERIALGC
  // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
  void push_fTOS();

  // pops double TOS element from CPU stack and pushes on FPU stack
  void pop_fTOS();

  void empty_FPU_stack();

  // Bulk save/restore of register state (definitions in the .cpp file).
  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  // Debugging
  void _verify_oop(Register reg, const char * msg, const char * file, int line);
  void _verify_oop_addr(Address addr, const char * msg, const char * file, int line);

  // The macros expand the underscore forms with the register name and the
  // call site baked into the message.  (Active only with +VerifyOops.)
#define verify_oop(reg) _verify_oop(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr(addr, "broken oop addr ", __FILE__, __LINE__)

  // only if +VerifyOops
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
  // only if +VerifyFPU
  void stop(const char* msg); // prints msg, dumps registers and stops execution
  void warn(const char* msg); // prints msg, but don't stop
  void untested(const char* what = "");
  // NOTE(review): the 1024-byte buffer is never freed -- presumably fine since
  // stop() halts execution -- and 'what' is assumed short enough to fit.
  void unimplemented(const char* what = "") { char* b = new char[1024]; sprintf(b, "unimplemented: %s", what); stop(b); }
  void should_not_reach_here() { stop("should not reach here"); }
  void print_CPU_state();
  // oops in code
  Address allocate_oop_address( jobject obj, Register d ); // allocate_index
  Address constant_oop_address( jobject obj, Register d ); // find_index
  inline void set_oop ( jobject obj, Register d ); // uses allocate_oop_address
  inline void set_oop_constant( jobject obj, Register d ); // uses constant_oop_address
  inline void set_oop ( Address obj_addr ); // same as load_address

  // Load a compressed (narrow) oop constant into d.
  void set_narrow_oop( jobject obj, Register d );

  // nop padding
  void align(int modulus);

  // declare a safepoint
  void safepoint();

  // factor out part of stop into subroutine to save space
  void stop_subroutine();
  // factor out part of verify_oop into subroutine to save space
  void verify_oop_subroutine();

  // side-door communication with signalHandler in os_solaris.cpp
  static address _verify_oop_implicit_branch[3];

#ifndef PRODUCT
  static void test();
#endif
  // convert an incoming arglist to varargs format; put the pointer in d
  void set_varargs( Argument a, Register d );

  int total_frame_size_in_bytes(int extraWords);

  // used when extraWords known statically
  void save_frame(int extraWords);
  void save_frame_c1(int size_in_bytes);
  // make a frame, and simultaneously pass up one or two register values
  // into the new register window
  void save_frame_and_mov(int extraWords, Register s1, Register d1, Register s2 = Register(), Register d2 = Register());

  // give no. (outgoing) params, calc # of words will need on frame
  void calc_mem_param_words(Register Rparam_words, Register Rresult);

  // used to calculate frame size dynamically
  // result is in bytes and must be negated for save inst
  void calc_frame_size(Register extraWords, Register resultReg);

  // calc and also save
  void calc_frame_size_and_save(Register extraWords, Register resultReg);

  static void debug(char* msg, RegistersForDebugging* outWindow);

  // implementations of bytecodes used by both interpreter and compiler

  // 64-bit long operations; the hi/low register-pair forms serve 32-bit
  // modes, while the _LP64 form below works on single 64-bit registers.
  void lcmp( Register Ra_hi, Register Ra_low,
             Register Rb_hi, Register Rb_low,
             Register Rresult);

  void lneg( Register Rhi, Register Rlow );

  void lshl( Register Rin_high, Register Rin_low, Register Rcount,
             Register Rout_high, Register Rout_low, Register Rtemp );

  void lshr( Register Rin_high, Register Rin_low, Register Rcount,
             Register Rout_high, Register Rout_low, Register Rtemp );

  void lushr( Register Rin_high, Register Rin_low, Register Rcount,
              Register Rout_high, Register Rout_low, Register Rtemp );

#ifdef _LP64
  void lcmp( Register Ra, Register Rb, Register Rresult);
#endif
  // Compare two floats/doubles into Rresult; unordered_result selects the
  // value produced when the comparison is unordered (a NaN operand).
  void float_cmp( bool is_float, int unordered_result,
                  FloatRegister Fa, FloatRegister Fb,
                  Register Rresult);

  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
  // single-register form delegates directly to the raw assembler instruction
  void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { Assembler::fneg(w, sd); }
  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
  void save_all_globals_into_locals();
  void restore_globals_from_locals();

  // Compare-and-swap primitives.  The _under_lock names suggest a fallback
  // path (a lock at lock_addr, or a VM call when use_call_vm is set) for
  // hardware without a native CAS -- TODO confirm against the definitions.
  void casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
    address lock_addr=0, bool use_call_vm=false);
  void cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
    address lock_addr=0, bool use_call_vm=false);
  void casn (Register addr_reg, Register cmp_reg, Register set_reg) ;
  // These set the icc condition code to equal if the lock succeeded
  // and notEqual if it failed and requires a slow case
  void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,
                            Register Rscratch,
                            BiasedLockingCounters* counters = NULL,
                            bool try_bias = UseBiasedLocking);
  void compiler_unlock_object(Register Roop, Register Rmark, Register Rbox,
                              Register Rscratch,
                              bool try_bias = UseBiasedLocking);

  // Biased locking support
  // Upon entry, lock_reg must point to the lock record on the stack,
  // obj_reg must contain the target object, and mark_reg must contain
  // the target object's header.
  // NOTE(review): this signature has no lock_reg parameter -- the sentence
  // above looks like a stale copy from another platform's version; verify.
  // Destroys mark_reg if an attempt is made to bias an anonymously
  // biased lock. In this case a failure will go either to the slow
  // case or fall through with the notEqual condition code set with
  // the expectation that the slow case in the runtime will be called.
  // In the fall-through case where the CAS-based lock is done,
  // mark_reg is not destroyed.
  void biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
                            Label& done, Label* slow_case = NULL,
                            BiasedLockingCounters* counters = NULL);
  // Upon entry, the base register of mark_addr must contain the oop.
  // Destroys temp_reg.
  // If allow_delay_slot_filling is set to true, the next instruction
  // emitted after this one will go in an annulled delay slot if the
  // biased locking exit case failed.
  void biased_locking_exit(Address mark_addr, Register temp_reg, Label& done, bool allow_delay_slot_filling = false);
  // allocation
  // Inline fast-path allocation directly in eden; branches to slow_case
  // when the fast path cannot satisfy the request.
  void eden_allocate(
    Register obj,               // result: pointer to object after successful allocation
    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
    int con_size_in_bytes,      // object size in bytes if known at compile time
    Register t1,                // temp register
    Register t2,                // temp register
    Label& slow_case            // continuation point if fast allocation fails
  );
  // Inline fast-path allocation from the thread-local allocation buffer.
  void tlab_allocate(
    Register obj,               // result: pointer to object after successful allocation
    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
    int con_size_in_bytes,      // object size in bytes if known at compile time
    Register t1,                // temp register
    Label& slow_case            // continuation point if fast allocation fails
  );
  // Refill the TLAB; the three labels are the possible continuations.
  void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
  // interface method calling
  // Presumably scans recv_klass's itable for intf_klass, leaving the method
  // for itable_index in method_result, and branches to no_such_interface if
  // the receiver does not implement the interface -- see the definition.
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register temp_reg, Register temp2_reg,
                               Label& no_such_interface);
  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg and temp2_reg.
  // If super_check_offset is not -1, temp2_reg is not used and can be noreg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1),
                                     Register instanceof_hack = noreg);  // special-case hook; see the definition before using

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg can be noreg, if no temps are available.
  // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
  // Updates the sub's secondary super cache as necessary.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Register temp3_reg,
                                     Register temp4_reg,
                                     Label* L_success,
                                     Label* L_failure);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Register temp2_reg,
                           Label& L_success);
  // Stack overflow checking

  // Note: this clobbers G3_scratch
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    // NOTE(review): the assert message's "negative offset" refers to the
    // effective displacement stored below, which negates the argument.
    assert(offset > 0, "must bang with negative offset");
    set((-offset)+STACK_BIAS, G3_scratch);
    st(G0, SP, G3_scratch);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Clobbers tsp and scratch registers.
  void bang_stack_size(Register Rsize, Register Rtsp, Register Rscratch);

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset);

  void verify_tlab();

  Condition negate_condition(Condition cond);

  // Helper functions for statistics gathering.
  // Conditionally (non-atomically) increments passed counter address, preserving condition codes.
  void cond_inc(Condition cond, address counter_addr, Register Rtemp1, Register Rtemp2);
  // Unconditional increment.
  void inc_counter(address counter_addr, Register Rtemp1, Register Rtemp2);

  // undo the VIRTUAL helper macro (defined before this class, outside this chunk)
#undef VIRTUAL

};
/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
class SkipIfEqual : public StackObj {
 private:
  MacroAssembler* _masm;  // assembler the skip branch is emitted into
  Label _label;           // branch target; presumably bound by the destructor

 public:
  // 'temp' is a temp register that this object can use (and trash)
  SkipIfEqual(MacroAssembler*, Register temp,
              const bool* flag_addr, Assembler::Condition condition);
  ~SkipIfEqual();
};
#ifdef ASSERT
// On RISC there is nothing to gain from verifying instruction boundaries,
// so report that no instruction-mark checking is required.
inline bool AbstractAssembler::pd_check_instruction_mark() {
  return false;
}
#endif