Thu, 21 Jul 2011 11:25:07 -0700
7063628: Use cbcond on T4
Summary: Add new short branch instruction to the HotSpot SPARC assembler.
Reviewed-by: never, twisti, jrose
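
The new cbcond instruction fuses a compare and a branch into a single
instruction whose target is a signed 10-bit word displacement, so it can only
reach targets within roughly +/-2KB of the branch itself. A minimal sketch of
that reach test (illustrative C++ only, not code from this changeset; the real
assembler uses its own helpers such as wdisp10):

static bool fits_cbcond(intptr_t branch_pc, intptr_t target_pc) {
  intptr_t word_disp = (target_pc - branch_pc) >> 2; // SPARC instructions are 4 bytes
  return word_disp >= -512 && word_disp <= 511;      // signed 10-bit field
}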
1 /*
2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "asm/assembler.hpp"
27 #include "assembler_sparc.inline.hpp"
28 #include "gc_interface/collectedHeap.inline.hpp"
29 #include "interpreter/interpreter.hpp"
30 #include "memory/cardTableModRefBS.hpp"
31 #include "memory/resourceArea.hpp"
32 #include "prims/methodHandles.hpp"
33 #include "runtime/biasedLocking.hpp"
34 #include "runtime/interfaceSupport.hpp"
35 #include "runtime/objectMonitor.hpp"
36 #include "runtime/os.hpp"
37 #include "runtime/sharedRuntime.hpp"
38 #include "runtime/stubRoutines.hpp"
39 #ifndef SERIALGC
40 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
41 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
42 #include "gc_implementation/g1/heapRegion.hpp"
43 #endif
45 #ifdef PRODUCT
46 #define BLOCK_COMMENT(str) /* nothing */
47 #else
48 #define BLOCK_COMMENT(str) block_comment(str)
49 #endif
51 // Convert the raw encoding form into the form expected by the
52 // constructor for Address.
53 Address Address::make_raw(int base, int index, int scale, int disp, bool disp_is_oop) {
54 assert(scale == 0, "not supported");
55 RelocationHolder rspec;
56 if (disp_is_oop) {
57 rspec = Relocation::spec_simple(relocInfo::oop_type);
58 }
60 Register rindex = as_Register(index);
61 if (rindex != G0) {
62 Address madr(as_Register(base), rindex);
63 madr._rspec = rspec;
64 return madr;
65 } else {
66 Address madr(as_Register(base), disp);
67 madr._rspec = rspec;
68 return madr;
69 }
70 }
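// Usage sketch (illustrative only, not part of this change): a real index
// register selects the register+register form, while G0 as the index selects
// the register+displacement form:
//   Address a1 = Address::make_raw(L0->encoding(), L1->encoding(), 0, 0,  false); // [L0 + L1]
//   Address a2 = Address::make_raw(L0->encoding(), G0->encoding(), 0, 16, false); // [L0 + 16]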
72 Address Argument::address_in_frame() const {
73 // Warning: In LP64 mode disp will occupy more than 10 bits, but
74 // op codes such as ld or ldx only access disp() to get
75 // their simm13 argument.
76 int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
77 if (is_in())
78 return Address(FP, disp); // In argument.
79 else
80 return Address(SP, disp); // Out argument.
81 }
83 static const char* argumentNames[][2] = {
84 {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
85 {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
86 {"A(n>9)","P(n>9)"}
87 };
89 const char* Argument::name() const {
90 int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
91 int num = number();
92 if (num >= nofArgs) num = nofArgs - 1;
93 return argumentNames[num][is_in() ? 1 : 0];
94 }
96 void Assembler::print_instruction(int inst) {
97 const char* s;
98 switch (inv_op(inst)) {
99 default: s = "????"; break;
100 case call_op: s = "call"; break;
101 case branch_op:
102 switch (inv_op2(inst)) {
103 case fb_op2: s = "fb"; break;
104 case fbp_op2: s = "fbp"; break;
105 case br_op2: s = "br"; break;
106 case bp_op2: s = "bp"; break;
107 case cb_op2: s = "cb"; break;
108 case bpr_op2: {
109 if (is_cbcond(inst)) {
110 s = is_cxb(inst) ? "cxb" : "cwb";
111 } else {
112 s = "bpr";
113 }
114 break;
115 }
116 default: s = "????"; break;
117 }
118 }
119 ::tty->print("%s", s);
120 }
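// Note: cbcond has no op2 code of its own -- it reuses bpr_op2 -- so every
// bpr_op2 site must first ask is_cbcond(inst) to tell the two encodings
// apart, as above and in patched_branch()/branch_destination() below.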
123 // Patch instruction inst at offset inst_pos to refer to dest_pos
124 // and return the resulting instruction.
125 // We should have pcs, not offsets, but since all is relative, it will work out
126 // OK.
127 int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) {
129 int m; // mask for displacement field
130 int v; // new value for displacement field
131 const int word_aligned_ones = -4;
132 switch (inv_op(inst)) {
133 default: ShouldNotReachHere();
134 case call_op: m = wdisp(word_aligned_ones, 0, 30); v = wdisp(dest_pos, inst_pos, 30); break;
135 case branch_op:
136 switch (inv_op2(inst)) {
137 case fbp_op2: m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break;
138 case bp_op2: m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break;
139 case fb_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
140 case br_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
141 case cb_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
142 case bpr_op2: {
143 if (is_cbcond(inst)) {
144 m = wdisp10(word_aligned_ones, 0);
145 v = wdisp10(dest_pos, inst_pos);
146 } else {
147 m = wdisp16(word_aligned_ones, 0);
148 v = wdisp16(dest_pos, inst_pos);
149 }
150 break;
151 }
152 default: ShouldNotReachHere();
153 }
154 }
155 return (inst & ~m) | v;
156 }
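// Worked example (illustrative): patching is a masked insert. For a cbcond at
// offset 0x40 retargeted to offset 0x60, wdisp10(0x60, 0x40) encodes the word
// displacement (0x60 - 0x40) >> 2 == 8 into the split 10-bit field, and
// (inst & ~m) | v splices it in without disturbing the rest of the encoding.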
158 // Return the offset of the branch destination of instruction inst
159 // at offset pos.
160 // Should have pcs, but since all is relative, it works out.
161 int Assembler::branch_destination(int inst, int pos) {
162 int r;
163 switch (inv_op(inst)) {
164 default: ShouldNotReachHere();
165 case call_op: r = inv_wdisp(inst, pos, 30); break;
166 case branch_op:
167 switch (inv_op2(inst)) {
168 case fbp_op2: r = inv_wdisp( inst, pos, 19); break;
169 case bp_op2: r = inv_wdisp( inst, pos, 19); break;
170 case fb_op2: r = inv_wdisp( inst, pos, 22); break;
171 case br_op2: r = inv_wdisp( inst, pos, 22); break;
172 case cb_op2: r = inv_wdisp( inst, pos, 22); break;
173 case bpr_op2: {
174 if (is_cbcond(inst)) {
175 r = inv_wdisp10(inst, pos);
176 } else {
177 r = inv_wdisp16(inst, pos);
178 }
179 break;
180 }
181 default: ShouldNotReachHere();
182 }
183 }
184 return r;
185 }
187 int AbstractAssembler::code_fill_byte() {
188 return 0x00; // illegal instruction 0x00000000
189 }
191 Assembler::Condition Assembler::reg_cond_to_cc_cond(Assembler::RCondition in) {
192 switch (in) {
193 case rc_z: return equal;
194 case rc_lez: return lessEqual;
195 case rc_lz: return less;
196 case rc_nz: return notEqual;
197 case rc_gz: return greater;
198 case rc_gez: return greaterEqual;
199 default:
200 ShouldNotReachHere();
201 }
202 return equal;
203 }
205 // Generate a bunch of stuff (including v9 instructions)
206 #ifndef PRODUCT
207 void Assembler::test_v9() {
208 add( G0, G1, G2 );
209 add( G3, 0, G4 );
211 addcc( G5, G6, G7 );
212 addcc( I0, 1, I1 );
213 addc( I2, I3, I4 );
214 addc( I5, -1, I6 );
215 addccc( I7, L0, L1 );
216 addccc( L2, (1 << 12) - 2, L3 );
218 Label lbl1, lbl2, lbl3;
220 bind(lbl1);
222 bpr( rc_z, true, pn, L4, pc(), relocInfo::oop_type );
223 delayed()->nop();
224 bpr( rc_lez, false, pt, L5, lbl1);
225 delayed()->nop();
227 fb( f_never, true, pc() + 4, relocInfo::none);
228 delayed()->nop();
229 fb( f_notEqual, false, lbl2 );
230 delayed()->nop();
232 fbp( f_notZero, true, fcc0, pn, pc() - 4, relocInfo::none);
233 delayed()->nop();
234 fbp( f_lessOrGreater, false, fcc1, pt, lbl3 );
235 delayed()->nop();
237 br( equal, true, pc() + 1024, relocInfo::none);
238 delayed()->nop();
239 br( lessEqual, false, lbl1 );
240 delayed()->nop();
241 br( never, false, lbl1 );
242 delayed()->nop();
244 bp( less, true, icc, pn, pc(), relocInfo::none);
245 delayed()->nop();
246 bp( lessEqualUnsigned, false, xcc, pt, lbl2 );
247 delayed()->nop();
249 call( pc(), relocInfo::none);
250 delayed()->nop();
251 call( lbl3 );
252 delayed()->nop();
255 casa( L6, L7, O0 );
256 casxa( O1, O2, O3, 0 );
258 udiv( O4, O5, O7 );
259 udiv( G0, (1 << 12) - 1, G1 );
260 sdiv( G1, G2, G3 );
261 sdiv( G4, -((1 << 12) - 1), G5 );
262 udivcc( G6, G7, I0 );
263 udivcc( I1, -((1 << 12) - 2), I2 );
264 sdivcc( I3, I4, I5 );
265 sdivcc( I6, -((1 << 12) - 0), I7 );
267 done();
268 retry();
270 fadd( FloatRegisterImpl::S, F0, F1, F2 );
271 fsub( FloatRegisterImpl::D, F34, F0, F62 );
273 fcmp( FloatRegisterImpl::Q, fcc0, F0, F60);
274 fcmpe( FloatRegisterImpl::S, fcc1, F31, F30);
276 ftox( FloatRegisterImpl::D, F2, F4 );
277 ftoi( FloatRegisterImpl::Q, F4, F8 );
279 ftof( FloatRegisterImpl::S, FloatRegisterImpl::Q, F3, F12 );
281 fxtof( FloatRegisterImpl::S, F4, F5 );
282 fitof( FloatRegisterImpl::D, F6, F8 );
284 fmov( FloatRegisterImpl::Q, F16, F20 );
285 fneg( FloatRegisterImpl::S, F6, F7 );
286 fabs( FloatRegisterImpl::D, F10, F12 );
288 fmul( FloatRegisterImpl::Q, F24, F28, F32 );
289 fmul( FloatRegisterImpl::S, FloatRegisterImpl::D, F8, F9, F14 );
290 fdiv( FloatRegisterImpl::S, F10, F11, F12 );
292 fsqrt( FloatRegisterImpl::S, F13, F14 );
294 flush( L0, L1 );
295 flush( L2, -1 );
297 flushw();
299 illtrap( (1 << 22) - 2);
301 impdep1( 17, (1 << 19) - 1 );
302 impdep2( 3, 0 );
304 jmpl( L3, L4, L5 );
305 delayed()->nop();
306 jmpl( L6, -1, L7, Relocation::spec_simple(relocInfo::none));
307 delayed()->nop();
310 ldf( FloatRegisterImpl::S, O0, O1, F15 );
311 ldf( FloatRegisterImpl::D, O2, -1, F14 );
314 ldfsr( O3, O4 );
315 ldfsr( O5, -1 );
316 ldxfsr( O6, O7 );
317 ldxfsr( I0, -1 );
319 ldfa( FloatRegisterImpl::D, I1, I2, 1, F16 );
320 ldfa( FloatRegisterImpl::Q, I3, -1, F36 );
322 ldsb( I4, I5, I6 );
323 ldsb( I7, -1, G0 );
324 ldsh( G1, G3, G4 );
325 ldsh( G5, -1, G6 );
326 ldsw( G7, L0, L1 );
327 ldsw( L2, -1, L3 );
328 ldub( L4, L5, L6 );
329 ldub( L7, -1, O0 );
330 lduh( O1, O2, O3 );
331 lduh( O4, -1, O5 );
332 lduw( O6, O7, G0 );
333 lduw( G1, -1, G2 );
334 ldx( G3, G4, G5 );
335 ldx( G6, -1, G7 );
336 ldd( I0, I1, I2 );
337 ldd( I3, -1, I4 );
339 ldsba( I5, I6, 2, I7 );
340 ldsba( L0, -1, L1 );
341 ldsha( L2, L3, 3, L4 );
342 ldsha( L5, -1, L6 );
343 ldswa( L7, O0, (1 << 8) - 1, O1 );
344 ldswa( O2, -1, O3 );
345 lduba( O4, O5, 0, O6 );
346 lduba( O7, -1, I0 );
347 lduha( I1, I2, 1, I3 );
348 lduha( I4, -1, I5 );
349 lduwa( I6, I7, 2, L0 );
350 lduwa( L1, -1, L2 );
351 ldxa( L3, L4, 3, L5 );
352 ldxa( L6, -1, L7 );
353 ldda( G0, G1, 4, G2 );
354 ldda( G3, -1, G4 );
356 ldstub( G5, G6, G7 );
357 ldstub( O0, -1, O1 );
359 ldstuba( O2, O3, 5, O4 );
360 ldstuba( O5, -1, O6 );
362 and3( I0, L0, O0 );
363 and3( G7, -1, O7 );
364 andcc( L2, I2, G2 );
365 andcc( L4, -1, G4 );
366 andn( I5, I6, I7 );
367 andn( I6, -1, I7 );
368 andncc( I5, I6, I7 );
369 andncc( I7, -1, I6 );
370 or3( I5, I6, I7 );
371 or3( I7, -1, I6 );
372 orcc( I5, I6, I7 );
373 orcc( I7, -1, I6 );
374 orn( I5, I6, I7 );
375 orn( I7, -1, I6 );
376 orncc( I5, I6, I7 );
377 orncc( I7, -1, I6 );
378 xor3( I5, I6, I7 );
379 xor3( I7, -1, I6 );
380 xorcc( I5, I6, I7 );
381 xorcc( I7, -1, I6 );
382 xnor( I5, I6, I7 );
383 xnor( I7, -1, I6 );
384 xnorcc( I5, I6, I7 );
385 xnorcc( I7, -1, I6 );
387 membar( Membar_mask_bits(StoreStore | LoadStore | StoreLoad | LoadLoad | Sync | MemIssue | Lookaside ) );
388 membar( StoreStore );
389 membar( LoadStore );
390 membar( StoreLoad );
391 membar( LoadLoad );
392 membar( Sync );
393 membar( MemIssue );
394 membar( Lookaside );
396 fmov( FloatRegisterImpl::S, f_ordered, true, fcc2, F16, F17 );
397 fmov( FloatRegisterImpl::D, rc_lz, L5, F18, F20 );
399 movcc( overflowClear, false, icc, I6, L4 );
400 movcc( f_unorderedOrEqual, true, fcc2, (1 << 10) - 1, O0 );
402 movr( rc_nz, I5, I6, I7 );
403 movr( rc_gz, L1, -1, L2 );
405 mulx( I5, I6, I7 );
406 mulx( I7, -1, I6 );
407 sdivx( I5, I6, I7 );
408 sdivx( I7, -1, I6 );
409 udivx( I5, I6, I7 );
410 udivx( I7, -1, I6 );
412 umul( I5, I6, I7 );
413 umul( I7, -1, I6 );
414 smul( I5, I6, I7 );
415 smul( I7, -1, I6 );
416 umulcc( I5, I6, I7 );
417 umulcc( I7, -1, I6 );
418 smulcc( I5, I6, I7 );
419 smulcc( I7, -1, I6 );
421 mulscc( I5, I6, I7 );
422 mulscc( I7, -1, I6 );
424 nop();
427 popc( G0, G1);
428 popc( -1, G2);
430 prefetch( L1, L2, severalReads );
431 prefetch( L3, -1, oneRead );
432 prefetcha( O3, O2, 6, severalWritesAndPossiblyReads );
433 prefetcha( G2, -1, oneWrite );
435 rett( I7, I7);
436 delayed()->nop();
437 rett( G0, -1, relocInfo::none);
438 delayed()->nop();
440 save( I5, I6, I7 );
441 save( I7, -1, I6 );
442 restore( I5, I6, I7 );
443 restore( I7, -1, I6 );
445 saved();
446 restored();
448 sethi( 0xaaaaaaaa, I3, Relocation::spec_simple(relocInfo::none));
450 sll( I5, I6, I7 );
451 sll( I7, 31, I6 );
452 srl( I5, I6, I7 );
453 srl( I7, 0, I6 );
454 sra( I5, I6, I7 );
455 sra( I7, 30, I6 );
456 sllx( I5, I6, I7 );
457 sllx( I7, 63, I6 );
458 srlx( I5, I6, I7 );
459 srlx( I7, 0, I6 );
460 srax( I5, I6, I7 );
461 srax( I7, 62, I6 );
463 sir( -1 );
465 stbar();
467 stf( FloatRegisterImpl::Q, F40, G0, I7 );
468 stf( FloatRegisterImpl::S, F18, I3, -1 );
470 stfsr( L1, L2 );
471 stfsr( I7, -1 );
472 stxfsr( I6, I5 );
473 stxfsr( L4, -1 );
475 stfa( FloatRegisterImpl::D, F22, I6, I7, 7 );
476 stfa( FloatRegisterImpl::Q, F44, G0, -1 );
478 stb( L5, O2, I7 );
479 stb( I7, I6, -1 );
480 sth( L5, O2, I7 );
481 sth( I7, I6, -1 );
482 stw( L5, O2, I7 );
483 stw( I7, I6, -1 );
484 stx( L5, O2, I7 );
485 stx( I7, I6, -1 );
486 std( L5, O2, I7 );
487 std( I7, I6, -1 );
489 stba( L5, O2, I7, 8 );
490 stba( I7, I6, -1 );
491 stha( L5, O2, I7, 9 );
492 stha( I7, I6, -1 );
493 stwa( L5, O2, I7, 0 );
494 stwa( I7, I6, -1 );
495 stxa( L5, O2, I7, 11 );
496 stxa( I7, I6, -1 );
497 stda( L5, O2, I7, 12 );
498 stda( I7, I6, -1 );
500 sub( I5, I6, I7 );
501 sub( I7, -1, I6 );
502 subcc( I5, I6, I7 );
503 subcc( I7, -1, I6 );
504 subc( I5, I6, I7 );
505 subc( I7, -1, I6 );
506 subccc( I5, I6, I7 );
507 subccc( I7, -1, I6 );
509 swap( I5, I6, I7 );
510 swap( I7, -1, I6 );
512 swapa( G0, G1, 13, G2 );
513 swapa( I7, -1, I6 );
515 taddcc( I5, I6, I7 );
516 taddcc( I7, -1, I6 );
517 taddcctv( I5, I6, I7 );
518 taddcctv( I7, -1, I6 );
520 tsubcc( I5, I6, I7 );
521 tsubcc( I7, -1, I6 );
522 tsubcctv( I5, I6, I7 );
523 tsubcctv( I7, -1, I6 );
525 trap( overflowClear, xcc, G0, G1 );
526 trap( lessEqual, icc, I7, 17 );
528 bind(lbl2);
529 bind(lbl3);
531 code()->decode();
532 }
534 // Generate a bunch of stuff unique to V8
535 void Assembler::test_v8_onlys() {
536 Label lbl1;
538 cb( cp_0or1or2, false, pc() - 4, relocInfo::none);
539 delayed()->nop();
540 cb( cp_never, true, lbl1);
541 delayed()->nop();
543 cpop1(1, 2, 3, 4);
544 cpop2(5, 6, 7, 8);
546 ldc( I0, I1, 31);
547 ldc( I2, -1, 0);
549 lddc( I4, I4, 30);
550 lddc( I6, 0, 1 );
552 ldcsr( L0, L1, 0);
553 ldcsr( L1, (1 << 12) - 1, 17 );
555 stc( 31, L4, L5);
556 stc( 30, L6, -(1 << 12) );
558 stdc( 0, L7, G0);
559 stdc( 1, G1, 0 );
561 stcsr( 16, G2, G3);
562 stcsr( 17, G4, 1 );
564 stdcq( 4, G5, G6);
565 stdcq( 5, G7, -1 );
567 bind(lbl1);
569 code()->decode();
570 }
571 #endif
573 // Implementation of MacroAssembler
575 void MacroAssembler::null_check(Register reg, int offset) {
576 if (needs_explicit_null_check((intptr_t)offset)) {
577 // provoke OS NULL exception if reg == NULL by
578 // accessing M[reg] w/o changing any registers
579 ld_ptr(reg, 0, G0);
580 }
581 else {
582 // nothing to do, (later) access of M[reg + offset]
583 // will provoke OS NULL exception if reg == NULL
584 }
585 }
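// Sketch of the reasoning (not new behavior): for small offsets the later
// "ld [reg + offset]" itself traps when reg is NULL, because the first page
// is unmapped; only offsets beyond that protected range need the explicit
// probe of M[reg] above.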
587 // Ring buffer jumps
589 #ifndef PRODUCT
590 void MacroAssembler::ret( bool trace )   { if (trace) {
591                                                mov(I7, O7); // traceable register
592                                                JMP(O7, 2 * BytesPerInstWord);
593                                              } else {
594                                                jmpl( I7, 2 * BytesPerInstWord, G0 );
595                                              }
596                                            }
598 void MacroAssembler::retl( bool trace )  { if (trace) JMP(O7, 2 * BytesPerInstWord);
599                                            else jmpl( O7, 2 * BytesPerInstWord, G0 ); }
600 #endif /* PRODUCT */
603 void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line ) {
604 assert_not_delayed();
605 // This can only be traceable if r1 & r2 are visible after a window save
606 if (TraceJumps) {
607 #ifndef PRODUCT
608 save_frame(0);
609 verify_thread();
610 ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
611 add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
612 sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
613 add(O2, O1, O1);
615 add(r1->after_save(), r2->after_save(), O2);
616 set((intptr_t)file, O3);
617 set(line, O4);
618 Label L;
619 // get nearby pc, store jmp target
620 call(L, relocInfo::none); // No relocation for call to pc+0x8
621 delayed()->st(O2, O1, 0);
622 bind(L);
624 // store nearby pc
625 st(O7, O1, sizeof(intptr_t));
626 // store file
627 st(O3, O1, 2*sizeof(intptr_t));
628 // store line
629 st(O4, O1, 3*sizeof(intptr_t));
630 add(O0, 1, O0);
631 and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
632 st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
633 restore();
634 #endif /* PRODUCT */
635 }
636 jmpl(r1, r2, G0);
637 }
638 void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
639 assert_not_delayed();
640 // This can only be traceable if r1 is visible after a window save
641 if (TraceJumps) {
642 #ifndef PRODUCT
643 save_frame(0);
644 verify_thread();
645 ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
646 add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
647 sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
648 add(O2, O1, O1);
650 add(r1->after_save(), offset, O2);
651 set((intptr_t)file, O3);
652 set(line, O4);
653 Label L;
654 // get nearby pc, store jmp target
655 call(L, relocInfo::none); // No relocation for call to pc+0x8
656 delayed()->st(O2, O1, 0);
657 bind(L);
659 // store nearby pc
660 st(O7, O1, sizeof(intptr_t));
661 // store file
662 st(O3, O1, 2*sizeof(intptr_t));
663 // store line
664 st(O4, O1, 3*sizeof(intptr_t));
665 add(O0, 1, O0);
666 and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
667 st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
668 restore();
669 #endif /* PRODUCT */
670 }
671 jmp(r1, offset);
672 }
674 // This code sequence is relocatable to any address, even on LP64.
675 void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
676 assert_not_delayed();
677 // Force fixed length sethi because NativeJump and NativeFarCall don't handle
678 // variable length instruction streams.
679 patchable_sethi(addrlit, temp);
680 Address a(temp, addrlit.low10() + offset); // Add the offset to the displacement.
681 if (TraceJumps) {
682 #ifndef PRODUCT
683 // Must do the add here so relocation can find the remainder of the
684 // value to be relocated.
685 add(a.base(), a.disp(), a.base(), addrlit.rspec(offset));
686 save_frame(0);
687 verify_thread();
688 ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
689 add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
690 sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
691 add(O2, O1, O1);
693 set((intptr_t)file, O3);
694 set(line, O4);
695 Label L;
697 // get nearby pc, store jmp target
698 call(L, relocInfo::none); // No relocation for call to pc+0x8
699 delayed()->st(a.base()->after_save(), O1, 0);
700 bind(L);
702 // store nearby pc
703 st(O7, O1, sizeof(intptr_t));
704 // store file
705 st(O3, O1, 2*sizeof(intptr_t));
706 // store line
707 st(O4, O1, 3*sizeof(intptr_t));
708 add(O0, 1, O0);
709 and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
710 st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
711 restore();
712 jmpl(a.base(), G0, d);
713 #else
714 jmpl(a.base(), a.disp(), d);
715 #endif /* PRODUCT */
716 } else {
717 jmpl(a.base(), a.disp(), d);
718 }
719 }
721 void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
722 jumpl(addrlit, temp, G0, offset, file, line);
723 }
726 // Convert to C varargs format
727 void MacroAssembler::set_varargs( Argument inArg, Register d ) {
728 // spill register-resident args to their memory slots
729 // (SPARC calling convention requires callers to have already preallocated these)
730 // Note that the inArg might in fact be an outgoing argument,
731 // if a leaf routine or stub does some tricky argument shuffling.
732 // This routine must work even though one of the saved arguments
733 // is in the d register (e.g., set_varargs(Argument(0, false), O0)).
734 for (Argument savePtr = inArg;
735 savePtr.is_register();
736 savePtr = savePtr.successor()) {
737 st_ptr(savePtr.as_register(), savePtr.address_in_frame());
738 }
739 // return the address of the first memory slot
740 Address a = inArg.address_in_frame();
741 add(a.base(), a.disp(), d);
742 }
744 // Conditional breakpoint (for assertion checks in assembly code)
745 void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
746 trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
747 }
749 // We want to use ST_BREAKPOINT here, but the debugger is confused by it.
750 void MacroAssembler::breakpoint_trap() {
751 trap(ST_RESERVED_FOR_USER_0);
752 }
754 // flush windows (except current) using flushw instruction if avail.
755 void MacroAssembler::flush_windows() {
756 if (VM_Version::v9_instructions_work()) flushw();
757 else flush_windows_trap();
758 }
760 // Write serialization page so VM thread can do a pseudo remote membar
761 // We use the current thread pointer to calculate a thread specific
762 // offset to write to within the page. This minimizes bus traffic
763 // due to cache line collision.
764 void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
765 srl(thread, os::get_serialize_page_shift_count(), tmp2);
766 if (Assembler::is_simm13(os::vm_page_size())) {
767 and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
768 }
769 else {
770 set((os::vm_page_size() - sizeof(int)), tmp1);
771 and3(tmp2, tmp1, tmp2);
772 }
773 set(os::get_memory_serialize_page(), tmp1);
774 st(G0, tmp1, tmp2);
775 }
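// Illustrative arithmetic (hypothetical values -- the real ones come from
// os::get_serialize_page_shift_count() and os::vm_page_size()): with a shift
// of 3 and an 8K page, the store lands at offset (thread >> 3) & (8192 - 4),
// so different threads touch different words of the serialization page.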
779 void MacroAssembler::enter() {
780 Unimplemented();
781 }
783 void MacroAssembler::leave() {
784 Unimplemented();
785 }
787 void MacroAssembler::mult(Register s1, Register s2, Register d) {
788 if(VM_Version::v9_instructions_work()) {
789 mulx (s1, s2, d);
790 } else {
791 smul (s1, s2, d);
792 }
793 }
795 void MacroAssembler::mult(Register s1, int simm13a, Register d) {
796 if(VM_Version::v9_instructions_work()) {
797 mulx (s1, simm13a, d);
798 } else {
799 smul (s1, simm13a, d);
800 }
801 }
804 #ifdef ASSERT
805 void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
806 const Register s1 = G3_scratch;
807 const Register s2 = G4_scratch;
808 Label get_psr_test;
809 // Get the condition codes the V8 way.
810 read_ccr_trap(s1);
811 mov(ccr_save, s2);
812 // This is a test of V8 which has icc but not xcc
813 // so mask off the xcc bits
814 and3(s2, 0xf, s2);
815 // Compare condition codes from the V8 and V9 ways.
816 subcc(s2, s1, G0);
817 br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
818 delayed()->breakpoint_trap();
819 bind(get_psr_test);
820 }
822 void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
823 const Register s1 = G3_scratch;
824 const Register s2 = G4_scratch;
825 Label set_psr_test;
826 // Write out the saved condition codes the V8 way
827 write_ccr_trap(ccr_save, s1, s2);
828 // Read back the condition codes using the V9 instruction
829 rdccr(s1);
830 mov(ccr_save, s2);
831 // This is a test of V8 which has icc but not xcc
832 // so mask off the xcc bits
833 and3(s2, 0xf, s2);
834 and3(s1, 0xf, s1);
835 // Compare the V8 way with the V9 way.
836 subcc(s2, s1, G0);
837 br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
838 delayed()->breakpoint_trap();
839 bind(set_psr_test);
840 }
841 #else
842 #define read_ccr_v8_assert(x)
843 #define write_ccr_v8_assert(x)
844 #endif // ASSERT
846 void MacroAssembler::read_ccr(Register ccr_save) {
847 if (VM_Version::v9_instructions_work()) {
848 rdccr(ccr_save);
849 // Test code sequence used on V8. Do not move above rdccr.
850 read_ccr_v8_assert(ccr_save);
851 } else {
852 read_ccr_trap(ccr_save);
853 }
854 }
856 void MacroAssembler::write_ccr(Register ccr_save) {
857 if (VM_Version::v9_instructions_work()) {
858 // Test code sequence used on V8. Do not move below wrccr.
859 write_ccr_v8_assert(ccr_save);
860 wrccr(ccr_save);
861 } else {
862 const Register temp_reg1 = G3_scratch;
863 const Register temp_reg2 = G4_scratch;
864 write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
865 }
866 }
869 // Calls to C land
871 #ifdef ASSERT
872 // a hook for debugging
873 static Thread* reinitialize_thread() {
874 return ThreadLocalStorage::thread();
875 }
876 #else
877 #define reinitialize_thread ThreadLocalStorage::thread
878 #endif
880 #ifdef ASSERT
881 address last_get_thread = NULL;
882 #endif
884 // call this when G2_thread is not known to be valid
885 void MacroAssembler::get_thread() {
886 save_frame(0); // to avoid clobbering O0
887 mov(G1, L0); // avoid clobbering G1
888 mov(G5_method, L1); // avoid clobbering G5
889 mov(G3, L2); // avoid clobbering G3 also
890 mov(G4, L5); // avoid clobbering G4
891 #ifdef ASSERT
892 AddressLiteral last_get_thread_addrlit(&last_get_thread);
893 set(last_get_thread_addrlit, L3);
894 inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
895 st_ptr(L4, L3, 0);
896 #endif
897 call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
898 delayed()->nop();
899 mov(L0, G1);
900 mov(L1, G5_method);
901 mov(L2, G3);
902 mov(L5, G4);
903 restore(O0, 0, G2_thread);
904 }
906 static Thread* verify_thread_subroutine(Thread* gthread_value) {
907 Thread* correct_value = ThreadLocalStorage::thread();
908 guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
909 return correct_value;
910 }
912 void MacroAssembler::verify_thread() {
913 if (VerifyThread) {
914 // NOTE: this chops off the heads of the 64-bit O registers.
915 #ifdef CC_INTERP
916 save_frame(0);
917 #else
918 // make sure G2_thread contains the right value
919 save_frame_and_mov(0, Lmethod, Lmethod); // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
920 mov(G1, L1); // avoid clobbering G1
921 // G2 saved below
922 mov(G3, L3); // avoid clobbering G3
923 mov(G4, L4); // avoid clobbering G4
924 mov(G5_method, L5); // avoid clobbering G5_method
925 #endif /* CC_INTERP */
926 #if defined(COMPILER2) && !defined(_LP64)
927 // Save & restore possible 64-bit Long arguments in G-regs
928 srlx(G1,32,L0);
929 srlx(G4,32,L6);
930 #endif
931 call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
932 delayed()->mov(G2_thread, O0);
934 mov(L1, G1); // Restore G1
935 // G2 restored below
936 mov(L3, G3); // restore G3
937 mov(L4, G4); // restore G4
938 mov(L5, G5_method); // restore G5_method
939 #if defined(COMPILER2) && !defined(_LP64)
940 // Save & restore possible 64-bit Long arguments in G-regs
941 sllx(L0,32,G2); // Move old high G1 bits high in G2
942 srl(G1, 0,G1); // Clear current high G1 bits
943 or3 (G1,G2,G1); // Recover 64-bit G1
944 sllx(L6,32,G2); // Move old high G4 bits high in G2
945 srl(G4, 0,G4); // Clear current high G4 bits
946 or3 (G4,G2,G4); // Recover 64-bit G4
947 #endif
948 restore(O0, 0, G2_thread);
949 }
950 }
953 void MacroAssembler::save_thread(const Register thread_cache) {
954 verify_thread();
955 if (thread_cache->is_valid()) {
956 assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
957 mov(G2_thread, thread_cache);
958 }
959 if (VerifyThread) {
960 // smash G2_thread, as if the VM were about to anyway
961 set(0x67676767, G2_thread);
962 }
963 }
966 void MacroAssembler::restore_thread(const Register thread_cache) {
967 if (thread_cache->is_valid()) {
968 assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
969 mov(thread_cache, G2_thread);
970 verify_thread();
971 } else {
972 // do it the slow way
973 get_thread();
974 }
975 }
978 // %%% maybe get rid of [re]set_last_Java_frame
979 void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
980 assert_not_delayed();
981 Address flags(G2_thread, JavaThread::frame_anchor_offset() +
982 JavaFrameAnchor::flags_offset());
983 Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());
985 // Always set last_Java_pc and flags first because once last_Java_sp is visible,
986 // has_last_Java_frame is true and users will look at the rest of the fields.
987 // (Note: flags should always be zero before we get here so it doesn't need to be set.)
989 #ifdef ASSERT
990 // Verify that last_Java_pc was zeroed on return to Java
991 Label PcOk;
992 save_frame(0); // to avoid clobbering O0
993 ld_ptr(pc_addr, L0);
994 br_null_short(L0, Assembler::pt, PcOk);
995 stop("last_Java_pc not zeroed before leaving Java");
996 bind(PcOk);
998 // Verify that flags was zeroed on return to Java
999 Label FlagsOk;
1000 ld(flags, L0);
1001 tst(L0);
1002 br(Assembler::zero, false, Assembler::pt, FlagsOk);
1003 delayed() -> restore();
1004 stop("flags not zeroed before leaving Java");
1005 bind(FlagsOk);
1006 #endif /* ASSERT */
1007 //
1008 // When returning from calling out of Java mode, the frame anchor's last_Java_pc
1009 // will always be set to NULL. It is set here so that, if we are doing a call to
1010 // native (not VM) code, we capture the known pc and don't have to rely on the
1011 // native call having a standard frame linkage where we can find the pc.
1013 if (last_Java_pc->is_valid()) {
1014 st_ptr(last_Java_pc, pc_addr);
1015 }
1017 #ifdef _LP64
1018 #ifdef ASSERT
1019 // Make sure that we have an odd stack
1020 Label StackOk;
1021 andcc(last_java_sp, 0x01, G0);
1022 br(Assembler::notZero, false, Assembler::pt, StackOk);
1023 delayed()->nop();
1024 stop("Stack Not Biased in set_last_Java_frame");
1025 bind(StackOk);
1026 #endif // ASSERT
1027 assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
1028 add( last_java_sp, STACK_BIAS, G4_scratch );
1029 st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
1030 #else
1031 st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
1032 #endif // _LP64
1033 }
1035 void MacroAssembler::reset_last_Java_frame(void) {
1036 assert_not_delayed();
1038 Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
1039 Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
1040 Address flags (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
1042 #ifdef ASSERT
1043 // check that it WAS previously set
1044 #ifdef CC_INTERP
1045 save_frame(0);
1046 #else
1047 save_frame_and_mov(0, Lmethod, Lmethod); // Propagate Lmethod to helper frame for -Xprof
1048 #endif /* CC_INTERP */
1049 ld_ptr(sp_addr, L0);
1050 tst(L0);
1051 breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
1052 restore();
1053 #endif // ASSERT
1055 st_ptr(G0, sp_addr);
1056 // Always reset last_Java_pc to zero
1057 st_ptr(G0, pc_addr);
1058 // Always clear flags after return to Java
1059 st(G0, flags);
1060 }
1063 void MacroAssembler::call_VM_base(
1064 Register oop_result,
1065 Register thread_cache,
1066 Register last_java_sp,
1067 address entry_point,
1068 int number_of_arguments,
1069 bool check_exceptions)
1070 {
1071 assert_not_delayed();
1073 // determine last_java_sp register
1074 if (!last_java_sp->is_valid()) {
1075 last_java_sp = SP;
1076 }
1077 // debugging support
1078 assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
1080 // 64-bit last_java_sp is biased!
1081 set_last_Java_frame(last_java_sp, noreg);
1082 if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
1083 save_thread(thread_cache);
1084 // do the call
1085 call(entry_point, relocInfo::runtime_call_type);
1086 if (!VerifyThread)
1087 delayed()->mov(G2_thread, O0); // pass thread as first argument
1088 else
1089 delayed()->nop(); // (thread already passed)
1090 restore_thread(thread_cache);
1091 reset_last_Java_frame();
1093 // check for pending exceptions. use Gtemp as scratch register.
1094 if (check_exceptions) {
1095 check_and_forward_exception(Gtemp);
1096 }
1098 #ifdef ASSERT
1099 set(badHeapWordVal, G3);
1100 set(badHeapWordVal, G4);
1101 set(badHeapWordVal, G5);
1102 #endif
1104 // get oop result if there is one and reset the value in the thread
1105 if (oop_result->is_valid()) {
1106 get_vm_result(oop_result);
1107 }
1108 }
1110 void MacroAssembler::check_and_forward_exception(Register scratch_reg)
1111 {
1112 Label L;
1114 check_and_handle_popframe(scratch_reg);
1115 check_and_handle_earlyret(scratch_reg);
1117 Address exception_addr(G2_thread, Thread::pending_exception_offset());
1118 ld_ptr(exception_addr, scratch_reg);
1119 br_null_short(scratch_reg, pt, L);
1120 // we use O7 linkage so that forward_exception_entry has the issuing PC
1121 call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
1122 delayed()->nop();
1123 bind(L);
1124 }
1127 void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
1128 }
1131 void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
1132 }
1135 void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
1136 call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
1137 }
1140 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
1141 // O0 is reserved for the thread
1142 mov(arg_1, O1);
1143 call_VM(oop_result, entry_point, 1, check_exceptions);
1144 }
1147 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
1148 // O0 is reserved for the thread
1149 mov(arg_1, O1);
1150 mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
1151 call_VM(oop_result, entry_point, 2, check_exceptions);
1152 }
1155 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
1156 // O0 is reserved for the thread
1157 mov(arg_1, O1);
1158 mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
1159 mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
1160 call_VM(oop_result, entry_point, 3, check_exceptions);
1161 }
1165 // Note: The following call_VM overloadings are useful when a "save"
1166 // has already been performed by a stub, and the last Java frame is
1167 // the previous one. In that case, last_java_sp must be passed as FP
1168 // instead of SP.
1171 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
1172 call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
1173 }
1176 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
1177 // O0 is reserved for the thread
1178 mov(arg_1, O1);
1179 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
1180 }
1183 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
1184 // O0 is reserved for the thread
1185 mov(arg_1, O1);
1186 mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
1187 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
1188 }
1191 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
1192 // O0 is reserved for the thread
1193 mov(arg_1, O1);
1194 mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
1195 mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
1196 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
1197 }
1201 void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
1202 assert_not_delayed();
1203 save_thread(thread_cache);
1204 // do the call
1205 call(entry_point, relocInfo::runtime_call_type);
1206 delayed()->nop();
1207 restore_thread(thread_cache);
1208 #ifdef ASSERT
1209 set(badHeapWordVal, G3);
1210 set(badHeapWordVal, G4);
1211 set(badHeapWordVal, G5);
1212 #endif
1213 }
1216 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
1217 call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
1218 }
1221 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
1222 mov(arg_1, O0);
1223 call_VM_leaf(thread_cache, entry_point, 1);
1224 }
1227 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
1228 mov(arg_1, O0);
1229 mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
1230 call_VM_leaf(thread_cache, entry_point, 2);
1231 }
1234 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
1235 mov(arg_1, O0);
1236 mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
1237 mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
1238 call_VM_leaf(thread_cache, entry_point, 3);
1239 }
1242 void MacroAssembler::get_vm_result(Register oop_result) {
1243 verify_thread();
1244 Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
1245 ld_ptr( vm_result_addr, oop_result);
1246 st_ptr(G0, vm_result_addr);
1247 verify_oop(oop_result);
1248 }
1251 void MacroAssembler::get_vm_result_2(Register oop_result) {
1252 verify_thread();
1253 Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
1254 ld_ptr(vm_result_addr_2, oop_result);
1255 st_ptr(G0, vm_result_addr_2);
1256 verify_oop(oop_result);
1257 }
1260 // We require that C code which does not return a value in vm_result will
1261 // leave it undisturbed.
1262 void MacroAssembler::set_vm_result(Register oop_result) {
1263 verify_thread();
1264 Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
1265 verify_oop(oop_result);
1267 # ifdef ASSERT
1268 // Check that we are not overwriting any other oop.
1269 #ifdef CC_INTERP
1270 save_frame(0);
1271 #else
1272 save_frame_and_mov(0, Lmethod, Lmethod); // Propagate Lmethod for -Xprof
1273 #endif /* CC_INTERP */
1274 ld_ptr(vm_result_addr, L0);
1275 tst(L0);
1276 restore();
1277 breakpoint_trap(notZero, Assembler::ptr_cc);
1279 # endif
1281 st_ptr(oop_result, vm_result_addr);
1282 }
1285 void MacroAssembler::card_table_write(jbyte* byte_map_base,
1286 Register tmp, Register obj) {
1287 #ifdef _LP64
1288 srlx(obj, CardTableModRefBS::card_shift, obj);
1289 #else
1290 srl(obj, CardTableModRefBS::card_shift, obj);
1291 #endif
1292 assert(tmp != obj, "need separate temp reg");
1293 set((address) byte_map_base, tmp);
1294 stb(G0, tmp, obj);
1295 }
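// Worked example (hypothetical numbers): with card_shift == 9 (512-byte
// cards), an object at 0x10000200 dirties card 0x10000200 >> 9 == 0x80001,
// i.e. the byte at byte_map_base + 0x80001 is set to zero by the stb above.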
1298 void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
1299 address save_pc;
1300 int shiftcnt;
1301 #ifdef _LP64
1302 # ifdef CHECK_DELAY
1303 assert_not_delayed((char*) "cannot put two instructions in delay slot");
1304 # endif
1305 v9_dep();
1306 save_pc = pc();
1308 int msb32 = (int) (addrlit.value() >> 32);
1309 int lsb32 = (int) (addrlit.value());
1311 if (msb32 == 0 && lsb32 >= 0) {
1312 Assembler::sethi(lsb32, d, addrlit.rspec());
1313 }
1314 else if (msb32 == -1) {
1315 Assembler::sethi(~lsb32, d, addrlit.rspec());
1316 xor3(d, ~low10(~0), d);
1317 }
1318 else {
1319 Assembler::sethi(msb32, d, addrlit.rspec()); // msb 22-bits
1320 if (msb32 & 0x3ff) // Any bits?
1321 or3(d, msb32 & 0x3ff, d); // msb 32-bits are now in lsb 32
1322 if (lsb32 & 0xFFFFFC00) { // done?
1323 if ((lsb32 >> 20) & 0xfff) { // Any bits set?
1324 sllx(d, 12, d); // Make room for next 12 bits
1325 or3(d, (lsb32 >> 20) & 0xfff, d); // Or in next 12
1326 shiftcnt = 0; // We already shifted
1327 }
1328 else
1329 shiftcnt = 12;
1330 if ((lsb32 >> 10) & 0x3ff) {
1331 sllx(d, shiftcnt + 10, d); // Make room for last 10 bits
1332 or3(d, (lsb32 >> 10) & 0x3ff, d); // Or in next 10
1333 shiftcnt = 0;
1334 }
1335 else
1336 shiftcnt = 10;
1337 sllx(d, shiftcnt + 10, d); // Shift leaving disp field 0'd
1338 }
1339 else
1340 sllx(d, 32, d);
1341 }
1342 // Pad out the instruction sequence so it can be patched later.
1343 if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
1344 addrlit.rtype() != relocInfo::runtime_call_type)) {
1345 while (pc() < (save_pc + (7 * BytesPerInstWord)))
1346 nop();
1347 }
1348 #else
1349 Assembler::sethi(addrlit.value(), d, addrlit.rspec());
1350 #endif
1351 }
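// Illustrative walk-through of the general case above: sethi+or3 first build
// the high word (msb32) in d's low 32 bits, the sllx/or3 pairs then pull in
// bits 31..20 and 19..10 of lsb32, and the final sllx leaves d equal to the
// full value with bits 9..0 cleared -- internal_set() supplies those last 10
// bits with an add of low10(value).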
1354 void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
1355 internal_sethi(addrlit, d, false);
1356 }
1359 void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
1360 internal_sethi(addrlit, d, true);
1361 }
1364 int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
1365 #ifdef _LP64
1366 if (worst_case) return 7;
1367 intptr_t iaddr = (intptr_t) a;
1368 int msb32 = (int) (iaddr >> 32);
1369 int lsb32 = (int) (iaddr);
1370 int count;
1371 if (msb32 == 0 && lsb32 >= 0)
1372 count = 1;
1373 else if (msb32 == -1)
1374 count = 2;
1375 else {
1376 count = 2;
1377 if (msb32 & 0x3ff)
1378 count++;
1379 if (lsb32 & 0xFFFFFC00 ) {
1380 if ((lsb32 >> 20) & 0xfff) count += 2;
1381 if ((lsb32 >> 10) & 0x3ff) count += 2;
1382 }
1383 }
1384 return count;
1385 #else
1386 return 1;
1387 #endif
1388 }
1390 int MacroAssembler::worst_case_insts_for_set() {
1391 return insts_for_sethi(NULL, true) + 1;
1392 }
1395 // Keep in sync with MacroAssembler::insts_for_internal_set
1396 void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
1397 intptr_t value = addrlit.value();
1399 if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
1400 // can optimize
1401 if (-4096 <= value && value <= 4095) {
1402 or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
1403 return;
1404 }
1405 if (inv_hi22(hi22(value)) == value) {
1406 sethi(addrlit, d);
1407 return;
1408 }
1409 }
1410 assert_not_delayed((char*) "cannot put two instructions in delay slot");
1411 internal_sethi(addrlit, d, ForceRelocatable);
1412 if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
1413 add(d, addrlit.low10(), d, addrlit.rspec());
1414 }
1415 }
1417 // Keep in sync with MacroAssembler::internal_set
1418 int MacroAssembler::insts_for_internal_set(intptr_t value) {
1419 // can optimize
1420 if (-4096 <= value && value <= 4095) {
1421 return 1;
1422 }
1423 if (inv_hi22(hi22(value)) == value) {
1424 return insts_for_sethi((address) value);
1425 }
1426 int count = insts_for_sethi((address) value);
1427 AddressLiteral al(value);
1428 if (al.low10() != 0) {
1429 count++;
1430 }
1431 return count;
1432 }
1434 void MacroAssembler::set(const AddressLiteral& al, Register d) {
1435 internal_set(al, d, false);
1436 }
1438 void MacroAssembler::set(intptr_t value, Register d) {
1439 AddressLiteral al(value);
1440 internal_set(al, d, false);
1441 }
1443 void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
1444 AddressLiteral al(addr, rspec);
1445 internal_set(al, d, false);
1446 }
1448 void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
1449 internal_set(al, d, true);
1450 }
1452 void MacroAssembler::patchable_set(intptr_t value, Register d) {
1453 AddressLiteral al(value);
1454 internal_set(al, d, true);
1455 }
1458 void MacroAssembler::set64(jlong value, Register d, Register tmp) {
1459 assert_not_delayed();
1460 v9_dep();
1462 int hi = (int)(value >> 32);
1463 int lo = (int)(value & ~0);
1464 // (Matcher::isSimpleConstant64 knows about the following optimizations.)
1465 if (Assembler::is_simm13(lo) && value == lo) {
1466 or3(G0, lo, d);
1467 } else if (hi == 0) {
1468 Assembler::sethi(lo, d); // hardware version zero-extends to upper 32
1469 if (low10(lo) != 0)
1470 or3(d, low10(lo), d);
1471 }
1472 else if (hi == -1) {
1473 Assembler::sethi(~lo, d); // hardware version zero-extends to upper 32
1474 xor3(d, low10(lo) ^ ~low10(~0), d);
1475 }
1476 else if (lo == 0) {
1477 if (Assembler::is_simm13(hi)) {
1478 or3(G0, hi, d);
1479 } else {
1480 Assembler::sethi(hi, d); // hardware version zero-extends to upper 32
1481 if (low10(hi) != 0)
1482 or3(d, low10(hi), d);
1483 }
1484 sllx(d, 32, d);
1485 }
1486 else {
1487 Assembler::sethi(hi, tmp);
1488 Assembler::sethi(lo, d); // macro assembler version sign-extends
1489 if (low10(hi) != 0)
1490 or3 (tmp, low10(hi), tmp);
1491 if (low10(lo) != 0)
1492 or3 ( d, low10(lo), d);
1493 sllx(tmp, 32, tmp);
1494 or3 (d, tmp, d);
1495 }
1496 }
1498 int MacroAssembler::insts_for_set64(jlong value) {
1499 v9_dep();
1501 int hi = (int) (value >> 32);
1502 int lo = (int) (value & ~0);
1503 int count = 0;
1505 // (Matcher::isSimpleConstant64 knows about the following optimizations.)
1506 if (Assembler::is_simm13(lo) && value == lo) {
1507 count++;
1508 } else if (hi == 0) {
1509 count++;
1510 if (low10(lo) != 0)
1511 count++;
1512 }
1513 else if (hi == -1) {
1514 count += 2;
1515 }
1516 else if (lo == 0) {
1517 if (Assembler::is_simm13(hi)) {
1518 count++;
1519 } else {
1520 count++;
1521 if (low10(hi) != 0)
1522 count++;
1523 }
1524 count++;
1525 }
1526 else {
1527 count += 2;
1528 if (low10(hi) != 0)
1529 count++;
1530 if (low10(lo) != 0)
1531 count++;
1532 count += 2;
1533 }
1534 return count;
1535 }
1537 // compute size in bytes of sparc frame, given
1538 // number of extraWords
1539 int MacroAssembler::total_frame_size_in_bytes(int extraWords) {
1541 int nWords = frame::memory_parameter_word_sp_offset;
1543 nWords += extraWords;
1545 if (nWords & 1) ++nWords; // round up to double-word
1547 return nWords * BytesPerWord;
1548 }
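// Example (hypothetical numbers): if memory_parameter_word_sp_offset were 23
// and extraWords == 2, nWords would be 25, rounded up to 26 for double-word
// alignment, giving 26 * BytesPerWord == 208 bytes in LP64.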
1551 // save_frame: given number of "extra" words in frame,
1552 // issue the appropriate save instruction (p 200, v8 manual)
1554 void MacroAssembler::save_frame(int extraWords) {
1555 int delta = -total_frame_size_in_bytes(extraWords);
1556 if (is_simm13(delta)) {
1557 save(SP, delta, SP);
1558 } else {
1559 set(delta, G3_scratch);
1560 save(SP, G3_scratch, SP);
1561 }
1562 }
1565 void MacroAssembler::save_frame_c1(int size_in_bytes) {
1566 if (is_simm13(-size_in_bytes)) {
1567 save(SP, -size_in_bytes, SP);
1568 } else {
1569 set(-size_in_bytes, G3_scratch);
1570 save(SP, G3_scratch, SP);
1571 }
1572 }
1575 void MacroAssembler::save_frame_and_mov(int extraWords,
1576 Register s1, Register d1,
1577 Register s2, Register d2) {
1578 assert_not_delayed();
1580 // The trick here is to use precisely the same memory word
1581 // that trap handlers also use to save the register.
1582 // This word cannot be used for any other purpose, but
1583 // it works fine to save the register's value, whether or not
1584 // an interrupt flushes register windows at any given moment!
1585 Address s1_addr;
1586 if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
1587 s1_addr = s1->address_in_saved_window();
1588 st_ptr(s1, s1_addr);
1589 }
1591 Address s2_addr;
1592 if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
1593 s2_addr = s2->address_in_saved_window();
1594 st_ptr(s2, s2_addr);
1595 }
1597 save_frame(extraWords);
1599 if (s1_addr.base() == SP) {
1600 ld_ptr(s1_addr.after_save(), d1);
1601 } else if (s1->is_valid()) {
1602 mov(s1->after_save(), d1);
1603 }
1605 if (s2_addr.base() == SP) {
1606 ld_ptr(s2_addr.after_save(), d2);
1607 } else if (s2->is_valid()) {
1608 mov(s2->after_save(), d2);
1609 }
1610 }
1613 AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
1614 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1615 int oop_index = oop_recorder()->allocate_index(obj);
1616 return AddressLiteral(obj, oop_Relocation::spec(oop_index));
1617 }
1620 AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
1621 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1622 int oop_index = oop_recorder()->find_index(obj);
1623 return AddressLiteral(obj, oop_Relocation::spec(oop_index));
1624 }
1626 void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
1627 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1628 int oop_index = oop_recorder()->find_index(obj);
1629 RelocationHolder rspec = oop_Relocation::spec(oop_index);
1631 assert_not_delayed();
1632 // Relocation with special format (see relocInfo_sparc.hpp).
1633 relocate(rspec, 1);
1634 // Assembler::sethi(0x3fffff, d);
1635 emit_long( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff) );
1636 // Don't add relocation for 'add'. Do patching during 'sethi' processing.
1637 add(d, 0x3ff, d);
1639 }
1642 void MacroAssembler::align(int modulus) {
1643 while (offset() % modulus != 0) nop();
1644 }
1647 void MacroAssembler::safepoint() {
1648 relocate(breakpoint_Relocation::spec(breakpoint_Relocation::safepoint));
1649 }
1652 void RegistersForDebugging::print(outputStream* s) {
1653 int j;
1654 for ( j = 0; j < 8; ++j )
1655 if ( j != 6 ) s->print_cr("i%d = 0x%.16lx", j, i[j]);
1656 else s->print_cr( "fp = 0x%.16lx", i[j]);
1657 s->cr();
1659 for ( j = 0; j < 8; ++j )
1660 s->print_cr("l%d = 0x%.16lx", j, l[j]);
1661 s->cr();
1663 for ( j = 0; j < 8; ++j )
1664 if ( j != 6 ) s->print_cr("o%d = 0x%.16lx", j, o[j]);
1665 else s->print_cr( "sp = 0x%.16lx", o[j]);
1666 s->cr();
1668 for ( j = 0; j < 8; ++j )
1669 s->print_cr("g%d = 0x%.16lx", j, g[j]);
1670 s->cr();
1672 // print out floats with compression
1673 for (j = 0; j < 32; ) {
1674 jfloat val = f[j];
1675 int last = j;
1676 for ( ; last+1 < 32; ++last ) {
1677 char b1[1024], b2[1024];
1678 sprintf(b1, "%f", val);
1679 sprintf(b2, "%f", f[last+1]);
1680 if (strcmp(b1, b2))
1681 break;
1682 }
1683 s->print("f%d", j);
1684 if ( j != last ) s->print(" - f%d", last);
1685 s->print(" = %f", val);
1686 s->fill_to(25);
1687 s->print_cr(" (0x%x)", *(int*)&val);
1688 j = last + 1;
1689 }
1690 s->cr();
1692 // and doubles (evens only)
1693 for (j = 0; j < 32; ) {
1694 jdouble val = d[j];
1695 int last = j;
1696 for ( ; last+1 < 32; ++last ) {
1697 char b1[1024], b2[1024];
1698 sprintf(b1, "%f", val);
1699 sprintf(b2, "%f", d[last+1]);
1700 if (strcmp(b1, b2))
1701 break;
1702 }
1703 s->print("d%d", 2 * j);
1704 if ( j != last ) s->print(" - d%d", 2 * last);
1705 s->print(" = %f", val);
1706 s->fill_to(30);
1707 s->print("(0x%x)", *(int*)&val);
1708 s->fill_to(42);
1709 s->print_cr("(0x%x)", *(1 + (int*)&val));
1710 j = last + 1;
1711 }
1712 s->cr();
1713 }
1715 void RegistersForDebugging::save_registers(MacroAssembler* a) {
1716 a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
1717 a->flush_windows();
1718 int i;
1719 for (i = 0; i < 8; ++i) {
1720 a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, i_offset(i));
1721 a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, l_offset(i));
1722 a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
1723 a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
1724 }
1725 for (i = 0; i < 32; ++i) {
1726 a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
1727 }
1728 for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
1729 a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
1730 }
1731 }
1733 void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
1734 for (int i = 1; i < 8; ++i) {
1735 a->ld_ptr(r, g_offset(i), as_gRegister(i));
1736 }
1737 for (int j = 0; j < 32; ++j) {
1738 a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
1739 }
1740 for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) {
1741 a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
1742 }
1743 }
1746 // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
1747 void MacroAssembler::push_fTOS() {
1748 // %%%%%% need to implement this
1749 }
1751 // pops double TOS element from CPU stack and pushes on FPU stack
1752 void MacroAssembler::pop_fTOS() {
1753 // %%%%%% need to implement this
1754 }
1756 void MacroAssembler::empty_FPU_stack() {
1757 // %%%%%% need to implement this
1758 }
1760 void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) {
1761 // plausibility check for oops
1762 if (!VerifyOops) return;
1764 if (reg == G0) return; // always NULL, which is always an oop
1766 BLOCK_COMMENT("verify_oop {");
1767 char buffer[64];
1768 #ifdef COMPILER1
1769 if (CommentedAssembly) {
1770 snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
1771 block_comment(buffer);
1772 }
1773 #endif
1775 int len = strlen(file) + strlen(msg) + 1 + 4;
1776 sprintf(buffer, "%d", line);
1777 len += strlen(buffer);
1778 sprintf(buffer, " at offset %d ", offset());
1779 len += strlen(buffer);
1780 char * real_msg = new char[len];
1781 sprintf(real_msg, "%s%s(%s:%d)", msg, buffer, file, line);
1783 // Call indirectly to solve generation ordering problem
1784 AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());
1786 // Make some space on stack above the current register window.
1787 // Enough to hold 8 64-bit registers.
1788 add(SP,-8*8,SP);
1790 // Save some 64-bit registers; a normal 'save' chops the heads off
1791 // of 64-bit longs in the 32-bit build.
1792 stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
1793 stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
1794 mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed
1795 stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
1797 set((intptr_t)real_msg, O1);
1798 // Load address to call to into O7
1799 load_ptr_contents(a, O7);
1800 // Register call to verify_oop_subroutine
1801 callr(O7, G0);
1802 delayed()->nop();
1803 // recover frame size
1804 add(SP, 8*8,SP);
1805 BLOCK_COMMENT("} verify_oop");
1806 }
1808 void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) {
1809 // plausibility check for oops
1810 if (!VerifyOops) return;
1812 char buffer[64];
1813 sprintf(buffer, "%d", line);
1814 int len = strlen(file) + strlen(msg) + 1 + 4 + strlen(buffer);
1815 sprintf(buffer, " at SP+%d ", addr.disp());
1816 len += strlen(buffer);
1817 char * real_msg = new char[len];
1818 sprintf(real_msg, "%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
1820 // Call indirectly to solve generation ordering problem
1821 AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());
1823 // Make some space on stack above the current register window.
1824 // Enough to hold 8 64-bit registers.
1825 add(SP,-8*8,SP);
1827 // Save some 64-bit registers; a normal 'save' chops the heads off
1828 // of 64-bit longs in the 32-bit build.
1829 stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
1830 stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
1831 ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
1832 stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
1834 set((intptr_t)real_msg, O1);
1835 // Load address to call to into O7
1836 load_ptr_contents(a, O7);
1837 // Register call to verify_oop_subroutine
1838 callr(O7, G0);
1839 delayed()->nop();
1840 // recover frame size
1841 add(SP, 8*8,SP);
1842 }
1844 // side-door communication with signalHandler in os_solaris.cpp
1845 address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };
1847 // This macro is expanded just once; it creates shared code. Contract:
1848 // receives an oop in O0. Must restore O0 & O7 from TLS. Must not smash ANY
1849 // registers, including flags. May not use a register 'save', as this blows
1850 // the high bits of the O-regs if they contain Long values. Acts as a 'leaf'
1851 // call.
1852 void MacroAssembler::verify_oop_subroutine() {
1853 assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" );
1855 // Leaf call; no frame.
1856 Label succeed, fail, null_or_fail;
1858 // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
1859 // O0 is now the oop to be checked. O7 is the return address.
1860 Register O0_obj = O0;
1862 // Save some more registers for temps.
1863 stx(O2,SP,frame::register_save_words*wordSize+STACK_BIAS+2*8);
1864 stx(O3,SP,frame::register_save_words*wordSize+STACK_BIAS+3*8);
1865 stx(O4,SP,frame::register_save_words*wordSize+STACK_BIAS+4*8);
1866 stx(O5,SP,frame::register_save_words*wordSize+STACK_BIAS+5*8);
1868 // Save flags
1869 Register O5_save_flags = O5;
1870 rdccr( O5_save_flags );
1872 { // count number of verifies
1873 Register O2_adr = O2;
1874 Register O3_accum = O3;
1875 inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
1876 }
1878 Register O2_mask = O2;
1879 Register O3_bits = O3;
1880 Register O4_temp = O4;
1882 // mark lower end of faulting range
1883 assert(_verify_oop_implicit_branch[0] == NULL, "set once");
1884 _verify_oop_implicit_branch[0] = pc();
1886 // We can't check the mark oop because it could be in the process of
1887 // locking or unlocking while this is running.
1888 set(Universe::verify_oop_mask (), O2_mask);
1889 set(Universe::verify_oop_bits (), O3_bits);
1891 // assert((obj & oop_mask) == oop_bits);
1892 and3(O0_obj, O2_mask, O4_temp);
1893 cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail);
1895 if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
1896 // the null_or_fail case is useless; must test for null separately
1897 br_null_short(O0_obj, pn, succeed);
1898 }
1900 // Check the klassOop of this object for being in the right area of memory.
1901 // Cannot do the load in the delay slot above in case O0 is null
1902 load_klass(O0_obj, O0_obj);
1903 // assert((klass & klass_mask) == klass_bits);
1904 if( Universe::verify_klass_mask() != Universe::verify_oop_mask() )
1905 set(Universe::verify_klass_mask(), O2_mask);
1906 if( Universe::verify_klass_bits() != Universe::verify_oop_bits() )
1907 set(Universe::verify_klass_bits(), O3_bits);
1908 and3(O0_obj, O2_mask, O4_temp);
1909 cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, fail);
1910 // Check the klass's klass
1911 load_klass(O0_obj, O0_obj);
1912 and3(O0_obj, O2_mask, O4_temp);
1913 cmp(O4_temp, O3_bits);
1914 brx(notEqual, false, pn, fail);
1915 delayed()->wrccr( O5_save_flags ); // Restore CCR's
1917 // mark upper end of faulting range
1918 _verify_oop_implicit_branch[1] = pc();
1920 //-----------------------
1921 // all tests pass
1922 bind(succeed);
1924 // Restore prior 64-bit registers
1925 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+0*8,O0);
1926 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+1*8,O1);
1927 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+2*8,O2);
1928 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+3*8,O3);
1929 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+4*8,O4);
1930 ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+5*8,O5);
1932 retl(); // Leaf return; restore prior O7 in delay slot
1933 delayed()->ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+7*8,O7);
1935 //-----------------------
1936 bind(null_or_fail); // nulls are less common but OK
1937 br_null(O0_obj, false, pt, succeed);
1938 delayed()->wrccr( O5_save_flags ); // Restore CCR's
1940 //-----------------------
1941 // report failure:
1942 bind(fail);
1943 _verify_oop_implicit_branch[2] = pc();
1945 wrccr( O5_save_flags ); // Restore CCR's
1947 save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
1949 // stop_subroutine expects message pointer in I1.
1950 mov(I1, O1);
1952 // Restore prior 64-bit registers
1953 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+0*8,I0);
1954 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+1*8,I1);
1955 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+2*8,I2);
1956 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+3*8,I3);
1957 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+4*8,I4);
1958 ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+5*8,I5);
1960 // factor long stop-sequence into subroutine to save space
1961 assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
1963 // call indirectly to solve generation ordering problem
1964 AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
1965 load_ptr_contents(al, O5);
1966 jmpl(O5, 0, O7);
1967 delayed()->nop();
1968 }
1971 void MacroAssembler::stop(const char* msg) {
1972 // save frame first to get O7 for return address
1973 // add one word to the size in case the struct is an odd number of words long;
1974 // it must be doubleword-aligned for storing doubles into it
1976 save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
1978 // stop_subroutine expects message pointer in I1.
1979 set((intptr_t)msg, O1);
1981 // factor long stop-sequence into subroutine to save space
1982 assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
1984 // call indirectly to solve generation ordering problem
1985 AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
1986 load_ptr_contents(a, O5);
1987 jmpl(O5, 0, O7);
1988 delayed()->nop();
1990 breakpoint_trap(); // make stop actually stop rather than writing
1991 // unnoticeable results in the output files.
1993 // restore(); done in callee to save space!
1994 }
1997 void MacroAssembler::warn(const char* msg) {
1998 save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
1999 RegistersForDebugging::save_registers(this);
2000 mov(O0, L0);
2001 set((intptr_t)msg, O0);
2002 call( CAST_FROM_FN_PTR(address, warning) );
2003 delayed()->nop();
2004 // ret();
2005 // delayed()->restore();
2006 RegistersForDebugging::restore_registers(this, L0);
2007 restore();
2008 }
2011 void MacroAssembler::untested(const char* what) {
2012 // We must be able to turn interactive prompting off
2013 // in order to run automated test scripts on the VM
2014 // Use the flag ShowMessageBoxOnError
2016 char* b = new char[1024];
2017 sprintf(b, "untested: %s", what);
2019 if ( ShowMessageBoxOnError ) stop(b);
2020 else warn(b);
2021 }
2024 void MacroAssembler::stop_subroutine() {
2025 RegistersForDebugging::save_registers(this);
2027 // for the sake of the debugger, stick a PC on the current frame
2028 // (this assumes that the caller has performed an extra "save")
2029 mov(I7, L7);
2030 add(O7, -7 * BytesPerInt, I7);
2032 save_frame(); // one more save to free up another O7 register
2033 mov(I0, O1); // addr of reg save area
2035 // We expect pointer to message in I1. Caller must set it up in O1
2036 mov(I1, O0); // get msg
2037 call (CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
2038 delayed()->nop();
2040 restore();
2042 RegistersForDebugging::restore_registers(this, O0);
2044 save_frame(0);
2045 call(CAST_FROM_FN_PTR(address,breakpoint));
2046 delayed()->nop();
2047 restore();
2049 mov(L7, I7);
2050 retl();
2051 delayed()->restore(); // see stop above
2052 }
2055 void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
2056 if ( ShowMessageBoxOnError ) {
2057 JavaThreadState saved_state = JavaThread::current()->thread_state();
2058 JavaThread::current()->set_thread_state(_thread_in_vm);
2059 {
2060 // In order to make the locks work, we need to fake an in_VM state
2061 ttyLocker ttyl;
2062 ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
2063 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
2064 ::tty->print_cr("Interpreter::bytecode_counter = %d", BytecodeCounter::counter_value());
2065 }
2066 if (os::message_box(msg, "Execution stopped, print registers?"))
2067 regs->print(::tty);
2068 }
2069 ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
2070 }
2071 else
2072 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
2073 assert(false, err_msg("DEBUG MESSAGE: %s", msg));
2074 }
2077 #ifndef PRODUCT
2078 void MacroAssembler::test() {
2079 ResourceMark rm;
2081 CodeBuffer cb("test", 10000, 10000);
2082 MacroAssembler* a = new MacroAssembler(&cb);
2083 VM_Version::allow_all();
2084 a->test_v9();
2085 a->test_v8_onlys();
2086 VM_Version::revert();
2088 StubRoutines::Sparc::test_stop_entry()();
2089 }
2090 #endif
2093 void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
2094 subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
2095 Label no_extras;
2096 br( negative, true, pt, no_extras ); // if neg, clear reg
2097 delayed()->set(0, Rresult); // annulled, so only if taken
2098 bind( no_extras );
2099 }
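// In effect (a sketch of the contract):
//   Rresult = max(0, Rparam_words - Argument::n_register_parameters)
// i.e. the count of outgoing parameters that spill past the register-passed ones.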
2102 void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
2103 #ifdef _LP64
2104 add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
2105 #else
2106 add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
2107 #endif
2108 bclr(1, Rresult);
2109 sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
2110 }
2113 void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
2114 calc_frame_size(Rextra_words, Rresult);
2115 neg(Rresult);
2116 save(SP, Rresult, SP);
2117 }
2120 // ---------------------------------------------------------
2121 Assembler::RCondition cond2rcond(Assembler::Condition c) {
2122 switch (c) {
2123 /*case zero: */
2124 case Assembler::equal: return Assembler::rc_z;
2125 case Assembler::lessEqual: return Assembler::rc_lez;
2126 case Assembler::less: return Assembler::rc_lz;
2127 /*case notZero:*/
2128 case Assembler::notEqual: return Assembler::rc_nz;
2129 case Assembler::greater: return Assembler::rc_gz;
2130 case Assembler::greaterEqual: return Assembler::rc_gez;
2131 }
2132 ShouldNotReachHere();
2133 return Assembler::rc_z;
2134 }
2136 // Compares a (32-bit) register with zero and branches. NOT FOR USE WITH 64-bit POINTERS
2137 void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) {
2138 tst(s1);
2139 br (c, a, p, L);
2140 }
2142 // Compares a pointer register with zero and branches on null.
2143 // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
2144 void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) {
2145 assert_not_delayed();
2146 #ifdef _LP64
2147 bpr( rc_z, a, p, s1, L );
2148 #else
2149 tst(s1);
2150 br ( zero, a, p, L );
2151 #endif
2152 }
2154 void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) {
2155 assert_not_delayed();
2156 #ifdef _LP64
2157 bpr( rc_nz, a, p, s1, L );
2158 #else
2159 tst(s1);
2160 br ( notZero, a, p, L );
2161 #endif
2162 }
2164 void MacroAssembler::br_on_reg_cond( RCondition rc, bool a, Predict p,
2165 Register s1, address d,
2166 relocInfo::relocType rt ) {
2167 assert_not_delayed();
2168 if (VM_Version::v9_instructions_work()) {
2169 bpr(rc, a, p, s1, d, rt);
2170 } else {
2171 tst(s1);
2172 br(reg_cond_to_cc_cond(rc), a, p, d, rt);
2173 }
2174 }
2176 void MacroAssembler::br_on_reg_cond( RCondition rc, bool a, Predict p,
2177 Register s1, Label& L ) {
2178 assert_not_delayed();
2179 if (VM_Version::v9_instructions_work()) {
2180 bpr(rc, a, p, s1, L);
2181 } else {
2182 tst(s1);
2183 br(reg_cond_to_cc_cond(rc), a, p, L);
2184 }
2185 }
2187 // Compare registers and branch with nop in delay slot or cbcond without delay slot.
2189 // Compare integer (32 bit) values (icc only).
2190 void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c,
2191 Predict p, Label& L) {
2192 assert_not_delayed();
2193 if (use_cbcond(L)) {
2194 Assembler::cbcond(c, icc, s1, s2, L);
2195 } else {
2196 cmp(s1, s2);
2197 br(c, false, p, L);
2198 delayed()->nop();
2199 }
2200 }
2202 // Compare integer (32 bit) values (icc only).
2203 void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c,
2204 Predict p, Label& L) {
2205 assert_not_delayed();
2206 if (is_simm(simm13a,5) && use_cbcond(L)) {
2207 Assembler::cbcond(c, icc, s1, simm13a, L);
2208 } else {
2209 cmp(s1, simm13a);
2210 br(c, false, p, L);
2211 delayed()->nop();
2212 }
2213 }
2215 // Branch that tests xcc in LP64 and icc in !LP64
2216 void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c,
2217 Predict p, Label& L) {
2218 assert_not_delayed();
2219 if (use_cbcond(L)) {
2220 Assembler::cbcond(c, ptr_cc, s1, s2, L);
2221 } else {
2222 cmp(s1, s2);
2223 brx(c, false, p, L);
2224 delayed()->nop();
2225 }
2226 }
2228 // Branch that tests xcc in LP64 and icc in !LP64
2229 void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c,
2230 Predict p, Label& L) {
2231 assert_not_delayed();
2232 if (is_simm(simm13a,5) && use_cbcond(L)) {
2233 Assembler::cbcond(c, ptr_cc, s1, simm13a, L);
2234 } else {
2235 cmp(s1, simm13a);
2236 brx(c, false, p, L);
2237 delayed()->nop();
2238 }
2239 }
2241 // Short branch versions that compare a pointer with zero.
2243 void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
2244 assert_not_delayed();
2245 if (use_cbcond(L)) {
2246 Assembler::cbcond(zero, ptr_cc, s1, 0, L);
2247 return;
2248 }
2249 br_null(s1, false, p, L);
2250 delayed()->nop();
2251 }
2253 void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
2254 assert_not_delayed();
2255 if (use_cbcond(L)) {
2256 Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
2257 return;
2258 }
2259 br_notnull(s1, false, p, L);
2260 delayed()->nop();
2261 }
2263 // Unconditional short branch
2264 void MacroAssembler::ba_short(Label& L) {
2265 if (use_cbcond(L)) {
2266 Assembler::cbcond(equal, icc, G0, G0, L);
2267 return;
2268 }
2269 br(always, false, pt, L);
2270 delayed()->nop();
2271 }
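// Caller-side illustration (a sketch, not code from this file): the short
// forms above collapse the usual three-instruction idiom into a single
// cbcond when the label is within cbcond's reach.
//
//   // long form, always available:
//   cmp(Rval, Rlimit);
//   br(Assembler::less, false, Assembler::pt, L_ok);
//   delayed()->nop();
//
//   // short form; emits cbcond on T4, otherwise the sequence above:
//   cmp_and_br_short(Rval, Rlimit, Assembler::less, Assembler::pt, L_ok);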
2273 // instruction sequences factored across compiler & interpreter
2276 void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low,
2277 Register Rb_hi, Register Rb_low,
2278 Register Rresult) {
2280 Label check_low_parts, done;
2282 cmp(Ra_hi, Rb_hi ); // compare hi parts
2283 br(equal, true, pt, check_low_parts);
2284 delayed()->cmp(Ra_low, Rb_low); // test low parts
2286 // And, with an unsigned comparison, it does not matter if the numbers
2287 // are negative or not.
2288 // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
2289 // The second one is larger when compared as unsigned.
2291 // Other notes: The first move in each triplet can be unconditional
2292 // (and therefore probably prefetchable).
2293 // And the equals case for the high part does not need testing,
2294 // since that triplet is reached only after finding the high halves differ.
2296 if (VM_Version::v9_instructions_work()) {
2297 mov(-1, Rresult);
2298 ba(done); delayed()-> movcc(greater, false, icc, 1, Rresult);
2299 } else {
2300 br(less, true, pt, done); delayed()-> set(-1, Rresult);
2301 br(greater, true, pt, done); delayed()-> set( 1, Rresult);
2302 }
2304 bind( check_low_parts );
2306 if (VM_Version::v9_instructions_work()) {
2307 mov( -1, Rresult);
2308 movcc(equal, false, icc, 0, Rresult);
2309 movcc(greaterUnsigned, false, icc, 1, Rresult);
2310 } else {
2311 set(-1, Rresult);
2312 br(equal, true, pt, done); delayed()->set( 0, Rresult);
2313 br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult);
2314 }
2315 bind( done );
2316 }
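// A minimal C sketch of the comparison implemented above (illustrative only):
//
//   int lcmp_sketch(int a_hi, unsigned a_lo, int b_hi, unsigned b_lo) {
//     if (a_hi != b_hi) return (a_hi < b_hi) ? -1 : 1;  // signed compare of high halves
//     if (a_lo != b_lo) return (a_lo < b_lo) ? -1 : 1;  // unsigned compare of low halves
//     return 0;
//   }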
2318 void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
2319 subcc( G0, Rlow, Rlow );
2320 subc( G0, Rhi, Rhi );
2321 }
2323 void MacroAssembler::lshl( Register Rin_high, Register Rin_low,
2324 Register Rcount,
2325 Register Rout_high, Register Rout_low,
2326 Register Rtemp ) {
2329 Register Ralt_count = Rtemp;
2330 Register Rxfer_bits = Rtemp;
2332 assert( Ralt_count != Rin_high
2333 && Ralt_count != Rin_low
2334 && Ralt_count != Rcount
2335 && Rxfer_bits != Rin_low
2336 && Rxfer_bits != Rin_high
2337 && Rxfer_bits != Rcount
2338 && Rxfer_bits != Rout_low
2339 && Rout_low != Rin_high,
2340 "register alias checks");
2342 Label big_shift, done;
2344 // This code can be optimized to use the 64 bit shifts in V9.
2345 // Here we use the 32 bit shifts.
2347 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
2348 subcc(Rcount, 31, Ralt_count);
2349 br(greater, true, pn, big_shift);
2350 delayed()->dec(Ralt_count);
2352 // shift < 32 bits, Ralt_count = Rcount-31
2354 // We get the transfer bits by shifting right by 32-count the low
2355 // register. This is done by shifting right by 31-count and then by one
2356 // more to take care of the special (rare) case where count is zero
2357 // (shifting by 32 would not work).
2359 neg(Ralt_count);
2361 // The order of the next two instructions is critical when Rin and Rout
2362 // are the same register, and must not be reversed.
2364 srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count
2365 if (Rcount != Rout_low) {
2366 sll(Rin_low, Rcount, Rout_low); // low half
2367 }
2368 sll(Rin_high, Rcount, Rout_high);
2369 if (Rcount == Rout_low) {
2370 sll(Rin_low, Rcount, Rout_low); // low half
2371 }
2372 srl(Rxfer_bits, 1, Rxfer_bits ); // shift right by one more
2373 ba(done);
2374 delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low
2376 // shift >= 32 bits, Ralt_count = Rcount-32
2377 bind(big_shift);
2378 sll(Rin_low, Ralt_count, Rout_high );
2379 clr(Rout_low);
2381 bind(done);
2382 }
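// The transfer-bit trick above, as a C sketch (count already masked to six
// bits; this path handles count in [0,31]):
//
//   unsigned long long lshl_sketch(unsigned hi, unsigned lo, unsigned count) {
//     unsigned xfer   = (lo >> (31 - count)) >> 1;  // == lo >> (32 - count), but safe for count == 0
//     unsigned out_hi = (hi << count) | xfer;
//     unsigned out_lo = lo << count;
//     return ((unsigned long long)out_hi << 32) | out_lo;
//   }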
2385 void MacroAssembler::lshr( Register Rin_high, Register Rin_low,
2386 Register Rcount,
2387 Register Rout_high, Register Rout_low,
2388 Register Rtemp ) {
2390 Register Ralt_count = Rtemp;
2391 Register Rxfer_bits = Rtemp;
2393 assert( Ralt_count != Rin_high
2394 && Ralt_count != Rin_low
2395 && Ralt_count != Rcount
2396 && Rxfer_bits != Rin_low
2397 && Rxfer_bits != Rin_high
2398 && Rxfer_bits != Rcount
2399 && Rxfer_bits != Rout_high
2400 && Rout_high != Rin_low,
2401 "register alias checks");
2403 Label big_shift, done;
2405 // This code can be optimized to use the 64 bit shifts in V9.
2406 // Here we use the 32 bit shifts.
2408 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
2409 subcc(Rcount, 31, Ralt_count);
2410 br(greater, true, pn, big_shift);
2411 delayed()->dec(Ralt_count);
2413 // shift < 32 bits, Ralt_count = Rcount-31
2415 // We get the transfer bits by shifting left by 32-count the high
2416 // register. This is done by shifting left by 31-count and then by one
2417 // more to take care of the special (rare) case where count is zero
2418 // (shifting by 32 would not work).
2420 neg(Ralt_count);
2421 if (Rcount != Rout_low) {
2422 srl(Rin_low, Rcount, Rout_low);
2423 }
2425 // The order of the next two instructions is critical when Rin and Rout
2426 // are the same register, and must not be reversed.
2428 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count
2429 sra(Rin_high, Rcount, Rout_high ); // high half
2430 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more
2431 if (Rcount == Rout_low) {
2432 srl(Rin_low, Rcount, Rout_low);
2433 }
2434 ba(done);
2435 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high
2437 // shift >= 32 bits, Ralt_count = Rcount-32
2438 bind(big_shift);
2440 sra(Rin_high, Ralt_count, Rout_low);
2441 sra(Rin_high, 31, Rout_high); // sign into hi
2443 bind( done );
2444 }
2448 void MacroAssembler::lushr( Register Rin_high, Register Rin_low,
2449 Register Rcount,
2450 Register Rout_high, Register Rout_low,
2451 Register Rtemp ) {
2453 Register Ralt_count = Rtemp;
2454 Register Rxfer_bits = Rtemp;
2456 assert( Ralt_count != Rin_high
2457 && Ralt_count != Rin_low
2458 && Ralt_count != Rcount
2459 && Rxfer_bits != Rin_low
2460 && Rxfer_bits != Rin_high
2461 && Rxfer_bits != Rcount
2462 && Rxfer_bits != Rout_high
2463 && Rout_high != Rin_low,
2464 "register alias checks");
2466 Label big_shift, done;
2468 // This code can be optimized to use the 64 bit shifts in V9.
2469 // Here we use the 32 bit shifts.
2471 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
2472 subcc(Rcount, 31, Ralt_count);
2473 br(greater, true, pn, big_shift);
2474 delayed()->dec(Ralt_count);
2476 // shift < 32 bits, Ralt_count = Rcount-31
2478 // We get the transfer bits by shifting left by 32-count the high
2479 // register. This is done by shifting left by 31-count and then by one
2480 // more to take care of the special (rare) case where count is zero
2481 // (shifting by 32 would not work).
2483 neg(Ralt_count);
2484 if (Rcount != Rout_low) {
2485 srl(Rin_low, Rcount, Rout_low);
2486 }
2488 // The order of the next two instructions is critical when Rin and Rout
2489 // are the same register, and must not be reversed.
2491 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count
2492 srl(Rin_high, Rcount, Rout_high ); // high half
2493 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more
2494 if (Rcount == Rout_low) {
2495 srl(Rin_low, Rcount, Rout_low);
2496 }
2497 ba(done);
2498 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high
2500 // shift >= 32 bits, Ralt_count = Rcount-32
2501 bind(big_shift);
2503 srl(Rin_high, Ralt_count, Rout_low);
2504 clr(Rout_high);
2506 bind( done );
2507 }
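// The mirrored trick for the right shifts (lshr/lushr), as a C sketch of the
// logical case with count in [0,31]:
//
//   unsigned long long lushr_sketch(unsigned hi, unsigned lo, unsigned count) {
//     unsigned xfer   = (hi << (31 - count)) << 1;  // == hi << (32 - count), but safe for count == 0
//     unsigned out_lo = (lo >> count) | xfer;
//     unsigned out_hi = hi >> count;                // lshr uses an arithmetic shift here instead
//     return ((unsigned long long)out_hi << 32) | out_lo;
//   }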
2509 #ifdef _LP64
2510 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
2511 cmp(Ra, Rb);
2512 mov(-1, Rresult);
2513 movcc(equal, false, xcc, 0, Rresult);
2514 movcc(greater, false, xcc, 1, Rresult);
2515 }
2516 #endif
2519 void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) {
2520 switch (size_in_bytes) {
2521 case 8: ld_long(src, dst); break;
2522 case 4: ld( src, dst); break;
2523 case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break;
2524 case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break;
2525 default: ShouldNotReachHere();
2526 }
2527 }
2529 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
2530 switch (size_in_bytes) {
2531 case 8: st_long(src, dst); break;
2532 case 4: st( src, dst); break;
2533 case 2: sth( src, dst); break;
2534 case 1: stb( src, dst); break;
2535 default: ShouldNotReachHere();
2536 }
2537 }
2540 void MacroAssembler::float_cmp( bool is_float, int unordered_result,
2541 FloatRegister Fa, FloatRegister Fb,
2542 Register Rresult) {
2544 fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb);
2546 Condition lt = unordered_result == -1 ? f_unorderedOrLess : f_less;
2547 Condition eq = f_equal;
2548 Condition gt = unordered_result == 1 ? f_unorderedOrGreater : f_greater;
2550 if (VM_Version::v9_instructions_work()) {
2552 mov(-1, Rresult);
2553 movcc(eq, true, fcc0, 0, Rresult);
2554 movcc(gt, true, fcc0, 1, Rresult);
2556 } else {
2557 Label done;
2559 set( -1, Rresult );
2560 //fb(lt, true, pn, done); delayed()->set( -1, Rresult );
2561 fb( eq, true, pn, done); delayed()->set( 0, Rresult );
2562 fb( gt, true, pn, done); delayed()->set( 1, Rresult );
2564 bind (done);
2565 }
2566 }
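// Semantics of the above, as a C sketch (unordered_result is -1 or 1):
//
//   int float_cmp_sketch(double a, double b, int unordered_result) {
//     if (a == b) return 0;
//     if (a <  b) return -1;
//     if (a >  b) return 1;
//     return unordered_result;   // at least one NaN operand
//   }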
2569 void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
2570 {
2571 if (VM_Version::v9_instructions_work()) {
2572 Assembler::fneg(w, s, d);
2573 } else {
2574 if (w == FloatRegisterImpl::S) {
2575 Assembler::fneg(w, s, d);
2576 } else if (w == FloatRegisterImpl::D) {
2577 // encoding() does a sanity check on the alignment.
2578 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
2579 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
2581 Assembler::fneg(FloatRegisterImpl::S, s, d);
2582 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2583 } else {
2584 assert(w == FloatRegisterImpl::Q, "Invalid float register width");
2586 // encoding() does a sanity check on the alignment.
2587 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
2588 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
2590 Assembler::fneg(FloatRegisterImpl::S, s, d);
2591 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2592 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
2593 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
2594 }
2595 }
2596 }
2598 void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
2599 {
2600 if (VM_Version::v9_instructions_work()) {
2601 Assembler::fmov(w, s, d);
2602 } else {
2603 if (w == FloatRegisterImpl::S) {
2604 Assembler::fmov(w, s, d);
2605 } else if (w == FloatRegisterImpl::D) {
2606 // encoding() does a sanity check on the alignment.
2607 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
2608 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
2610 Assembler::fmov(FloatRegisterImpl::S, s, d);
2611 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2612 } else {
2613 assert(w == FloatRegisterImpl::Q, "Invalid float register width");
2615 // encoding() does a sanity check on the alignment.
2616 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
2617 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
2619 Assembler::fmov(FloatRegisterImpl::S, s, d);
2620 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2621 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
2622 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
2623 }
2624 }
2625 }
2627 void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
2628 {
2629 if (VM_Version::v9_instructions_work()) {
2630 Assembler::fabs(w, s, d);
2631 } else {
2632 if (w == FloatRegisterImpl::S) {
2633 Assembler::fabs(w, s, d);
2634 } else if (w == FloatRegisterImpl::D) {
2635 // encoding() does a sanity check on the alignment.
2636 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
2637 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
2639 Assembler::fabs(FloatRegisterImpl::S, s, d);
2640 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2641 } else {
2642 assert(w == FloatRegisterImpl::Q, "Invalid float register width");
2644 // encoding() does a sanity check on the alignment.
2645 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
2646 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
2648 Assembler::fabs(FloatRegisterImpl::S, s, d);
2649 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2650 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
2651 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
2652 }
2653 }
2654 }
2656 void MacroAssembler::save_all_globals_into_locals() {
2657 mov(G1,L1);
2658 mov(G2,L2);
2659 mov(G3,L3);
2660 mov(G4,L4);
2661 mov(G5,L5);
2662 mov(G6,L6);
2663 mov(G7,L7);
2664 }
2666 void MacroAssembler::restore_globals_from_locals() {
2667 mov(L1,G1);
2668 mov(L2,G2);
2669 mov(L3,G3);
2670 mov(L4,G4);
2671 mov(L5,G5);
2672 mov(L6,G6);
2673 mov(L7,G7);
2674 }
2676 // Use for 64-bit operations.
2677 void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
2678 {
2679 // store ptr_reg as the new top value
2680 #ifdef _LP64
2681 casx(top_ptr_reg, top_reg, ptr_reg);
2682 #else
2683 cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm);
2684 #endif // _LP64
2685 }
2687 // [RGV] This routine does not handle 64 bit operations.
2688 // use casx_under_lock() or casx directly!!!
2689 void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
2690 {
2691 // store ptr_reg as the new top value
2692 if (VM_Version::v9_instructions_work()) {
2693 cas(top_ptr_reg, top_reg, ptr_reg);
2694 } else {
2696 // If the register is neither an out nor a global, it is not visible
2697 // after the save. Allocate a register for it, and save its
2698 // value in the register save area (the save may not flush
2699 // registers to the save area).
2701 Register top_ptr_reg_after_save;
2702 Register top_reg_after_save;
2703 Register ptr_reg_after_save;
2705 if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) {
2706 top_ptr_reg_after_save = top_ptr_reg->after_save();
2707 } else {
2708 Address reg_save_addr = top_ptr_reg->address_in_saved_window();
2709 top_ptr_reg_after_save = L0;
2710 st(top_ptr_reg, reg_save_addr);
2711 }
2713 if (top_reg->is_out() || top_reg->is_global()) {
2714 top_reg_after_save = top_reg->after_save();
2715 } else {
2716 Address reg_save_addr = top_reg->address_in_saved_window();
2717 top_reg_after_save = L1;
2718 st(top_reg, reg_save_addr);
2719 }
2721 if (ptr_reg->is_out() || ptr_reg->is_global()) {
2722 ptr_reg_after_save = ptr_reg->after_save();
2723 } else {
2724 Address reg_save_addr = ptr_reg->address_in_saved_window();
2725 ptr_reg_after_save = L2;
2726 st(ptr_reg, reg_save_addr);
2727 }
2729 const Register& lock_reg = L3;
2730 const Register& lock_ptr_reg = L4;
2731 const Register& value_reg = L5;
2732 const Register& yield_reg = L6;
2733 const Register& yieldall_reg = L7;
2735 save_frame();
2737 if (top_ptr_reg_after_save == L0) {
2738 ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save);
2739 }
2741 if (top_reg_after_save == L1) {
2742 ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save);
2743 }
2745 if (ptr_reg_after_save == L2) {
2746 ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save);
2747 }
2749 Label retry_get_lock;
2750 Label not_same;
2751 Label dont_yield;
2753 assert(lock_addr, "lock_address should be non null for v8");
2754 set((intptr_t)lock_addr, lock_ptr_reg);
2755 // Initialize yield counter
2756 mov(G0,yield_reg);
2757 mov(G0, yieldall_reg);
2758 set(StubRoutines::Sparc::locked, lock_reg);
2760 bind(retry_get_lock);
2761 cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dont_yield);
2763 if (use_call_vm) {
2764 Untested("Need to verify global reg consistency");
2765 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg);
2766 } else {
2767 // Save the regs and make space for a C call
2768 save(SP, -96, SP);
2769 save_all_globals_into_locals();
2770 call(CAST_FROM_FN_PTR(address,os::yield_all));
2771 delayed()->mov(yieldall_reg, O0);
2772 restore_globals_from_locals();
2773 restore();
2774 }
2776 // reset the counter
2777 mov(G0,yield_reg);
2778 add(yieldall_reg, 1, yieldall_reg);
2780 bind(dont_yield);
2781 // try to get lock
2782 swap(lock_ptr_reg, 0, lock_reg);
2784 // did we get the lock?
2785 cmp(lock_reg, StubRoutines::Sparc::unlocked);
2786 br(Assembler::notEqual, true, Assembler::pn, retry_get_lock);
2787 delayed()->add(yield_reg,1,yield_reg);
2789 // yes, got lock. do we have the same top?
2790 ld(top_ptr_reg_after_save, 0, value_reg);
2791 cmp_and_br_short(value_reg, top_reg_after_save, Assembler::notEqual, Assembler::pn, not_same);
2793 // yes, same top.
2794 st(ptr_reg_after_save, top_ptr_reg_after_save, 0);
2795 membar(Assembler::StoreStore);
2797 bind(not_same);
2798 mov(value_reg, ptr_reg_after_save);
2799 st(lock_reg, lock_ptr_reg, 0); // unlock
2801 restore();
2802 }
2803 }
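// Net effect of the locked V8 path above, as a sketch (top_ptr_reg = addr,
// top_reg = cmp, ptr_reg = set):
//
//   spin_lock(lock_addr);        // with periodic yields while contended
//   int old = *addr;
//   if (old == cmp) *addr = set;
//   set = old;                   // like cas, ptr_reg receives the prior memory value
//   spin_unlock(lock_addr);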
2805 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
2806 Register tmp,
2807 int offset) {
2808 intptr_t value = *delayed_value_addr;
2809 if (value != 0)
2810 return RegisterOrConstant(value + offset);
2812 // load indirectly to solve generation ordering problem
2813 AddressLiteral a(delayed_value_addr);
2814 load_ptr_contents(a, tmp);
2816 #ifdef ASSERT
2817 tst(tmp);
2818 breakpoint_trap(zero, xcc);
2819 #endif
2821 if (offset != 0)
2822 add(tmp, offset, tmp);
2824 return RegisterOrConstant(tmp);
2825 }
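// The "generation ordering problem": *delayed_value_addr may still be zero
// while this code is generated and is only filled in later, so the value is
// fetched through its address at run time instead of being baked in. Sketch:
//
//   if (*delayed_value_addr != 0) use the constant *delayed_value_addr + offset;
//   else                          emit: load [delayed_value_addr] -> tmp; add tmp, offset, tmp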
2828 RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
2829 assert(d.register_or_noreg() != G0, "lost side effect");
2830 if ((s2.is_constant() && s2.as_constant() == 0) ||
2831 (s2.is_register() && s2.as_register() == G0)) {
2832 // Do nothing, just move value.
2833 if (s1.is_register()) {
2834 if (d.is_constant()) d = temp;
2835 mov(s1.as_register(), d.as_register());
2836 return d;
2837 } else {
2838 return s1;
2839 }
2840 }
2842 if (s1.is_register()) {
2843 assert_different_registers(s1.as_register(), temp);
2844 if (d.is_constant()) d = temp;
2845 andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
2846 return d;
2847 } else {
2848 if (s2.is_register()) {
2849 assert_different_registers(s2.as_register(), temp);
2850 if (d.is_constant()) d = temp;
2851 set(s1.as_constant(), temp);
2852 andn(temp, s2.as_register(), d.as_register());
2853 return d;
2854 } else {
2855 intptr_t res = s1.as_constant() & ~s2.as_constant();
2856 return res;
2857 }
2858 }
2859 }
2861 RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
2862 assert(d.register_or_noreg() != G0, "lost side effect");
2863 if ((s2.is_constant() && s2.as_constant() == 0) ||
2864 (s2.is_register() && s2.as_register() == G0)) {
2865 // Do nothing, just move value.
2866 if (s1.is_register()) {
2867 if (d.is_constant()) d = temp;
2868 mov(s1.as_register(), d.as_register());
2869 return d;
2870 } else {
2871 return s1;
2872 }
2873 }
2875 if (s1.is_register()) {
2876 assert_different_registers(s1.as_register(), temp);
2877 if (d.is_constant()) d = temp;
2878 add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
2879 return d;
2880 } else {
2881 if (s2.is_register()) {
2882 assert_different_registers(s2.as_register(), temp);
2883 if (d.is_constant()) d = temp;
2884 add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register());
2885 return d;
2886 } else {
2887 intptr_t res = s1.as_constant() + s2.as_constant();
2888 return res;
2889 }
2890 }
2891 }
2893 RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
2894 assert(d.register_or_noreg() != G0, "lost side effect");
2895 if (!is_simm13(s2.constant_or_zero()))
2896 s2 = (s2.as_constant() & 0xFF);
2897 if ((s2.is_constant() && s2.as_constant() == 0) ||
2898 (s2.is_register() && s2.as_register() == G0)) {
2899 // Do nothing, just move value.
2900 if (s1.is_register()) {
2901 if (d.is_constant()) d = temp;
2902 mov(s1.as_register(), d.as_register());
2903 return d;
2904 } else {
2905 return s1;
2906 }
2907 }
2909 if (s1.is_register()) {
2910 assert_different_registers(s1.as_register(), temp);
2911 if (d.is_constant()) d = temp;
2912 sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
2913 return d;
2914 } else {
2915 if (s2.is_register()) {
2916 assert_different_registers(s2.as_register(), temp);
2917 if (d.is_constant()) d = temp;
2918 set(s1.as_constant(), temp);
2919 sll_ptr(temp, s2.as_register(), d.as_register());
2920 return d;
2921 } else {
2922 intptr_t res = s1.as_constant() << s2.as_constant();
2923 return res;
2924 }
2925 }
2926 }
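// Usage sketch (mirrors lookup_interface_method below): these helpers fold
// constants at assembly time and only emit code when a register is involved.
//
//   RegisterOrConstant off = itable_index;                  // register or constant
//   off = regcon_sll_ptr(off, exact_log2(entry_size), off); // scale by the entry size
//   off = regcon_inc_ptr(off, method_offset, off);          // add a byte offset
//   // if 'off' stayed constant throughout, no instructions were emitted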
2929 // Look up the method for a megamorphic invokeinterface call.
2930 // The target method is determined by <intf_klass, itable_index>.
2931 // The receiver klass is in recv_klass.
2932 // On success, the result will be in method_result, and execution falls through.
2933 // On failure, execution transfers to the given label.
2934 void MacroAssembler::lookup_interface_method(Register recv_klass,
2935 Register intf_klass,
2936 RegisterOrConstant itable_index,
2937 Register method_result,
2938 Register scan_temp,
2939 Register sethi_temp,
2940 Label& L_no_such_interface) {
2941 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
2942 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
2943 "caller must use same register for non-constant itable index as for method");
2945 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
2946 int vtable_base = instanceKlass::vtable_start_offset() * wordSize;
2947 int scan_step = itableOffsetEntry::size() * wordSize;
2948 int vte_size = vtableEntry::size() * wordSize;
2950 lduw(recv_klass, instanceKlass::vtable_length_offset() * wordSize, scan_temp);
2951 // %%% We should store the aligned, prescaled offset in the klassOop.
2952 // Then the next several instructions would fold away.
2954 int round_to_unit = ((HeapWordsPerLong > 1) ? BytesPerLong : 0);
2955 int itb_offset = vtable_base;
2956 if (round_to_unit != 0) {
2957 // hoist first instruction of round_to(scan_temp, BytesPerLong):
2958 itb_offset += round_to_unit - wordSize;
2959 }
2960 int itb_scale = exact_log2(vtableEntry::size() * wordSize);
2961 sll(scan_temp, itb_scale, scan_temp);
2962 add(scan_temp, itb_offset, scan_temp);
2963 if (round_to_unit != 0) {
2964 // Round up to align_object_offset boundary
2965 // see code for instanceKlass::start_of_itable!
2966 // Was: round_to(scan_temp, BytesPerLong);
2967 // Hoisted: add(scan_temp, BytesPerLong-1, scan_temp);
2968 and3(scan_temp, -round_to_unit, scan_temp);
2969 }
2970 add(recv_klass, scan_temp, scan_temp);
2972 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
2973 RegisterOrConstant itable_offset = itable_index;
2974 itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
2975 itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
2976 add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);
2978 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
2979 // if (scan->interface() == intf) {
2980 // result = (klass + scan->offset() + itable_index);
2981 // }
2982 // }
2983 Label search, found_method;
2985 for (int peel = 1; peel >= 0; peel--) {
2986 // %%%% Could load both offset and interface in one ldx, if they were
2987 // in the opposite order. This would save a load.
2988 ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result);
2990 // Check that this entry is non-null. A null entry means that
2991 // the receiver class doesn't implement the interface, and wasn't the
2992 // same as when the caller was compiled.
2993 bpr(Assembler::rc_z, false, Assembler::pn, method_result, L_no_such_interface);
2994 delayed()->cmp(method_result, intf_klass);
2996 if (peel) {
2997 brx(Assembler::equal, false, Assembler::pt, found_method);
2998 } else {
2999 brx(Assembler::notEqual, false, Assembler::pn, search);
3000 // (invert the test to fall through to found_method...)
3001 }
3002 delayed()->add(scan_temp, scan_step, scan_temp);
3004 if (!peel) break;
3006 bind(search);
3007 }
3009 bind(found_method);
3011 // Got a hit.
3012 int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
3013 // scan_temp[-scan_step] points to the vtable offset we need
3014 ito_offset -= scan_step;
3015 lduw(scan_temp, ito_offset, scan_temp);
3016 ld_ptr(recv_klass, scan_temp, method_result);
3017 }
3020 void MacroAssembler::check_klass_subtype(Register sub_klass,
3021 Register super_klass,
3022 Register temp_reg,
3023 Register temp2_reg,
3024 Label& L_success) {
3025 Label L_failure, L_pop_to_failure;
3026 check_klass_subtype_fast_path(sub_klass, super_klass,
3027 temp_reg, temp2_reg,
3028 &L_success, &L_failure, NULL);
3029 Register sub_2 = sub_klass;
3030 Register sup_2 = super_klass;
3031 if (!sub_2->is_global()) sub_2 = L0;
3032 if (!sup_2->is_global()) sup_2 = L1;
3034 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
3035 check_klass_subtype_slow_path(sub_2, sup_2,
3036 L2, L3, L4, L5,
3037 NULL, &L_pop_to_failure);
3039 // on success:
3040 restore();
3041 ba_short(L_success);
3043 // on failure:
3044 bind(L_pop_to_failure);
3045 restore();
3046 bind(L_failure);
3047 }
3050 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
3051 Register super_klass,
3052 Register temp_reg,
3053 Register temp2_reg,
3054 Label* L_success,
3055 Label* L_failure,
3056 Label* L_slow_path,
3057 RegisterOrConstant super_check_offset) {
3058 int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
3059 Klass::secondary_super_cache_offset_in_bytes());
3060 int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
3061 Klass::super_check_offset_offset_in_bytes());
3063 bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
3064 bool need_slow_path = (must_load_sco ||
3065 super_check_offset.constant_or_zero() == sco_offset);
3067 assert_different_registers(sub_klass, super_klass, temp_reg);
3068 if (super_check_offset.is_register()) {
3069 assert_different_registers(sub_klass, super_klass, temp_reg,
3070 super_check_offset.as_register());
3071 } else if (must_load_sco) {
3072 assert(temp2_reg != noreg, "supply either a temp or a register offset");
3073 }
3075 Label L_fallthrough;
3076 int label_nulls = 0;
3077 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
3078 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
3079 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
3080 assert(label_nulls <= 1 ||
3081 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
3082 "at most one NULL in the batch, usually");
3084 // If the pointers are equal, we are done (e.g., String[] elements).
3085 // This self-check enables sharing of secondary supertype arrays among
3086 // non-primary types such as array-of-interface. Otherwise, each such
3087 // type would need its own customized SSA.
3088 // We move this check to the front of the fast path because many
3089 // type checks are in fact trivially successful in this manner,
3090 // so we get a nicely predicted branch right at the start of the check.
3091 cmp(super_klass, sub_klass);
3092 brx(Assembler::equal, false, Assembler::pn, *L_success);
3093 delayed()->nop();
3095 // Check the supertype display:
3096 if (must_load_sco) {
3097 // The super check offset is always positive...
3098 lduw(super_klass, sco_offset, temp2_reg);
3099 super_check_offset = RegisterOrConstant(temp2_reg);
3100 // super_check_offset is register.
3101 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register());
3102 }
3103 ld_ptr(sub_klass, super_check_offset, temp_reg);
3104 cmp(super_klass, temp_reg);
3106 // This check has worked decisively for primary supers.
3107 // Secondary supers are sought in the super_cache ('super_cache_addr').
3108 // (Secondary supers are interfaces and very deeply nested subtypes.)
3109 // This works in the same check above because of a tricky aliasing
3110 // between the super_cache and the primary super display elements.
3111 // (The 'super_check_addr' can address either, as the case requires.)
3112 // Note that the cache is updated below if it does not help us find
3113 // what we need immediately.
3114 // So if it was a primary super, we can just fail immediately.
3115 // Otherwise, it's the slow path for us (no success at this point).
3117 // Hacked ba(), which may only be used just before L_fallthrough.
3118 #define FINAL_JUMP(label) \
3119 if (&(label) != &L_fallthrough) { \
3120 ba(label); delayed()->nop(); \
3121 }
3123 if (super_check_offset.is_register()) {
3124 brx(Assembler::equal, false, Assembler::pn, *L_success);
3125 delayed()->cmp(super_check_offset.as_register(), sc_offset);
3127 if (L_failure == &L_fallthrough) {
3128 brx(Assembler::equal, false, Assembler::pt, *L_slow_path);
3129 delayed()->nop();
3130 } else {
3131 brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
3132 delayed()->nop();
3133 FINAL_JUMP(*L_slow_path);
3134 }
3135 } else if (super_check_offset.as_constant() == sc_offset) {
3136 // Need a slow path; fast failure is impossible.
3137 if (L_slow_path == &L_fallthrough) {
3138 brx(Assembler::equal, false, Assembler::pt, *L_success);
3139 delayed()->nop();
3140 } else {
3141 brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path);
3142 delayed()->nop();
3143 FINAL_JUMP(*L_success);
3144 }
3145 } else {
3146 // No slow path; it's a fast decision.
3147 if (L_failure == &L_fallthrough) {
3148 brx(Assembler::equal, false, Assembler::pt, *L_success);
3149 delayed()->nop();
3150 } else {
3151 brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
3152 delayed()->nop();
3153 FINAL_JUMP(*L_success);
3154 }
3155 }
3157 bind(L_fallthrough);
3159 #undef FINAL_JUMP
3160 }
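// Shape of the fast path above, as a sketch:
//
//   if (sub_klass == super_klass)                      goto success;    // trivial hit
//   if (sub_klass[super_check_offset] == super_klass)  goto success;    // display or cache hit
//   if (super_check_offset == sc_offset)               goto slow_path;  // cache miss: must scan
//   goto failure;                                      // a primary-super miss is decisive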
3163 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
3164 Register super_klass,
3165 Register count_temp,
3166 Register scan_temp,
3167 Register scratch_reg,
3168 Register coop_reg,
3169 Label* L_success,
3170 Label* L_failure) {
3171 assert_different_registers(sub_klass, super_klass,
3172 count_temp, scan_temp, scratch_reg, coop_reg);
3174 Label L_fallthrough, L_loop;
3175 int label_nulls = 0;
3176 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
3177 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
3178 assert(label_nulls <= 1, "at most one NULL in the batch");
3180 // a couple of useful fields in sub_klass:
3181 int ss_offset = (klassOopDesc::header_size() * HeapWordSize +
3182 Klass::secondary_supers_offset_in_bytes());
3183 int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
3184 Klass::secondary_super_cache_offset_in_bytes());
3186 // Do a linear scan of the secondary super-klass chain.
3187 // This code is rarely used, so simplicity is a virtue here.
3189 #ifndef PRODUCT
3190 int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
3191 inc_counter((address) pst_counter, count_temp, scan_temp);
3192 #endif
3194 // We will consult the secondary-super array.
3195 ld_ptr(sub_klass, ss_offset, scan_temp);
3197 // Compress superclass if necessary.
3198 Register search_key = super_klass;
3199 bool decode_super_klass = false;
3200 if (UseCompressedOops) {
3201 if (coop_reg != noreg) {
3202 encode_heap_oop_not_null(super_klass, coop_reg);
3203 search_key = coop_reg;
3204 } else {
3205 encode_heap_oop_not_null(super_klass);
3206 decode_super_klass = true; // scarce temps!
3207 }
3208 // The superclass is never null; it would be a basic system error if a null
3209 // pointer were to sneak in here. Note that we have already loaded the
3210 // Klass::super_check_offset from the super_klass in the fast path,
3211 // so if there is a null in that register, we are already in the afterlife.
3212 }
3214 // Load the array length. (The unsigned lduw does the right thing on LP64.)
3215 lduw(scan_temp, arrayOopDesc::length_offset_in_bytes(), count_temp);
3217 // Check for empty secondary super list
3218 tst(count_temp);
3220 // Top of search loop
3221 bind(L_loop);
3222 br(Assembler::equal, false, Assembler::pn, *L_failure);
3223 delayed()->add(scan_temp, heapOopSize, scan_temp);
3224 assert(heapOopSize != 0, "heapOopSize should be initialized");
3226 // Skip the array header in all array accesses.
3227 int elem_offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
3228 elem_offset -= heapOopSize; // the scan pointer was pre-incremented also
3230 // Load next super to check
3231 if (UseCompressedOops) {
3232 // Don't use load_heap_oop; we don't want to decode the element.
3233 lduw( scan_temp, elem_offset, scratch_reg );
3234 } else {
3235 ld_ptr( scan_temp, elem_offset, scratch_reg );
3236 }
3238 // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
3239 cmp(scratch_reg, search_key);
3241 // A miss means we are NOT a subtype and need to keep looping
3242 brx(Assembler::notEqual, false, Assembler::pn, L_loop);
3243 delayed()->deccc(count_temp); // decrement trip counter in delay slot
3245 // Falling out the bottom means we found a hit; we ARE a subtype
3246 if (decode_super_klass) decode_heap_oop(super_klass);
3248 // Success. Cache the super we found and proceed in triumph.
3249 st_ptr(super_klass, sub_klass, sc_offset);
3251 if (L_success != &L_fallthrough) {
3252 ba(*L_success);
3253 delayed()->nop();
3254 }
3256 bind(L_fallthrough);
3257 }
3260 void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
3261 Register temp_reg,
3262 Label& wrong_method_type) {
3263 assert_different_registers(mtype_reg, mh_reg, temp_reg);
3264 // compare method type against that of the receiver
3265 RegisterOrConstant mhtype_offset = delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg);
3266 load_heap_oop(mh_reg, mhtype_offset, temp_reg);
3267 cmp_and_brx_short(temp_reg, mtype_reg, Assembler::notEqual, Assembler::pn, wrong_method_type);
3268 }
3271 // A method handle has a "vmslots" field which gives the size of its
3272 // argument list in JVM stack slots. This field is either located directly
3273 // in every method handle, or else is indirectly accessed through the
3274 // method handle's MethodType. This macro hides the distinction.
3275 void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
3276 Register temp_reg) {
3277 assert_different_registers(vmslots_reg, mh_reg, temp_reg);
3278 // load mh.type.form.vmslots
3279 if (java_lang_invoke_MethodHandle::vmslots_offset_in_bytes() != 0) {
3280 // hoist vmslots into every mh to avoid dependent load chain
3281 ld( Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
3282 } else {
3283 Register temp2_reg = vmslots_reg;
3284 load_heap_oop(Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg)), temp2_reg);
3285 load_heap_oop(Address(temp2_reg, delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, temp_reg)), temp2_reg);
3286 ld( Address(temp2_reg, delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
3287 }
3288 }
3291 void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop) {
3292 assert(mh_reg == G3_method_handle, "caller must put MH object in G3");
3293 assert_different_registers(mh_reg, temp_reg);
3295 // pick out the interpreted side of the handler
3296 // NOTE: vmentry is not an oop!
3297 ld_ptr(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg);
3299 // off we go...
3300 ld_ptr(temp_reg, MethodHandleEntry::from_interpreted_entry_offset_in_bytes(), temp_reg);
3301 jmp(temp_reg, 0);
3303 // for the various stubs which take control at this point,
3304 // see MethodHandles::generate_method_handle_stub
3306 // Some callers can fill the delay slot.
3307 if (emit_delayed_nop) {
3308 delayed()->nop();
3309 }
3310 }
3313 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
3314 Register temp_reg,
3315 int extra_slot_offset) {
3316 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
3317 int stackElementSize = Interpreter::stackElementSize;
3318 int offset = extra_slot_offset * stackElementSize;
3319 if (arg_slot.is_constant()) {
3320 offset += arg_slot.as_constant() * stackElementSize;
3321 return offset;
3322 } else {
3323 assert(temp_reg != noreg, "must specify");
3324 sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg);
3325 if (offset != 0)
3326 add(temp_reg, offset, temp_reg);
3327 return temp_reg;
3328 }
3329 }
3332 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
3333 Register temp_reg,
3334 int extra_slot_offset) {
3335 return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset));
3336 }
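// Example (a sketch): with a constant slot no code is emitted, e.g.
//   argument_address(2, noreg, 1) == Address(Gargs, 3 * Interpreter::stackElementSize)
// while a register slot is shifted and biased into temp_reg first.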
3339 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
3340 Register temp_reg,
3341 Label& done, Label* slow_case,
3342 BiasedLockingCounters* counters) {
3343 assert(UseBiasedLocking, "why call this otherwise?");
3345 if (PrintBiasedLockingStatistics) {
3346 assert_different_registers(obj_reg, mark_reg, temp_reg, O7);
3347 if (counters == NULL)
3348 counters = BiasedLocking::counters();
3349 }
3351 Label cas_label;
3353 // Biased locking
3354 // See whether the lock is currently biased toward our thread and
3355 // whether the epoch is still valid
3356 // Note that the runtime guarantees sufficient alignment of JavaThread
3357 // pointers to allow age to be placed into low bits
3358 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
3359 and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
3360 cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label);
3362 load_klass(obj_reg, temp_reg);
3363 ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
3364 or3(G2_thread, temp_reg, temp_reg);
3365 xor3(mark_reg, temp_reg, temp_reg);
3366 andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
3367 if (counters != NULL) {
3368 cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
3369 // Reload mark_reg as we may need it later
3370 ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg);
3371 }
3372 brx(Assembler::equal, true, Assembler::pt, done);
3373 delayed()->nop();
3375 Label try_revoke_bias;
3376 Label try_rebias;
3377 Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
3378 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
3380 // At this point we know that the header has the bias pattern and
3381 // that we are not the bias owner in the current epoch. We need to
3382 // figure out more details about the state of the header in order to
3383 // know what operations can be legally performed on the object's
3384 // header.
3386 // If the low three bits in the xor result aren't clear, that means
3387 // the prototype header is no longer biased and we have to revoke
3388 // the bias on this object.
3389 btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
3390 brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);
3392 // Biasing is still enabled for this data type. See whether the
3393 // epoch of the current bias is still valid, meaning that the epoch
3394 // bits of the mark word are equal to the epoch bits of the
3395 // prototype header. (Note that the prototype header's epoch bits
3396 // only change at a safepoint.) If not, attempt to rebias the object
3397 // toward the current thread. Note that we must be absolutely sure
3398 // that the current epoch is invalid in order to do this because
3399 // otherwise the manipulations it performs on the mark word are
3400 // illegal.
3401 delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
3402 brx(Assembler::notZero, false, Assembler::pn, try_rebias);
3404 // The epoch of the current bias is still valid but we know nothing
3405 // about the owner; it might be set or it might be clear. Try to
3406 // acquire the bias of the object using an atomic operation. If this
3407 // fails we will go in to the runtime to revoke the object's bias.
3408 // Note that we first construct the presumed unbiased header so we
3409 // don't accidentally blow away another thread's valid bias.
3410 delayed()->and3(mark_reg,
3411 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
3412 mark_reg);
3413 or3(G2_thread, mark_reg, temp_reg);
3414 casn(mark_addr.base(), mark_reg, temp_reg);
3415 // If the biasing toward our thread failed, this means that
3416 // another thread succeeded in biasing it toward itself and we
3417 // need to revoke that bias. The revocation will occur in the
3418 // interpreter runtime in the slow case.
3419 cmp(mark_reg, temp_reg);
3420 if (counters != NULL) {
3421 cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg);
3422 }
3423 if (slow_case != NULL) {
3424 brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
3425 delayed()->nop();
3426 }
3427 ba_short(done);
3429 bind(try_rebias);
3430 // At this point we know the epoch has expired, meaning that the
3431 // current "bias owner", if any, is actually invalid. Under these
3432 // circumstances _only_, we are allowed to use the current header's
3433 // value as the comparison value when doing the cas to acquire the
3434 // bias in the current epoch. In other words, we allow transfer of
3435 // the bias from one thread to another directly in this situation.
3436 //
3437 // FIXME: due to a lack of registers we currently blow away the age
3438 // bits in this situation. Should attempt to preserve them.
3439 load_klass(obj_reg, temp_reg);
3440 ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
3441 or3(G2_thread, temp_reg, temp_reg);
3442 casn(mark_addr.base(), mark_reg, temp_reg);
3443 // If the biasing toward our thread failed, this means that
3444 // another thread succeeded in biasing it toward itself and we
3445 // need to revoke that bias. The revocation will occur in the
3446 // interpreter runtime in the slow case.
3447 cmp(mark_reg, temp_reg);
3448 if (counters != NULL) {
3449 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg);
3450 }
3451 if (slow_case != NULL) {
3452 brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
3453 delayed()->nop();
3454 }
3455 ba_short(done);
3457 bind(try_revoke_bias);
3458 // The prototype mark in the klass doesn't have the bias bit set any
3459 // more, indicating that objects of this data type are not supposed
3460 // to be biased any more. We are going to try to reset the mark of
3461 // this object to the prototype value and fall through to the
3462 // CAS-based locking scheme. Note that if our CAS fails, it means
3463 // that another thread raced us for the privilege of revoking the
3464 // bias of this particular object, so it's okay to continue in the
3465 // normal locking code.
3466 //
3467 // FIXME: due to a lack of registers we currently blow away the age
3468 // bits in this situation. Should attempt to preserve them.
3469 load_klass(obj_reg, temp_reg);
3470 ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
3471 casn(mark_addr.base(), mark_reg, temp_reg);
3472 // Fall through to the normal CAS-based lock, because no matter what
3473 // the result of the above CAS, some thread must have succeeded in
3474 // removing the bias bit from the object's header.
3475 if (counters != NULL) {
3476 cmp(mark_reg, temp_reg);
3477 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg);
3478 }
3480 bind(cas_label);
3481 }
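// For reference, a sketch of the mark word layout that the biased-locking
// code above relies on (see markOop.hpp; field widths are illustrative):
//
//   [ JavaThread* (bias owner) | epoch | age | biased_lock bit | lock bits ]
//
// biased_lock_mask_in_place covers the biased_lock bit plus the lock bits,
// and epoch_mask_in_place covers the epoch field. That is why xor-ing the
// mark against the klass's prototype header can triage the three cases:
// bias bit differs => try_revoke_bias, epoch differs => try_rebias,
// otherwise only the owner bits differ and we attempt to acquire the bias.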
3483 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done,
3484 bool allow_delay_slot_filling) {
3485 // Check for biased locking unlock case, which is a no-op
3486 // Note: we do not have to check the thread ID for two reasons.
3487 // First, the interpreter checks for IllegalMonitorStateException at
3488 // a higher level. Second, if the bias was revoked while we held the
3489 // lock, the object could not be rebiased toward another thread, so
3490 // the bias bit would be clear.
3491 ld_ptr(mark_addr, temp_reg);
3492 and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
3493 cmp(temp_reg, markOopDesc::biased_lock_pattern);
3494 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done);
3495 delayed();
3496 if (!allow_delay_slot_filling) {
3497 nop();
3498 }
3499 }
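// In effect the exit test above is simply:
//   (mark & biased_lock_mask_in_place) == biased_lock_pattern
// which holds only while the object is still biased; in that case the
// unlock is a no-op and we branch straight to done.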
3502 // CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by
3503 // Solaris/SPARC's "as". Another apt name would be cas_ptr()
3505 void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) {
3506 casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3507 }
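// Sketch of casn's semantics, for readers unfamiliar with SPARC CAS:
//
//   // atomically: if (*addr_reg == cmp_reg) then swap *addr_reg and set_reg
//   // afterwards set_reg holds the old value of *addr_reg, so:
//   casn(addr_reg, cmp_reg, set_reg);
//   cmp(cmp_reg, set_reg);      // equal iff the CAS succeeded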
3511 // compiler_lock_object() and compiler_unlock_object() are direct transliterations
3512 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments.
3513 // The code could be tightened up considerably.
3514 //
3515 // box->dhw disposition - post-conditions at DONE_LABEL.
3516 // - Successful inflated lock: box->dhw != 0.
3517 // Any non-zero value suffices.
3518 // Consider G2_thread, rsp, boxReg, or unused_mark()
3519 // - Successful Stack-lock: box->dhw == mark.
3520 // box->dhw must contain the displaced mark word value
3521 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined.
3522 // The slow-path fast_enter() and slow_enter() operators
3523 // are responsible for setting box->dhw = NonZero (typically ::unused_mark).
3524 // - Biased: box->dhw is undefined
3525 //
3526 // SPARC refworkload performance - specifically jetstream and scimark - is
3527 // extremely sensitive to the size of the code emitted by compiler_lock_object
3528 // and compiler_unlock_object. Critically, the key factor is code size, not path
3529 // length. (Simple experiments to pad CLO with unexecuted NOPs demonstrate the
3530 // effect).
3533 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
3534 Register Rbox, Register Rscratch,
3535 BiasedLockingCounters* counters,
3536 bool try_bias) {
3537 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
3539 verify_oop(Roop);
3540 Label done ;
3542 if (counters != NULL) {
3543 inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
3544 }
3546 if (EmitSync & 1) {
3547 mov(3, Rscratch);
3548 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
3549 cmp(SP, G0);
3550 return ;
3551 }
3553 if (EmitSync & 2) {
3555 // Fetch object's markword
3556 ld_ptr(mark_addr, Rmark);
3558 if (try_bias) {
3559 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
3560 }
3562 // Save Rbox in Rscratch to be used for the cas operation
3563 mov(Rbox, Rscratch);
3565 // set Rmark to markOop | markOopDesc::unlocked_value
3566 or3(Rmark, markOopDesc::unlocked_value, Rmark);
3568 // Initialize the box. (Must happen before we update the object mark!)
3569 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
3571 // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
3572 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
3573 casx_under_lock(mark_addr.base(), Rmark, Rscratch,
3574 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3576 // if compare/exchange succeeded we found an unlocked object and we now have locked it
3577 // hence we are done
3578 cmp(Rmark, Rscratch);
3579 #ifdef _LP64
3580 sub(Rscratch, STACK_BIAS, Rscratch);
3581 #endif
3582 brx(Assembler::equal, false, Assembler::pt, done);
3583 delayed()->sub(Rscratch, SP, Rscratch); //pull next instruction into delay slot
3585 // we did not find an unlocked object so see if this is a recursive case
3586 // sub(Rscratch, SP, Rscratch);
3587 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
3588 andcc(Rscratch, 0xfffff003, Rscratch);
3589 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
3590 bind (done);
3591 return ;
3592 }
3594 Label Egress ;
3596 if (EmitSync & 256) {
3597 Label IsInflated ;
3599 ld_ptr(mark_addr, Rmark); // fetch obj->mark
3600 // Triage: biased, stack-locked, neutral, inflated
3601 if (try_bias) {
3602 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
3603 // Invariant: if control reaches this point in the emitted stream
3604 // then Rmark has not been modified.
3605 }
3607 // Store mark into displaced mark field in the on-stack basic-lock "box"
3608 // Critically, this must happen before the CAS
3609 // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
3610 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
3611 andcc(Rmark, 2, G0);
3612 brx(Assembler::notZero, false, Assembler::pn, IsInflated);
3613 delayed()->
3615 // Try stack-lock acquisition.
3616 // Beware: the 1st instruction is in a delay slot
3617 mov(Rbox, Rscratch);
3618 or3(Rmark, markOopDesc::unlocked_value, Rmark);
3619 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
3620 casn(mark_addr.base(), Rmark, Rscratch);
3621 cmp(Rmark, Rscratch);
3622 brx(Assembler::equal, false, Assembler::pt, done);
3623 delayed()->sub(Rscratch, SP, Rscratch);
3625 // Stack-lock attempt failed - check for recursive stack-lock.
3626 // See the comments below about how we might remove this case.
3627 #ifdef _LP64
3628 sub(Rscratch, STACK_BIAS, Rscratch);
3629 #endif
3630 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
3631 andcc(Rscratch, 0xfffff003, Rscratch);
3632 br(Assembler::always, false, Assembler::pt, done);
3633 delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
3635 bind(IsInflated);
3636 if (EmitSync & 64) {
3637 // If m->owner != null goto IsLocked
3638 // Pessimistic form: Test-and-CAS vs CAS
3639 // The optimistic form avoids RTS->RTO cache line upgrades.
3640 ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
3641 andcc(Rscratch, Rscratch, G0);
3642 brx(Assembler::notZero, false, Assembler::pn, done);
3643 delayed()->nop();
3644 // m->owner == null : it's unlocked.
3645 }
3647 // Try to CAS m->owner from null to Self
3648 // Invariant: if we acquire the lock then _recursions should be 0.
3649 add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
3650 mov(G2_thread, Rscratch);
3651 casn(Rmark, G0, Rscratch);
3652 cmp(Rscratch, G0);
3653 // Intentional fall-through into done
3654 } else {
3655 // Aggressively avoid the Store-before-CAS penalty
3656 // Defer the store into box->dhw until after the CAS
3657 Label IsInflated, Recursive ;
3659 // Anticipate CAS -- Avoid RTS->RTO upgrade
3660 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
3662 ld_ptr(mark_addr, Rmark); // fetch obj->mark
3663 // Triage: biased, stack-locked, neutral, inflated
3665 if (try_bias) {
3666 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
3667 // Invariant: if control reaches this point in the emitted stream
3668 // then Rmark has not been modified.
3669 }
3670 andcc(Rmark, 2, G0);
3671 brx(Assembler::notZero, false, Assembler::pn, IsInflated);
3672 delayed()-> // Beware - dangling delay-slot
3674 // Try stack-lock acquisition.
3675 // Transiently install BUSY (0) encoding in the mark word.
3676 // if the CAS of 0 into the mark was successful then we execute:
3677 // ST box->dhw = mark -- save fetched mark in on-stack basiclock box
3678 // ST obj->mark = box -- overwrite transient 0 value
3679 // This presumes TSO, of course.
3681 mov(0, Rscratch);
3682 or3(Rmark, markOopDesc::unlocked_value, Rmark);
3683 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
3684 casn(mark_addr.base(), Rmark, Rscratch);
3685 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
3686 cmp(Rscratch, Rmark);
3687 brx(Assembler::notZero, false, Assembler::pn, Recursive);
3688 delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
3689 if (counters != NULL) {
3690 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
3691 }
3692 ba(done);
3693 delayed()->st_ptr(Rbox, mark_addr);
3695 bind(Recursive);
3696 // Stack-lock attempt failed - check for recursive stack-lock.
3697 // Tests show that we can remove the recursive case with no impact
3698 // on refworkload 0.83. If we need to reduce the size of the code
3699 // emitted by compiler_lock_object() the recursive case is a perfect
3700 // candidate.
3701 //
3702 // A more extreme idea is to always inflate on stack-lock recursion.
3703 // This lets us eliminate the recursive checks in compiler_lock_object
3704 // and compiler_unlock_object and the (box->dhw == 0) encoding.
3705 // A brief experiment - requiring changes to synchronizer.cpp and the
3706 // interpreter - showed a performance *increase*. In the same experiment I eliminated
3707 // the fast-path stack-lock code from the interpreter and always passed
3708 // control to the "slow" operators in synchronizer.cpp.
3710 // RScratch contains the fetched obj->mark value from the failed CASN.
3711 #ifdef _LP64
3712 sub(Rscratch, STACK_BIAS, Rscratch);
3713 #endif
3714 sub(Rscratch, SP, Rscratch);
3715 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
3716 andcc(Rscratch, 0xfffff003, Rscratch);
3717 if (counters != NULL) {
3718 // Accounting needs the Rscratch register
3719 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
3720 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
3721 ba_short(done);
3722 } else {
3723 ba(done);
3724 delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
3725 }
3727 bind (IsInflated);
3728 if (EmitSync & 64) {
3729 // If m->owner != null goto IsLocked
3730 // Test-and-CAS vs CAS
3731 // Pessimistic form avoids futile (doomed) CAS attempts
3732 // The optimistic form avoids RTS->RTO cache line upgrades.
3733 ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
3734 andcc(Rscratch, Rscratch, G0);
3735 brx(Assembler::notZero, false, Assembler::pn, done);
3736 delayed()->nop();
3737 // m->owner == null : it's unlocked.
3738 }
3740 // Try to CAS m->owner from null to Self
3741 // Invariant: if we acquire the lock then _recursions should be 0.
3742 add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
3743 mov(G2_thread, Rscratch);
3744 casn(Rmark, G0, Rscratch);
3745 cmp(Rscratch, G0);
3746 // ST box->displaced_header = NonZero.
3747 // Any non-zero value suffices:
3748 // unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
3749 st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
3750 // Intentional fall-through into done
3751 }
3753 bind (done);
3754 }
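// Illustrative only (not the actual .ad pattern): per the box->dhw contract
// documented above, an emitted caller consumes the condition codes like so:
//
//   compiler_lock_object(Roop, Rmark, Rbox, Rscratch, counters, UseBiasedLocking);
//   brx(Assembler::notEqual, false, Assembler::pn, slow_path); // icc.Z==0 => failure
//   delayed()->nop();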
3756 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
3757 Register Rbox, Register Rscratch,
3758 bool try_bias) {
3759 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
3761 Label done ;
3763 if (EmitSync & 4) {
3764 cmp(SP, G0);
3765 return ;
3766 }
3768 if (EmitSync & 8) {
3769 if (try_bias) {
3770 biased_locking_exit(mark_addr, Rscratch, done);
3771 }
3773 // Test first if it is a fast recursive unlock
3774 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
3775 br_null_short(Rmark, Assembler::pt, done);
3777 // Check if it is still a lightweight lock; this is true if we see
3778 // the stack address of the basicLock in the markOop of the object
3779 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
3780 casx_under_lock(mark_addr.base(), Rbox, Rmark,
3781 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3782 ba(done);
3783 delayed()->cmp(Rbox, Rmark);
3784 bind(done);
3785 return ;
3786 }
3788 // Beware ... If the aggregate size of the code emitted by CLO and CUO
3789 // is too large, performance rolls abruptly off a cliff.
3790 // This could be related to inlining policies, code cache management, or
3791 // I$ effects.
3792 Label LStacked ;
3794 if (try_bias) {
3795 // TODO: eliminate redundant LDs of obj->mark
3796 biased_locking_exit(mark_addr, Rscratch, done);
3797 }
3799 ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark);
3800 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch);
3801 andcc(Rscratch, Rscratch, G0);
3802 brx(Assembler::zero, false, Assembler::pn, done);
3803 delayed()->nop(); // consider: relocate fetch of mark, above, into this DS
3804 andcc(Rmark, 2, G0);
3805 brx(Assembler::zero, false, Assembler::pt, LStacked);
3806 delayed()->nop();
3808 // It's inflated
3809 // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
3810 // the ST of 0 into _owner which releases the lock. This prevents loads
3811 // and stores within the critical section from reordering (floating)
3812 // past the store that releases the lock. But TSO is a strong memory model
3813 // and that particular flavor of barrier is a noop, so we can safely elide it.
3814 // Note that we use 1-0 locking by default for the inflated case. We
3815 // close the resultant (and rare) race by having contending threads in
3816 // monitorenter periodically poll _owner.
3817 ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
3818 ld_ptr(Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox);
3819 xor3(Rscratch, G2_thread, Rscratch);
3820 orcc(Rbox, Rscratch, Rbox);
3821 brx(Assembler::notZero, false, Assembler::pn, done);
3822 delayed()->
3823 ld_ptr(Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch);
3824 ld_ptr(Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox);
3825 orcc(Rbox, Rscratch, G0);
3826 if (EmitSync & 65536) {
3827 Label LSucc ;
3828 brx(Assembler::notZero, false, Assembler::pn, LSucc);
3829 delayed()->nop();
3830 ba(done);
3831 delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
3833 bind(LSucc);
3834 st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
3835 if (os::is_MP()) { membar (StoreLoad); }
3836 ld_ptr(Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch);
3837 andcc(Rscratch, Rscratch, G0);
3838 brx(Assembler::notZero, false, Assembler::pt, done);
3839 delayed()->andcc(G0, G0, G0);
3840 add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
3841 mov(G2_thread, Rscratch);
3842 casn(Rmark, G0, Rscratch);
3843 // invert icc.zf and goto done
3844 br_notnull(Rscratch, false, Assembler::pt, done);
3845 delayed()->cmp(G0, G0);
3846 ba(done);
3847 delayed()->cmp(G0, 1);
3848 } else {
3849 brx(Assembler::notZero, false, Assembler::pn, done);
3850 delayed()->nop();
3851 ba(done);
3852 delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
3853 }
3855 bind (LStacked);
3856 // Consider: we could replace the expensive CAS in the exit
3857 // path with a simple ST of the displaced mark value fetched from
3858 // the on-stack basiclock box. That admits a race where a thread T2
3859 // in the slow lock path -- inflating with monitor M -- could race a
3860 // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
3861 // More precisely T1 in the stack-lock unlock path could "stomp" the
3862 // inflated mark value M installed by T2, resulting in an orphan
3863 // object monitor M and T2 becoming stranded. We can remedy that situation
3864 // by having T2 periodically poll the object's mark word using timed wait
3865 // operations. If T2 discovers that a stomp has occurred it vacates
3866 // the monitor M and wakes any other threads stranded on the now-orphan M.
3867 // In addition the monitor scavenger, which performs deflation,
3868 // would also need to check for orphan monitors and stranded threads.
3869 //
3870 // Finally, inflation is also used when T2 needs to assign a hashCode
3871 // to O and O is stack-locked by T1. The "stomp" race could cause
3872 // an assigned hashCode value to be lost. We can avoid that condition
3873 // and provide the necessary hashCode stability invariants by ensuring
3874 // that hashCode generation is idempotent between copying GCs.
3875 // For example we could compute the hashCode of an object O as
3876 // O's heap address XOR some high quality RNG value that is refreshed
3877 // at GC-time. The monitor scavenger would install the hashCode
3878 // found in any orphan monitors. Again, the mechanism admits a
3879 // lost-update "stomp" WAW race but detects and recovers as needed.
3880 //
3881 // A prototype implementation showed excellent results, although
3882 // the scavenger and timeout code was rather involved.
3884 casn(mark_addr.base(), Rbox, Rscratch);
3885 cmp(Rbox, Rscratch);
3886 // Intentional fall through into done ...
3888 bind(done);
3889 }
3893 void MacroAssembler::print_CPU_state() {
3894 // %%%%% need to implement this
3895 }
3897 void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
3898 // %%%%% need to implement this
3899 }
3901 void MacroAssembler::push_IU_state() {
3902 // %%%%% need to implement this
3903 }
3906 void MacroAssembler::pop_IU_state() {
3907 // %%%%% need to implement this
3908 }
3911 void MacroAssembler::push_FPU_state() {
3912 // %%%%% need to implement this
3913 }
3916 void MacroAssembler::pop_FPU_state() {
3917 // %%%%% need to implement this
3918 }
3921 void MacroAssembler::push_CPU_state() {
3922 // %%%%% need to implement this
3923 }
3926 void MacroAssembler::pop_CPU_state() {
3927 // %%%%% need to implement this
3928 }
3932 void MacroAssembler::verify_tlab() {
3933 #ifdef ASSERT
3934 if (UseTLAB && VerifyOops) {
3935 Label next, next2, ok;
3936 Register t1 = L0;
3937 Register t2 = L1;
3938 Register t3 = L2;
3940 save_frame(0);
3941 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
3942 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2);
3943 or3(t1, t2, t3);
3944 cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next);
3945 stop("assert(top >= start)");
3946 should_not_reach_here();
3948 bind(next);
3949 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
3950 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2);
3951 or3(t3, t2, t3);
3952 cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2);
3953 stop("assert(top <= end)");
3954 should_not_reach_here();
3956 bind(next2);
3957 and3(t3, MinObjAlignmentInBytesMask, t3);
3958 cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok);
3959 stop("assert(aligned)");
3960 should_not_reach_here();
3962 bind(ok);
3963 restore();
3964 }
3965 #endif
3966 }
3969 void MacroAssembler::eden_allocate(
3970 Register obj, // result: pointer to object after successful allocation
3971 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
3972 int con_size_in_bytes, // object size in bytes if known at compile time
3973 Register t1, // temp register
3974 Register t2, // temp register
3975 Label& slow_case // continuation point if fast allocation fails
3976 ){
3977 // make sure arguments make sense
3978 assert_different_registers(obj, var_size_in_bytes, t1, t2);
3979 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
3980 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
3982 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
3983 // No allocation in the shared eden.
3984 ba_short(slow_case);
3985 } else {
3986 // get eden boundaries
3987 // note: we need both top & top_addr!
3988 const Register top_addr = t1;
3989 const Register end = t2;
3991 CollectedHeap* ch = Universe::heap();
3992 set((intx)ch->top_addr(), top_addr);
3993 intx delta = (intx)ch->end_addr() - (intx)ch->top_addr();
3994 ld_ptr(top_addr, delta, end);
3995 ld_ptr(top_addr, 0, obj);
3997 // try to allocate
3998 Label retry;
3999 bind(retry);
4000 #ifdef ASSERT
4001 // make sure eden top is properly aligned
4002 {
4003 Label L;
4004 btst(MinObjAlignmentInBytesMask, obj);
4005 br(Assembler::zero, false, Assembler::pt, L);
4006 delayed()->nop();
4007 stop("eden top is not properly aligned");
4008 bind(L);
4009 }
4010 #endif // ASSERT
4011 const Register free = end;
4012 sub(end, obj, free); // compute amount of free space
4013 if (var_size_in_bytes->is_valid()) {
4014 // size is unknown at compile time
4015 cmp(free, var_size_in_bytes);
4016 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case
4017 delayed()->add(obj, var_size_in_bytes, end);
4018 } else {
4019 // size is known at compile time
4020 cmp(free, con_size_in_bytes);
4021 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case
4022 delayed()->add(obj, con_size_in_bytes, end);
4023 }
4024 // Compare obj with the value at top_addr; if still equal, swap the value of
4025 // end with the value at top_addr. If not equal, read the value at top_addr
4026 // into end.
4027 casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
4028 // if someone beat us on the allocation, try again, otherwise continue
4029 cmp(obj, end);
4030 brx(Assembler::notEqual, false, Assembler::pn, retry);
4031 delayed()->mov(end, obj); // nop if successful since obj == end
4033 #ifdef ASSERT
4034 // make sure eden top is properly aligned
4035 {
4036 Label L;
4037 const Register top_addr = t1;
4039 set((intx)ch->top_addr(), top_addr);
4040 ld_ptr(top_addr, 0, top_addr);
4041 btst(MinObjAlignmentInBytesMask, top_addr);
4042 br(Assembler::zero, false, Assembler::pt, L);
4043 delayed()->nop();
4044 stop("eden top is not properly aligned");
4045 bind(L);
4046 }
4047 #endif // ASSERT
4048 }
4049 }
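// A plain-C++ sketch of the fast path above (illustrative only; top_addr
// and end_addr are the shared eden boundaries used in the emitted code):
//
//   HeapWord* obj;
//   do {
//     obj = *top_addr;
//     HeapWord* new_top = obj + size_in_words;
//     if (new_top > *end_addr) goto slow_case;   // not enough free space
//     // the CAS publishes new_top only if top did not move under us
//   } while (Atomic::cmpxchg_ptr(new_top, top_addr, obj) != obj);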
4052 void MacroAssembler::tlab_allocate(
4053 Register obj, // result: pointer to object after successful allocation
4054 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
4055 int con_size_in_bytes, // object size in bytes if known at compile time
4056 Register t1, // temp register
4057 Label& slow_case // continuation point if fast allocation fails
4058 ){
4059 // make sure arguments make sense
4060 assert_different_registers(obj, var_size_in_bytes, t1);
4061 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size");
4062 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
4064 const Register free = t1;
4066 verify_tlab();
4068 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj);
4070 // calculate amount of free space
4071 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free);
4072 sub(free, obj, free);
4074 Label done;
4075 if (var_size_in_bytes == noreg) {
4076 cmp(free, con_size_in_bytes);
4077 } else {
4078 cmp(free, var_size_in_bytes);
4079 }
4080 br(Assembler::less, false, Assembler::pn, slow_case);
4081 // calculate the new top pointer
4082 if (var_size_in_bytes == noreg) {
4083 delayed()->add(obj, con_size_in_bytes, free);
4084 } else {
4085 delayed()->add(obj, var_size_in_bytes, free);
4086 }
4088 bind(done);
4090 #ifdef ASSERT
4091 // make sure new free pointer is properly aligned
4092 {
4093 Label L;
4094 btst(MinObjAlignmentInBytesMask, free);
4095 br(Assembler::zero, false, Assembler::pt, L);
4096 delayed()->nop();
4097 stop("updated TLAB free is not properly aligned");
4098 bind(L);
4099 }
4100 #endif // ASSERT
4102 // update the tlab top pointer
4103 st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
4104 verify_tlab();
4105 }
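// Equivalent C++ sketch (no CAS needed -- the TLAB is thread-private):
//
//   HeapWord* obj = thread->tlab().top();
//   if (thread->tlab().end() - obj < size_in_words) goto slow_case;
//   thread->tlab().set_top(obj + size_in_words);
//   return obj;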
4108 void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
4109 Register top = O0;
4110 Register t1 = G1;
4111 Register t2 = G3;
4112 Register t3 = O1;
4113 assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
4114 Label do_refill, discard_tlab;
4116 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
4117 // No allocation in the shared eden.
4118 ba_short(slow_case);
4119 }
4121 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
4122 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1);
4123 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2);
4125 // calculate amount of free space
4126 sub(t1, top, t1);
4127 srl_ptr(t1, LogHeapWordSize, t1);
4129 // Retain tlab and allocate object in shared space if
4130 // the amount free in the tlab is too large to discard.
4131 cmp(t1, t2);
4132 brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);
4134 // increment waste limit to prevent getting stuck on this slow path
4135 delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
4136 st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
4137 if (TLABStats) {
4138 // increment number of slow_allocations
4139 ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2);
4140 add(t2, 1, t2);
4141 stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
4142 }
4143 ba_short(try_eden);
4145 bind(discard_tlab);
4146 if (TLABStats) {
4147 // increment number of refills
4148 ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2);
4149 add(t2, 1, t2);
4150 stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()));
4151 // accumulate wastage
4152 ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2);
4153 add(t2, t1, t2);
4154 stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
4155 }
4157 // if tlab is currently allocated (top or end != null) then
4158 // fill [top, end + alignment_reserve) with array object
4159 br_null_short(top, Assembler::pn, do_refill);
4161 set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
4162 st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
4163 // set klass to intArrayKlass
4164 sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
4165 add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
4166 sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
4167 st(t1, top, arrayOopDesc::length_offset_in_bytes());
4168 set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
4169 ld_ptr(t2, 0, t2);
4170 // store klass last. Concurrent GCs assume the length is valid if the
4171 // klass field is not null.
4172 store_klass(t2, top);
4173 verify_oop(top);
4175 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1);
4176 sub(top, t1, t1); // size of tlab's allocated portion
4177 incr_allocated_bytes(t1, t2, t3);
4179 // refill the tlab with an eden allocation
4180 bind(do_refill);
4181 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1);
4182 sll_ptr(t1, LogHeapWordSize, t1);
4183 // allocate new tlab, address returned in top
4184 eden_allocate(top, t1, 0, t2, t3, slow_case);
4186 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset()));
4187 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
4188 #ifdef ASSERT
4189 // check that tlab_size (t1) is still valid
4190 {
4191 Label ok;
4192 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
4193 sll_ptr(t2, LogHeapWordSize, t2);
4194 cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok);
4195 stop("assert(t1 == tlab_size)");
4196 should_not_reach_here();
4198 bind(ok);
4199 }
4200 #endif // ASSERT
4201 add(top, t1, top); // t1 is tlab_size
4202 sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
4203 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));
4204 verify_tlab();
4205 ba_short(retry);
4206 }
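// The filler-array length computed above, written out as a formula
// (free space measured in heap words, result in jint elements):
//
//   length = (free_words + alignment_reserve - header_size(T_INT))
//            * (HeapWordSize / sizeof(jint))
//
// so concurrent GCs parsing the heap see the dead gap
// [top, end + alignment_reserve) as an ordinary int[] object.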
4208 void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
4209 Register t1, Register t2) {
4210 // Bump total bytes allocated by this thread
4211 assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch
4212 assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2);
4213 // v8 support has gone the way of the dodo
4214 ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1);
4215 add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1);
4216 stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset()));
4217 }
4219 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
4220 switch (cond) {
4221 // Note some conditions are synonyms for others
4222 case Assembler::never: return Assembler::always;
4223 case Assembler::zero: return Assembler::notZero;
4224 case Assembler::lessEqual: return Assembler::greater;
4225 case Assembler::less: return Assembler::greaterEqual;
4226 case Assembler::lessEqualUnsigned: return Assembler::greaterUnsigned;
4227 case Assembler::lessUnsigned: return Assembler::greaterEqualUnsigned;
4228 case Assembler::negative: return Assembler::positive;
4229 case Assembler::overflowSet: return Assembler::overflowClear;
4230 case Assembler::always: return Assembler::never;
4231 case Assembler::notZero: return Assembler::zero;
4232 case Assembler::greater: return Assembler::lessEqual;
4233 case Assembler::greaterEqual: return Assembler::less;
4234 case Assembler::greaterUnsigned: return Assembler::lessEqualUnsigned;
4235 case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned;
4236 case Assembler::positive: return Assembler::negative;
4237 case Assembler::overflowClear: return Assembler::overflowSet;
4238 }
4240 ShouldNotReachHere(); return Assembler::overflowClear;
4241 }
4243 void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
4244 Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) {
4245 Condition negated_cond = negate_condition(cond);
4246 Label L;
4247 brx(negated_cond, false, Assembler::pt, L);
4248 delayed()->nop();
4249 inc_counter(counter_ptr, Rtmp1, Rtmp2);
4250 bind(L);
4251 }
4253 void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) {
4254 AddressLiteral addrlit(counter_addr);
4255 sethi(addrlit, Rtmp1); // Move hi22 bits into temporary register.
4256 Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits.
4257 ld(addr, Rtmp2);
4258 inc(Rtmp2);
4259 st(Rtmp2, addr);
4260 }
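// The sethi/low10 pair above is the usual SPARC idiom for forming a 32-bit
// address: sethi installs bits [31:10] of the literal and the ld/st supplies
// the low 10 bits as an immediate displacement, i.e.
//   effective address = (counter_addr & ~0x3ff) + (counter_addr & 0x3ff)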
4262 void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) {
4263 inc_counter((address) counter_addr, Rtmp1, Rtmp2);
4264 }
4266 SkipIfEqual::SkipIfEqual(
4267 MacroAssembler* masm, Register temp, const bool* flag_addr,
4268 Assembler::Condition condition) {
4269 _masm = masm;
4270 AddressLiteral flag(flag_addr);
4271 _masm->sethi(flag, temp);
4272 _masm->ldub(temp, flag.low10(), temp);
4273 _masm->tst(temp);
4274 _masm->br(condition, false, Assembler::pt, _label);
4275 _masm->delayed()->nop();
4276 }
4278 SkipIfEqual::~SkipIfEqual() {
4279 _masm->bind(_label);
4280 }
4283 // Writes to stack successive pages until offset reached to check for
4284 // stack overflow + shadow pages. This clobbers tsp and scratch.
4285 void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
4286 Register Rscratch) {
4287 // Use stack pointer in temp stack pointer
4288 mov(SP, Rtsp);
4290 // Bang stack for total size given plus stack shadow page size.
4291 // Bang one page at a time because a large size can overflow yellow and
4292 // red zones (the bang will fail but stack overflow handling can't tell that
4293 // it was a stack overflow bang vs a regular segv).
4294 int offset = os::vm_page_size();
4295 Register Roffset = Rscratch;
4297 Label loop;
4298 bind(loop);
4299 set((-offset)+STACK_BIAS, Rscratch);
4300 st(G0, Rtsp, Rscratch);
4301 set(offset, Roffset);
4302 sub(Rsize, Roffset, Rsize);
4303 cmp(Rsize, G0);
4304 br(Assembler::greater, false, Assembler::pn, loop);
4305 delayed()->sub(Rtsp, Roffset, Rtsp);
4307 // Bang down shadow pages too.
4308 // The -1 because we already subtracted 1 page.
4309 for (int i = 0; i< StackShadowPages-1; i++) {
4310 set((-i*offset)+STACK_BIAS, Rscratch);
4311 st(G0, Rtsp, Rscratch);
4312 }
4313 }
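// Sketch of the addresses the code above touches, with P = os::vm_page_size()
// (the loop walks Rtsp down one page per store, then the shadow loop probes
// fixed offsets below the final Rtsp):
//
//   for (tsp = SP; size > 0; size -= P, tsp -= P)
//     *(tsp - P + STACK_BIAS) = 0;
//   for (i = 0; i < StackShadowPages-1; i++)
//     *(tsp - i*P + STACK_BIAS) = 0;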
4315 ///////////////////////////////////////////////////////////////////////////////////
4316 #ifndef SERIALGC
4318 static address satb_log_enqueue_with_frame = NULL;
4319 static u_char* satb_log_enqueue_with_frame_end = NULL;
4321 static address satb_log_enqueue_frameless = NULL;
4322 static u_char* satb_log_enqueue_frameless_end = NULL;
4324 static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
4326 static void generate_satb_log_enqueue(bool with_frame) {
4327 BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
4328 CodeBuffer buf(bb);
4329 MacroAssembler masm(&buf);
4331 #define __ masm.
4333 address start = __ pc();
4334 Register pre_val;
4336 Label refill, restart;
4337 if (with_frame) {
4338 __ save_frame(0);
4339 pre_val = I0; // Was O0 before the save.
4340 } else {
4341 pre_val = O0;
4342 }
4343 int satb_q_index_byte_offset =
4344 in_bytes(JavaThread::satb_mark_queue_offset() +
4345 PtrQueue::byte_offset_of_index());
4346 int satb_q_buf_byte_offset =
4347 in_bytes(JavaThread::satb_mark_queue_offset() +
4348 PtrQueue::byte_offset_of_buf());
4349 assert(in_bytes(PtrQueue::byte_width_of_index()) == sizeof(intptr_t) &&
4350 in_bytes(PtrQueue::byte_width_of_buf()) == sizeof(intptr_t),
4351 "check sizes in assembly below");
4353 __ bind(restart);
4354 __ ld_ptr(G2_thread, satb_q_index_byte_offset, L0);
4356 __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn, L0, refill);
4357 // If the branch is taken, no harm in executing this in the delay slot.
4358 __ delayed()->ld_ptr(G2_thread, satb_q_buf_byte_offset, L1);
4359 __ sub(L0, oopSize, L0);
4361 __ st_ptr(pre_val, L1, L0); // [_buf + index] := pre_val
4362 if (!with_frame) {
4363 // Use return-from-leaf
4364 __ retl();
4365 __ delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset);
4366 } else {
4367 // Not delayed.
4368 __ st_ptr(L0, G2_thread, satb_q_index_byte_offset);
4369 }
4370 if (with_frame) {
4371 __ ret();
4372 __ delayed()->restore();
4373 }
4374 __ bind(refill);
4376 address handle_zero =
4377 CAST_FROM_FN_PTR(address,
4378 &SATBMarkQueueSet::handle_zero_index_for_thread);
4379 // This should be rare enough that we can afford to save all the
4380 // scratch registers that the calling context might be using.
4381 __ mov(G1_scratch, L0);
4382 __ mov(G3_scratch, L1);
4383 __ mov(G4, L2);
4384 // We need the value of O0 above (for the write into the buffer), so we
4385 // save and restore it.
4386 __ mov(O0, L3);
4387 // Since the call will overwrite O7, we save and restore that, as well.
4388 __ mov(O7, L4);
4389 __ call_VM_leaf(L5, handle_zero, G2_thread);
4390 __ mov(L0, G1_scratch);
4391 __ mov(L1, G3_scratch);
4392 __ mov(L2, G4);
4393 __ mov(L3, O0);
4394 __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
4395 __ delayed()->mov(L4, O7);
4397 if (with_frame) {
4398 satb_log_enqueue_with_frame = start;
4399 satb_log_enqueue_with_frame_end = __ pc();
4400 } else {
4401 satb_log_enqueue_frameless = start;
4402 satb_log_enqueue_frameless_end = __ pc();
4403 }
4405 #undef __
4406 }
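// C++ sketch of what the stub above does (see ptrQueue.hpp; the index is
// byte-scaled and counts down toward zero):
//
//   intptr_t index = thread->satb_mark_queue().index();
//   if (index == 0) {
//     SATBMarkQueueSet::handle_zero_index_for_thread(thread);  // refill, retry
//   } else {
//     index -= oopSize;
//     buf[index / oopSize] = pre_val;
//     thread->satb_mark_queue().set_index(index);
//   }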
4408 static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) {
4409 if (with_frame) {
4410 if (satb_log_enqueue_with_frame == 0) {
4411 generate_satb_log_enqueue(with_frame);
4412 assert(satb_log_enqueue_with_frame != 0, "postcondition.");
4413 if (G1SATBPrintStubs) {
4414 tty->print_cr("Generated with-frame satb enqueue:");
4415 Disassembler::decode((u_char*)satb_log_enqueue_with_frame,
4416 satb_log_enqueue_with_frame_end,
4417 tty);
4418 }
4419 }
4420 } else {
4421 if (satb_log_enqueue_frameless == 0) {
4422 generate_satb_log_enqueue(with_frame);
4423 assert(satb_log_enqueue_frameless != 0, "postcondition.");
4424 if (G1SATBPrintStubs) {
4425 tty->print_cr("Generated frameless satb enqueue:");
4426 Disassembler::decode((u_char*)satb_log_enqueue_frameless,
4427 satb_log_enqueue_frameless_end,
4428 tty);
4429 }
4430 }
4431 }
4432 }
4434 void MacroAssembler::g1_write_barrier_pre(Register obj,
4435 Register index,
4436 int offset,
4437 Register pre_val,
4438 Register tmp,
4439 bool preserve_o_regs) {
4440 Label filtered;
4442 if (obj == noreg) {
4443 // We are not loading the previous value so make
4444 // sure that we don't trash the value in pre_val
4445 // with the code below.
4446 assert_different_registers(pre_val, tmp);
4447 } else {
4448 // We will be loading the previous value
4449 // in this code so...
4450 assert(offset == 0 || index == noreg, "choose one");
4451 assert(pre_val == noreg, "check this code");
4452 }
4454 // Is marking active?
4455 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
4456 ld(G2,
4457 in_bytes(JavaThread::satb_mark_queue_offset() +
4458 PtrQueue::byte_offset_of_active()),
4459 tmp);
4460 } else {
4461 guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
4462 "Assumption");
4463 ldsb(G2,
4464 in_bytes(JavaThread::satb_mark_queue_offset() +
4465 PtrQueue::byte_offset_of_active()),
4466 tmp);
4467 }
4469 // Check on whether to annul.
4470 br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
4471 delayed()->nop();
4473 // Do we need to load the previous value?
4474 if (obj != noreg) {
4475 // Load the previous value...
4476 if (index == noreg) {
4477 if (Assembler::is_simm13(offset)) {
4478 load_heap_oop(obj, offset, tmp);
4479 } else {
4480 set(offset, tmp);
4481 load_heap_oop(obj, tmp, tmp);
4482 }
4483 } else {
4484 load_heap_oop(obj, index, tmp);
4485 }
4486 // Previous value has been loaded into tmp
4487 pre_val = tmp;
4488 }
4490 assert(pre_val != noreg, "must have a real register");
4492 // Is the previous value null?
4493 // Check on whether to annul.
4494 br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, pre_val, filtered);
4495 delayed()->nop();
4497 // OK, it's not filtered, so we'll need to call enqueue. In the normal
4498 // case, pre_val will be a scratch G-reg, but there are some cases in
4499 // which it's an O-reg. In the former case, do a normal call. In the
4500 // latter, do a save here and call the frameless version.
4502 guarantee(pre_val->is_global() || pre_val->is_out(),
4503 "Or we need to think harder.");
4505 if (pre_val->is_global() && !preserve_o_regs) {
4506 generate_satb_log_enqueue_if_necessary(true); // with frame
4508 call(satb_log_enqueue_with_frame);
4509 delayed()->mov(pre_val, O0);
4510 } else {
4511 generate_satb_log_enqueue_if_necessary(false); // frameless
4513 save_frame(0);
4514 call(satb_log_enqueue_frameless);
4515 delayed()->mov(pre_val->after_save(), O0);
4516 restore();
4517 }
4519 bind(filtered);
4520 }
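// Sketch of the pre-barrier logic emitted above (illustrative):
//
//   if (thread->satb_mark_queue().active()) {    // concurrent marking running?
//     if (obj != noreg)
//       pre_val = *field;                        // load the previous value
//     if (pre_val != NULL)
//       satb_enqueue(pre_val);                   // with-frame or frameless stub
//   }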
4522 static jint num_ct_writes = 0;
4523 static jint num_ct_writes_filtered_in_hr = 0;
4524 static jint num_ct_writes_filtered_null = 0;
4525 static G1CollectedHeap* g1 = NULL;
4527 static Thread* count_ct_writes(void* filter_val, void* new_val) {
4528 Atomic::inc(&num_ct_writes);
4529 if (filter_val == NULL) {
4530 Atomic::inc(&num_ct_writes_filtered_in_hr);
4531 } else if (new_val == NULL) {
4532 Atomic::inc(&num_ct_writes_filtered_null);
4533 } else {
4534 if (g1 == NULL) {
4535 g1 = G1CollectedHeap::heap();
4536 }
4537 }
4538 if ((num_ct_writes % 1000000) == 0) {
4539 jint num_ct_writes_filtered =
4540 num_ct_writes_filtered_in_hr +
4541 num_ct_writes_filtered_null;
4543 tty->print_cr("%d potential CT writes: %5.2f%% filtered\n"
4544 " (%5.2f%% intra-HR, %5.2f%% null).",
4545 num_ct_writes,
4546 100.0*(float)num_ct_writes_filtered/(float)num_ct_writes,
4547 100.0*(float)num_ct_writes_filtered_in_hr/
4548 (float)num_ct_writes,
4549 100.0*(float)num_ct_writes_filtered_null/
4550 (float)num_ct_writes);
4551 }
4552 return Thread::current();
4553 }
4555 static address dirty_card_log_enqueue = 0;
4556 static u_char* dirty_card_log_enqueue_end = 0;
4558 // This code assumes that O0 contains the object address.
4559 static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
4560 BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
4561 CodeBuffer buf(bb);
4562 MacroAssembler masm(&buf);
4563 #define __ masm.
4564 address start = __ pc();
4566 Label not_already_dirty, restart, refill;
4568 #ifdef _LP64
4569 __ srlx(O0, CardTableModRefBS::card_shift, O0);
4570 #else
4571 __ srl(O0, CardTableModRefBS::card_shift, O0);
4572 #endif
4573 AddressLiteral addrlit(byte_map_base);
4574 __ set(addrlit, O1); // O1 := <card table base>
4575 __ ldub(O0, O1, O2); // O2 := [O0 + O1]
4577 __ br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt,
4578 O2, not_already_dirty);
4579 // Get O1 + O2 into a reg by itself -- useful in the take-the-branch
4580 // case, harmless if not.
4581 __ delayed()->add(O0, O1, O3);
4583 // We didn't take the branch, so we're already dirty: return.
4584 // Use return-from-leaf
4585 __ retl();
4586 __ delayed()->nop();
4588 // Not dirty.
4589 __ bind(not_already_dirty);
4590 // First, dirty it.
4591 __ stb(G0, O3, G0); // [cardPtr] := 0 (i.e., dirty).
4592 int dirty_card_q_index_byte_offset =
4593 in_bytes(JavaThread::dirty_card_queue_offset() +
4594 PtrQueue::byte_offset_of_index());
4595 int dirty_card_q_buf_byte_offset =
4596 in_bytes(JavaThread::dirty_card_queue_offset() +
4597 PtrQueue::byte_offset_of_buf());
4598 __ bind(restart);
4599 __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);
4601 __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn,
4602 L0, refill);
4603 // If the branch is taken, no harm in executing this in the delay slot.
4604 __ delayed()->ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1);
4605 __ sub(L0, oopSize, L0);
4607 __ st_ptr(O3, L1, L0); // [_buf + index] := O3 (the card address)
4608 // Use return-from-leaf
4609 __ retl();
4610 __ delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset);
4612 __ bind(refill);
4613 address handle_zero =
4614 CAST_FROM_FN_PTR(address,
4615 &DirtyCardQueueSet::handle_zero_index_for_thread);
4616 // This should be rare enough that we can afford to save all the
4617 // scratch registers that the calling context might be using.
4618 __ mov(G1_scratch, L3);
4619 __ mov(G3_scratch, L5);
4620 // We need the value of O3 above (for the write into the buffer), so we
4621 // save and restore it.
4622 __ mov(O3, L6);
4623 // Since the call will overwrite O7, we save and restore that, as well.
4624 __ mov(O7, L4);
4626 __ call_VM_leaf(L7_thread_cache, handle_zero, G2_thread);
4627 __ mov(L3, G1_scratch);
4628 __ mov(L5, G3_scratch);
4629 __ mov(L6, O3);
4630 __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
4631 __ delayed()->mov(L4, O7);
4633 dirty_card_log_enqueue = start;
4634 dirty_card_log_enqueue_end = __ pc();
4635 // XXX Should have a guarantee here about not going off the end!
4636 // Does it already do so? Do an experiment...
4638 #undef __
4640 }
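// C++ sketch of the stub above; byte_map_base is pre-biased so that the
// card for an address is simply base[addr >> card_shift]:
//
//   jbyte* card = byte_map_base + ((uintptr_t)O0 >> CardTableModRefBS::card_shift);
//   if (*card != 0) {            // 0 is the dirty value used here
//     *card = 0;                 // dirty the card
//     dirty_card_enqueue(card);  // push onto the thread's queue, refilling if full
//   }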
4642 static inline void
4643 generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) {
4644 if (dirty_card_log_enqueue == 0) {
4645 generate_dirty_card_log_enqueue(byte_map_base);
4646 assert(dirty_card_log_enqueue != 0, "postcondition.");
4647 if (G1SATBPrintStubs) {
4648 tty->print_cr("Generated dirty_card enqueue:");
4649 Disassembler::decode((u_char*)dirty_card_log_enqueue,
4650 dirty_card_log_enqueue_end,
4651 tty);
4652 }
4653 }
4654 }
4657 void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
4659 Label filtered;
4660 MacroAssembler* post_filter_masm = this;
4662 if (new_val == G0) return;
4664 G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
4665 assert(bs->kind() == BarrierSet::G1SATBCT ||
4666 bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
4667 if (G1RSBarrierRegionFilter) {
4668 xor3(store_addr, new_val, tmp);
4669 #ifdef _LP64
4670 srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
4671 #else
4672 srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
4673 #endif
4675 if (G1PrintCTFilterStats) {
4676 guarantee(tmp->is_global(), "Or stats won't work...");
4677 // This is a sleazy hack: I'm temporarily hijacking G2, which I
4678 // promise to restore.
4679 mov(new_val, G2);
4680 save_frame(0);
4681 mov(tmp, O0);
4682 mov(G2, O1);
4683 // Save G-regs that target may use.
4684 mov(G1, L1);
4685 mov(G2, L2);
4686 mov(G3, L3);
4687 mov(G4, L4);
4688 mov(G5, L5);
4689 call(CAST_FROM_FN_PTR(address, &count_ct_writes));
4690 delayed()->nop();
4691 mov(O0, G2);
4692 // Restore G-regs that target may have used.
4693 mov(L1, G1);
4694 mov(L3, G3);
4695 mov(L4, G4);
4696 mov(L5, G5);
4697 restore(G0, G0, G0);
4698 }
4699 // XXX Should I predict this taken or not? Does it matter?
4700 br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
4701 delayed()->nop();
4702 }
4704 // If the "store_addr" register is an "in" or "local" register, move it to
4705 // a scratch reg so we can pass it as an argument.
4706 bool use_scr = !(store_addr->is_global() || store_addr->is_out());
4707 // Pick a scratch register different from "tmp".
4708 Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch);
4709 // Make sure we use up the delay slot!
4710 if (use_scr) {
4711 post_filter_masm->mov(store_addr, scr);
4712 } else {
4713 post_filter_masm->nop();
4714 }
4715 generate_dirty_card_log_enqueue_if_necessary(bs->byte_map_base);
4716 save_frame(0);
4717 call(dirty_card_log_enqueue);
4718 if (use_scr) {
4719 delayed()->mov(scr, O0);
4720 } else {
4721 delayed()->mov(store_addr->after_save(), O0);
4722 }
4723 restore();
4725 bind(filtered);
4727 }
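// The G1RSBarrierRegionFilter test above amounts to:
//
//   if ((((uintptr_t)store_addr ^ (uintptr_t)new_val)
//          >> HeapRegion::LogOfHRGrainBytes) == 0)
//     return;   // same heap region: no remembered-set update needed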
4729 #endif // SERIALGC
4730 ///////////////////////////////////////////////////////////////////////////////////
4732 void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
4733 // If we're writing constant NULL, we can skip the write barrier.
4734 if (new_val == G0) return;
4735 CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
4736 assert(bs->kind() == BarrierSet::CardTableModRef ||
4737 bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
4738 card_table_write(bs->byte_map_base, tmp, store_addr);
4739 }
4741 void MacroAssembler::load_klass(Register src_oop, Register klass) {
4742 // The number of bytes in this code is used by
4743 // MachCallDynamicJavaNode::ret_addr_offset()
4744 // if this changes, change that.
4745 if (UseCompressedOops) {
4746 lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
4747 decode_heap_oop_not_null(klass);
4748 } else {
4749 ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass);
4750 }
4751 }
4753 void MacroAssembler::store_klass(Register klass, Register dst_oop) {
4754 if (UseCompressedOops) {
4755 assert(dst_oop != klass, "not enough registers");
4756 encode_heap_oop_not_null(klass);
4757 st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
4758 } else {
4759 st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes());
4760 }
4761 }
4763 void MacroAssembler::store_klass_gap(Register s, Register d) {
4764 if (UseCompressedOops) {
4765 assert(s != d, "not enough registers");
4766 st(s, d, oopDesc::klass_gap_offset_in_bytes());
4767 }
4768 }
4770 void MacroAssembler::load_heap_oop(const Address& s, Register d) {
4771 if (UseCompressedOops) {
4772 lduw(s, d);
4773 decode_heap_oop(d);
4774 } else {
4775 ld_ptr(s, d);
4776 }
4777 }
4779 void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
4780 if (UseCompressedOops) {
4781 lduw(s1, s2, d);
4782 decode_heap_oop(d, d);
4783 } else {
4784 ld_ptr(s1, s2, d);
4785 }
4786 }
4788 void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
4789 if (UseCompressedOops) {
4790 lduw(s1, simm13a, d);
4791 decode_heap_oop(d, d);
4792 } else {
4793 ld_ptr(s1, simm13a, d);
4794 }
4795 }
4797 void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) {
4798 if (s2.is_constant()) load_heap_oop(s1, s2.as_constant(), d);
4799 else load_heap_oop(s1, s2.as_register(), d);
4800 }
4802 void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
4803 if (UseCompressedOops) {
4804 assert(s1 != d && s2 != d, "not enough registers");
4805 encode_heap_oop(d);
4806 st(d, s1, s2);
4807 } else {
4808 st_ptr(d, s1, s2);
4809 }
4810 }
4812 void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
4813 if (UseCompressedOops) {
4814 assert(s1 != d, "not enough registers");
4815 encode_heap_oop(d);
4816 st(d, s1, simm13a);
4817 } else {
4818 st_ptr(d, s1, simm13a);
4819 }
4820 }
4822 void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
4823 if (UseCompressedOops) {
4824 assert(a.base() != d, "not enough registers");
4825 encode_heap_oop(d);
4826 st(d, a, offset);
4827 } else {
4828 st_ptr(d, a, offset);
4829 }
4830 }
4833 void MacroAssembler::encode_heap_oop(Register src, Register dst) {
4834 assert (UseCompressedOops, "must be compressed");
4835 assert (Universe::heap() != NULL, "java heap should be initialized");
4836 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4837 verify_oop(src);
4838 if (Universe::narrow_oop_base() == NULL) {
4839 srlx(src, LogMinObjAlignmentInBytes, dst);
4840 return;
4841 }
4842 Label done;
4843 if (src == dst) {
4844 // optimize for frequent case src == dst
4845 bpr(rc_nz, true, Assembler::pt, src, done);
4846 delayed() -> sub(src, G6_heapbase, dst); // annulled if not taken
4847 bind(done);
4848 srlx(src, LogMinObjAlignmentInBytes, dst);
4849 } else {
4850 bpr(rc_z, false, Assembler::pn, src, done);
4851 delayed() -> mov(G0, dst);
4852 // could be moved before the branch and annul the delay slot,
4853 // but that may add some unneeded work decoding null
4854 sub(src, G6_heapbase, dst);
4855 srlx(dst, LogMinObjAlignmentInBytes, dst);
4856 bind(done);
4857 }
4858 }
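// Worked example of the encoding above (values illustrative): with
// narrow_oop_base == 0x0000000700000000 and 8-byte alignment (shift == 3),
// oop 0x0000000700000040 encodes to (0x40 >> 3) == 0x8, while NULL maps to
// 0 thanks to the bpr filter on the zero case.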
4861 void MacroAssembler::encode_heap_oop_not_null(Register r) {
4862 assert (UseCompressedOops, "must be compressed");
4863 assert (Universe::heap() != NULL, "java heap should be initialized");
4864 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4865 verify_oop(r);
4866 if (Universe::narrow_oop_base() != NULL)
4867 sub(r, G6_heapbase, r);
4868 srlx(r, LogMinObjAlignmentInBytes, r);
4869 }
4871 void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
4872 assert (UseCompressedOops, "must be compressed");
4873 assert (Universe::heap() != NULL, "java heap should be initialized");
4874 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4875 verify_oop(src);
4876 if (Universe::narrow_oop_base() == NULL) {
4877 srlx(src, LogMinObjAlignmentInBytes, dst);
4878 } else {
4879 sub(src, G6_heapbase, dst);
4880 srlx(dst, LogMinObjAlignmentInBytes, dst);
4881 }
4882 }
4884 // Same algorithm as oops.inline.hpp decode_heap_oop.
4885 void MacroAssembler::decode_heap_oop(Register src, Register dst) {
4886 assert (UseCompressedOops, "must be compressed");
4887 assert (Universe::heap() != NULL, "java heap should be initialized");
4888 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4889 sllx(src, LogMinObjAlignmentInBytes, dst);
4890 if (Universe::narrow_oop_base() != NULL) {
4891 Label done;
4892 bpr(rc_nz, true, Assembler::pt, dst, done);
4893 delayed() -> add(dst, G6_heapbase, dst); // annulled if not taken
4894 bind(done);
4895 }
4896 verify_oop(dst);
4897 }
4899 void MacroAssembler::decode_heap_oop_not_null(Register r) {
4900 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
4901 // pd_code_size_limit.
4902 // Also do not verify_oop as this is called by verify_oop.
4903 assert (UseCompressedOops, "must be compressed");
4904 assert (Universe::heap() != NULL, "java heap should be initialized");
4905 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4906 sllx(r, LogMinObjAlignmentInBytes, r);
4907 if (Universe::narrow_oop_base() != NULL)
4908 add(r, G6_heapbase, r);
4909 }
4911 void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
4912 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
4913 // pd_code_size_limit.
4914 // Also do not verify_oop as this is called by verify_oop.
4915 assert (UseCompressedOops, "must be compressed");
4916 assert (Universe::heap() != NULL, "java heap should be initialized");
4917 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4918 sllx(src, LogMinObjAlignmentInBytes, dst);
4919 if (Universe::narrow_oop_base() != NULL)
4920 add(dst, G6_heapbase, dst);
4921 }
4923 void MacroAssembler::reinit_heapbase() {
4924 if (UseCompressedOops) {
4925 // call indirectly to solve generation ordering problem
4926 AddressLiteral base(Universe::narrow_oop_base_addr());
4927 load_ptr_contents(base, G6_heapbase);
4928 }
4929 }
4931 // Compare char[] arrays aligned to 4 bytes.
4932 void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
4933 Register limit, Register result,
4934 Register chr1, Register chr2, Label& Ldone) {
4935 Label Lvector, Lloop;
4936 assert(chr1 == result, "should be the same");
4938 // Note: limit contains number of bytes (2*char_elements) != 0.
4939 andcc(limit, 0x2, chr1); // trailing character ?
4940 br(Assembler::zero, false, Assembler::pt, Lvector);
4941 delayed()->nop();
4943 // compare the trailing char
4944 sub(limit, sizeof(jchar), limit);
4945 lduh(ary1, limit, chr1);
4946 lduh(ary2, limit, chr2);
4947 cmp(chr1, chr2);
4948 br(Assembler::notEqual, true, Assembler::pt, Ldone);
4949 delayed()->mov(G0, result); // not equal
4951 // only one char ?
4952 cmp_zero_and_br(zero, limit, Ldone, true, Assembler::pn);
4953 delayed()->add(G0, 1, result); // one-char arrays were compared equal above
4955 // word by word compare, don't need alignment check
4956 bind(Lvector);
4957 // Shift ary1 and ary2 to the end of the arrays, negate limit
4958 add(ary1, limit, ary1);
4959 add(ary2, limit, ary2);
4960 neg(limit, limit);
4962 lduw(ary1, limit, chr1);
4963 bind(Lloop);
4964 lduw(ary2, limit, chr2);
4965 cmp(chr1, chr2);
4966 br(Assembler::notEqual, true, Assembler::pt, Ldone);
4967 delayed()->mov(G0, result); // not equal
4968 inccc(limit, 2*sizeof(jchar));
4969 // annul LDUW if branch is not taken to prevent access past end of array
4970 br(Assembler::notZero, true, Assembler::pt, Lloop);
4971 delayed()->lduw(ary1, limit, chr1); // hoisted
4973 // Caller should set it:
4974 // add(G0, 1, result); // equals
4975 }
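// C++ sketch of the strategy above (limit is the non-zero byte length):
//
//   if (limit & 2) {                        // odd char count: compare the tail
//     limit -= sizeof(jchar);
//     if (a1[limit / 2] != a2[limit / 2]) return false;
//     if (limit == 0) return true;          // one-char arrays matched
//   }
//   // then compare one 32-bit word (two chars) per iteration, walking a
//   // negative byte index from -limit up to 0 so only one counter is needed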