Mon, 11 Oct 2010 04:18:58 -0700
6829194: JSR 292 needs to support compressed oops
Reviewed-by: kvn, jrose
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_assembler_sparc.cpp.incl"
// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, bool disp_is_oop) {
  assert(scale == 0, "not supported");
  RelocationHolder rspec;
  if (disp_is_oop) {
    rspec = Relocation::spec_simple(relocInfo::oop_type);
  }

  Register rindex = as_Register(index);
  if (rindex != G0) {
    Address madr(as_Register(base), rindex);
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), disp);
    madr._rspec = rspec;
    return madr;
  }
}
Address Argument::address_in_frame() const {
  // Warning: In LP64 mode disp will occupy more than 10 bits, but
  //          op codes such as ld or ldx only access disp() to get
  //          their simm13 argument.
  int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
  if (is_in())
    return Address(FP, disp); // In argument.
  else
    return Address(SP, disp); // Out argument.
}
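
// A minimal host-side sketch (not part of this file) of the displacement
// computation above, with illustrative constants: assume 6 register
// parameters, a hypothetical memory_parameter_word_sp_offset of 23 words,
// 8-byte words (LP64), and the usual SPARC V9 STACK_BIAS of 0x7ff.
static int example_argument_disp(int number) {
  const int n_register_parameters           = 6;     // assumed
  const int memory_parameter_word_sp_offset = 23;    // hypothetical value
  const int bytes_per_word                  = 8;     // LP64
  const int stack_bias                      = 0x7ff; // SPARC V9 stack bias
  return (number - n_register_parameters + memory_parameter_word_sp_offset) * bytes_per_word + stack_bias;
}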
static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}
void Assembler::print_instruction(int inst) {
  const char* s;
  switch (inv_op(inst)) {
  default:      s = "????"; break;
  case call_op: s = "call"; break;
  case branch_op:
    switch (inv_op2(inst)) {
      case bpr_op2: s = "bpr";  break;
      case fb_op2:  s = "fb";   break;
      case fbp_op2: s = "fbp";  break;
      case br_op2:  s = "br";   break;
      case bp_op2:  s = "bp";   break;
      case cb_op2:  s = "cb";   break;
      default:      s = "????"; break;
    }
  }
  ::tty->print("%s", s);
}
// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will work out
// OK.
int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) {

  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op: m = wdisp(word_aligned_ones, 0, 30); v = wdisp(dest_pos, inst_pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case bpr_op2: m = wdisp16(word_aligned_ones, 0);     v = wdisp16(dest_pos, inst_pos);     break;
      case fbp_op2: m = wdisp( word_aligned_ones, 0, 19);  v = wdisp( dest_pos, inst_pos, 19);  break;
      case bp_op2:  m = wdisp( word_aligned_ones, 0, 19);  v = wdisp( dest_pos, inst_pos, 19);  break;
      case fb_op2:  m = wdisp( word_aligned_ones, 0, 22);  v = wdisp( dest_pos, inst_pos, 22);  break;
      case br_op2:  m = wdisp( word_aligned_ones, 0, 22);  v = wdisp( dest_pos, inst_pos, 22);  break;
      case cb_op2:  m = wdisp( word_aligned_ones, 0, 22);  v = wdisp( dest_pos, inst_pos, 22);  break;
      default: ShouldNotReachHere();
    }
  }
  return (inst & ~m) | v;
}
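
// A small self-contained sketch (the field placement is an assumption, not
// this file's exact encodings) of the mask-and-merge idiom above: build a
// mask covering the displacement field, clear that field in the old
// instruction word, and OR in the new displacement.
#include <stdint.h>

static uint32_t example_patch_disp22(uint32_t inst, int32_t word_disp) {
  const uint32_t m = (1u << 22) - 1;     // 22-bit displacement mask (assumed at bits 21..0)
  uint32_t v = (uint32_t)word_disp & m;  // new displacement, truncated to the field width
  return (inst & ~m) | v;                // opcode/condition bits survive; displacement is replaced
}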
// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
int Assembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op: r = inv_wdisp(inst, pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case bpr_op2: r = inv_wdisp16(inst, pos);    break;
      case fbp_op2: r = inv_wdisp( inst, pos, 19); break;
      case bp_op2:  r = inv_wdisp( inst, pos, 19); break;
      case fb_op2:  r = inv_wdisp( inst, pos, 22); break;
      case br_op2:  r = inv_wdisp( inst, pos, 22); break;
      case cb_op2:  r = inv_wdisp( inst, pos, 22); break;
      default: ShouldNotReachHere();
    }
  }
  return r;
}
int AbstractAssembler::code_fill_byte() {
  return 0x00;                  // illegal instruction 0x00000000
}

Assembler::Condition Assembler::reg_cond_to_cc_cond(Assembler::RCondition in) {
  switch (in) {
  case rc_z:   return equal;
  case rc_lez: return lessEqual;
  case rc_lz:  return less;
  case rc_nz:  return notEqual;
  case rc_gz:  return greater;
  case rc_gez: return greaterEqual;
  default:
    ShouldNotReachHere();
  }
  return equal;
}
// Generate a bunch 'o stuff (including v9's instructions).
#ifndef PRODUCT
void Assembler::test_v9() {
  add( G0, G1, G2 );
  add( G3, 0, G4 );

  addcc( G5, G6, G7 );
  addcc( I0, 1, I1 );
  addc( I2, I3, I4 );
  addc( I5, -1, I6 );
  addccc( I7, L0, L1 );
  addccc( L2, (1 << 12) - 2, L3 );

  Label lbl1, lbl2, lbl3;

  bind(lbl1);

  bpr( rc_z, true, pn, L4, pc(), relocInfo::oop_type );
  delayed()->nop();
  bpr( rc_lez, false, pt, L5, lbl1);
  delayed()->nop();

  fb( f_never, true, pc() + 4, relocInfo::none);
  delayed()->nop();
  fb( f_notEqual, false, lbl2 );
  delayed()->nop();

  fbp( f_notZero, true, fcc0, pn, pc() - 4, relocInfo::none);
  delayed()->nop();
  fbp( f_lessOrGreater, false, fcc1, pt, lbl3 );
  delayed()->nop();

  br( equal, true, pc() + 1024, relocInfo::none);
  delayed()->nop();
  br( lessEqual, false, lbl1 );
  delayed()->nop();
  br( never, false, lbl1 );
  delayed()->nop();

  bp( less, true, icc, pn, pc(), relocInfo::none);
  delayed()->nop();
  bp( lessEqualUnsigned, false, xcc, pt, lbl2 );
  delayed()->nop();

  call( pc(), relocInfo::none);
  delayed()->nop();
  call( lbl3 );
  delayed()->nop();

  casa( L6, L7, O0 );
  casxa( O1, O2, O3, 0 );

  udiv( O4, O5, O7 );
  udiv( G0, (1 << 12) - 1, G1 );
  sdiv( G1, G2, G3 );
  sdiv( G4, -((1 << 12) - 1), G5 );
  udivcc( G6, G7, I0 );
  udivcc( I1, -((1 << 12) - 2), I2 );
  sdivcc( I3, I4, I5 );
  sdivcc( I6, -((1 << 12) - 0), I7 );

  done();
  retry();

  fadd( FloatRegisterImpl::S, F0, F1, F2 );
  fsub( FloatRegisterImpl::D, F34, F0, F62 );

  fcmp( FloatRegisterImpl::Q, fcc0, F0, F60);
  fcmpe( FloatRegisterImpl::S, fcc1, F31, F30);

  ftox( FloatRegisterImpl::D, F2, F4 );
  ftoi( FloatRegisterImpl::Q, F4, F8 );

  ftof( FloatRegisterImpl::S, FloatRegisterImpl::Q, F3, F12 );

  fxtof( FloatRegisterImpl::S, F4, F5 );
  fitof( FloatRegisterImpl::D, F6, F8 );

  fmov( FloatRegisterImpl::Q, F16, F20 );
  fneg( FloatRegisterImpl::S, F6, F7 );
  fabs( FloatRegisterImpl::D, F10, F12 );

  fmul( FloatRegisterImpl::Q, F24, F28, F32 );
  fmul( FloatRegisterImpl::S, FloatRegisterImpl::D, F8, F9, F14 );
  fdiv( FloatRegisterImpl::S, F10, F11, F12 );

  fsqrt( FloatRegisterImpl::S, F13, F14 );

  flush( L0, L1 );
  flush( L2, -1 );

  flushw();

  illtrap( (1 << 22) - 2);

  impdep1( 17, (1 << 19) - 1 );
  impdep2( 3, 0 );

  jmpl( L3, L4, L5 );
  delayed()->nop();
  jmpl( L6, -1, L7, Relocation::spec_simple(relocInfo::none));
  delayed()->nop();

  ldf( FloatRegisterImpl::S, O0, O1, F15 );
  ldf( FloatRegisterImpl::D, O2, -1, F14 );

  ldfsr( O3, O4 );
  ldfsr( O5, -1 );
  ldxfsr( O6, O7 );
  ldxfsr( I0, -1 );

  ldfa( FloatRegisterImpl::D, I1, I2, 1, F16 );
  ldfa( FloatRegisterImpl::Q, I3, -1, F36 );

  ldsb( I4, I5, I6 );
  ldsb( I7, -1, G0 );
  ldsh( G1, G3, G4 );
  ldsh( G5, -1, G6 );
  ldsw( G7, L0, L1 );
  ldsw( L2, -1, L3 );
  ldub( L4, L5, L6 );
  ldub( L7, -1, O0 );
  lduh( O1, O2, O3 );
  lduh( O4, -1, O5 );
  lduw( O6, O7, G0 );
  lduw( G1, -1, G2 );
  ldx( G3, G4, G5 );
  ldx( G6, -1, G7 );
  ldd( I0, I1, I2 );
  ldd( I3, -1, I4 );

  ldsba( I5, I6, 2, I7 );
  ldsba( L0, -1, L1 );
  ldsha( L2, L3, 3, L4 );
  ldsha( L5, -1, L6 );
  ldswa( L7, O0, (1 << 8) - 1, O1 );
  ldswa( O2, -1, O3 );
  lduba( O4, O5, 0, O6 );
  lduba( O7, -1, I0 );
  lduha( I1, I2, 1, I3 );
  lduha( I4, -1, I5 );
  lduwa( I6, I7, 2, L0 );
  lduwa( L1, -1, L2 );
  ldxa( L3, L4, 3, L5 );
  ldxa( L6, -1, L7 );
  ldda( G0, G1, 4, G2 );
  ldda( G3, -1, G4 );

  ldstub( G5, G6, G7 );
  ldstub( O0, -1, O1 );

  ldstuba( O2, O3, 5, O4 );
  ldstuba( O5, -1, O6 );

  and3( I0, L0, O0 );
  and3( G7, -1, O7 );
  andcc( L2, I2, G2 );
  andcc( L4, -1, G4 );
  andn( I5, I6, I7 );
  andn( I6, -1, I7 );
  andncc( I5, I6, I7 );
  andncc( I7, -1, I6 );
  or3( I5, I6, I7 );
  or3( I7, -1, I6 );
  orcc( I5, I6, I7 );
  orcc( I7, -1, I6 );
  orn( I5, I6, I7 );
  orn( I7, -1, I6 );
  orncc( I5, I6, I7 );
  orncc( I7, -1, I6 );
  xor3( I5, I6, I7 );
  xor3( I7, -1, I6 );
  xorcc( I5, I6, I7 );
  xorcc( I7, -1, I6 );
  xnor( I5, I6, I7 );
  xnor( I7, -1, I6 );
  xnorcc( I5, I6, I7 );
  xnorcc( I7, -1, I6 );

  membar( Membar_mask_bits(StoreStore | LoadStore | StoreLoad | LoadLoad | Sync | MemIssue | Lookaside ) );
  membar( StoreStore );
  membar( LoadStore );
  membar( StoreLoad );
  membar( LoadLoad );
  membar( Sync );
  membar( MemIssue );
  membar( Lookaside );

  fmov( FloatRegisterImpl::S, f_ordered, true, fcc2, F16, F17 );
  fmov( FloatRegisterImpl::D, rc_lz, L5, F18, F20 );

  movcc( overflowClear, false, icc, I6, L4 );
  movcc( f_unorderedOrEqual, true, fcc2, (1 << 10) - 1, O0 );

  movr( rc_nz, I5, I6, I7 );
  movr( rc_gz, L1, -1, L2 );

  mulx( I5, I6, I7 );
  mulx( I7, -1, I6 );
  sdivx( I5, I6, I7 );
  sdivx( I7, -1, I6 );
  udivx( I5, I6, I7 );
  udivx( I7, -1, I6 );

  umul( I5, I6, I7 );
  umul( I7, -1, I6 );
  smul( I5, I6, I7 );
  smul( I7, -1, I6 );
  umulcc( I5, I6, I7 );
  umulcc( I7, -1, I6 );
  smulcc( I5, I6, I7 );
  smulcc( I7, -1, I6 );

  mulscc( I5, I6, I7 );
  mulscc( I7, -1, I6 );

  nop();

  popc( G0, G1);
  popc( -1, G2);

  prefetch( L1, L2, severalReads );
  prefetch( L3, -1, oneRead );
  prefetcha( O3, O2, 6, severalWritesAndPossiblyReads );
  prefetcha( G2, -1, oneWrite );

  rett( I7, I7);
  delayed()->nop();
  rett( G0, -1, relocInfo::none);
  delayed()->nop();

  save( I5, I6, I7 );
  save( I7, -1, I6 );
  restore( I5, I6, I7 );
  restore( I7, -1, I6 );

  saved();
  restored();

  sethi( 0xaaaaaaaa, I3, Relocation::spec_simple(relocInfo::none));

  sll( I5, I6, I7 );
  sll( I7, 31, I6 );
  srl( I5, I6, I7 );
  srl( I7, 0, I6 );
  sra( I5, I6, I7 );
  sra( I7, 30, I6 );
  sllx( I5, I6, I7 );
  sllx( I7, 63, I6 );
  srlx( I5, I6, I7 );
  srlx( I7, 0, I6 );
  srax( I5, I6, I7 );
  srax( I7, 62, I6 );

  sir( -1 );

  stbar();

  stf( FloatRegisterImpl::Q, F40, G0, I7 );
  stf( FloatRegisterImpl::S, F18, I3, -1 );

  stfsr( L1, L2 );
  stfsr( I7, -1 );
  stxfsr( I6, I5 );
  stxfsr( L4, -1 );

  stfa( FloatRegisterImpl::D, F22, I6, I7, 7 );
  stfa( FloatRegisterImpl::Q, F44, G0, -1 );

  stb( L5, O2, I7 );
  stb( I7, I6, -1 );
  sth( L5, O2, I7 );
  sth( I7, I6, -1 );
  stw( L5, O2, I7 );
  stw( I7, I6, -1 );
  stx( L5, O2, I7 );
  stx( I7, I6, -1 );
  std( L5, O2, I7 );
  std( I7, I6, -1 );

  stba( L5, O2, I7, 8 );
  stba( I7, I6, -1 );
  stha( L5, O2, I7, 9 );
  stha( I7, I6, -1 );
  stwa( L5, O2, I7, 0 );
  stwa( I7, I6, -1 );
  stxa( L5, O2, I7, 11 );
  stxa( I7, I6, -1 );
  stda( L5, O2, I7, 12 );
  stda( I7, I6, -1 );

  sub( I5, I6, I7 );
  sub( I7, -1, I6 );
  subcc( I5, I6, I7 );
  subcc( I7, -1, I6 );
  subc( I5, I6, I7 );
  subc( I7, -1, I6 );
  subccc( I5, I6, I7 );
  subccc( I7, -1, I6 );

  swap( I5, I6, I7 );
  swap( I7, -1, I6 );

  swapa( G0, G1, 13, G2 );
  swapa( I7, -1, I6 );

  taddcc( I5, I6, I7 );
  taddcc( I7, -1, I6 );
  taddcctv( I5, I6, I7 );
  taddcctv( I7, -1, I6 );

  tsubcc( I5, I6, I7 );
  tsubcc( I7, -1, I6 );
  tsubcctv( I5, I6, I7 );
  tsubcctv( I7, -1, I6 );

  trap( overflowClear, xcc, G0, G1 );
  trap( lessEqual, icc, I7, 17 );

  bind(lbl2);
  bind(lbl3);

  code()->decode();
}
// Generate a bunch 'o stuff unique to V8
void Assembler::test_v8_onlys() {
  Label lbl1;

  cb( cp_0or1or2, false, pc() - 4, relocInfo::none);
  delayed()->nop();
  cb( cp_never, true, lbl1);
  delayed()->nop();

  cpop1(1, 2, 3, 4);
  cpop2(5, 6, 7, 8);

  ldc( I0, I1, 31);
  ldc( I2, -1, 0);

  lddc( I4, I4, 30);
  lddc( I6, 0, 1 );

  ldcsr( L0, L1, 0);
  ldcsr( L1, (1 << 12) - 1, 17 );

  stc( 31, L4, L5);
  stc( 30, L6, -(1 << 12) );

  stdc( 0, L7, G0);
  stdc( 1, G1, 0 );

  stcsr( 16, G2, G3);
  stcsr( 17, G4, 1 );

  stdcq( 4, G5, G6);
  stdcq( 5, G7, -1 );

  bind(lbl1);

  code()->decode();
}
#endif
// Implementation of MacroAssembler

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  }
  else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}
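
// Hedged sketch of the decision this relies on (names and the protected-range
// assumption are illustrative, not HotSpot's actual implementation): an access
// at a small positive offset from a NULL base lands in the OS-protected page
// at address 0 and faults on its own, so no explicit check is needed there.
#include <stdint.h>

static bool example_needs_explicit_null_check(intptr_t offset, intptr_t protected_bytes /* e.g. one OS page */) {
  return offset < 0 || offset >= protected_bytes;
}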
// Ring buffer jumps

#ifndef PRODUCT
void MacroAssembler::ret( bool trace ) {
  if (trace) {
    mov(I7, O7); // traceable register
    JMP(O7, 2 * BytesPerInstWord);
  } else {
    jmpl( I7, 2 * BytesPerInstWord, G0 );
  }
}

void MacroAssembler::retl( bool trace ) {
  if (trace) JMP(O7, 2 * BytesPerInstWord);
  else       jmpl( O7, 2 * BytesPerInstWord, G0 );
}
#endif /* PRODUCT */
void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line ) {
  assert_not_delayed();
  // This can only be traceable if r1 & r2 are visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), r2->after_save(), O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmpl(r1, r2, G0);
}
void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
  assert_not_delayed();
  // This can only be traceable if r1 is visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), offset, O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmp(r1, offset);
}
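
// Illustrative C++ model (hypothetical names and size) of the jump ring
// buffer the two routines above maintain: each entry is four words -- jump
// target, nearby pc, source file, source line -- and the index wraps with a
// power-of-two mask, matching the and3(..., jump_ring_buffer_size - 1, ...).
#include <stdint.h>

struct ExampleJmpRingEntry {
  intptr_t target;   // destination of the jump
  intptr_t pc;       // pc near the jump site
  intptr_t file;     // const char* to source file name
  intptr_t line;     // source line number
};

struct ExampleJmpRing {
  static const int size = 16;              // assumed power of two
  ExampleJmpRingEntry entries[size];
  int index;

  void record(intptr_t target, intptr_t pc, intptr_t file, intptr_t line) {
    ExampleJmpRingEntry& e = entries[index];
    e.target = target;  e.pc = pc;  e.file = file;  e.line = line;
    index = (index + 1) & (size - 1);      // wrap, like and3 with size - 1
  }
};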
// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  Address a(temp, addrlit.low10() + offset);  // Add the offset to the displacement.
  if (TraceJumps) {
#ifndef PRODUCT
    // Must do the add here so relocation can find the remainder of the
    // value to be relocated.
    add(a.base(), a.disp(), a.base(), addrlit.rspec(offset));
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    set((intptr_t)file, O3);
    set(line, O4);
    Label L;

    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(a.base()->after_save(), O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
    jmpl(a.base(), G0, d);
#else
    jmpl(a.base(), a.disp(), d);
#endif /* PRODUCT */
  } else {
    jmpl(a.base(), a.disp(), d);
  }
}

void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
  jumpl(addrlit, temp, G0, offset, file, line);
}
// Convert to C varargs format
void MacroAssembler::set_varargs( Argument inArg, Register d ) {
  // spill register-resident args to their memory slots
  // (SPARC calling convention requires callers to have already preallocated these)
  // Note that the inArg might in fact be an outgoing argument,
  // if a leaf routine or stub does some tricky argument shuffling.
  // This routine must work even though one of the saved arguments
  // is in the d register (e.g., set_varargs(Argument(0, false), O0)).
  for (Argument savePtr = inArg;
       savePtr.is_register();
       savePtr = savePtr.successor()) {
    st_ptr(savePtr.as_register(), savePtr.address_in_frame());
  }
  // return the address of the first memory slot
  Address a = inArg.address_in_frame();
  add(a.base(), a.disp(), d);
}
// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}

// Flush register windows (except the current one), using the flushw
// instruction if it is available.
void MacroAssembler::flush_windows() {
  if (VM_Version::v9_instructions_work()) flushw();
  else                                    flush_windows_trap();
}
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  srl(thread, os::get_serialize_page_shift_count(), tmp2);
  if (Assembler::is_simm13(os::vm_page_size())) {
    and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
  }
  else {
    set((os::vm_page_size() - sizeof(int)), tmp1);
    and3(tmp2, tmp1, tmp2);
  }
  set(os::get_memory_serialize_page(), tmp1);
  st(G0, tmp1, tmp2);
}
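
// A short host-side sketch (hypothetical helper, with assumed shift and page
// size) of the offset computation above: hash the thread pointer into an
// int-aligned slot inside the serialization page, so different threads tend
// to touch different cache lines.
#include <stdint.h>

static int example_serialize_offset(uintptr_t thread_ptr) {
  const int page_shift = 4;     // assumed os::get_serialize_page_shift_count()
  const int page_size  = 4096;  // assumed os::vm_page_size()
  return (int)((thread_ptr >> page_shift) & (page_size - sizeof(int)));
}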
void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

void MacroAssembler::mult(Register s1, Register s2, Register d) {
  if (VM_Version::v9_instructions_work()) {
    mulx(s1, s2, d);
  } else {
    smul(s1, s2, d);
  }
}

void MacroAssembler::mult(Register s1, int simm13a, Register d) {
  if (VM_Version::v9_instructions_work()) {
    mulx(s1, simm13a, d);
  } else {
    smul(s1, simm13a, d);
  }
}
#ifdef ASSERT
void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
  const Register s1 = G3_scratch;
  const Register s2 = G4_scratch;
  Label get_psr_test;
  // Get the condition codes the V8 way.
  read_ccr_trap(s1);
  mov(ccr_save, s2);
  // This is a test of V8 which has icc but not xcc
  // so mask off the xcc bits
  and3(s2, 0xf, s2);
  // Compare condition codes from the V8 and V9 ways.
  subcc(s2, s1, G0);
  br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
  delayed()->breakpoint_trap();
  bind(get_psr_test);
}

void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
  const Register s1 = G3_scratch;
  const Register s2 = G4_scratch;
  Label set_psr_test;
  // Write out the saved condition codes the V8 way
  write_ccr_trap(ccr_save, s1, s2);
  // Read back the condition codes using the V9 instruction
  rdccr(s1);
  mov(ccr_save, s2);
  // This is a test of V8 which has icc but not xcc
  // so mask off the xcc bits
  and3(s2, 0xf, s2);
  and3(s1, 0xf, s1);
  // Compare the V8 way with the V9 way.
  subcc(s2, s1, G0);
  br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
  delayed()->breakpoint_trap();
  bind(set_psr_test);
}
#else
#define read_ccr_v8_assert(x)
#define write_ccr_v8_assert(x)
#endif // ASSERT
void MacroAssembler::read_ccr(Register ccr_save) {
  if (VM_Version::v9_instructions_work()) {
    rdccr(ccr_save);
    // Test code sequence used on V8. Do not move above rdccr.
    read_ccr_v8_assert(ccr_save);
  } else {
    read_ccr_trap(ccr_save);
  }
}

void MacroAssembler::write_ccr(Register ccr_save) {
  if (VM_Version::v9_instructions_work()) {
    // Test code sequence used on V8. Do not move below wrccr.
    write_ccr_v8_assert(ccr_save);
    wrccr(ccr_save);
  } else {
    const Register temp_reg1 = G3_scratch;
    const Register temp_reg2 = G4_scratch;
    write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
  }
}
// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return ThreadLocalStorage::thread();
}
#else
#define reinitialize_thread ThreadLocalStorage::thread
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif

// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);                // to avoid clobbering O0
  mov(G1, L0);                  // avoid clobbering G1
  mov(G5_method, L1);           // avoid clobbering G5
  mov(G3, L2);                  // avoid clobbering G3 also
  mov(G4, L5);                  // avoid clobbering G4
#ifdef ASSERT
  AddressLiteral last_get_thread_addrlit(&last_get_thread);
  set(last_get_thread_addrlit, L3);
  inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
  st_ptr(L4, L3, 0);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}
static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = ThreadLocalStorage::thread();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
#ifdef CC_INTERP
    save_frame(0);
#else
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod);  // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
    mov(G1, L1);                // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);                // avoid clobbering G3
    mov(G4, L4);                // avoid clobbering G4
    mov(G5_method, L5);         // avoid clobbering G5_method
#endif /* CC_INTERP */
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    srlx(G1,32,L0);
    srlx(G4,32,L6);
#endif
    call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);                // Restore G1
    // G2 restored below
    mov(L3, G3);                // restore G3
    mov(L4, G4);                // restore G4
    mov(L5, G5_method);         // restore G5_method
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    sllx(L0,32,G2);             // Move old high G1 bits high in G2
    sllx(G1, 0,G1);             // Clear current high G1 bits
    or3 (G1,G2,G1);             // Recover 64-bit G1
    sllx(L6,32,G2);             // Move old high G4 bits high in G2
    sllx(G4, 0,G4);             // Clear current high G4 bits
    or3 (G4,G2,G4);             // Recover 64-bit G4
#endif
    restore(O0, 0, G2_thread);
  }
}
void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}

void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}
// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread, JavaThread::frame_anchor_offset() +
                           JavaFrameAnchor::flags_offset());
  Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());

  // Always set last_Java_pc and flags first because once last_Java_sp is visible,
  // has_last_Java_frame is true and users will look at the rest of the fields.
  // (Note: flags should always be zero before we get here so it doesn't need to be set.)

#ifdef ASSERT
  // Verify that last_Java_pc was zeroed on return to Java
  Label PcOk;
  save_frame(0);                // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  tst(L0);
#ifdef _LP64
  brx(Assembler::zero, false, Assembler::pt, PcOk);
#else
  br(Assembler::zero, false, Assembler::pt, PcOk);
#endif // _LP64
  delayed()->nop();
  stop("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed()->restore();
  stop("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from calling out from Java mode the frame anchor's last_Java_pc
  // will always be set to NULL. It is set here so that if we are doing a call to
  // native (not VM) that we capture the known pc and don't have to rely on the
  // native call having a standard frame linkage where we can find the pc.

  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef _LP64
#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed()->nop();
  stop("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add( last_java_sp, STACK_BIAS, G4_scratch );
  st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
#else
  st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
#endif // _LP64
}
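
// Hedged C++ model (illustrative names, not the real JavaFrameAnchor) of the
// publication order the comments above describe: pc and flags are written
// before sp, because a non-NULL last_Java_sp is what makes the whole anchor
// visible to other observers; resetting goes in the opposite order, sp first.
#include <stdint.h>

struct ExampleFrameAnchor {
  volatile intptr_t last_Java_sp;   // non-zero => anchor is valid
  volatile intptr_t last_Java_pc;
  volatile int      flags;

  void set(intptr_t sp, intptr_t pc) {
    last_Java_pc = pc;              // written while sp is still NULL
    last_Java_sp = sp;              // publishing store comes last
  }
  void reset() {
    last_Java_sp = 0;               // hide the anchor first
    last_Java_pc = 0;
    flags        = 0;
  }
};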
void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
  Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
  Address flags  (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

#ifdef ASSERT
  // check that it WAS previously set
#ifdef CC_INTERP
  save_frame(0);
#else
  save_frame_and_mov(0, Lmethod, Lmethod);  // Propagate Lmethod to helper frame for -Xprof
#endif /* CC_INTERP */
  ld_ptr(sp_addr, L0);
  tst(L0);
  breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}
void MacroAssembler::call_VM_base(
  Register        oop_result,
  Register        thread_cache,
  Register        last_java_sp,
  address         entry_point,
  int             number_of_arguments,
  bool            check_exceptions)
{
  assert_not_delayed();

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread)  mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  else
    delayed()->nop();               // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}
void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, Thread::pending_exception_offset());
  ld_ptr(exception_addr, scratch_reg);
  br_null(scratch_reg, false, pt, L);
  delayed()->nop();
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}

void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}

void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}
void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1,                "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}

// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one.  In that case, last_java_sp must be passed as FP
// instead of SP.

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1,                "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}
void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
}

void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}

void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}

void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0,                "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}
void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  ld_ptr(    vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}

void MacroAssembler::get_vm_result_2(Register oop_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
  ld_ptr(vm_result_addr_2, oop_result);
  st_ptr(G0, vm_result_addr_2);
  verify_oop(oop_result);
}
// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  verify_oop(oop_result);

# ifdef ASSERT
  // Check that we are not overwriting any other oop.
#ifdef CC_INTERP
  save_frame(0);
#else
  save_frame_and_mov(0, Lmethod, Lmethod);  // Propagate Lmethod for -Xprof
#endif /* CC_INTERP */
  ld_ptr(vm_result_addr, L0);
  tst(L0);
  restore();
  breakpoint_trap(notZero, Assembler::ptr_cc);
# endif

  st_ptr(oop_result, vm_result_addr);
}
void MacroAssembler::card_table_write(jbyte* byte_map_base,
                                      Register tmp, Register obj) {
#ifdef _LP64
  srlx(obj, CardTableModRefBS::card_shift, obj);
#else
  srl(obj, CardTableModRefBS::card_shift, obj);
#endif
  assert(tmp != obj, "need separate temp reg");
  set((address) byte_map_base, tmp);
  stb(G0, tmp, obj);
}
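
// Minimal sketch of the card-marking idea above in plain C++ (hypothetical
// helper; HotSpot's real barrier lives in CardTableModRefBS). The object
// address is scaled down by card_shift and used to index the byte map, and
// storing zero dirties the card covering the object.
#include <stdint.h>

static void example_card_table_write(int8_t* byte_map_base, uintptr_t obj, int card_shift /* e.g. 9 for 512-byte cards */) {
  byte_map_base[obj >> card_shift] = 0;   // same effect as the stb(G0, ...) above
}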
void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
#ifdef _LP64
# ifdef CHECK_DELAY
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
# endif
  v9_dep();
  save_pc = pc();

  int msb32 = (int) (addrlit.value() >> 32);
  int lsb32 = (int) (addrlit.value());

  if (msb32 == 0 && lsb32 >= 0) {
    Assembler::sethi(lsb32, d, addrlit.rspec());
  }
  else if (msb32 == -1) {
    Assembler::sethi(~lsb32, d, addrlit.rspec());
    xor3(d, ~low10(~0), d);
  }
  else {
    Assembler::sethi(msb32, d, addrlit.rspec());  // msb 22-bits
    if (msb32 & 0x3ff)                            // Any bits?
      or3(d, msb32 & 0x3ff, d);                   // msb 32-bits are now in lsb 32
    if (lsb32 & 0xFFFFFC00) {                     // done?
      if ((lsb32 >> 20) & 0xfff) {                // Any bits set?
        sllx(d, 12, d);                           // Make room for next 12 bits
        or3(d, (lsb32 >> 20) & 0xfff, d);         // Or in next 12
        shiftcnt = 0;                             // We already shifted
      }
      else
        shiftcnt = 12;
      if ((lsb32 >> 10) & 0x3ff) {
        sllx(d, shiftcnt + 10, d);                // Make room for last 10 bits
        or3(d, (lsb32 >> 10) & 0x3ff, d);         // Or in next 10
        shiftcnt = 0;
      }
      else
        shiftcnt = 10;
      sllx(d, shiftcnt + 10, d);                  // Shift leaving disp field 0'd
    }
    else
      sllx(d, 32, d);
  }
  // Pad out the instruction sequence so it can be patched later.
  if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
                           addrlit.rtype() != relocInfo::runtime_call_type)) {
    while (pc() < (save_pc + (7 * BytesPerInstWord)))
      nop();
  }
#else
  Assembler::sethi(addrlit.value(), d, addrlit.rspec());
#endif
}
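
// Host-side sketch (hypothetical function; field widths as used above) that
// mirrors the LP64 decomposition: sethi materializes 22 high bits, or3 adds
// the next chunks, and sllx shifts them into place. Reassembling the pieces
// must reproduce the original 64-bit value.
#include <stdint.h>

static uint64_t example_build_constant(uint64_t value) {
  uint32_t msb32 = (uint32_t)(value >> 32);
  uint32_t lsb32 = (uint32_t)value;
  uint64_t d = (uint64_t)(msb32 & ~0x3ffu);   // sethi: bits 31..10 of msb32
  d |= (msb32 & 0x3ff);                       // or3: low 10 bits of msb32
  d = (d << 12) | ((lsb32 >> 20) & 0xfff);    // sllx 12; or3 in the next 12 bits
  d = (d << 10) | ((lsb32 >> 10) & 0x3ff);    // sllx 10; or3 in the next 10 bits
  d = (d << 10) | (lsb32 & 0x3ff);            // the final low10 normally comes from a later add
  return d;                                   // == value for any 64-bit input
}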
void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, false);
}

void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, true);
}
int MacroAssembler::size_of_sethi(address a, bool worst_case) {
#ifdef _LP64
  if (worst_case)  return 7;
  intptr_t iaddr = (intptr_t) a;
  int hi32 = (int) (iaddr >> 32);
  int lo32 = (int) (iaddr);
  int inst_count;
  if (hi32 == 0 && lo32 >= 0)
    inst_count = 1;
  else if (hi32 == -1)
    inst_count = 2;
  else {
    inst_count = 2;
    if (hi32 & 0x3ff)
      inst_count++;
    if (lo32 & 0xFFFFFC00) {
      if ((lo32 >> 20) & 0xfff)  inst_count += 2;
      if ((lo32 >> 10) & 0x3ff)  inst_count += 2;
    }
  }
  return BytesPerInstWord * inst_count;
#else
  return BytesPerInstWord;
#endif
}

int MacroAssembler::worst_case_size_of_set() {
  return size_of_sethi(NULL, true) + 1;
}
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  intptr_t value = addrlit.value();

  if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(addrlit, d);
      return;
    }
  }
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
  internal_sethi(addrlit, d, ForceRelocatable);
  if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
    add(d, addrlit.low10(), d, addrlit.rspec());
  }
}
void MacroAssembler::set(const AddressLiteral& al, Register d) {
  internal_set(al, d, false);
}

void MacroAssembler::set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, false);
}

void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
  AddressLiteral al(addr, rspec);
  internal_set(al, d, false);
}

void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
  internal_set(al, d, true);
}

void MacroAssembler::patchable_set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, true);
}
void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);   // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  }
  else if (hi == -1) {
    Assembler::sethi(~lo, d);  // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d); // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  }
  else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo, d);   // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3 (tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3 (  d, low10(lo),   d);
    sllx(tmp, 32, tmp);
    or3 (d, tmp, d);
  }
}
// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1) ++nWords; // round up to double-word

  return nWords * BytesPerWord;
}
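
// Worked example (assumed offset value; not frame's real constant) of the
// rounding above: with a hypothetical memory_parameter_word_sp_offset of 23
// words and 3 extra words, 26 words are needed; 26 is already even, so the
// LP64 frame is 26 * 8 = 208 bytes.
static int example_total_frame_size_in_bytes(int extraWords) {
  const int memory_parameter_word_sp_offset = 23;  // hypothetical
  const int bytes_per_word = 8;                    // LP64
  int nWords = memory_parameter_word_sp_offset + extraWords;
  if (nWords & 1) ++nWords;                        // round up to a doubleword boundary
  return nWords * bytes_per_word;
}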
// save_frame: given number of "extra" words in frame,
// issue approp. save instruction (p 200, v8 manual)

void MacroAssembler::save_frame(int extraWords = 0) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}

void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}
void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}
AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->allocate_index(obj);
  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}

AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}

void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(0x3fffff, d);
  emit_long( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff) );
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, 0x3ff, d);
}
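
// Since this changeset is about compressed-oops support, here is a hedged
// sketch of what the patched sethi/add pair above ultimately materializes:
// a 32-bit narrow oop that decodes to a full address via a heap base and
// shift. Names and parameters are illustrative, not HotSpot's exact API.
#include <stdint.h>

static uint32_t example_encode_heap_oop(uintptr_t oop, uintptr_t heap_base, int shift) {
  return (uint32_t)((oop - heap_base) >> shift);   // fits in 32 bits for heaps up to 32GB when shift == 3
}

static uintptr_t example_decode_heap_oop(uint32_t narrow, uintptr_t heap_base, int shift) {
  return heap_base + ((uintptr_t)narrow << shift);
}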
void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0)  nop();
}

void MacroAssembler::safepoint() {
  relocate(breakpoint_Relocation::spec(breakpoint_Relocation::safepoint));
}
void RegistersForDebugging::print(outputStream* s) {
  int j;
  for (j = 0; j < 8; ++j) {
    if (j != 6) s->print_cr("i%d = 0x%.16lx", j, i[j]);
    else        s->print_cr("fp = 0x%.16lx",     i[j]);
  }
  s->cr();

  for (j = 0; j < 8; ++j)
    s->print_cr("l%d = 0x%.16lx", j, l[j]);
  s->cr();

  for (j = 0; j < 8; ++j) {
    if (j != 6) s->print_cr("o%d = 0x%.16lx", j, o[j]);
    else        s->print_cr("sp = 0x%.16lx",     o[j]);
  }
  s->cr();

  for (j = 0; j < 8; ++j)
    s->print_cr("g%d = 0x%.16lx", j, g[j]);
  s->cr();

  // print out floats with compression
  for (j = 0; j < 32; ) {
    jfloat val = f[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", f[last + 1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("f%d", j);
    if (j != last)  s->print(" - f%d", last);
    s->print(" = %f", val);
    s->fill_to(25);
    s->print_cr(" (0x%x)", *(int*)&val);
    j = last + 1;
  }
  s->cr();

  // and doubles (evens only)
  for (j = 0; j < 32; ) {
    jdouble val = d[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", d[last + 1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("d%d", 2 * j);
    if (j != last)  s->print(" - d%d", 2 * last);
    s->print(" = %f", val);
    s->fill_to(30);
    s->print("(0x%x)", *(int*)&val);
    s->fill_to(42);
    s->print_cr("(0x%x)", *(1 + (int*)&val));
    j = last + 1;
  }
  s->cr();
}
void RegistersForDebugging::save_registers(MacroAssembler* a) {
  a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  a->flush_windows();
  int i;
  for (i = 0; i < 8; ++i) {
    a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, i_offset(i));
    a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, l_offset(i));
    a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
    a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
  }
  for (i = 0; i < 32; ++i) {
    a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  }
  for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
    a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
  }
}

void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
  for (int i = 1; i < 8; ++i) {
    a->ld_ptr(r, g_offset(i), as_gRegister(i));
  }
  for (int j = 0; j < 32; ++j) {
    a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
  }
  for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) {
    a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
  }
}
// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void MacroAssembler::push_fTOS() {
  // %%%%%% need to implement this
}

// pops double TOS element from CPU stack and pushes on FPU stack
void MacroAssembler::pop_fTOS() {
  // %%%%%% need to implement this
}

void MacroAssembler::empty_FPU_stack() {
  // %%%%%% need to implement this
}
void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  if (reg == G0)  return;       // always NULL, which is always an oop

  char buffer[64];
#ifdef COMPILER1
  if (CommentedAssembly) {
    snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
    block_comment(buffer);
  }
#endif

  int len = strlen(file) + strlen(msg) + 1 + 4;
  sprintf(buffer, "%d", line);
  len += strlen(buffer);
  sprintf(buffer, " at offset %d ", offset());
  len += strlen(buffer);
  char * real_msg = new char[len];
  sprintf(real_msg, "%s%s(%s:%d)", msg, buffer, file, line);

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize + STACK_BIAS + 0*8);
  stx(O1, SP, frame::register_save_words*wordSize + STACK_BIAS + 1*8);
  mov(reg, O0); // Move arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize + STACK_BIAS + 7*8);

  set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
}
void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  char buffer[64];
  sprintf(buffer, "%d", line);
  int len = strlen(file) + strlen(msg) + 1 + 4 + strlen(buffer);
  sprintf(buffer, " at SP+%d ", addr.disp());
  len += strlen(buffer);
  char * real_msg = new char[len];
  sprintf(real_msg, "%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize + STACK_BIAS + 0*8);
  stx(O1, SP, frame::register_save_words*wordSize + STACK_BIAS + 1*8);
  ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize + STACK_BIAS + 7*8);

  set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
}
// side-door communication with signalHandler in os_solaris.cpp
address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };

// This macro is expanded just once; it creates shared code.  Contract:
// receives an oop in O0.  Must restore O0 & O7 from TLS.  Must not smash ANY
// registers, including flags.  May not use a register 'save', as this blows
// the high bits of the O-regs if they contain Long values.  Acts as a 'leaf'
// call.
void MacroAssembler::verify_oop_subroutine() {
  assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" );

  // Leaf call; no frame.
  Label succeed, fail, null_or_fail;

  // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
  // O0 is now the oop to be checked.  O7 is the return address.
  Register O0_obj = O0;

  // Save some more registers for temps.
  stx(O2, SP, frame::register_save_words*wordSize + STACK_BIAS + 2*8);
  stx(O3, SP, frame::register_save_words*wordSize + STACK_BIAS + 3*8);
  stx(O4, SP, frame::register_save_words*wordSize + STACK_BIAS + 4*8);
  stx(O5, SP, frame::register_save_words*wordSize + STACK_BIAS + 5*8);

  // Save flags
  Register O5_save_flags = O5;
  rdccr(O5_save_flags);

  { // count number of verifies
    Register O2_adr   = O2;
    Register O3_accum = O3;
    inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
  }

  Register O2_mask = O2;
  Register O3_bits = O3;
  Register O4_temp = O4;

  // mark lower end of faulting range
  assert(_verify_oop_implicit_branch[0] == NULL, "set once");
  _verify_oop_implicit_branch[0] = pc();

  // We can't check the mark oop because it could be in the process of
  // locking or unlocking while this is running.
  set(Universe::verify_oop_mask (), O2_mask);
  set(Universe::verify_oop_bits (), O3_bits);

  // assert((obj & oop_mask) == oop_bits);
  and3(O0_obj, O2_mask, O4_temp);
  cmp(O4_temp, O3_bits);
  brx(notEqual, false, pn, null_or_fail);
  delayed()->nop();

  if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
    // the null_or_fail case is useless; must test for null separately
    br_null(O0_obj, false, pn, succeed);
    delayed()->nop();
  }

  // Check the klassOop of this object for being in the right area of memory.
  // Cannot do the load in the delay slot above in case O0 is null
  load_klass(O0_obj, O0_obj);
  // assert((klass & klass_mask) == klass_bits);
  if (Universe::verify_klass_mask() != Universe::verify_oop_mask())
    set(Universe::verify_klass_mask(), O2_mask);
  if (Universe::verify_klass_bits() != Universe::verify_oop_bits())
    set(Universe::verify_klass_bits(), O3_bits);
  and3(O0_obj, O2_mask, O4_temp);
  cmp(O4_temp, O3_bits);
  brx(notEqual, false, pn, fail);
  delayed()->nop();
  // Check the klass's klass
  load_klass(O0_obj, O0_obj);
  and3(O0_obj, O2_mask, O4_temp);
  cmp(O4_temp, O3_bits);
  brx(notEqual, false, pn, fail);
  delayed()->wrccr(O5_save_flags); // Restore CCR's

  // mark upper end of faulting range
  _verify_oop_implicit_branch[1] = pc();

  //-----------------------
  // all tests pass
  bind(succeed);

  // Restore prior 64-bit registers
  ldx(SP, frame::register_save_words*wordSize + STACK_BIAS + 0*8, O0);
  ldx(SP, frame::register_save_words*wordSize + STACK_BIAS + 1*8, O1);
  ldx(SP, frame::register_save_words*wordSize + STACK_BIAS + 2*8, O2);
  ldx(SP, frame::register_save_words*wordSize + STACK_BIAS + 3*8, O3);
  ldx(SP, frame::register_save_words*wordSize + STACK_BIAS + 4*8, O4);
  ldx(SP, frame::register_save_words*wordSize + STACK_BIAS + 5*8, O5);

  retl();  // Leaf return; restore prior O7 in delay slot
  delayed()->ldx(SP, frame::register_save_words*wordSize + STACK_BIAS + 7*8, O7);

  //-----------------------
  bind(null_or_fail);  // nulls are less common but OK
  br_null(O0_obj, false, pt, succeed);
  delayed()->wrccr(O5_save_flags); // Restore CCR's

  //-----------------------
  // report failure:
  bind(fail);
  _verify_oop_implicit_branch[2] = pc();

  wrccr(O5_save_flags); // Restore CCR's

  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  mov(I1, O1);

  // Restore prior 64-bit registers
  ldx(FP, frame::register_save_words*wordSize + STACK_BIAS + 0*8, I0);
  ldx(FP, frame::register_save_words*wordSize + STACK_BIAS + 1*8, I1);
  ldx(FP, frame::register_save_words*wordSize + STACK_BIAS + 2*8, I2);
  ldx(FP, frame::register_save_words*wordSize + STACK_BIAS + 3*8, I3);
  ldx(FP, frame::register_save_words*wordSize + STACK_BIAS + 4*8, I4);
  ldx(FP, frame::register_save_words*wordSize + STACK_BIAS + 5*8, I5);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(al, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();
}
1867 void MacroAssembler::stop(const char* msg) {
1868 // save frame first to get O7 for return address
1869 // add one word to size in case struct is odd number of words long
1870 // It must be doubleword-aligned for storing doubles into it.
1872 save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
1874 // stop_subroutine expects message pointer in I1.
1875 set((intptr_t)msg, O1);
1877 // factor long stop-sequence into subroutine to save space
1878 assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
1880 // call indirectly to solve generation ordering problem
1881 AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
1882 load_ptr_contents(a, O5);
1883 jmpl(O5, 0, O7);
1884 delayed()->nop();
1886 breakpoint_trap(); // make stop actually stop rather than writing
1887 // unnoticeable results in the output files.
1889 // restore(); done in callee to save space!
1890 }
1893 void MacroAssembler::warn(const char* msg) {
1894 save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
1895 RegistersForDebugging::save_registers(this);
1896 mov(O0, L0);
1897 set((intptr_t)msg, O0);
1898 call( CAST_FROM_FN_PTR(address, warning) );
1899 delayed()->nop();
1900 // ret();
1901 // delayed()->restore();
1902 RegistersForDebugging::restore_registers(this, L0);
1903 restore();
1904 }
1907 void MacroAssembler::untested(const char* what) {
1908 // We must be able to turn interactive prompting off
1909 // in order to run automated test scripts on the VM.
1910 // Use the flag ShowMessageBoxOnError.
1912 char* b = new char[1024];
1913 jio_snprintf(b, 1024, "untested: %s", what); // bounded, in case 'what' is long
1915 if ( ShowMessageBoxOnError ) stop(b);
1916 else warn(b);
1917 }
1920 void MacroAssembler::stop_subroutine() {
1921 RegistersForDebugging::save_registers(this);
1923 // for the sake of the debugger, stick a PC on the current frame
1924 // (this assumes that the caller has performed an extra "save")
1925 mov(I7, L7);
1926 add(O7, -7 * BytesPerInt, I7);
1928 save_frame(); // one more save to free up another O7 register
1929 mov(I0, O1); // addr of reg save area
1931 // We expect pointer to message in I1. Caller must set it up in O1
1932 mov(I1, O0); // get msg
1933 call (CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
1934 delayed()->nop();
1936 restore();
1938 RegistersForDebugging::restore_registers(this, O0);
1940 save_frame(0);
1941 call(CAST_FROM_FN_PTR(address,breakpoint));
1942 delayed()->nop();
1943 restore();
1945 mov(L7, I7);
1946 retl();
1947 delayed()->restore(); // see stop above
1948 }
1951 void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
1952 if ( ShowMessageBoxOnError ) {
1953 JavaThreadState saved_state = JavaThread::current()->thread_state();
1954 JavaThread::current()->set_thread_state(_thread_in_vm);
1955 {
1956 // In order to make locks work, we need to fake an in_VM state
1957 ttyLocker ttyl;
1958 ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
1959 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
1960 ::tty->print_cr("Interpreter::bytecode_counter = %d", BytecodeCounter::counter_value());
1961 }
1962 if (os::message_box(msg, "Execution stopped, print registers?"))
1963 regs->print(::tty);
1964 }
1965 ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
1966 }
1967 else
1968 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
1969 assert(false, "error");
1970 }
1973 #ifndef PRODUCT
1974 void MacroAssembler::test() {
1975 ResourceMark rm;
1977 CodeBuffer cb("test", 10000, 10000);
1978 MacroAssembler* a = new MacroAssembler(&cb);
1979 VM_Version::allow_all();
1980 a->test_v9();
1981 a->test_v8_onlys();
1982 VM_Version::revert();
1984 StubRoutines::Sparc::test_stop_entry()();
1985 }
1986 #endif
1989 void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
1990 subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
1991 Label no_extras;
1992 br( negative, true, pt, no_extras ); // if neg, clear reg
1993 delayed()->set(0, Rresult); // annulled, so only if taken
1994 bind( no_extras );
1995 }
1998 void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
1999 #ifdef _LP64
2000 add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
2001 #else
2002 add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
2003 #endif
2004 bclr(1, Rresult);
2005 sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
2006 }
2009 void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
2010 calc_frame_size(Rextra_words, Rresult);
2011 neg(Rresult);
2012 save(SP, Rresult, SP);
2013 }
2016 // ---------------------------------------------------------
2017 Assembler::RCondition cond2rcond(Assembler::Condition c) {
2018 switch (c) {
2019 /*case zero: */
2020 case Assembler::equal: return Assembler::rc_z;
2021 case Assembler::lessEqual: return Assembler::rc_lez;
2022 case Assembler::less: return Assembler::rc_lz;
2023 /*case notZero:*/
2024 case Assembler::notEqual: return Assembler::rc_nz;
2025 case Assembler::greater: return Assembler::rc_gz;
2026 case Assembler::greaterEqual: return Assembler::rc_gez;
2027 }
2028 ShouldNotReachHere();
2029 return Assembler::rc_z;
2030 }
2032 // compares register with zero and branches. NOT FOR USE WITH 64-bit POINTERS
2033 void MacroAssembler::br_zero( Condition c, bool a, Predict p, Register s1, Label& L) {
2034 tst(s1);
2035 br (c, a, p, L);
2036 }
2039 // Compares a pointer register with zero and branches on null.
2040 // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
2041 void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) {
2042 assert_not_delayed();
2043 #ifdef _LP64
2044 bpr( rc_z, a, p, s1, L );
2045 #else
2046 tst(s1);
2047 br ( zero, a, p, L );
2048 #endif
2049 }
2051 void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) {
2052 assert_not_delayed();
2053 #ifdef _LP64
2054 bpr( rc_nz, a, p, s1, L );
2055 #else
2056 tst(s1);
2057 br ( notZero, a, p, L );
2058 #endif
2059 }
2061 void MacroAssembler::br_on_reg_cond( RCondition rc, bool a, Predict p,
2062 Register s1, address d,
2063 relocInfo::relocType rt ) {
2064 if (VM_Version::v9_instructions_work()) {
2065 bpr(rc, a, p, s1, d, rt);
2066 } else {
2067 tst(s1);
2068 br(reg_cond_to_cc_cond(rc), a, p, d, rt);
2069 }
2070 }
2072 void MacroAssembler::br_on_reg_cond( RCondition rc, bool a, Predict p,
2073 Register s1, Label& L ) {
2074 if (VM_Version::v9_instructions_work()) {
2075 bpr(rc, a, p, s1, L);
2076 } else {
2077 tst(s1);
2078 br(reg_cond_to_cc_cond(rc), a, p, L);
2079 }
2080 }
2083 // instruction sequences factored across compiler & interpreter
2086 void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low,
2087 Register Rb_hi, Register Rb_low,
2088 Register Rresult) {
2090 Label check_low_parts, done;
2092 cmp(Ra_hi, Rb_hi ); // compare hi parts
2093 br(equal, true, pt, check_low_parts);
2094 delayed()->cmp(Ra_low, Rb_low); // test low parts
2096 // The low parts must be compared unsigned: with an unsigned comparison
2097 // it does not matter whether the numbers are negative or not.
2098 // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
2099 // The second one is bigger (unsignedly).
2101 // Other notes: The first move in each triplet can be unconditional
2102 // (and therefore probably prefetchable).
2103 // And the equals case for the high part does not need testing,
2104 // since that triplet is reached only after finding the high halves differ.
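// Note: the 'triplets' referred to above are the mov/movcc/movcc (V9) and
// set/br/br (V8) sequences below that materialize -1, 0 or 1 in Rresult.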
2106 if (VM_Version::v9_instructions_work()) {
2108 mov ( -1, Rresult);
2109 ba( false, done ); delayed()-> movcc(greater, false, icc, 1, Rresult);
2110 }
2111 else {
2112 br(less, true, pt, done); delayed()-> set(-1, Rresult);
2113 br(greater, true, pt, done); delayed()-> set( 1, Rresult);
2114 }
2116 bind( check_low_parts );
2118 if (VM_Version::v9_instructions_work()) {
2119 mov( -1, Rresult);
2120 movcc(equal, false, icc, 0, Rresult);
2121 movcc(greaterUnsigned, false, icc, 1, Rresult);
2122 }
2123 else {
2124 set(-1, Rresult);
2125 br(equal, true, pt, done); delayed()->set( 0, Rresult);
2126 br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult);
2127 }
2128 bind( done );
2129 }
2131 void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
2132 subcc( G0, Rlow, Rlow );
2133 subc( G0, Rhi, Rhi );
2134 }
2136 void MacroAssembler::lshl( Register Rin_high, Register Rin_low,
2137 Register Rcount,
2138 Register Rout_high, Register Rout_low,
2139 Register Rtemp ) {
2142 Register Ralt_count = Rtemp;
2143 Register Rxfer_bits = Rtemp;
2145 assert( Ralt_count != Rin_high
2146 && Ralt_count != Rin_low
2147 && Ralt_count != Rcount
2148 && Rxfer_bits != Rin_low
2149 && Rxfer_bits != Rin_high
2150 && Rxfer_bits != Rcount
2151 && Rxfer_bits != Rout_low
2152 && Rout_low != Rin_high,
2153 "register alias checks");
2155 Label big_shift, done;
2157 // This code can be optimized to use the 64 bit shifts in V9.
2158 // Here we use the 32 bit shifts.
2160 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
2161 subcc(Rcount, 31, Ralt_count);
2162 br(greater, true, pn, big_shift);
2163 delayed()->
2164 dec(Ralt_count);
2166 // shift < 32 bits, Ralt_count = Rcount-31
2168 // We get the transfer bits by shifting right by 32-count the low
2169 // register. This is done by shifting right by 31-count and then by one
2170 // more to take care of the special (rare) case where count is zero
2171 // (shifting by 32 would not work).
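// Worked example: for count == 1, Ralt_count is 1-31 = -30, negated to 30,
// so the low word is shifted right by 30 and then by 1 more -- 31 = 32-1 in
// all.  For count == 0 the two-step form yields zero, whereas a single srl
// by 32 would be a no-op, since 32-bit SPARC shifts use only the low 5 bits
// of the count.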
2173 neg( Ralt_count );
2175 // The order of the next two instructions is critical in the case where
2176 // Rin and Rout are the same and should not be reversed.
2178 srl( Rin_low, Ralt_count, Rxfer_bits ); // shift right by 31-count
2179 if (Rcount != Rout_low) {
2180 sll( Rin_low, Rcount, Rout_low ); // low half
2181 }
2182 sll( Rin_high, Rcount, Rout_high );
2183 if (Rcount == Rout_low) {
2184 sll( Rin_low, Rcount, Rout_low ); // low half
2185 }
2186 srl( Rxfer_bits, 1, Rxfer_bits ); // shift right by one more
2187 ba (false, done);
2188 delayed()->
2189 or3( Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low
2191 // shift >= 32 bits, Ralt_count = Rcount-32
2192 bind(big_shift);
2193 sll( Rin_low, Ralt_count, Rout_high );
2194 clr( Rout_low );
2196 bind(done);
2197 }
2200 void MacroAssembler::lshr( Register Rin_high, Register Rin_low,
2201 Register Rcount,
2202 Register Rout_high, Register Rout_low,
2203 Register Rtemp ) {
2205 Register Ralt_count = Rtemp;
2206 Register Rxfer_bits = Rtemp;
2208 assert( Ralt_count != Rin_high
2209 && Ralt_count != Rin_low
2210 && Ralt_count != Rcount
2211 && Rxfer_bits != Rin_low
2212 && Rxfer_bits != Rin_high
2213 && Rxfer_bits != Rcount
2214 && Rxfer_bits != Rout_high
2215 && Rout_high != Rin_low,
2216 "register alias checks");
2218 Label big_shift, done;
2220 // This code can be optimized to use the 64 bit shifts in V9.
2221 // Here we use the 32 bit shifts.
2223 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
2224 subcc(Rcount, 31, Ralt_count);
2225 br(greater, true, pn, big_shift);
2226 delayed()->dec(Ralt_count);
2228 // shift < 32 bits, Ralt_count = Rcount-31
2230 // We get the transfer bits by shifting left by 32-count the high
2231 // register. This is done by shifting left by 31-count and then by one
2232 // more to take care of the special (rare) case where count is zero
2233 // (shifting by 32 would not work).
2235 neg( Ralt_count );
2236 if (Rcount != Rout_low) {
2237 srl( Rin_low, Rcount, Rout_low );
2238 }
2240 // The order of the next two instructions is critical in the case where
2241 // Rin and Rout are the same and should not be reversed.
2243 sll( Rin_high, Ralt_count, Rxfer_bits ); // shift left by 31-count
2244 sra( Rin_high, Rcount, Rout_high ); // high half
2245 sll( Rxfer_bits, 1, Rxfer_bits ); // shift left by one more
2246 if (Rcount == Rout_low) {
2247 srl( Rin_low, Rcount, Rout_low );
2248 }
2249 ba (false, done);
2250 delayed()->
2251 or3( Rout_low, Rxfer_bits, Rout_low ); // new low value: or shifted old low part and xfer from high
2253 // shift >= 32 bits, Ralt_count = Rcount-32
2254 bind(big_shift);
2256 sra( Rin_high, Ralt_count, Rout_low );
2257 sra( Rin_high, 31, Rout_high ); // sign into hi
2259 bind( done );
2260 }
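// Note: lushr below follows the same scheme as lshr; the only differences
// are that the high word is shifted with srl instead of sra, and that the
// big-shift case clears Rout_high instead of filling it with the sign.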
2264 void MacroAssembler::lushr( Register Rin_high, Register Rin_low,
2265 Register Rcount,
2266 Register Rout_high, Register Rout_low,
2267 Register Rtemp ) {
2269 Register Ralt_count = Rtemp;
2270 Register Rxfer_bits = Rtemp;
2272 assert( Ralt_count != Rin_high
2273 && Ralt_count != Rin_low
2274 && Ralt_count != Rcount
2275 && Rxfer_bits != Rin_low
2276 && Rxfer_bits != Rin_high
2277 && Rxfer_bits != Rcount
2278 && Rxfer_bits != Rout_high
2279 && Rout_high != Rin_low,
2280 "register alias checks");
2282 Label big_shift, done;
2284 // This code can be optimized to use the 64 bit shifts in V9.
2285 // Here we use the 32 bit shifts.
2287 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
2288 subcc(Rcount, 31, Ralt_count);
2289 br(greater, true, pn, big_shift);
2290 delayed()->dec(Ralt_count);
2292 // shift < 32 bits, Ralt_count = Rcount-31
2294 // We get the transfer bits by shifting left by 32-count the high
2295 // register. This is done by shifting left by 31-count and then by one
2296 // more to take care of the special (rare) case where count is zero
2297 // (shifting by 32 would not work).
2299 neg( Ralt_count );
2300 if (Rcount != Rout_low) {
2301 srl( Rin_low, Rcount, Rout_low );
2302 }
2304 // The order of the next two instructions is critical in the case where
2305 // Rin and Rout are the same and should not be reversed.
2307 sll( Rin_high, Ralt_count, Rxfer_bits ); // shift left by 31-count
2308 srl( Rin_high, Rcount, Rout_high ); // high half
2309 sll( Rxfer_bits, 1, Rxfer_bits ); // shift left by one more
2310 if (Rcount == Rout_low) {
2311 srl( Rin_low, Rcount, Rout_low );
2312 }
2313 ba (false, done);
2314 delayed()->
2315 or3( Rout_low, Rxfer_bits, Rout_low ); // new low value: or shifted old low part and xfer from high
2317 // shift >= 32 bits, Ralt_count = Rcount-32
2318 bind(big_shift);
2320 srl( Rin_high, Ralt_count, Rout_low );
2321 clr( Rout_high );
2323 bind( done );
2324 }
2326 #ifdef _LP64
2327 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
2328 cmp(Ra, Rb);
2329 mov( -1, Rresult);
2330 movcc(equal, false, xcc, 0, Rresult);
2331 movcc(greater, false, xcc, 1, Rresult);
2332 }
2333 #endif
2336 void MacroAssembler::load_sized_value(Address src, Register dst,
2337 size_t size_in_bytes, bool is_signed) {
2338 switch (size_in_bytes) {
2339 case 8: ldx(src, dst); break;
2340 case 4: ld( src, dst); break;
2341 case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break;
2342 case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break;
2343 default: ShouldNotReachHere();
2344 }
2345 }
2348 void MacroAssembler::float_cmp( bool is_float, int unordered_result,
2349 FloatRegister Fa, FloatRegister Fb,
2350 Register Rresult) {
2352 fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb);
2354 Condition lt = unordered_result == -1 ? f_unorderedOrLess : f_less;
2355 Condition eq = f_equal;
2356 Condition gt = unordered_result == 1 ? f_unorderedOrGreater : f_greater;
2358 if (VM_Version::v9_instructions_work()) {
2360 mov( -1, Rresult );
2361 movcc( eq, true, fcc0, 0, Rresult );
2362 movcc( gt, true, fcc0, 1, Rresult );
2364 } else {
2365 Label done;
2367 set( -1, Rresult );
2368 //fb(lt, true, pn, done); delayed()->set( -1, Rresult );
2369 fb( eq, true, pn, done); delayed()->set( 0, Rresult );
2370 fb( gt, true, pn, done); delayed()->set( 1, Rresult );
2372 bind (done);
2373 }
2374 }
2377 void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
2378 {
2379 if (VM_Version::v9_instructions_work()) {
2380 Assembler::fneg(w, s, d);
2381 } else {
2382 if (w == FloatRegisterImpl::S) {
2383 Assembler::fneg(w, s, d);
2384 } else if (w == FloatRegisterImpl::D) {
2385 // number() does a sanity check on the alignment.
2386 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
2387 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
2389 Assembler::fneg(FloatRegisterImpl::S, s, d);
2390 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2391 } else {
2392 assert(w == FloatRegisterImpl::Q, "Invalid float register width");
2394 // number() does a sanity check on the alignment.
2395 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
2396 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
2398 Assembler::fneg(FloatRegisterImpl::S, s, d);
2399 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2400 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
2401 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
2402 }
2403 }
2404 }
2406 void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
2407 {
2408 if (VM_Version::v9_instructions_work()) {
2409 Assembler::fmov(w, s, d);
2410 } else {
2411 if (w == FloatRegisterImpl::S) {
2412 Assembler::fmov(w, s, d);
2413 } else if (w == FloatRegisterImpl::D) {
2414 // number() does a sanity check on the alignment.
2415 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
2416 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
2418 Assembler::fmov(FloatRegisterImpl::S, s, d);
2419 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2420 } else {
2421 assert(w == FloatRegisterImpl::Q, "Invalid float register width");
2423 // number() does a sanity check on the alignment.
2424 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
2425 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
2427 Assembler::fmov(FloatRegisterImpl::S, s, d);
2428 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2429 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
2430 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
2431 }
2432 }
2433 }
2435 void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
2436 {
2437 if (VM_Version::v9_instructions_work()) {
2438 Assembler::fabs(w, s, d);
2439 } else {
2440 if (w == FloatRegisterImpl::S) {
2441 Assembler::fabs(w, s, d);
2442 } else if (w == FloatRegisterImpl::D) {
2443 // number() does a sanity check on the alignment.
2444 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
2445 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
2447 Assembler::fabs(FloatRegisterImpl::S, s, d);
2448 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2449 } else {
2450 assert(w == FloatRegisterImpl::Q, "Invalid float register width");
2452 // number() does a sanity check on the alignment.
2453 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
2454 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
2456 Assembler::fabs(FloatRegisterImpl::S, s, d);
2457 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2458 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
2459 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
2460 }
2461 }
2462 }
2464 void MacroAssembler::save_all_globals_into_locals() {
2465 mov(G1,L1);
2466 mov(G2,L2);
2467 mov(G3,L3);
2468 mov(G4,L4);
2469 mov(G5,L5);
2470 mov(G6,L6);
2471 mov(G7,L7);
2472 }
2474 void MacroAssembler::restore_globals_from_locals() {
2475 mov(L1,G1);
2476 mov(L2,G2);
2477 mov(L3,G3);
2478 mov(L4,G4);
2479 mov(L5,G5);
2480 mov(L6,G6);
2481 mov(L7,G7);
2482 }
2484 // Use for 64 bit operation.
2485 void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
2486 {
2487 // store ptr_reg as the new top value
2488 #ifdef _LP64
2489 casx(top_ptr_reg, top_reg, ptr_reg);
2490 #else
2491 cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm);
2492 #endif // _LP64
2493 }
2495 // [RGV] This routine does not handle 64 bit operations.
2496 // use casx_under_lock() or casx directly!!!
2497 void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
2498 {
2499 // store ptr_reg as the new top value
2500 if (VM_Version::v9_instructions_work()) {
2501 cas(top_ptr_reg, top_reg, ptr_reg);
2502 } else {
2504 // If the register is not an out nor global, it is not visible
2505 // after the save. Allocate a register for it, save its
2506 // value in the register save area (the save may not flush
2507 // registers to the save area).
2509 Register top_ptr_reg_after_save;
2510 Register top_reg_after_save;
2511 Register ptr_reg_after_save;
2513 if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) {
2514 top_ptr_reg_after_save = top_ptr_reg->after_save();
2515 } else {
2516 Address reg_save_addr = top_ptr_reg->address_in_saved_window();
2517 top_ptr_reg_after_save = L0;
2518 st(top_ptr_reg, reg_save_addr);
2519 }
2521 if (top_reg->is_out() || top_reg->is_global()) {
2522 top_reg_after_save = top_reg->after_save();
2523 } else {
2524 Address reg_save_addr = top_reg->address_in_saved_window();
2525 top_reg_after_save = L1;
2526 st(top_reg, reg_save_addr);
2527 }
2529 if (ptr_reg->is_out() || ptr_reg->is_global()) {
2530 ptr_reg_after_save = ptr_reg->after_save();
2531 } else {
2532 Address reg_save_addr = ptr_reg->address_in_saved_window();
2533 ptr_reg_after_save = L2;
2534 st(ptr_reg, reg_save_addr);
2535 }
2537 const Register& lock_reg = L3;
2538 const Register& lock_ptr_reg = L4;
2539 const Register& value_reg = L5;
2540 const Register& yield_reg = L6;
2541 const Register& yieldall_reg = L7;
2543 save_frame();
2545 if (top_ptr_reg_after_save == L0) {
2546 ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save);
2547 }
2549 if (top_reg_after_save == L1) {
2550 ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save);
2551 }
2553 if (ptr_reg_after_save == L2) {
2554 ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save);
2555 }
2557 Label retry_get_lock;
2558 Label not_same;
2559 Label dont_yield;
2561 assert(lock_addr, "lock_address should be non null for v8");
2562 set((intptr_t)lock_addr, lock_ptr_reg);
2563 // Initialize yield counter
2564 mov(G0,yield_reg);
2565 mov(G0, yieldall_reg);
2566 set(StubRoutines::Sparc::locked, lock_reg);
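// Note: V8 has no compare-and-swap, so atomicity comes from a global spin
// lock.  The swap below exchanges lock_reg with the lock word; seeing
// 'unlocked' come back means we own the lock.  yield_reg counts failed
// attempts so the thread eventually yields instead of spinning forever.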
2568 bind(retry_get_lock);
2569 cmp(yield_reg, V8AtomicOperationUnderLockSpinCount);
2570 br(Assembler::less, false, Assembler::pt, dont_yield);
2571 delayed()->nop();
2573 if(use_call_vm) {
2574 Untested("Need to verify global reg consistency");
2575 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg);
2576 } else {
2577 // Save the regs and make space for a C call
2578 save(SP, -96, SP);
2579 save_all_globals_into_locals();
2580 call(CAST_FROM_FN_PTR(address,os::yield_all));
2581 delayed()->mov(yieldall_reg, O0);
2582 restore_globals_from_locals();
2583 restore();
2584 }
2586 // reset the counter
2587 mov(G0,yield_reg);
2588 add(yieldall_reg, 1, yieldall_reg);
2590 bind(dont_yield);
2591 // try to get lock
2592 swap(lock_ptr_reg, 0, lock_reg);
2594 // did we get the lock?
2595 cmp(lock_reg, StubRoutines::Sparc::unlocked);
2596 br(Assembler::notEqual, true, Assembler::pn, retry_get_lock);
2597 delayed()->add(yield_reg,1,yield_reg);
2599 // yes, got lock. do we have the same top?
2600 ld(top_ptr_reg_after_save, 0, value_reg);
2601 cmp(value_reg, top_reg_after_save);
2602 br(Assembler::notEqual, false, Assembler::pn, not_same);
2603 delayed()->nop();
2605 // yes, same top.
2606 st(ptr_reg_after_save, top_ptr_reg_after_save, 0);
2607 membar(Assembler::StoreStore);
2609 bind(not_same);
2610 mov(value_reg, ptr_reg_after_save);
2611 st(lock_reg, lock_ptr_reg, 0); // unlock
2613 restore();
2614 }
2615 }
2617 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
2618 Register tmp,
2619 int offset) {
2620 intptr_t value = *delayed_value_addr;
2621 if (value != 0)
2622 return RegisterOrConstant(value + offset);
2624 // load indirectly to solve generation ordering problem
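// Note: the cell at delayed_value_addr is typically still zero when this
// code is generated and is patched with the real value later, once the
// relevant classes have been loaded -- hence the load through memory here
// and the nonzero check under ASSERT below.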
2625 AddressLiteral a(delayed_value_addr);
2626 load_ptr_contents(a, tmp);
2628 #ifdef ASSERT
2629 tst(tmp);
2630 breakpoint_trap(zero, xcc);
2631 #endif
2633 if (offset != 0)
2634 add(tmp, offset, tmp);
2636 return RegisterOrConstant(tmp);
2637 }
2640 RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
2641 assert(d.register_or_noreg() != G0, "lost side effect");
2642 if ((s2.is_constant() && s2.as_constant() == 0) ||
2643 (s2.is_register() && s2.as_register() == G0)) {
2644 // Do nothing, just move value.
2645 if (s1.is_register()) {
2646 if (d.is_constant()) d = temp;
2647 mov(s1.as_register(), d.as_register());
2648 return d;
2649 } else {
2650 return s1;
2651 }
2652 }
2654 if (s1.is_register()) {
2655 assert_different_registers(s1.as_register(), temp);
2656 if (d.is_constant()) d = temp;
2657 andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
2658 return d;
2659 } else {
2660 if (s2.is_register()) {
2661 assert_different_registers(s2.as_register(), temp);
2662 if (d.is_constant()) d = temp;
2663 set(s1.as_constant(), temp);
2664 andn(temp, s2.as_register(), d.as_register());
2665 return d;
2666 } else {
2667 intptr_t res = s1.as_constant() & ~s2.as_constant();
2668 return res;
2669 }
2670 }
2671 }
2673 RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
2674 assert(d.register_or_noreg() != G0, "lost side effect");
2675 if ((s2.is_constant() && s2.as_constant() == 0) ||
2676 (s2.is_register() && s2.as_register() == G0)) {
2677 // Do nothing, just move value.
2678 if (s1.is_register()) {
2679 if (d.is_constant()) d = temp;
2680 mov(s1.as_register(), d.as_register());
2681 return d;
2682 } else {
2683 return s1;
2684 }
2685 }
2687 if (s1.is_register()) {
2688 assert_different_registers(s1.as_register(), temp);
2689 if (d.is_constant()) d = temp;
2690 add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
2691 return d;
2692 } else {
2693 if (s2.is_register()) {
2694 assert_different_registers(s2.as_register(), temp);
2695 if (d.is_constant()) d = temp;
2696 add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register());
2697 return d;
2698 } else {
2699 intptr_t res = s1.as_constant() + s2.as_constant();
2700 return res;
2701 }
2702 }
2703 }
2705 RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
2706 assert(d.register_or_noreg() != G0, "lost side effect");
2707 if (!is_simm13(s2.constant_or_zero()))
2708 s2 = (s2.as_constant() & 0xFF);
2709 if ((s2.is_constant() && s2.as_constant() == 0) ||
2710 (s2.is_register() && s2.as_register() == G0)) {
2711 // Do nothing, just move value.
2712 if (s1.is_register()) {
2713 if (d.is_constant()) d = temp;
2714 mov(s1.as_register(), d.as_register());
2715 return d;
2716 } else {
2717 return s1;
2718 }
2719 }
2721 if (s1.is_register()) {
2722 assert_different_registers(s1.as_register(), temp);
2723 if (d.is_constant()) d = temp;
2724 sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
2725 return d;
2726 } else {
2727 if (s2.is_register()) {
2728 assert_different_registers(s2.as_register(), temp);
2729 if (d.is_constant()) d = temp;
2730 set(s1.as_constant(), temp);
2731 sll_ptr(temp, s2.as_register(), d.as_register());
2732 return d;
2733 } else {
2734 intptr_t res = s1.as_constant() << s2.as_constant();
2735 return res;
2736 }
2737 }
2738 }
2741 // Look up the method for a megamorphic invokeinterface call.
2742 // The target method is determined by <intf_klass, itable_index>.
2743 // The receiver klass is in recv_klass.
2744 // On success, the result will be in method_result, and execution falls through.
2745 // On failure, execution transfers to the given label.
2746 void MacroAssembler::lookup_interface_method(Register recv_klass,
2747 Register intf_klass,
2748 RegisterOrConstant itable_index,
2749 Register method_result,
2750 Register scan_temp,
2751 Register sethi_temp,
2752 Label& L_no_such_interface) {
2753 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
2754 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
2755 "caller must use same register for non-constant itable index as for method");
2757 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
2758 int vtable_base = instanceKlass::vtable_start_offset() * wordSize;
2759 int scan_step = itableOffsetEntry::size() * wordSize;
2760 int vte_size = vtableEntry::size() * wordSize;
2762 lduw(recv_klass, instanceKlass::vtable_length_offset() * wordSize, scan_temp);
2763 // %%% We should store the aligned, prescaled offset in the klassOop.
2764 // Then the next several instructions would fold away.
2766 int round_to_unit = ((HeapWordsPerLong > 1) ? BytesPerLong : 0);
2767 int itb_offset = vtable_base;
2768 if (round_to_unit != 0) {
2769 // hoist first instruction of round_to(scan_temp, BytesPerLong):
2770 itb_offset += round_to_unit - wordSize;
2771 }
2772 int itb_scale = exact_log2(vtableEntry::size() * wordSize);
2773 sll(scan_temp, itb_scale, scan_temp);
2774 add(scan_temp, itb_offset, scan_temp);
2775 if (round_to_unit != 0) {
2776 // Round up to align_object_offset boundary
2777 // see code for instanceKlass::start_of_itable!
2778 // Was: round_to(scan_temp, BytesPerLong);
2779 // Hoisted: add(scan_temp, BytesPerLong-1, scan_temp);
2780 and3(scan_temp, -round_to_unit, scan_temp);
2781 }
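// Note: adding round_to_unit - wordSize (rather than the generic unit-1)
// before the and3 is sufficient because the scaled offset is always
// wordSize-aligned; for such values this equals the usual
// (x + unit-1) & -unit round-up.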
2782 add(recv_klass, scan_temp, scan_temp);
2784 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
2785 RegisterOrConstant itable_offset = itable_index;
2786 itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
2787 itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
2788 add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);
2790 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
2791 // if (scan->interface() == intf) {
2792 // result = (klass + scan->offset() + itable_index);
2793 // }
2794 // }
2795 Label search, found_method;
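// Note: the C++ loop below runs at code-generation time and emits the scan
// body twice.  The peel == 1 copy is the peeled first iteration, which
// falls through to found_method on a hit; the peel == 0 copy is the loop
// proper, entered at 'search'.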
2797 for (int peel = 1; peel >= 0; peel--) {
2798 // %%%% Could load both offset and interface in one ldx, if they were
2799 // in the opposite order. This would save a load.
2800 ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result);
2802 // Check that this entry is non-null. A null entry means that
2803 // the receiver class doesn't implement the interface, and wasn't the
2804 // same as when the caller was compiled.
2805 bpr(Assembler::rc_z, false, Assembler::pn, method_result, L_no_such_interface);
2806 delayed()->cmp(method_result, intf_klass);
2808 if (peel) {
2809 brx(Assembler::equal, false, Assembler::pt, found_method);
2810 } else {
2811 brx(Assembler::notEqual, false, Assembler::pn, search);
2812 // (invert the test to fall through to found_method...)
2813 }
2814 delayed()->add(scan_temp, scan_step, scan_temp);
2816 if (!peel) break;
2818 bind(search);
2819 }
2821 bind(found_method);
2823 // Got a hit.
2824 int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
2825 // scan_temp[-scan_step] points to the vtable offset we need
2826 ito_offset -= scan_step;
2827 lduw(scan_temp, ito_offset, scan_temp);
2828 ld_ptr(recv_klass, scan_temp, method_result);
2829 }
2832 void MacroAssembler::check_klass_subtype(Register sub_klass,
2833 Register super_klass,
2834 Register temp_reg,
2835 Register temp2_reg,
2836 Label& L_success) {
2837 Label L_failure, L_pop_to_failure;
2838 check_klass_subtype_fast_path(sub_klass, super_klass,
2839 temp_reg, temp2_reg,
2840 &L_success, &L_failure, NULL);
2841 Register sub_2 = sub_klass;
2842 Register sup_2 = super_klass;
2843 if (!sub_2->is_global()) sub_2 = L0;
2844 if (!sup_2->is_global()) sup_2 = L1;
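// Note: the slow path runs in a fresh register window, so unless the klass
// arguments already live in globals they are copied into L0/L1 of the new
// window; save_frame_and_mov performs the save and the moves together.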
2846 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
2847 check_klass_subtype_slow_path(sub_2, sup_2,
2848 L2, L3, L4, L5,
2849 NULL, &L_pop_to_failure);
2851 // on success:
2852 restore();
2853 ba(false, L_success);
2854 delayed()->nop();
2856 // on failure:
2857 bind(L_pop_to_failure);
2858 restore();
2859 bind(L_failure);
2860 }
2863 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
2864 Register super_klass,
2865 Register temp_reg,
2866 Register temp2_reg,
2867 Label* L_success,
2868 Label* L_failure,
2869 Label* L_slow_path,
2870 RegisterOrConstant super_check_offset,
2871 Register instanceof_hack) {
2872 int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
2873 Klass::secondary_super_cache_offset_in_bytes());
2874 int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
2875 Klass::super_check_offset_offset_in_bytes());
2877 bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
2878 bool need_slow_path = (must_load_sco ||
2879 super_check_offset.constant_or_zero() == sco_offset);
2881 assert_different_registers(sub_klass, super_klass, temp_reg);
2882 if (super_check_offset.is_register()) {
2883 assert_different_registers(sub_klass, super_klass, temp_reg,
2884 super_check_offset.as_register());
2885 } else if (must_load_sco) {
2886 assert(temp2_reg != noreg, "supply either a temp or a register offset");
2887 }
2889 Label L_fallthrough;
2890 int label_nulls = 0;
2891 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
2892 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
2893 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
2894 assert(label_nulls <= 1 || instanceof_hack != noreg ||
2895 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
2896 "at most one NULL in the batch, usually");
2898 // Support for the instanceof hack, which uses delay slots to
2899 // set a destination register to zero or one.
2900 bool do_bool_sets = (instanceof_hack != noreg);
2901 #define BOOL_SET(bool_value) \
2902 if (do_bool_sets && bool_value >= 0) \
2903 set(bool_value, instanceof_hack)
2904 #define DELAYED_BOOL_SET(bool_value) \
2905 if (do_bool_sets && bool_value >= 0) \
2906 delayed()->set(bool_value, instanceof_hack); \
2907 else delayed()->nop()
2908 // Hacked ba(), which may only be used just before L_fallthrough.
2909 #define FINAL_JUMP(label, bool_value) \
2910 if (&(label) == &L_fallthrough) { \
2911 BOOL_SET(bool_value); \
2912 } else { \
2913 ba((do_bool_sets && bool_value >= 0), label); \
2914 DELAYED_BOOL_SET(bool_value); \
2915 }
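// Note: when bool_value is -1 these macros degenerate gracefully --
// BOOL_SET emits nothing, DELAYED_BOOL_SET emits a plain delay-slot nop,
// and FINAL_JUMP becomes an ordinary ba -- so one control-flow skeleton
// serves both the instanceof expansion and the plain subtype check.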
2917 // If the pointers are equal, we are done (e.g., String[] elements).
2918 // This self-check enables sharing of secondary supertype arrays among
2919 // non-primary types such as array-of-interface. Otherwise, each such
2920 // type would need its own customized SSA.
2921 // We move this check to the front of the fast path because many
2922 // type checks are in fact trivially successful in this manner,
2923 // so we get a nicely predicted branch right at the start of the check.
2924 cmp(super_klass, sub_klass);
2925 brx(Assembler::equal, do_bool_sets, Assembler::pn, *L_success);
2926 DELAYED_BOOL_SET(1);
2928 // Check the supertype display:
2929 if (must_load_sco) {
2930 // The super check offset is always positive...
2931 lduw(super_klass, sco_offset, temp2_reg);
2932 super_check_offset = RegisterOrConstant(temp2_reg);
2933 // super_check_offset is register.
2934 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register());
2935 }
2936 ld_ptr(sub_klass, super_check_offset, temp_reg);
2937 cmp(super_klass, temp_reg);
2939 // This check has worked decisively for primary supers.
2940 // Secondary supers are sought in the super_cache ('super_cache_addr').
2941 // (Secondary supers are interfaces and very deeply nested subtypes.)
2942 // This works in the same check above because of a tricky aliasing
2943 // between the super_cache and the primary super display elements.
2944 // (The 'super_check_addr' can address either, as the case requires.)
2945 // Note that the cache is updated below if it does not help us find
2946 // what we need immediately.
2947 // So if it was a primary super, we can just fail immediately.
2948 // Otherwise, it's the slow path for us (no success at this point).
2950 if (super_check_offset.is_register()) {
2951 brx(Assembler::equal, do_bool_sets, Assembler::pn, *L_success);
2952 delayed(); if (do_bool_sets) BOOL_SET(1);
2953 // if !do_bool_sets, sneak the next cmp into the delay slot:
2954 cmp(super_check_offset.as_register(), sc_offset);
2956 if (L_failure == &L_fallthrough) {
2957 brx(Assembler::equal, do_bool_sets, Assembler::pt, *L_slow_path);
2958 delayed()->nop();
2959 BOOL_SET(0); // fallthrough on failure
2960 } else {
2961 brx(Assembler::notEqual, do_bool_sets, Assembler::pn, *L_failure);
2962 DELAYED_BOOL_SET(0);
2963 FINAL_JUMP(*L_slow_path, -1); // -1 => vanilla delay slot
2964 }
2965 } else if (super_check_offset.as_constant() == sc_offset) {
2966 // Need a slow path; fast failure is impossible.
2967 if (L_slow_path == &L_fallthrough) {
2968 brx(Assembler::equal, do_bool_sets, Assembler::pt, *L_success);
2969 DELAYED_BOOL_SET(1);
2970 } else {
2971 brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path);
2972 delayed()->nop();
2973 FINAL_JUMP(*L_success, 1);
2974 }
2975 } else {
2976 // No slow path; it's a fast decision.
2977 if (L_failure == &L_fallthrough) {
2978 brx(Assembler::equal, do_bool_sets, Assembler::pt, *L_success);
2979 DELAYED_BOOL_SET(1);
2980 BOOL_SET(0);
2981 } else {
2982 brx(Assembler::notEqual, do_bool_sets, Assembler::pn, *L_failure);
2983 DELAYED_BOOL_SET(0);
2984 FINAL_JUMP(*L_success, 1);
2985 }
2986 }
2988 bind(L_fallthrough);
2990 #undef BOOL_SET
2991 #undef DELAYED_BOOL_SET
2992 #undef FINAL_JUMP
2994 }
2997 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
2998 Register super_klass,
2999 Register count_temp,
3000 Register scan_temp,
3001 Register scratch_reg,
3002 Register coop_reg,
3003 Label* L_success,
3004 Label* L_failure) {
3005 assert_different_registers(sub_klass, super_klass,
3006 count_temp, scan_temp, scratch_reg, coop_reg);
3008 Label L_fallthrough, L_loop;
3009 int label_nulls = 0;
3010 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
3011 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
3012 assert(label_nulls <= 1, "at most one NULL in the batch");
3014 // a couple of useful fields in sub_klass:
3015 int ss_offset = (klassOopDesc::header_size() * HeapWordSize +
3016 Klass::secondary_supers_offset_in_bytes());
3017 int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
3018 Klass::secondary_super_cache_offset_in_bytes());
3020 // Do a linear scan of the secondary super-klass chain.
3021 // This code is rarely used, so simplicity is a virtue here.
3023 #ifndef PRODUCT
3024 int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
3025 inc_counter((address) pst_counter, count_temp, scan_temp);
3026 #endif
3028 // We will consult the secondary-super array.
3029 ld_ptr(sub_klass, ss_offset, scan_temp);
3031 // Compress superclass if necessary.
3032 Register search_key = super_klass;
3033 bool decode_super_klass = false;
3034 if (UseCompressedOops) {
3035 if (coop_reg != noreg) {
3036 encode_heap_oop_not_null(super_klass, coop_reg);
3037 search_key = coop_reg;
3038 } else {
3039 encode_heap_oop_not_null(super_klass);
3040 decode_super_klass = true; // scarce temps!
3041 }
3042 // The superclass is never null; it would be a basic system error if a null
3043 // pointer were to sneak in here. Note that we have already loaded the
3044 // Klass::super_check_offset from the super_klass in the fast path,
3045 // so if there is a null in that register, we are already in the afterlife.
3046 }
3048 // Load the array length. (lduw zero-extends, so it does the right thing on LP64.)
3049 lduw(scan_temp, arrayOopDesc::length_offset_in_bytes(), count_temp);
3051 // Check for empty secondary super list
3052 tst(count_temp);
3054 // Top of search loop
3055 bind(L_loop);
3056 br(Assembler::equal, false, Assembler::pn, *L_failure);
3057 delayed()->add(scan_temp, heapOopSize, scan_temp);
3058 assert(heapOopSize != 0, "heapOopSize should be initialized");
3060 // Skip the array header in all array accesses.
3061 int elem_offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
3062 elem_offset -= heapOopSize; // the scan pointer was pre-incremented also
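// Note: the delay slot of the loop branch above always executes
// add(scan_temp, heapOopSize, scan_temp), so the scan pointer has been
// bumped before each element is loaded; the elem_offset adjustment
// compensates for that pre-increment.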
3064 // Load next super to check
3065 if (UseCompressedOops) {
3066 // Don't use load_heap_oop; we don't want to decode the element.
3067 lduw( scan_temp, elem_offset, scratch_reg );
3068 } else {
3069 ld_ptr( scan_temp, elem_offset, scratch_reg );
3070 }
3072 // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
3073 cmp(scratch_reg, search_key);
3075 // A miss means we are NOT a subtype and need to keep looping
3076 brx(Assembler::notEqual, false, Assembler::pn, L_loop);
3077 delayed()->deccc(count_temp); // decrement trip counter in delay slot
3079 // Falling out the bottom means we found a hit; we ARE a subtype
3080 if (decode_super_klass) decode_heap_oop(super_klass);
3082 // Success. Cache the super we found and proceed in triumph.
3083 st_ptr(super_klass, sub_klass, sc_offset);
3085 if (L_success != &L_fallthrough) {
3086 ba(false, *L_success);
3087 delayed()->nop();
3088 }
3090 bind(L_fallthrough);
3091 }
3094 void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
3095 Register temp_reg,
3096 Label& wrong_method_type) {
3097 assert_different_registers(mtype_reg, mh_reg, temp_reg);
3098 // compare method type against that of the receiver
3099 RegisterOrConstant mhtype_offset = delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg);
3100 load_heap_oop(mh_reg, mhtype_offset, temp_reg);
3101 cmp(temp_reg, mtype_reg);
3102 br(Assembler::notEqual, false, Assembler::pn, wrong_method_type);
3103 delayed()->nop();
3104 }
3107 // A method handle has a "vmslots" field which gives the size of its
3108 // argument list in JVM stack slots. This field is either located directly
3109 // in every method handle, or else is indirectly accessed through the
3110 // method handle's MethodType. This macro hides the distinction.
3111 void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
3112 Register temp_reg) {
3113 assert_different_registers(vmslots_reg, mh_reg, temp_reg);
3114 // load mh.type.form.vmslots
3115 if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
3116 // hoist vmslots into every mh to avoid dependent load chain
3117 ld( Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
3118 } else {
3119 Register temp2_reg = vmslots_reg;
3120 load_heap_oop(Address(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)), temp2_reg);
3121 load_heap_oop(Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)), temp2_reg);
3122 ld( Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
3123 }
3124 }
3127 void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop) {
3128 assert(mh_reg == G3_method_handle, "caller must put MH object in G3");
3129 assert_different_registers(mh_reg, temp_reg);
3131 // pick out the interpreted side of the handler
3132 // NOTE: vmentry is not an oop!
3133 ld_ptr(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg);
3135 // off we go...
3136 ld_ptr(temp_reg, MethodHandleEntry::from_interpreted_entry_offset_in_bytes(), temp_reg);
3137 jmp(temp_reg, 0);
3139 // for the various stubs which take control at this point,
3140 // see MethodHandles::generate_method_handle_stub
3142 // Some callers can fill the delay slot.
3143 if (emit_delayed_nop) {
3144 delayed()->nop();
3145 }
3146 }
3149 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
3150 int extra_slot_offset) {
3151 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
3152 int stackElementSize = Interpreter::stackElementSize;
3153 int offset = extra_slot_offset * stackElementSize;
3154 if (arg_slot.is_constant()) {
3155 offset += arg_slot.as_constant() * stackElementSize;
3156 return offset;
3157 } else {
3158 Register temp = arg_slot.as_register();
3159 sll_ptr(temp, exact_log2(stackElementSize), temp);
3160 if (offset != 0)
3161 add(temp, offset, temp);
3162 return temp;
3163 }
3164 }
3167 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
3168 int extra_slot_offset) {
3169 return Address(Gargs, argument_offset(arg_slot, extra_slot_offset));
3170 }
3173 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
3174 Register temp_reg,
3175 Label& done, Label* slow_case,
3176 BiasedLockingCounters* counters) {
3177 assert(UseBiasedLocking, "why call this otherwise?");
3179 if (PrintBiasedLockingStatistics) {
3180 assert_different_registers(obj_reg, mark_reg, temp_reg, O7);
3181 if (counters == NULL)
3182 counters = BiasedLocking::counters();
3183 }
3185 Label cas_label;
3187 // Biased locking
3188 // See whether the lock is currently biased toward our thread and
3189 // whether the epoch is still valid
3190 // Note that the runtime guarantees sufficient alignment of JavaThread
3191 // pointers to allow age to be placed into low bits
3192 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
3193 and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
3194 cmp(temp_reg, markOopDesc::biased_lock_pattern);
3195 brx(Assembler::notEqual, false, Assembler::pn, cas_label);
3196 delayed()->nop();
3198 load_klass(obj_reg, temp_reg);
3199 ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
3200 or3(G2_thread, temp_reg, temp_reg);
3201 xor3(mark_reg, temp_reg, temp_reg);
3202 andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
3203 if (counters != NULL) {
3204 cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
3205 // Reload mark_reg as we may need it later
3206 ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg);
3207 }
3208 brx(Assembler::equal, true, Assembler::pt, done);
3209 delayed()->nop();
3211 Label try_revoke_bias;
3212 Label try_rebias;
3213 Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
3214 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
3216 // At this point we know that the header has the bias pattern and
3217 // that we are not the bias owner in the current epoch. We need to
3218 // figure out more details about the state of the header in order to
3219 // know what operations can be legally performed on the object's
3220 // header.
3222 // If the low three bits in the xor result aren't clear, that means
3223 // the prototype header is no longer biased and we have to revoke
3224 // the bias on this object.
3225 btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
3226 brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);
3228 // Biasing is still enabled for this data type. See whether the
3229 // epoch of the current bias is still valid, meaning that the epoch
3230 // bits of the mark word are equal to the epoch bits of the
3231 // prototype header. (Note that the prototype header's epoch bits
3232 // only change at a safepoint.) If not, attempt to rebias the object
3233 // toward the current thread. Note that we must be absolutely sure
3234 // that the current epoch is invalid in order to do this because
3235 // otherwise the manipulations it performs on the mark word are
3236 // illegal.
3237 delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
3238 brx(Assembler::notZero, false, Assembler::pn, try_rebias);
3240 // The epoch of the current bias is still valid but we know nothing
3241 // about the owner; it might be set or it might be clear. Try to
3242 // acquire the bias of the object using an atomic operation. If this
3243 // fails we will go in to the runtime to revoke the object's bias.
3244 // Note that we first construct the presumed unbiased header so we
3245 // don't accidentally blow away another thread's valid bias.
3246 delayed()->and3(mark_reg,
3247 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
3248 mark_reg);
3249 or3(G2_thread, mark_reg, temp_reg);
3250 casn(mark_addr.base(), mark_reg, temp_reg);
3251 // If the biasing toward our thread failed, this means that
3252 // another thread succeeded in biasing it toward itself and we
3253 // need to revoke that bias. The revocation will occur in the
3254 // interpreter runtime in the slow case.
3255 cmp(mark_reg, temp_reg);
3256 if (counters != NULL) {
3257 cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg);
3258 }
3259 if (slow_case != NULL) {
3260 brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
3261 delayed()->nop();
3262 }
3263 br(Assembler::always, false, Assembler::pt, done);
3264 delayed()->nop();
3266 bind(try_rebias);
3267 // At this point we know the epoch has expired, meaning that the
3268 // current "bias owner", if any, is actually invalid. Under these
3269 // circumstances _only_, we are allowed to use the current header's
3270 // value as the comparison value when doing the cas to acquire the
3271 // bias in the current epoch. In other words, we allow transfer of
3272 // the bias from one thread to another directly in this situation.
3273 //
3274 // FIXME: due to a lack of registers we currently blow away the age
3275 // bits in this situation. Should attempt to preserve them.
3276 load_klass(obj_reg, temp_reg);
3277 ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
3278 or3(G2_thread, temp_reg, temp_reg);
3279 casn(mark_addr.base(), mark_reg, temp_reg);
3280 // If the biasing toward our thread failed, this means that
3281 // another thread succeeded in biasing it toward itself and we
3282 // need to revoke that bias. The revocation will occur in the
3283 // interpreter runtime in the slow case.
3284 cmp(mark_reg, temp_reg);
3285 if (counters != NULL) {
3286 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg);
3287 }
3288 if (slow_case != NULL) {
3289 brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
3290 delayed()->nop();
3291 }
3292 br(Assembler::always, false, Assembler::pt, done);
3293 delayed()->nop();
3295 bind(try_revoke_bias);
3296 // The prototype mark in the klass doesn't have the bias bit set any
3297 // more, indicating that objects of this data type are not supposed
3298 // to be biased any more. We are going to try to reset the mark of
3299 // this object to the prototype value and fall through to the
3300 // CAS-based locking scheme. Note that if our CAS fails, it means
3301 // that another thread raced us for the privilege of revoking the
3302 // bias of this particular object, so it's okay to continue in the
3303 // normal locking code.
3304 //
3305 // FIXME: due to a lack of registers we currently blow away the age
3306 // bits in this situation. Should attempt to preserve them.
3307 load_klass(obj_reg, temp_reg);
3308 ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
3309 casn(mark_addr.base(), mark_reg, temp_reg);
3310 // Fall through to the normal CAS-based lock, because no matter what
3311 // the result of the above CAS, some thread must have succeeded in
3312 // removing the bias bit from the object's header.
3313 if (counters != NULL) {
3314 cmp(mark_reg, temp_reg);
3315 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg);
3316 }
3318 bind(cas_label);
3319 }
3321 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done,
3322 bool allow_delay_slot_filling) {
3323 // Check for biased locking unlock case, which is a no-op
3324 // Note: we do not have to check the thread ID for two reasons.
3325 // First, the interpreter checks for IllegalMonitorStateException at
3326 // a higher level. Second, if the bias was revoked while we held the
3327 // lock, the object could not be rebiased toward another thread, so
3328 // the bias bit would be clear.
3329 ld_ptr(mark_addr, temp_reg);
3330 and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
3331 cmp(temp_reg, markOopDesc::biased_lock_pattern);
3332 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done);
3333 delayed();
3334 if (!allow_delay_slot_filling) {
3335 nop();
3336 }
3337 }
3340 // CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by
3341 // Solaris/SPARC's "as". Another apt name would be cas_ptr()
3343 void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) {
3344 casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()) ;
3345 }
3349 // compiler_lock_object() and compiler_unlock_object() are direct transliterations
3350 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments.
3351 // The code could be tightened up considerably.
3352 //
3353 // box->dhw disposition - post-conditions at DONE_LABEL.
3354 // - Successful inflated lock: box->dhw != 0.
3355 // Any non-zero value suffices.
3356 // Consider G2_thread, SP, boxReg, or unused_mark()
3357 // - Successful Stack-lock: box->dhw == mark.
3358 // box->dhw must contain the displaced mark word value
3359 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined.
3360 // The slow-path fast_enter() and slow_enter() operators
3361 // are responsible for setting box->dhw = NonZero (typically ::unused_mark).
3362 // - Biased: box->dhw is undefined
3363 //
3364 // SPARC refworkload performance - specifically jetstream and scimark - is
3365 // extremely sensitive to the size of the code emitted by compiler_lock_object
3366 // and compiler_unlock_object.  Critically, the key factor is code size, not path
3367 // length.  (Simple experiments to pad CLO with unexecuted NOPs demonstrate the
3368 // effect.)
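// Note: EmitSync is a diagnostic flag; its bits select alternative
// (generally simpler or older) code shapes for lock and unlock, which is
// why several independent early-return variants appear below.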
3371 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
3372 Register Rbox, Register Rscratch,
3373 BiasedLockingCounters* counters,
3374 bool try_bias) {
3375 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
3377 verify_oop(Roop);
3378 Label done ;
3380 if (counters != NULL) {
3381 inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
3382 }
3384 if (EmitSync & 1) {
3385 mov (3, Rscratch) ;
3386 st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
3387 cmp (SP, G0) ;
3388 return ;
3389 }
3391 if (EmitSync & 2) {
3393 // Fetch object's markword
3394 ld_ptr(mark_addr, Rmark);
3396 if (try_bias) {
3397 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
3398 }
3400 // Save Rbox in Rscratch to be used for the cas operation
3401 mov(Rbox, Rscratch);
3403 // set Rmark to markOop | markOopDesc::unlocked_value
3404 or3(Rmark, markOopDesc::unlocked_value, Rmark);
3406 // Initialize the box. (Must happen before we update the object mark!)
3407 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
3409 // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
3410 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
3411 casx_under_lock(mark_addr.base(), Rmark, Rscratch,
3412 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3414 // if compare/exchange succeeded we found an unlocked object and we now have locked it
3415 // hence we are done
3416 cmp(Rmark, Rscratch);
3417 #ifdef _LP64
3418 sub(Rscratch, STACK_BIAS, Rscratch);
3419 #endif
3420 brx(Assembler::equal, false, Assembler::pt, done);
3421 delayed()->sub(Rscratch, SP, Rscratch); //pull next instruction into delay slot
3423 // we did not find an unlocked object so see if this is a recursive case
3424 // sub(Rscratch, SP, Rscratch);
3425 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
3426 andcc(Rscratch, 0xfffff003, Rscratch);
3427 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
3428 bind (done) ;
3429 return ;
3430 }
3432 Label Egress ;
3434 if (EmitSync & 256) {
3435 Label IsInflated ;
3437 ld_ptr (mark_addr, Rmark); // fetch obj->mark
3438 // Triage: biased, stack-locked, neutral, inflated
3439 if (try_bias) {
3440 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
3441 // Invariant: if control reaches this point in the emitted stream
3442 // then Rmark has not been modified.
3443 }
3445 // Store mark into displaced mark field in the on-stack basic-lock "box"
3446 // Critically, this must happen before the CAS
3447 // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
3448 st_ptr (Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
3449 andcc (Rmark, 2, G0) ;
3450 brx (Assembler::notZero, false, Assembler::pn, IsInflated) ;
3451 delayed() ->
3453 // Try stack-lock acquisition.
3454 // Beware: the 1st instruction is in a delay slot
3455 mov (Rbox, Rscratch);
3456 or3 (Rmark, markOopDesc::unlocked_value, Rmark);
3457 assert (mark_addr.disp() == 0, "cas must take a zero displacement");
3458 casn (mark_addr.base(), Rmark, Rscratch) ;
3459 cmp (Rmark, Rscratch);
3460 brx (Assembler::equal, false, Assembler::pt, done);
3461 delayed()->sub(Rscratch, SP, Rscratch);
3463 // Stack-lock attempt failed - check for recursive stack-lock.
3464 // See the comments below about how we might remove this case.
3465 #ifdef _LP64
3466 sub (Rscratch, STACK_BIAS, Rscratch);
3467 #endif
3468 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
3469 andcc (Rscratch, 0xfffff003, Rscratch);
3470 br (Assembler::always, false, Assembler::pt, done) ;
3471 delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
3473 bind (IsInflated) ;
3474 if (EmitSync & 64) {
3475 // If m->owner != null goto IsLocked
3476 // Pessimistic form: Test-and-CAS vs CAS
3477 // The optimistic form avoids RTS->RTO cache line upgrades.
3478 ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
3479 andcc (Rscratch, Rscratch, G0) ;
3480 brx (Assembler::notZero, false, Assembler::pn, done) ;
3481 delayed()->nop() ;
3482 // m->owner == null : it's unlocked.
3483 }
3485 // Try to CAS m->owner from null to Self
3486 // Invariant: if we acquire the lock then _recursions should be 0.
3487 add (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ;
3488 mov (G2_thread, Rscratch) ;
3489 casn (Rmark, G0, Rscratch) ;
3490 cmp (Rscratch, G0) ;
3491 // Intentional fall-through into done
3492 } else {
3493 // Aggressively avoid the Store-before-CAS penalty
3494 // Defer the store into box->dhw until after the CAS
3495 Label IsInflated, Recursive ;
3497 // Anticipate CAS -- Avoid RTS->RTO upgrade
3498 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ;
3500 ld_ptr (mark_addr, Rmark); // fetch obj->mark
3501 // Triage: biased, stack-locked, neutral, inflated
3503 if (try_bias) {
3504 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
3505 // Invariant: if control reaches this point in the emitted stream
3506 // then Rmark has not been modified.
3507 }
3508 andcc (Rmark, 2, G0) ;
3509 brx (Assembler::notZero, false, Assembler::pn, IsInflated) ;
3510 delayed()-> // Beware - dangling delay-slot
3512 // Try stack-lock acquisition.
3513 // Transiently install BUSY (0) encoding in the mark word.
3514 // if the CAS of 0 into the mark was successful then we execute:
3515 // ST box->dhw = mark -- save fetched mark in on-stack basiclock box
3516 // ST obj->mark = box -- overwrite transient 0 value
3517 // This presumes TSO, of course.
3519 mov (0, Rscratch) ;
3520 or3 (Rmark, markOopDesc::unlocked_value, Rmark);
3521 assert (mark_addr.disp() == 0, "cas must take a zero displacement");
3522 casn (mark_addr.base(), Rmark, Rscratch) ;
3523 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ;
3524 cmp (Rscratch, Rmark) ;
3525 brx (Assembler::notZero, false, Assembler::pn, Recursive) ;
3526 delayed() ->
3527 st_ptr (Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
3528 if (counters != NULL) {
3529 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
3530 }
3531 br (Assembler::always, false, Assembler::pt, done);
3532 delayed() ->
3533 st_ptr (Rbox, mark_addr) ;
3535 bind (Recursive) ;
3536 // Stack-lock attempt failed - check for recursive stack-lock.
3537 // Tests show that we can remove the recursive case with no impact
3538 // on refworkload 0.83. If we need to reduce the size of the code
3539 // emitted by compiler_lock_object() the recursive case is a perfect
3540 // candidate.
3541 //
3542 // A more extreme idea is to always inflate on stack-lock recursion.
3543 // This lets us eliminate the recursive checks in compiler_lock_object
3544 // and compiler_unlock_object and the (box->dhw == 0) encoding.
3545 // A brief experiment - requiring changes to synchronizer.cpp and the
3546 // interpreter - showed a performance *increase*. In the same experiment I eliminated
3547 // the fast-path stack-lock code from the interpreter and always passed
3548 // control to the "slow" operators in synchronizer.cpp.
3550 // RScratch contains the fetched obj->mark value from the failed CASN.
3551 #ifdef _LP64
3552 sub (Rscratch, STACK_BIAS, Rscratch);
3553 #endif
3554 sub(Rscratch, SP, Rscratch);
3555 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
3556 andcc (Rscratch, 0xfffff003, Rscratch);
3557 if (counters != NULL) {
3558 // Accounting needs the Rscratch register
3559 st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
3560 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
3561 br (Assembler::always, false, Assembler::pt, done) ;
3562 delayed()->nop() ;
3563 } else {
3564 br (Assembler::always, false, Assembler::pt, done) ;
3565 delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
3566 }
3568 bind (IsInflated) ;
3569 if (EmitSync & 64) {
3570 // If m->owner != null goto IsLocked
3571 // Test-and-CAS vs CAS
3572 // Pessimistic form avoids futile (doomed) CAS attempts
3573 // The optimistic form avoids RTS->RTO cache line upgrades.
3574 ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
3575 andcc (Rscratch, Rscratch, G0) ;
3576 brx (Assembler::notZero, false, Assembler::pn, done) ;
3577 delayed()->nop() ;
3578 // m->owner == null : it's unlocked.
3579 }
3581 // Try to CAS m->owner from null to Self
3582 // Invariant: if we acquire the lock then _recursions should be 0.
3583 add (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ;
3584 mov (G2_thread, Rscratch) ;
3585 casn (Rmark, G0, Rscratch) ;
3586 cmp (Rscratch, G0) ;
3587 // ST box->displaced_header = NonZero.
3588 // Any non-zero value suffices:
3589 // unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
3590 st_ptr (Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
3591 // Intentional fall-through into done
3592 }
3594 bind (done) ;
3595 }
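// The recursive stack-lock test used in the paths above, as a C-like
// sketch: a fetched mark that points into the current thread's stack
// (within one page of SP) denotes our own earlier stack-lock, in which
// case a 0 is stored into box->dhw to mark the recursion:
//
//   intptr_t delta = fetched_mark - SP;            // minus STACK_BIAS on LP64
//   bool recursive = (delta & 0xfffff003) == 0;    // page bits + low lock bits
//
// which is exactly what the andcc against 0xfffff003 computes, given the
// asserted page size of at least 0x1000.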
3597 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
3598 Register Rbox, Register Rscratch,
3599 bool try_bias) {
3600 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
3602 Label done ;
3604 if (EmitSync & 4) {
3605 cmp (SP, G0) ;
3606 return ;
3607 }
3609 if (EmitSync & 8) {
3610 if (try_bias) {
3611 biased_locking_exit(mark_addr, Rscratch, done);
3612 }
3614 // Test first if it is a fast recursive unlock
3615 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
3616 cmp(Rmark, G0);
3617 brx(Assembler::equal, false, Assembler::pt, done);
3618 delayed()->nop();
3620 // Check if it is still a lightweight lock; this is true if we see
3621 // the stack address of the basicLock in the markOop of the object
3622 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
3623 casx_under_lock(mark_addr.base(), Rbox, Rmark,
3624 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3625 br (Assembler::always, false, Assembler::pt, done);
3626 delayed()->cmp(Rbox, Rmark);
3627 bind (done) ;
3628 return ;
3629 }
3631 // Beware ... If the aggregate size of the code emitted by CLO and CUO
3632 // is too large, performance rolls abruptly off a cliff.
3633 // This could be related to inlining policies, code cache management, or
3634 // I$ effects.
3635 Label LStacked ;
3637 if (try_bias) {
3638 // TODO: eliminate redundant LDs of obj->mark
3639 biased_locking_exit(mark_addr, Rscratch, done);
3640 }
3642 ld_ptr (Roop, oopDesc::mark_offset_in_bytes(), Rmark) ;
3643 ld_ptr (Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch);
3644 andcc (Rscratch, Rscratch, G0);
3645 brx (Assembler::zero, false, Assembler::pn, done);
3646 delayed()-> nop() ; // consider: relocate fetch of mark, above, into this DS
3647 andcc (Rmark, 2, G0) ;
3648 brx (Assembler::zero, false, Assembler::pt, LStacked) ;
3649 delayed()-> nop() ;
3651 // It's inflated
3652 // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
3653 // the ST of 0 into _owner which releases the lock. This prevents loads
3654 // and stores within the critical section from reordering (floating)
3655 // past the store that releases the lock. But TSO is a strong memory model
3656 // and that particular flavor of barrier is a noop, so we can safely elide it.
3657 // Note that we use 1-0 locking by default for the inflated case. We
3658 // close the resultant (and rare) race by having contended threads in
3659 // monitorenter periodically poll _owner.
3660 ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
3661 ld_ptr (Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox);
3662 xor3 (Rscratch, G2_thread, Rscratch) ;
3663 orcc (Rbox, Rscratch, Rbox) ;
3664 brx (Assembler::notZero, false, Assembler::pn, done) ;
3665 delayed()->
3666 ld_ptr (Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch);
3667 ld_ptr (Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox);
3668 orcc (Rbox, Rscratch, G0) ;
3669 if (EmitSync & 65536) {
3670 Label LSucc ;
3671 brx (Assembler::notZero, false, Assembler::pn, LSucc) ;
3672 delayed()->nop() ;
3673 br (Assembler::always, false, Assembler::pt, done) ;
3674 delayed()->
3675 st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
3677 bind (LSucc) ;
3678 st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
3679 if (os::is_MP()) { membar (StoreLoad) ; }
3680 ld_ptr (Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch);
3681 andcc (Rscratch, Rscratch, G0) ;
3682 brx (Assembler::notZero, false, Assembler::pt, done) ;
3683 delayed()-> andcc (G0, G0, G0) ;
3684 add (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ;
3685 mov (G2_thread, Rscratch) ;
3686 casn (Rmark, G0, Rscratch) ;
3687 cmp (Rscratch, G0) ;
3688 // invert icc.zf and goto done
3689 brx (Assembler::notZero, false, Assembler::pt, done) ;
3690 delayed() -> cmp (G0, G0) ;
3691 br (Assembler::always, false, Assembler::pt, done);
3692 delayed() -> cmp (G0, 1) ;
3693 } else {
3694 brx (Assembler::notZero, false, Assembler::pn, done) ;
3695 delayed()->nop() ;
3696 br (Assembler::always, false, Assembler::pt, done) ;
3697 delayed()->
3698 st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
3699 }
3701 bind (LStacked) ;
3702 // Consider: we could replace the expensive CAS in the exit
3703 // path with a simple ST of the displaced mark value fetched from
3704 // the on-stack basiclock box. That admits a race where a thread T2
3705 // in the slow lock path -- inflating with monitor M -- could race a
3706 // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
3707 // More precisely T1 in the stack-lock unlock path could "stomp" the
3708 // inflated mark value M installed by T2, resulting in an orphan
3709 // object monitor M and T2 becoming stranded. We can remedy that situation
3710 // by having T2 periodically poll the object's mark word using timed wait
3711 // operations. If T2 discovers that a stomp has occurred it vacates
3712 // the monitor M and wakes any other threads stranded on the now-orphan M.
3713 // In addition the monitor scavenger, which performs deflation,
3714 // would also need to check for orphan monitors and stranded threads.
3715 //
3716 // Finally, inflation is also used when T2 needs to assign a hashCode
3717 // to O and O is stack-locked by T1. The "stomp" race could cause
3718 // an assigned hashCode value to be lost. We can avoid that condition
3719 // and provide the necessary hashCode stability invariants by ensuring
3720 // that hashCode generation is idempotent between copying GCs.
3721 // For example we could compute the hashCode of an object O as
3722 // O's heap address XOR some high quality RNG value that is refreshed
3723 // at GC-time. The monitor scavenger would install the hashCode
3724 // found in any orphan monitors. Again, the mechanism admits a
3725 // lost-update "stomp" WAW race but detects and recovers as needed.
3726 //
3727 // A prototype implementation showed excellent results, although
3728 // the scavenger and timeout code was rather involved.
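//
// The GC-idempotent hashCode proposal above, sketched with hypothetical
// names (nothing below exists in the VM today):
//
//   static intptr_t gc_epoch_seed;                 // refreshed at GC-time
//   intptr_t compute_hash(oop o) { return (intptr_t)o ^ gc_epoch_seed; }
//
// Any two threads racing to install a hash between GCs would then derive
// the same value, rendering the WAW "stomp" harmless for hashCodes.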
3730 casn (mark_addr.base(), Rbox, Rscratch) ;
3731 cmp (Rbox, Rscratch);
3732 // Intentional fall through into done ...
3734 bind (done) ;
3735 }
3739 void MacroAssembler::print_CPU_state() {
3740 // %%%%% need to implement this
3741 }
3743 void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
3744 // %%%%% need to implement this
3745 }
3747 void MacroAssembler::push_IU_state() {
3748 // %%%%% need to implement this
3749 }
3752 void MacroAssembler::pop_IU_state() {
3753 // %%%%% need to implement this
3754 }
3757 void MacroAssembler::push_FPU_state() {
3758 // %%%%% need to implement this
3759 }
3762 void MacroAssembler::pop_FPU_state() {
3763 // %%%%% need to implement this
3764 }
3767 void MacroAssembler::push_CPU_state() {
3768 // %%%%% need to implement this
3769 }
3772 void MacroAssembler::pop_CPU_state() {
3773 // %%%%% need to implement this
3774 }
3778 void MacroAssembler::verify_tlab() {
3779 #ifdef ASSERT
3780 if (UseTLAB && VerifyOops) {
3781 Label next, next2, ok;
3782 Register t1 = L0;
3783 Register t2 = L1;
3784 Register t3 = L2;
3786 save_frame(0);
3787 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
3788 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2);
3789 or3(t1, t2, t3);
3790 cmp(t1, t2);
3791 br(Assembler::greaterEqual, false, Assembler::pn, next);
3792 delayed()->nop();
3793 stop("assert(top >= start)");
3794 should_not_reach_here();
3796 bind(next);
3797 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
3798 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2);
3799 or3(t3, t2, t3);
3800 cmp(t1, t2);
3801 br(Assembler::lessEqual, false, Assembler::pn, next2);
3802 delayed()->nop();
3803 stop("assert(top <= end)");
3804 should_not_reach_here();
3806 bind(next2);
3807 and3(t3, MinObjAlignmentInBytesMask, t3);
3808 cmp(t3, 0);
3809 br(Assembler::lessEqual, false, Assembler::pn, ok);
3810 delayed()->nop();
3811 stop("assert(aligned)");
3812 should_not_reach_here();
3814 bind(ok);
3815 restore();
3816 }
3817 #endif
3818 }
3821 void MacroAssembler::eden_allocate(
3822 Register obj, // result: pointer to object after successful allocation
3823 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
3824 int con_size_in_bytes, // object size in bytes if known at compile time
3825 Register t1, // temp register
3826 Register t2, // temp register
3827 Label& slow_case // continuation point if fast allocation fails
3828 ){
3829 // make sure arguments make sense
3830 assert_different_registers(obj, var_size_in_bytes, t1, t2);
3831 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
3832 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
3834 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
3835 // No allocation in the shared eden.
3836 br(Assembler::always, false, Assembler::pt, slow_case);
3837 delayed()->nop();
3838 } else {
3839 // get eden boundaries
3840 // note: we need both top & top_addr!
3841 const Register top_addr = t1;
3842 const Register end = t2;
3844 CollectedHeap* ch = Universe::heap();
3845 set((intx)ch->top_addr(), top_addr);
3846 intx delta = (intx)ch->end_addr() - (intx)ch->top_addr();
3847 ld_ptr(top_addr, delta, end);
3848 ld_ptr(top_addr, 0, obj);
3850 // try to allocate
3851 Label retry;
3852 bind(retry);
3853 #ifdef ASSERT
3854 // make sure eden top is properly aligned
3855 {
3856 Label L;
3857 btst(MinObjAlignmentInBytesMask, obj);
3858 br(Assembler::zero, false, Assembler::pt, L);
3859 delayed()->nop();
3860 stop("eden top is not properly aligned");
3861 bind(L);
3862 }
3863 #endif // ASSERT
3864 const Register free = end;
3865 sub(end, obj, free); // compute amount of free space
3866 if (var_size_in_bytes->is_valid()) {
3867 // size is unknown at compile time
3868 cmp(free, var_size_in_bytes);
3869 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case
3870 delayed()->add(obj, var_size_in_bytes, end);
3871 } else {
3872 // size is known at compile time
3873 cmp(free, con_size_in_bytes);
3874 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case
3875 delayed()->add(obj, con_size_in_bytes, end);
3876 }
3877 // Compare obj with the value at top_addr; if still equal, swap the value of
3878 // end with the value at top_addr. If not equal, read the value at top_addr
3879 // into end.
3880 casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3881 // if someone beat us on the allocation, try again, otherwise continue
3882 cmp(obj, end);
3883 brx(Assembler::notEqual, false, Assembler::pn, retry);
3884 delayed()->mov(end, obj); // nop if successful since obj == end
3886 #ifdef ASSERT
3887 // make sure eden top is properly aligned
3888 {
3889 Label L;
3890 const Register top_addr = t1;
3892 set((intx)ch->top_addr(), top_addr);
3893 ld_ptr(top_addr, 0, top_addr);
3894 btst(MinObjAlignmentInBytesMask, top_addr);
3895 br(Assembler::zero, false, Assembler::pt, L);
3896 delayed()->nop();
3897 stop("eden top is not properly aligned");
3898 bind(L);
3899 }
3900 #endif // ASSERT
3901 }
3902 }
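// eden_allocate above is the classic CAS-based bump-the-pointer loop; in
// C-like pseudocode (sketch; "cas" stands for the casx_under_lock call
// and returns the old memory value):
//
//   HeapWord* obj;
//   do {
//     obj = *Universe::heap()->top_addr();
//     HeapWord* new_top = obj + size_in_heapwords;
//     if (new_top > *Universe::heap()->end_addr()) goto slow_case;
//   } while (cas(Universe::heap()->top_addr(), obj, new_top) != obj);
//   // obj now addresses freshly claimed, properly aligned eden space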
3905 void MacroAssembler::tlab_allocate(
3906 Register obj, // result: pointer to object after successful allocation
3907 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
3908 int con_size_in_bytes, // object size in bytes if known at compile time
3909 Register t1, // temp register
3910 Label& slow_case // continuation point if fast allocation fails
3911 ){
3912 // make sure arguments make sense
3913 assert_different_registers(obj, var_size_in_bytes, t1);
3914 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size");
3915 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
3917 const Register free = t1;
3919 verify_tlab();
3921 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj);
3923 // calculate amount of free space
3924 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free);
3925 sub(free, obj, free);
3927 Label done;
3928 if (var_size_in_bytes == noreg) {
3929 cmp(free, con_size_in_bytes);
3930 } else {
3931 cmp(free, var_size_in_bytes);
3932 }
3933 br(Assembler::less, false, Assembler::pn, slow_case);
3934 // calculate the new top pointer
3935 if (var_size_in_bytes == noreg) {
3936 delayed()->add(obj, con_size_in_bytes, free);
3937 } else {
3938 delayed()->add(obj, var_size_in_bytes, free);
3939 }
3941 bind(done);
3943 #ifdef ASSERT
3944 // make sure new free pointer is properly aligned
3945 {
3946 Label L;
3947 btst(MinObjAlignmentInBytesMask, free);
3948 br(Assembler::zero, false, Assembler::pt, L);
3949 delayed()->nop();
3950 stop("updated TLAB free is not properly aligned");
3951 bind(L);
3952 }
3953 #endif // ASSERT
3955 // update the tlab top pointer
3956 st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3957 verify_tlab();
3958 }
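// tlab_allocate needs no CAS because the TLAB is thread-private; the
// logic above amounts to (C-like sketch with hypothetical accessor names):
//
//   HeapWord* obj = thread->tlab_top();
//   if (thread->tlab_end() - obj < size) goto slow_case;
//   thread->set_tlab_top(obj + size);              // a plain store suffices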
3961 void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
3962 Register top = O0;
3963 Register t1 = G1;
3964 Register t2 = G3;
3965 Register t3 = O1;
3966 assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
3967 Label do_refill, discard_tlab;
3969 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
3970 // No allocation in the shared eden.
3971 br(Assembler::always, false, Assembler::pt, slow_case);
3972 delayed()->nop();
3973 }
3975 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
3976 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1);
3977 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2);
3979 // calculate amount of free space
3980 sub(t1, top, t1);
3981 srl_ptr(t1, LogHeapWordSize, t1);
3983 // Retain tlab and allocate object in shared space if
3984 // the amount free in the tlab is too large to discard.
3985 cmp(t1, t2);
3986 brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);
3988 // increment waste limit to prevent getting stuck on this slow path
3989 delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
3990 st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
3991 if (TLABStats) {
3992 // increment number of slow_allocations
3993 ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2);
3994 add(t2, 1, t2);
3995 stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
3996 }
3997 br(Assembler::always, false, Assembler::pt, try_eden);
3998 delayed()->nop();
4000 bind(discard_tlab);
4001 if (TLABStats) {
4002 // increment number of refills
4003 ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2);
4004 add(t2, 1, t2);
4005 stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()));
4006 // accumulate wastage
4007 ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2);
4008 add(t2, t1, t2);
4009 stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
4010 }
4012 // if tlab is currently allocated (top or end != null) then
4013 // fill [top, end + alignment_reserve) with array object
4014 br_null(top, false, Assembler::pn, do_refill);
4015 delayed()->nop();
4017 set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
4018 st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
4019 // set klass to intArrayKlass
4020 sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
4021 add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
4022 sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
4023 st(t1, top, arrayOopDesc::length_offset_in_bytes());
4024 set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
4025 ld_ptr(t2, 0, t2);
4026 // Store klass last: concurrent GCs assume the length is valid if the
4027 // klass field is not null.
4028 store_klass(t2, top);
4029 verify_oop(top);
4031 // refill the tlab with an eden allocation
4032 bind(do_refill);
4033 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1);
4034 sll_ptr(t1, LogHeapWordSize, t1);
4035 // add object_size ??
4036 eden_allocate(top, t1, 0, t2, t3, slow_case);
4038 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset()));
4039 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
4040 #ifdef ASSERT
4041 // check that tlab_size (t1) is still valid
4042 {
4043 Label ok;
4044 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
4045 sll_ptr(t2, LogHeapWordSize, t2);
4046 cmp(t1, t2);
4047 br(Assembler::equal, false, Assembler::pt, ok);
4048 delayed()->nop();
4049 stop("assert(t1 == tlab_size)");
4050 should_not_reach_here();
4052 bind(ok);
4053 }
4054 #endif // ASSERT
4055 add(top, t1, top); // t1 is tlab_size
4056 sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
4057 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));
4058 verify_tlab();
4059 br(Assembler::always, false, Assembler::pt, retry);
4060 delayed()->nop();
4061 }
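// The discard path above keeps the heap parseable for heap walkers: the
// unused tail [top, end + alignment_reserve) is formatted as a dummy
// int[] before the TLAB is abandoned. The shape being built, as a sketch:
//
//   top->mark   = markOopDesc::prototype()->copy_set_hash(0x2);
//   top->length = the free space scaled to jint elements, minus the header;
//   top->klass  = Universe::intArrayKlassObj();    // stored last, for GC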
4063 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
4064 switch (cond) {
4065 // Note some conditions are synonyms for others
4066 case Assembler::never: return Assembler::always;
4067 case Assembler::zero: return Assembler::notZero;
4068 case Assembler::lessEqual: return Assembler::greater;
4069 case Assembler::less: return Assembler::greaterEqual;
4070 case Assembler::lessEqualUnsigned: return Assembler::greaterUnsigned;
4071 case Assembler::lessUnsigned: return Assembler::greaterEqualUnsigned;
4072 case Assembler::negative: return Assembler::positive;
4073 case Assembler::overflowSet: return Assembler::overflowClear;
4074 case Assembler::always: return Assembler::never;
4075 case Assembler::notZero: return Assembler::zero;
4076 case Assembler::greater: return Assembler::lessEqual;
4077 case Assembler::greaterEqual: return Assembler::less;
4078 case Assembler::greaterUnsigned: return Assembler::lessEqualUnsigned;
4079 case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned;
4080 case Assembler::positive: return Assembler::negative;
4081 case Assembler::overflowClear: return Assembler::overflowSet;
4082 }
4084 ShouldNotReachHere(); return Assembler::overflowClear;
4085 }
4087 void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
4088 Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) {
4089 Condition negated_cond = negate_condition(cond);
4090 Label L;
4091 brx(negated_cond, false, Assembler::pt, L);
4092 delayed()->nop();
4093 inc_counter(counter_ptr, Rtmp1, Rtmp2);
4094 bind(L);
4095 }
4097 void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) {
4098 AddressLiteral addrlit(counter_addr);
4099 sethi(addrlit, Rtmp1); // Move hi22 bits into temporary register.
4100 Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits.
4101 ld(addr, Rtmp2);
4102 inc(Rtmp2);
4103 st(Rtmp2, addr);
4104 }
4106 void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) {
4107 inc_counter((address) counter_addr, Rtmp1, Rtmp2);
4108 }
4110 SkipIfEqual::SkipIfEqual(
4111 MacroAssembler* masm, Register temp, const bool* flag_addr,
4112 Assembler::Condition condition) {
4113 _masm = masm;
4114 AddressLiteral flag(flag_addr);
4115 _masm->sethi(flag, temp);
4116 _masm->ldub(temp, flag.low10(), temp);
4117 _masm->tst(temp);
4118 _masm->br(condition, false, Assembler::pt, _label);
4119 _masm->delayed()->nop();
4120 }
4122 SkipIfEqual::~SkipIfEqual() {
4123 _masm->bind(_label);
4124 }
4127 // Writes to stack successive pages until offset reached to check for
4128 // stack overflow + shadow pages. This clobbers tsp and scratch.
4129 void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
4130 Register Rscratch) {
4131 // Use stack pointer in temp stack pointer
4132 mov(SP, Rtsp);
4134 // Bang stack for total size given plus stack shadow page size.
4135 // Bang one page at a time because a large size can overflow yellow and
4136 // red zones (the bang will fail but stack overflow handling can't tell that
4137 // it was a stack overflow bang vs a regular segv).
4138 int offset = os::vm_page_size();
4139 Register Roffset = Rscratch;
4141 Label loop;
4142 bind(loop);
4143 set((-offset)+STACK_BIAS, Rscratch);
4144 st(G0, Rtsp, Rscratch);
4145 set(offset, Roffset);
4146 sub(Rsize, Roffset, Rsize);
4147 cmp(Rsize, G0);
4148 br(Assembler::greater, false, Assembler::pn, loop);
4149 delayed()->sub(Rtsp, Roffset, Rtsp);
4151 // Bang down shadow pages too.
4152 // The -1 because we already subtracted 1 page.
4153 for (int i = 0; i< StackShadowPages-1; i++) {
4154 set((-i*offset)+STACK_BIAS, Rscratch);
4155 st(G0, Rtsp, Rscratch);
4156 }
4157 }
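// The loops above touch one word per page down the stack, i.e. roughly
// (C-like sketch; STACK_BIAS is folded into each store's displacement):
//
//   char* tsp = sp;
//   for (intptr_t left = size; left > 0; left -= page_size, tsp -= page_size)
//     *(tsp - page_size) = 0;                      // may fault: stack bang
//   for (int i = 0; i < StackShadowPages - 1; i++)
//     *(tsp - i * page_size) = 0;                  // bang the shadow pages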
4159 ///////////////////////////////////////////////////////////////////////////////////
4160 #ifndef SERIALGC
4162 static uint num_stores = 0;
4163 static uint num_null_pre_stores = 0;
4165 static void count_null_pre_vals(void* pre_val) {
4166 num_stores++;
4167 if (pre_val == NULL) num_null_pre_stores++;
4168 if ((num_stores % 1000000) == 0) {
4169 tty->print_cr(UINT32_FORMAT " stores, " UINT32_FORMAT " (%5.2f%%) with null pre-vals.",
4170 num_stores, num_null_pre_stores,
4171 100.0*(float)num_null_pre_stores/(float)num_stores);
4172 }
4173 }
4175 static address satb_log_enqueue_with_frame = 0;
4176 static u_char* satb_log_enqueue_with_frame_end = 0;
4178 static address satb_log_enqueue_frameless = 0;
4179 static u_char* satb_log_enqueue_frameless_end = 0;
4181 static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
4183 // The calls to this don't work. We'd need to do a fair amount of work to
4184 // make it work.
4185 static void check_index(int ind) {
4186 assert(0 <= ind && ind <= 64*K && ((ind % oopSize) == 0),
4187 "Invariants.");
4188 }
4190 static void generate_satb_log_enqueue(bool with_frame) {
4191 BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
4192 CodeBuffer buf(bb);
4193 MacroAssembler masm(&buf);
4194 address start = masm.pc();
4195 Register pre_val;
4197 Label refill, restart;
4198 if (with_frame) {
4199 masm.save_frame(0);
4200 pre_val = I0; // Was O0 before the save.
4201 } else {
4202 pre_val = O0;
4203 }
4204 int satb_q_index_byte_offset =
4205 in_bytes(JavaThread::satb_mark_queue_offset() +
4206 PtrQueue::byte_offset_of_index());
4207 int satb_q_buf_byte_offset =
4208 in_bytes(JavaThread::satb_mark_queue_offset() +
4209 PtrQueue::byte_offset_of_buf());
4210 assert(in_bytes(PtrQueue::byte_width_of_index()) == sizeof(intptr_t) &&
4211 in_bytes(PtrQueue::byte_width_of_buf()) == sizeof(intptr_t),
4212 "check sizes in assembly below");
4214 masm.bind(restart);
4215 masm.ld_ptr(G2_thread, satb_q_index_byte_offset, L0);
4217 masm.br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn, L0, refill);
4218 // If the branch is taken, no harm in executing this in the delay slot.
4219 masm.delayed()->ld_ptr(G2_thread, satb_q_buf_byte_offset, L1);
4220 masm.sub(L0, oopSize, L0);
4222 masm.st_ptr(pre_val, L1, L0); // [_buf + index] := pre_val
4223 if (!with_frame) {
4224 // Use return-from-leaf
4225 masm.retl();
4226 masm.delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset);
4227 } else {
4228 // Not delayed.
4229 masm.st_ptr(L0, G2_thread, satb_q_index_byte_offset);
4230 }
4231 if (with_frame) {
4232 masm.ret();
4233 masm.delayed()->restore();
4234 }
4235 masm.bind(refill);
4237 address handle_zero =
4238 CAST_FROM_FN_PTR(address,
4239 &SATBMarkQueueSet::handle_zero_index_for_thread);
4240 // This should be rare enough that we can afford to save all the
4241 // scratch registers that the calling context might be using.
4242 masm.mov(G1_scratch, L0);
4243 masm.mov(G3_scratch, L1);
4244 masm.mov(G4, L2);
4245 // We need the value of O0 above (for the write into the buffer), so we
4246 // save and restore it.
4247 masm.mov(O0, L3);
4248 // Since the call will overwrite O7, we save and restore that, as well.
4249 masm.mov(O7, L4);
4250 masm.call_VM_leaf(L5, handle_zero, G2_thread);
4251 masm.mov(L0, G1_scratch);
4252 masm.mov(L1, G3_scratch);
4253 masm.mov(L2, G4);
4254 masm.mov(L3, O0);
4255 masm.br(Assembler::always, /*annul*/false, Assembler::pt, restart);
4256 masm.delayed()->mov(L4, O7);
4258 if (with_frame) {
4259 satb_log_enqueue_with_frame = start;
4260 satb_log_enqueue_with_frame_end = masm.pc();
4261 } else {
4262 satb_log_enqueue_frameless = start;
4263 satb_log_enqueue_frameless_end = masm.pc();
4264 }
4265 }
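// The stub generated above performs the standard PtrQueue push; in C-like
// pseudocode (sketch with hypothetical accessor names; the index counts
// down in bytes and 0 means "buffer full"):
//
//   restart:
//   size_t index = thread->satb_index();
//   if (index == 0) {
//     SATBMarkQueueSet::handle_zero_index_for_thread(thread);  // refill
//     goto restart;
//   }
//   index -= oopSize;
//   *(void**)((char*)thread->satb_buf() + index) = pre_val;
//   thread->set_satb_index(index);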
4267 static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) {
4268 if (with_frame) {
4269 if (satb_log_enqueue_with_frame == 0) {
4270 generate_satb_log_enqueue(with_frame);
4271 assert(satb_log_enqueue_with_frame != 0, "postcondition.");
4272 if (G1SATBPrintStubs) {
4273 tty->print_cr("Generated with-frame satb enqueue:");
4274 Disassembler::decode((u_char*)satb_log_enqueue_with_frame,
4275 satb_log_enqueue_with_frame_end,
4276 tty);
4277 }
4278 }
4279 } else {
4280 if (satb_log_enqueue_frameless == 0) {
4281 generate_satb_log_enqueue(with_frame);
4282 assert(satb_log_enqueue_frameless != 0, "postcondition.");
4283 if (G1SATBPrintStubs) {
4284 tty->print_cr("Generated frameless satb enqueue:");
4285 Disassembler::decode((u_char*)satb_log_enqueue_frameless,
4286 satb_log_enqueue_frameless_end,
4287 tty);
4288 }
4289 }
4290 }
4291 }
4293 void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs) {
4294 assert(offset == 0 || index == noreg, "choose one");
4296 if (G1DisablePreBarrier) return;
4297 // satb_log_barrier(tmp, obj, offset, preserve_o_regs);
4298 Label filtered;
4299 // satb_log_barrier_work0(tmp, filtered);
4300 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
4301 ld(G2,
4302 in_bytes(JavaThread::satb_mark_queue_offset() +
4303 PtrQueue::byte_offset_of_active()),
4304 tmp);
4305 } else {
4306 guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
4307 "Assumption");
4308 ldsb(G2,
4309 in_bytes(JavaThread::satb_mark_queue_offset() +
4310 PtrQueue::byte_offset_of_active()),
4311 tmp);
4312 }
4314 // Check on whether to annul.
4315 br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
4316 delayed() -> nop();
4318 // satb_log_barrier_work1(tmp, offset);
4319 if (index == noreg) {
4320 if (Assembler::is_simm13(offset)) {
4321 load_heap_oop(obj, offset, tmp);
4322 } else {
4323 set(offset, tmp);
4324 load_heap_oop(obj, tmp, tmp);
4325 }
4326 } else {
4327 load_heap_oop(obj, index, tmp);
4328 }
4330 // satb_log_barrier_work2(obj, tmp, offset);
4332 // satb_log_barrier_work3(tmp, filtered, preserve_o_regs);
4334 const Register pre_val = tmp;
4336 if (G1SATBBarrierPrintNullPreVals) {
4337 save_frame(0);
4338 mov(pre_val, O0);
4339 // Save G-regs that target may use.
4340 mov(G1, L1);
4341 mov(G2, L2);
4342 mov(G3, L3);
4343 mov(G4, L4);
4344 mov(G5, L5);
4345 call(CAST_FROM_FN_PTR(address, &count_null_pre_vals));
4346 delayed()->nop();
4347 // Restore G-regs that target may have used.
4348 mov(L1, G1);
4349 mov(L2, G2);
4350 mov(L3, G3);
4351 mov(L4, G4);
4352 mov(L5, G5);
4353 restore(G0, G0, G0);
4354 }
4356 // Check on whether to annul.
4357 br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, pre_val, filtered);
4358 delayed() -> nop();
4360 // OK, it's not filtered, so we'll need to call enqueue. In the normal
4361 // case, pre_val will be a scratch G-reg, but there are some cases in which
4362 // it's an O-reg. In the former case, do a normal call. In the latter,
4363 // do a save here and call the frameless version.
4365 guarantee(pre_val->is_global() || pre_val->is_out(),
4366 "Or we need to think harder.");
4367 if (pre_val->is_global() && !preserve_o_regs) {
4368 generate_satb_log_enqueue_if_necessary(true); // with frame.
4369 call(satb_log_enqueue_with_frame);
4370 delayed()->mov(pre_val, O0);
4371 } else {
4372 generate_satb_log_enqueue_if_necessary(false); // frameless version.
4373 save_frame(0);
4374 call(satb_log_enqueue_frameless);
4375 delayed()->mov(pre_val->after_save(), O0);
4376 restore();
4377 }
4379 bind(filtered);
4380 }
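// Net effect of g1_write_barrier_pre (C-like sketch with a hypothetical
// satb_queue_active() accessor): record the value about to be overwritten,
// as SATB marking requires, when marking is active and the value is non-null:
//
//   if (!thread->satb_queue_active()) goto filtered;
//   oop pre_val = *field;                          // the to-be-overwritten oop
//   if (pre_val == NULL) goto filtered;
//   satb_log_enqueue(pre_val);                     // via the stubs above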
4382 static jint num_ct_writes = 0;
4383 static jint num_ct_writes_filtered_in_hr = 0;
4384 static jint num_ct_writes_filtered_null = 0;
4385 static G1CollectedHeap* g1 = NULL;
4387 static Thread* count_ct_writes(void* filter_val, void* new_val) {
4388 Atomic::inc(&num_ct_writes);
4389 if (filter_val == NULL) {
4390 Atomic::inc(&num_ct_writes_filtered_in_hr);
4391 } else if (new_val == NULL) {
4392 Atomic::inc(&num_ct_writes_filtered_null);
4393 } else {
4394 if (g1 == NULL) {
4395 g1 = G1CollectedHeap::heap();
4396 }
4397 }
4398 if ((num_ct_writes % 1000000) == 0) {
4399 jint num_ct_writes_filtered =
4400 num_ct_writes_filtered_in_hr +
4401 num_ct_writes_filtered_null;
4403 tty->print_cr("%d potential CT writes: %5.2f%% filtered\n"
4404 " (%5.2f%% intra-HR, %5.2f%% null).",
4405 num_ct_writes,
4406 100.0*(float)num_ct_writes_filtered/(float)num_ct_writes,
4407 100.0*(float)num_ct_writes_filtered_in_hr/
4408 (float)num_ct_writes,
4409 100.0*(float)num_ct_writes_filtered_null/
4410 (float)num_ct_writes);
4411 }
4412 return Thread::current();
4413 }
4415 static address dirty_card_log_enqueue = 0;
4416 static u_char* dirty_card_log_enqueue_end = 0;
4418 // This code assumes that O0 contains the object address.
4419 static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
4420 BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
4421 CodeBuffer buf(bb);
4422 MacroAssembler masm(&buf);
4423 address start = masm.pc();
4425 Label not_already_dirty, restart, refill;
4427 #ifdef _LP64
4428 masm.srlx(O0, CardTableModRefBS::card_shift, O0);
4429 #else
4430 masm.srl(O0, CardTableModRefBS::card_shift, O0);
4431 #endif
4432 AddressLiteral addrlit(byte_map_base);
4433 masm.set(addrlit, O1); // O1 := <card table base>
4434 masm.ldub(O0, O1, O2); // O2 := [O0 + O1]
4436 masm.br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt,
4437 O2, not_already_dirty);
4438 // Get O1 + O2 into a reg by itself -- useful in the take-the-branch
4439 // case, harmless if not.
4440 masm.delayed()->add(O0, O1, O3);
4442 // We didn't take the branch, so we're already dirty: return.
4443 // Use return-from-leaf
4444 masm.retl();
4445 masm.delayed()->nop();
4447 // Not dirty.
4448 masm.bind(not_already_dirty);
4449 // First, dirty it.
4450 masm.stb(G0, O3, G0); // [cardPtr] := 0 (i.e., dirty).
4451 int dirty_card_q_index_byte_offset =
4452 in_bytes(JavaThread::dirty_card_queue_offset() +
4453 PtrQueue::byte_offset_of_index());
4454 int dirty_card_q_buf_byte_offset =
4455 in_bytes(JavaThread::dirty_card_queue_offset() +
4456 PtrQueue::byte_offset_of_buf());
4457 masm.bind(restart);
4458 masm.ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);
4460 masm.br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn,
4461 L0, refill);
4462 // If the branch is taken, no harm in executing this in the delay slot.
4463 masm.delayed()->ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1);
4464 masm.sub(L0, oopSize, L0);
4466 masm.st_ptr(O3, L1, L0); // [_buf + index] := O3 (the card address)
4467 // Use return-from-leaf
4468 masm.retl();
4469 masm.delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset);
4471 masm.bind(refill);
4472 address handle_zero =
4473 CAST_FROM_FN_PTR(address,
4474 &DirtyCardQueueSet::handle_zero_index_for_thread);
4475 // This should be rare enough that we can afford to save all the
4476 // scratch registers that the calling context might be using.
4477 masm.mov(G1_scratch, L3);
4478 masm.mov(G3_scratch, L5);
4479 // We need the value of O3 above (for the write into the buffer), so we
4480 // save and restore it.
4481 masm.mov(O3, L6);
4482 // Since the call will overwrite O7, we save and restore that, as well.
4483 masm.mov(O7, L4);
4485 masm.call_VM_leaf(L7_thread_cache, handle_zero, G2_thread);
4486 masm.mov(L3, G1_scratch);
4487 masm.mov(L5, G3_scratch);
4488 masm.mov(L6, O3);
4489 masm.br(Assembler::always, /*annul*/false, Assembler::pt, restart);
4490 masm.delayed()->mov(L4, O7);
4492 dirty_card_log_enqueue = start;
4493 dirty_card_log_enqueue_end = masm.pc();
4494 // XXX Should have a guarantee here about not going off the end!
4495 // Does it already do so? Do an experiment...
4496 }
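// Pseudocode for the stub above (C-like sketch; a zero card byte means
// "already dirty", so only clean cards take the enqueue path):
//
//   jbyte* card = byte_map_base + ((uintptr_t)O0 >> CardTableModRefBS::card_shift);
//   if (*card != 0) {                              // not already dirty
//     *card = 0;                                   // dirty the card
//     // push card onto the thread's dirty-card queue, refilling via
//     // DirtyCardQueueSet::handle_zero_index_for_thread when index == 0
//   }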
4498 static inline void
4499 generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) {
4500 if (dirty_card_log_enqueue == 0) {
4501 generate_dirty_card_log_enqueue(byte_map_base);
4502 assert(dirty_card_log_enqueue != 0, "postcondition.");
4503 if (G1SATBPrintStubs) {
4504 tty->print_cr("Generated dirty_card enqueue:");
4505 Disassembler::decode((u_char*)dirty_card_log_enqueue,
4506 dirty_card_log_enqueue_end,
4507 tty);
4508 }
4509 }
4510 }
4513 void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
4515 Label filtered;
4516 MacroAssembler* post_filter_masm = this;
4518 if (new_val == G0) return;
4519 if (G1DisablePostBarrier) return;
4521 G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
4522 assert(bs->kind() == BarrierSet::G1SATBCT ||
4523 bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
4524 if (G1RSBarrierRegionFilter) {
4525 xor3(store_addr, new_val, tmp);
4526 #ifdef _LP64
4527 srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
4528 #else
4529 srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
4530 #endif
4531 if (G1PrintCTFilterStats) {
4532 guarantee(tmp->is_global(), "Or stats won't work...");
4533 // This is a sleazy hack: I'm temporarily hijacking G2, which I
4534 // promise to restore.
4535 mov(new_val, G2);
4536 save_frame(0);
4537 mov(tmp, O0);
4538 mov(G2, O1);
4539 // Save G-regs that target may use.
4540 mov(G1, L1);
4541 mov(G2, L2);
4542 mov(G3, L3);
4543 mov(G4, L4);
4544 mov(G5, L5);
4545 call(CAST_FROM_FN_PTR(address, &count_ct_writes));
4546 delayed()->nop();
4547 mov(O0, G2);
4548 // Restore G-regs that target may have used.
4549 mov(L1, G1);
4550 mov(L3, G3);
4551 mov(L4, G4);
4552 mov(L5, G5);
4553 restore(G0, G0, G0);
4554 }
4555 // XXX Should I predict this taken or not? Does it matter?
4556 br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
4557 delayed()->nop();
4558 }
4560 // If the "store_addr" register is an "in" or "local" register, move it to
4561 // a scratch reg so we can pass it as an argument.
4562 bool use_scr = !(store_addr->is_global() || store_addr->is_out());
4563 // Pick a scratch register different from "tmp".
4564 Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch);
4565 // Make sure we use up the delay slot!
4566 if (use_scr) {
4567 post_filter_masm->mov(store_addr, scr);
4568 } else {
4569 post_filter_masm->nop();
4570 }
4571 generate_dirty_card_log_enqueue_if_necessary(bs->byte_map_base);
4572 save_frame(0);
4573 call(dirty_card_log_enqueue);
4574 if (use_scr) {
4575 delayed()->mov(scr, O0);
4576 } else {
4577 delayed()->mov(store_addr->after_save(), O0);
4578 }
4579 restore();
4581 bind(filtered);
4583 }
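// With G1RSBarrierRegionFilter the post-barrier above reduces to (C-like
// sketch; the new_val == G0 case was already filtered at compile time):
//
//   if ((((uintptr_t)store_addr ^ (uintptr_t)new_val)
//        >> HeapRegion::LogOfHRGrainBytes) == 0) {
//     goto filtered;                               // same heap region
//   }
//   dirty_card_log_enqueue(store_addr);            // card-mark and enqueue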
4585 #endif // SERIALGC
4586 ///////////////////////////////////////////////////////////////////////////////////
4588 void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
4589 // If we're writing constant NULL, we can skip the write barrier.
4590 if (new_val == G0) return;
4591 CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
4592 assert(bs->kind() == BarrierSet::CardTableModRef ||
4593 bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
4594 card_table_write(bs->byte_map_base, tmp, store_addr);
4595 }
4597 void MacroAssembler::load_klass(Register src_oop, Register klass) {
4598 // The number of bytes in this code is used by
4599 // MachCallDynamicJavaNode::ret_addr_offset()
4600 // if this changes, change that.
4601 if (UseCompressedOops) {
4602 lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
4603 decode_heap_oop_not_null(klass);
4604 } else {
4605 ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass);
4606 }
4607 }
4609 void MacroAssembler::store_klass(Register klass, Register dst_oop) {
4610 if (UseCompressedOops) {
4611 assert(dst_oop != klass, "not enough registers");
4612 encode_heap_oop_not_null(klass);
4613 st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
4614 } else {
4615 st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes());
4616 }
4617 }
4619 void MacroAssembler::store_klass_gap(Register s, Register d) {
4620 if (UseCompressedOops) {
4621 assert(s != d, "not enough registers");
4622 st(s, d, oopDesc::klass_gap_offset_in_bytes());
4623 }
4624 }
4626 void MacroAssembler::load_heap_oop(const Address& s, Register d) {
4627 if (UseCompressedOops) {
4628 lduw(s, d);
4629 decode_heap_oop(d);
4630 } else {
4631 ld_ptr(s, d);
4632 }
4633 }
4635 void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
4636 if (UseCompressedOops) {
4637 lduw(s1, s2, d);
4638 decode_heap_oop(d, d);
4639 } else {
4640 ld_ptr(s1, s2, d);
4641 }
4642 }
4644 void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
4645 if (UseCompressedOops) {
4646 lduw(s1, simm13a, d);
4647 decode_heap_oop(d, d);
4648 } else {
4649 ld_ptr(s1, simm13a, d);
4650 }
4651 }
4653 void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) {
4654 if (s2.is_constant()) load_heap_oop(s1, s2.as_constant(), d);
4655 else load_heap_oop(s1, s2.as_register(), d);
4656 }
4658 void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
4659 if (UseCompressedOops) {
4660 assert(s1 != d && s2 != d, "not enough registers");
4661 encode_heap_oop(d);
4662 st(d, s1, s2);
4663 } else {
4664 st_ptr(d, s1, s2);
4665 }
4666 }
4668 void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
4669 if (UseCompressedOops) {
4670 assert(s1 != d, "not enough registers");
4671 encode_heap_oop(d);
4672 st(d, s1, simm13a);
4673 } else {
4674 st_ptr(d, s1, simm13a);
4675 }
4676 }
4678 void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
4679 if (UseCompressedOops) {
4680 assert(a.base() != d, "not enough registers");
4681 encode_heap_oop(d);
4682 st(d, a, offset);
4683 } else {
4684 st_ptr(d, a, offset);
4685 }
4686 }
4689 void MacroAssembler::encode_heap_oop(Register src, Register dst) {
4690 assert (UseCompressedOops, "must be compressed");
4691 assert (Universe::heap() != NULL, "java heap should be initialized");
4692 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4693 verify_oop(src);
4694 if (Universe::narrow_oop_base() == NULL) {
4695 srlx(src, LogMinObjAlignmentInBytes, dst);
4696 return;
4697 }
4698 Label done;
4699 if (src == dst) {
4700 // optimize for frequent case src == dst
4701 bpr(rc_nz, true, Assembler::pt, src, done);
4702 delayed() -> sub(src, G6_heapbase, dst); // annulled if not taken
4703 bind(done);
4704 srlx(src, LogMinObjAlignmentInBytes, dst);
4705 } else {
4706 bpr(rc_z, false, Assembler::pn, src, done);
4707 delayed() -> mov(G0, dst);
4708 // could be moved before the branch, annulling the delay slot,
4709 // but may add some unneeded work decoding null
4710 sub(src, G6_heapbase, dst);
4711 srlx(dst, LogMinObjAlignmentInBytes, dst);
4712 bind(done);
4713 }
4714 }
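// Numerically, with base = Universe::narrow_oop_base() and
// shift = LogMinObjAlignmentInBytes, the mapping implemented here (and
// inverted by decode_heap_oop below) is, as a sketch:
//
//   narrowOop encode(oop o)    { return o == NULL ? 0 : (o - base) >> shift; }
//   oop       decode(narrowOop n) { return n == 0 ? NULL : base + ((uintptr_t)n << shift); }
//
// When narrow_oop_base() is NULL both null checks fold away and only the
// shift remains.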
4717 void MacroAssembler::encode_heap_oop_not_null(Register r) {
4718 assert (UseCompressedOops, "must be compressed");
4719 assert (Universe::heap() != NULL, "java heap should be initialized");
4720 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4721 verify_oop(r);
4722 if (Universe::narrow_oop_base() != NULL)
4723 sub(r, G6_heapbase, r);
4724 srlx(r, LogMinObjAlignmentInBytes, r);
4725 }
4727 void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
4728 assert (UseCompressedOops, "must be compressed");
4729 assert (Universe::heap() != NULL, "java heap should be initialized");
4730 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4731 verify_oop(src);
4732 if (Universe::narrow_oop_base() == NULL) {
4733 srlx(src, LogMinObjAlignmentInBytes, dst);
4734 } else {
4735 sub(src, G6_heapbase, dst);
4736 srlx(dst, LogMinObjAlignmentInBytes, dst);
4737 }
4738 }
4740 // Same algorithm as oops.inline.hpp decode_heap_oop.
4741 void MacroAssembler::decode_heap_oop(Register src, Register dst) {
4742 assert (UseCompressedOops, "must be compressed");
4743 assert (Universe::heap() != NULL, "java heap should be initialized");
4744 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4745 sllx(src, LogMinObjAlignmentInBytes, dst);
4746 if (Universe::narrow_oop_base() != NULL) {
4747 Label done;
4748 bpr(rc_nz, true, Assembler::pt, dst, done);
4749 delayed() -> add(dst, G6_heapbase, dst); // annulled if not taken
4750 bind(done);
4751 }
4752 verify_oop(dst);
4753 }
4755 void MacroAssembler::decode_heap_oop_not_null(Register r) {
4756 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
4757 // pd_code_size_limit.
4758 // Also do not verify_oop as this is called by verify_oop.
4759 assert (UseCompressedOops, "must be compressed");
4760 assert (Universe::heap() != NULL, "java heap should be initialized");
4761 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4762 sllx(r, LogMinObjAlignmentInBytes, r);
4763 if (Universe::narrow_oop_base() != NULL)
4764 add(r, G6_heapbase, r);
4765 }
4767 void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
4768 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
4769 // pd_code_size_limit.
4770 // Also do not verify_oop as this is called by verify_oop.
4771 assert (UseCompressedOops, "must be compressed");
4772 assert (Universe::heap() != NULL, "java heap should be initialized");
4773 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4774 sllx(src, LogMinObjAlignmentInBytes, dst);
4775 if (Universe::narrow_oop_base() != NULL)
4776 add(dst, G6_heapbase, dst);
4777 }
4779 void MacroAssembler::reinit_heapbase() {
4780 if (UseCompressedOops) {
4781 // call indirectly to solve generation ordering problem
4782 AddressLiteral base(Universe::narrow_oop_base_addr());
4783 load_ptr_contents(base, G6_heapbase);
4784 }
4785 }
4787 // Compare char[] arrays aligned to 4 bytes.
4788 void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
4789 Register limit, Register result,
4790 Register chr1, Register chr2, Label& Ldone) {
4791 Label Lvector, Lloop;
4792 assert(chr1 == result, "should be the same");
4794 // Note: limit contains number of bytes (2*char_elements) != 0.
4795 andcc(limit, 0x2, chr1); // trailing character ?
4796 br(Assembler::zero, false, Assembler::pt, Lvector);
4797 delayed()->nop();
4799 // compare the trailing char
4800 sub(limit, sizeof(jchar), limit);
4801 lduh(ary1, limit, chr1);
4802 lduh(ary2, limit, chr2);
4803 cmp(chr1, chr2);
4804 br(Assembler::notEqual, true, Assembler::pt, Ldone);
4805 delayed()->mov(G0, result); // not equal
4807 // only one char ?
4808 br_on_reg_cond(rc_z, true, Assembler::pn, limit, Ldone);
4809 delayed()->add(G0, 1, result); // zero-length arrays are equal
4811 // word-by-word compare; don't need alignment check
4812 bind(Lvector);
4813 // Shift ary1 and ary2 to the end of the arrays, negate limit
4814 add(ary1, limit, ary1);
4815 add(ary2, limit, ary2);
4816 neg(limit, limit);
4818 lduw(ary1, limit, chr1);
4819 bind(Lloop);
4820 lduw(ary2, limit, chr2);
4821 cmp(chr1, chr2);
4822 br(Assembler::notEqual, true, Assembler::pt, Ldone);
4823 delayed()->mov(G0, result); // not equal
4824 inccc(limit, 2*sizeof(jchar));
4825 // annul LDUW if branch is not taken to prevent access past end of array
4826 br(Assembler::notZero, true, Assembler::pt, Lloop);
4827 delayed()->lduw(ary1, limit, chr1); // hoisted
4829 // Caller should set it:
4830 // add(G0, 1, result); // equals
4831 }