/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_assembler_sparc.cpp.incl"

// Implementation of Address

Address::Address( addr_type t, int which ) {
  switch (t) {
   case extra_in_argument:
   case extra_out_argument:
     _base = t == extra_in_argument ? FP : SP;
     _hi   = 0;
// Warning:  In LP64 mode, _disp will occupy more than 10 bits.
//           This is inconsistent with the other constructors but op
//           codes such as ld or ldx, only access disp() to get their
//           simm13 argument.
     _disp = ((which - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
    break;
   default:
    ShouldNotReachHere();
    break;
  }
}

static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}

void Assembler::print_instruction(int inst) {
  const char* s;
  switch (inv_op(inst)) {
  default:         s = "????"; break;
  case call_op:    s = "call"; break;
  case branch_op:
    switch (inv_op2(inst)) {
      case bpr_op2:    s = "bpr";  break;
      case fb_op2:     s = "fb";   break;
      case fbp_op2:    s = "fbp";  break;
      case br_op2:     s = "br";   break;
      case bp_op2:     s = "bp";   break;
      case cb_op2:     s = "cb";   break;
      default:         s = "????"; break;
    }
  }
  ::tty->print("%s", s);
}


// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will work out
// OK.
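// (Illustrative note, not part of the original source: the patch works by
// building a mask m that covers exactly the displacement field of the given
// encoding, computing the new field value v with the same wdisp helper, and
// merging via "inst & ~m | v".  Feeding word_aligned_ones (-4, all ones with
// the low two bits clear) through wdisp yields an all-ones displacement
// field, which is what makes it usable as the mask.)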
int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) {

  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    m = wdisp(word_aligned_ones, 0, 30);  v = wdisp(dest_pos, inst_pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case bpr_op2:    m = wdisp16(word_aligned_ones, 0);      v = wdisp16(dest_pos, inst_pos);     break;
      case fbp_op2:    m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
      case bp_op2:     m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
      case fb_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
      case br_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
      case cb_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
      default: ShouldNotReachHere();
    }
  }
  return  inst & ~m  |  v;
}

// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
int Assembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:        r = inv_wdisp(inst, pos, 30);  break;
  case branch_op:
    switch (inv_op2(inst)) {
      case bpr_op2:    r = inv_wdisp16(inst, pos);      break;
      case fbp_op2:    r = inv_wdisp(  inst, pos, 19);  break;
      case bp_op2:     r = inv_wdisp(  inst, pos, 19);  break;
      case fb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
      case br_op2:     r = inv_wdisp(  inst, pos, 22);  break;
      case cb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
      default: ShouldNotReachHere();
    }
  }
  return r;
}

int AbstractAssembler::code_fill_byte() {
  return 0x00;                  // illegal instruction 0x00000000
}

Assembler::Condition Assembler::reg_cond_to_cc_cond(Assembler::RCondition in) {
  switch (in) {
  case rc_z:   return equal;
  case rc_lez: return lessEqual;
  case rc_lz:  return less;
  case rc_nz:  return notEqual;
  case rc_gz:  return greater;
  case rc_gez: return greaterEqual;
  default:
    ShouldNotReachHere();
  }
  return equal;
}

// Generate a bunch 'o stuff (including v9's
#ifndef PRODUCT
void Assembler::test_v9() {
  add(    G0, G1, G2 );
  add(    G3,  0, G4 );

  addcc(  G5, G6, G7 );
  addcc(  I0,  1, I1 );
  addc(   I2, I3, I4 );
  addc(   I5, -1, I6 );
  addccc( I7, L0, L1 );
  addccc( L2, (1 << 12) - 2, L3 );

  Label lbl1, lbl2, lbl3;

  bind(lbl1);

  bpr( rc_z,    true, pn, L4, pc(),  relocInfo::oop_type );
  delayed()->nop();
  bpr( rc_lez, false, pt, L5, lbl1);
  delayed()->nop();

  fb( f_never,     true, pc() + 4,  relocInfo::none);
  delayed()->nop();
  fb( f_notEqual, false, lbl2 );
  delayed()->nop();
  fbp( f_notZero,       true, fcc0, pn, pc() - 4,  relocInfo::none);
  delayed()->nop();
  fbp( f_lessOrGreater, false, fcc1, pt, lbl3 );
  delayed()->nop();

  br( equal,      true, pc() + 1024, relocInfo::none);
  delayed()->nop();
  br( lessEqual, false, lbl1 );
  delayed()->nop();
  br( never,     false, lbl1 );
  delayed()->nop();

  bp( less,               true, icc, pn, pc(), relocInfo::none);
  delayed()->nop();
  bp( lessEqualUnsigned, false, xcc, pt, lbl2 );
  delayed()->nop();

  call( pc(), relocInfo::none);
  delayed()->nop();
  call( lbl3 );
  delayed()->nop();


  casa(  L6, L7, O0 );
  casxa( O1, O2, O3, 0 );

  udiv(   O4, O5, O7 );
  udiv(   G0, (1 << 12) - 1, G1 );
  sdiv(   G1, G2, G3 );
  sdiv(   G4, -((1 << 12) - 1), G5 );
  udivcc( G6, G7, I0 );
  udivcc( I1, -((1 << 12) - 2), I2 );
  sdivcc( I3, I4, I5 );
  sdivcc( I6, -((1 << 12) - 0), I7 );

  done();
  retry();

  fadd( FloatRegisterImpl::S, F0,  F1,  F2 );
  fsub( FloatRegisterImpl::D, F34, F0,  F62 );

  fcmp(  FloatRegisterImpl::Q, fcc0, F0,  F60);
  fcmpe( FloatRegisterImpl::S, fcc1, F31, F30);

  ftox( FloatRegisterImpl::D, F2, F4 );
  ftoi( FloatRegisterImpl::Q, F4, F8 );

  ftof( FloatRegisterImpl::S, FloatRegisterImpl::Q, F3, F12 );

  fxtof( FloatRegisterImpl::S, F4, F5 );
  fitof( FloatRegisterImpl::D, F6, F8 );

  fmov( FloatRegisterImpl::Q, F16, F20 );
  fneg( FloatRegisterImpl::S, F6,  F7 );
  fabs( FloatRegisterImpl::D, F10, F12 );

  fmul( FloatRegisterImpl::Q, F24, F28, F32 );
  fmul( FloatRegisterImpl::S, FloatRegisterImpl::D, F8, F9, F14 );
  fdiv( FloatRegisterImpl::S, F10, F11, F12 );

  fsqrt( FloatRegisterImpl::S, F13, F14 );

  flush( L0, L1 );
  flush( L2, -1 );

  flushw();

  illtrap( (1 << 22) - 2);

  impdep1( 17, (1 << 19) - 1 );
  impdep2(  3,  0 );

  jmpl( L3, L4, L5 );
  delayed()->nop();
  jmpl( L6, -1, L7, Relocation::spec_simple(relocInfo::none));
  delayed()->nop();


  ldf(    FloatRegisterImpl::S, O0, O1, F15 );
  ldf(    FloatRegisterImpl::D, O2, -1, F14 );


  ldfsr(  O3, O4 );
  ldfsr(  O5, -1 );
  ldxfsr( O6, O7 );
  ldxfsr( I0, -1 );

  ldfa(  FloatRegisterImpl::D, I1, I2, 1, F16 );
  ldfa(  FloatRegisterImpl::Q, I3, -1,    F36 );

  ldsb(  I4, I5, I6 );
  ldsb(  I7, -1, G0 );
  ldsh(  G1, G3, G4 );
  ldsh(  G5, -1, G6 );
  ldsw(  G7, L0, L1 );
  ldsw(  L2, -1, L3 );
  ldub(  L4, L5, L6 );
  ldub(  L7, -1, O0 );
  lduh(  O1, O2, O3 );
  lduh(  O4, -1, O5 );
  lduw(  O6, O7, G0 );
  lduw(  G1, -1, G2 );
  ldx(   G3, G4, G5 );
  ldx(   G6, -1, G7 );
  ldd(   I0, I1, I2 );
  ldd(   I3, -1, I4 );

  ldsba(  I5, I6, 2, I7 );
  ldsba(  L0, -1, L1 );
  ldsha(  L2, L3, 3, L4 );
  ldsha(  L5, -1, L6 );
  ldswa(  L7, O0, (1 << 8) - 1, O1 );
  ldswa(  O2, -1, O3 );
  lduba(  O4, O5, 0, O6 );
  lduba(  O7, -1, I0 );
  lduha(  I1, I2, 1, I3 );
  lduha(  I4, -1, I5 );
  lduwa(  I6, I7, 2, L0 );
  lduwa(  L1, -1, L2 );
  ldxa(   L3, L4, 3, L5 );
  ldxa(   L6, -1, L7 );
  ldda(   G0, G1, 4, G2 );
  ldda(   G3, -1, G4 );

  ldstub(  G5, G6, G7 );
  ldstub(  O0, -1, O1 );

  ldstuba( O2, O3, 5, O4 );
  ldstuba( O5, -1, O6 );

  and3(    I0, L0, O0 );
  and3(    G7, -1, O7 );
  andcc(   L2, I2, G2 );
  andcc(   L4, -1, G4 );
  andn(    I5, I6, I7 );
  andn(    I6, -1, I7 );
  andncc(  I5, I6, I7 );
  andncc(  I7, -1, I6 );
  or3(     I5, I6, I7 );
  or3(     I7, -1, I6 );
  orcc(    I5, I6, I7 );
  orcc(    I7, -1, I6 );
  orn(     I5, I6, I7 );
  orn(     I7, -1, I6 );
  orncc(   I5, I6, I7 );
  orncc(   I7, -1, I6 );
  xor3(    I5, I6, I7 );
  xor3(    I7, -1, I6 );
  xorcc(   I5, I6, I7 );
  xorcc(   I7, -1, I6 );
  xnor(    I5, I6, I7 );
  xnor(    I7, -1, I6 );
  xnorcc(  I5, I6, I7 );
  xnorcc(  I7, -1, I6 );

  membar( Membar_mask_bits(StoreStore | LoadStore | StoreLoad | LoadLoad | Sync | MemIssue | Lookaside ) );
  membar( StoreStore );
  membar( LoadStore );
  membar( StoreLoad );
  membar( LoadLoad );
  membar( Sync );
  membar( MemIssue );
  membar( Lookaside );

  fmov( FloatRegisterImpl::S, f_ordered, true, fcc2, F16, F17 );
  fmov( FloatRegisterImpl::D, rc_lz, L5, F18, F20 );

  movcc( overflowClear,      false, icc,  I6, L4 );
  movcc( f_unorderedOrEqual, true,  fcc2, (1 << 10) - 1, O0 );

  movr( rc_nz, I5, I6, I7 );
  movr( rc_gz, L1, -1, L2 );

  mulx(  I5, I6, I7 );
  mulx(  I7, -1, I6 );
  sdivx( I5, I6, I7 );
  sdivx( I7, -1, I6 );
  udivx( I5, I6, I7 );
  udivx( I7, -1, I6 );

  umul(   I5, I6, I7 );
  umul(   I7, -1, I6 );
  smul(   I5, I6, I7 );
  smul(   I7, -1, I6 );
  umulcc( I5, I6, I7 );
  umulcc( I7, -1, I6 );
  smulcc( I5, I6, I7 );
  smulcc( I7, -1, I6 );

  mulscc( I5, I6, I7 );
  mulscc( I7, -1, I6 );

  nop();


  popc( G0, G1);
  popc( -1, G2);

  prefetch(  L1, L2,    severalReads );
  prefetch(  L3, -1,    oneRead );
  prefetcha( O3, O2, 6, severalWritesAndPossiblyReads );
  prefetcha( G2, -1,    oneWrite );

  rett( I7, I7);
  delayed()->nop();
  rett( G0, -1, relocInfo::none);
  delayed()->nop();

  save(    I5, I6, I7 );
  save(    I7, -1, I6 );
  restore( I5, I6, I7 );
  restore( I7, -1, I6 );

  saved();
  restored();

  sethi( 0xaaaaaaaa, I3, Relocation::spec_simple(relocInfo::none));

  sll(  I5, I6, I7 );
  sll(  I7, 31, I6 );
  srl(  I5, I6, I7 );
  srl(  I7,  0, I6 );
  sra(  I5, I6, I7 );
  sra(  I7, 30, I6 );
  sllx( I5, I6, I7 );
  sllx( I7, 63, I6 );
  srlx( I5, I6, I7 );
  srlx( I7,  0, I6 );
  srax( I5, I6, I7 );
  srax( I7, 62, I6 );

  sir( -1 );

  stbar();
  stf(    FloatRegisterImpl::Q, F40, G0, I7 );
  stf(    FloatRegisterImpl::S, F18, I3, -1 );

  stfsr(  L1, L2 );
  stfsr(  I7, -1 );
  stxfsr( I6, I5 );
  stxfsr( L4, -1 );

  stfa( FloatRegisterImpl::D, F22, I6, I7, 7 );
  stfa( FloatRegisterImpl::Q, F44, G0, -1 );

  stb(  L5, O2, I7 );
  stb(  I7, I6, -1 );
  sth(  L5, O2, I7 );
  sth(  I7, I6, -1 );
  stw(  L5, O2, I7 );
  stw(  I7, I6, -1 );
  stx(  L5, O2, I7 );
  stx(  I7, I6, -1 );
  std(  L5, O2, I7 );
  std(  I7, I6, -1 );

  stba( L5, O2, I7,  8 );
  stba( I7, I6, -1 );
  stha( L5, O2, I7,  9 );
  stha( I7, I6, -1 );
  stwa( L5, O2, I7,  0 );
  stwa( I7, I6, -1 );
  stxa( L5, O2, I7, 11 );
  stxa( I7, I6, -1 );
  stda( L5, O2, I7, 12 );
  stda( I7, I6, -1 );

  sub(    I5, I6, I7 );
  sub(    I7, -1, I6 );
  subcc(  I5, I6, I7 );
  subcc(  I7, -1, I6 );
  subc(   I5, I6, I7 );
  subc(   I7, -1, I6 );
  subccc( I5, I6, I7 );
  subccc( I7, -1, I6 );

  swap( I5, I6, I7 );
  swap( I7, -1, I6 );

  swapa( G0, G1, 13, G2 );
  swapa( I7, -1,     I6 );

  taddcc(   I5, I6, I7 );
  taddcc(   I7, -1, I6 );
  taddcctv( I5, I6, I7 );
  taddcctv( I7, -1, I6 );

  tsubcc(   I5, I6, I7 );
  tsubcc(   I7, -1, I6 );
  tsubcctv( I5, I6, I7 );
  tsubcctv( I7, -1, I6 );

  trap( overflowClear, xcc, G0, G1 );
  trap( lessEqual,     icc, I7, 17 );

  bind(lbl2);
  bind(lbl3);

  code()->decode();
}

// Generate a bunch 'o stuff unique to V8
void Assembler::test_v8_onlys() {
  Label lbl1;

  cb( cp_0or1or2, false, pc() - 4, relocInfo::none);
  delayed()->nop();
  cb( cp_never,    true, lbl1);
  delayed()->nop();

  cpop1(1, 2, 3, 4);
  cpop2(5, 6, 7, 8);

  ldc(   I0, I1, 31);
  ldc(   I2, -1,  0);

  lddc(  I4, I4, 30);
  lddc(  I6,  0,  1 );

  ldcsr( L0, L1, 0);
  ldcsr( L1, (1 << 12) - 1, 17 );

  stc(   31, L4, L5);
  stc(   30, L6, -(1 << 12) );

  stdc(  0, L7, G0);
  stdc(  1, G1, 0 );

  stcsr( 16, G2, G3);
  stcsr( 17, G4, 1 );

  stdcq( 4, G5, G6);
  stdcq( 5, G7, -1 );

  bind(lbl1);

  code()->decode();
}
#endif

// Implementation of MacroAssembler

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  }
  else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

// Ring buffer jumps

#ifndef PRODUCT
void MacroAssembler::ret( bool trace ) {
  if (trace) {
    mov(I7, O7); // traceable register
    JMP(O7, 2 * BytesPerInstWord);
  } else {
    jmpl( I7, 2 * BytesPerInstWord, G0 );
  }
}

void MacroAssembler::retl( bool trace ) {
  if (trace) JMP(O7, 2 * BytesPerInstWord);
  else       jmpl( O7, 2 * BytesPerInstWord, G0 );
}
#endif /* PRODUCT */


void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line ) {
  assert_not_delayed();
  // This can only be traceable if r1 & r2 are visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), r2->after_save(), O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmpl(r1, r2, G0);
}
void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
  assert_not_delayed();
  // This can only be traceable if r1 is visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), offset, O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmp(r1, offset);
}

// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl( Address& a, Register d, int offset, const char* file, int line ) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  sethi(a, /*ForceRelocatable=*/ true);
  if (TraceJumps) {
#ifndef PRODUCT
    // Must do the add here so relocation can find the remainder of the
    // value to be relocated.
    add(a.base(), a.disp() + offset, a.base(), a.rspec(offset));
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    set((intptr_t)file, O3);
    set(line, O4);
    Label L;

    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(a.base()->after_save(), O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
    jmpl(a.base(), G0, d);
#else
    jmpl(a, d, offset);
#endif /* PRODUCT */
  } else {
    jmpl(a, d, offset);
  }
}

void MacroAssembler::jump( Address& a, int offset, const char* file, int line ) {
  jumpl( a, G0, offset, file, line );
}


// Convert to C varargs format
void MacroAssembler::set_varargs( Argument inArg, Register d ) {
  // spill register-resident args to their memory slots
  // (SPARC calling convention requires callers to have already preallocated these)
  // Note that the inArg might in fact be an outgoing argument,
  // if a leaf routine or stub does some tricky argument shuffling.
  // This routine must work even though one of the saved arguments
  // is in the d register (e.g., set_varargs(Argument(0, false), O0)).
  for (Argument savePtr = inArg;
       savePtr.is_register();
       savePtr = savePtr.successor()) {
    st_ptr(savePtr.as_register(), savePtr.address_in_frame());
  }
  // return the address of the first memory slot
  add(inArg.address_in_frame(), d);
}

// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}

// flush windows (except current) using flushw instruction if avail.
void MacroAssembler::flush_windows() {
  if (VM_Version::v9_instructions_work())  flushw();
  else                                     flush_windows_trap();
}

// Write serialization page so VM thread can do a pseudo remote membar
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
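// (Illustrative sketch, inferred from the helpers used below rather than
// stated in the original comments: the store lands at
//   serialize_page + ((thread >> shift) & (page_size - sizeof(int))),
// i.e. a thread-specific, word-aligned offset within the one shared page,
// so the VM thread can later protect that page and trap every mutator that
// has a store in flight.)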
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  Address mem_serialize_page(tmp1, os::get_memory_serialize_page());
  srl(thread, os::get_serialize_page_shift_count(), tmp2);
  if (Assembler::is_simm13(os::vm_page_size())) {
    and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
  }
  else {
    set((os::vm_page_size() - sizeof(int)), tmp1);
    and3(tmp2, tmp1, tmp2);
  }
  load_address(mem_serialize_page);
  st(G0, tmp1, tmp2);
}



void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

void MacroAssembler::mult(Register s1, Register s2, Register d) {
  if(VM_Version::v9_instructions_work()) {
    mulx (s1, s2, d);
  } else {
    smul (s1, s2, d);
  }
}

void MacroAssembler::mult(Register s1, int simm13a, Register d) {
  if(VM_Version::v9_instructions_work()) {
    mulx (s1, simm13a, d);
  } else {
    smul (s1, simm13a, d);
  }
}


#ifdef ASSERT
void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
  const Register s1 = G3_scratch;
  const Register s2 = G4_scratch;
  Label get_psr_test;
  // Get the condition codes the V8 way.
  read_ccr_trap(s1);
  mov(ccr_save, s2);
  // This is a test of V8 which has icc but not xcc
  // so mask off the xcc bits
  and3(s2, 0xf, s2);
  // Compare condition codes from the V8 and V9 ways.
  subcc(s2, s1, G0);
  br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
  delayed()->breakpoint_trap();
  bind(get_psr_test);
}

void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
  const Register s1 = G3_scratch;
  const Register s2 = G4_scratch;
  Label set_psr_test;
  // Write out the saved condition codes the V8 way
  write_ccr_trap(ccr_save, s1, s2);
  // Read back the condition codes using the V9 instruction
  rdccr(s1);
  mov(ccr_save, s2);
  // This is a test of V8 which has icc but not xcc
  // so mask off the xcc bits
  and3(s2, 0xf, s2);
  and3(s1, 0xf, s1);
  // Compare the V8 way with the V9 way.
  subcc(s2, s1, G0);
  br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
  delayed()->breakpoint_trap();
  bind(set_psr_test);
}
#else
#define read_ccr_v8_assert(x)
#define write_ccr_v8_assert(x)
#endif // ASSERT

void MacroAssembler::read_ccr(Register ccr_save) {
  if (VM_Version::v9_instructions_work()) {
    rdccr(ccr_save);
    // Test code sequence used on V8. Do not move above rdccr.
    read_ccr_v8_assert(ccr_save);
  } else {
    read_ccr_trap(ccr_save);
  }
}

void MacroAssembler::write_ccr(Register ccr_save) {
  if (VM_Version::v9_instructions_work()) {
    // Test code sequence used on V8. Do not move below wrccr.
    write_ccr_v8_assert(ccr_save);
    wrccr(ccr_save);
  } else {
    const Register temp_reg1 = G3_scratch;
    const Register temp_reg2 = G4_scratch;
    write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
  }
}


// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return ThreadLocalStorage::thread();
}
#else
#define reinitialize_thread ThreadLocalStorage::thread
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif

// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);                // to avoid clobbering O0
  mov(G1, L0);                  // avoid clobbering G1
  mov(G5_method, L1);           // avoid clobbering G5
  mov(G3, L2);                  // avoid clobbering G3 also
  mov(G4, L5);                  // avoid clobbering G4
#ifdef ASSERT
  Address last_get_thread_addr(L3, (address)&last_get_thread);
  sethi(last_get_thread_addr);
  inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
  st_ptr(L4, last_get_thread_addr);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}

static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = ThreadLocalStorage::thread();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
#ifdef CC_INTERP
    save_frame(0);
#else
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
    mov(G1, L1);                // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);                // avoid clobbering G3
    mov(G4, L4);                // avoid clobbering G4
    mov(G5_method, L5);         // avoid clobbering G5_method
#endif /* CC_INTERP */
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    srlx(G1,32,L0);
    srlx(G4,32,L6);
#endif
    call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);                // Restore G1
    // G2 restored below
    mov(L3, G3);                // restore G3
    mov(L4, G4);                // restore G4
    mov(L5, G5_method);         // restore G5_method
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    sllx(L0,32,G2);             // Move old high G1 bits high in G2
    sllx(G1, 0,G1);             // Clear current high G1 bits
    or3 (G1,G2,G1);             // Recover 64-bit G1
    sllx(L6,32,G2);             // Move old high G4 bits high in G2
    sllx(G4, 0,G4);             // Clear current high G4 bits
    or3 (G4,G2,G4);             // Recover 64-bit G4
#endif
    restore(O0, 0, G2_thread);
  }
}


void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}


void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}


// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread,
                0,
                in_bytes(JavaThread::frame_anchor_offset()) +
                         in_bytes(JavaFrameAnchor::flags_offset()));
  Address pc_addr(G2_thread,
                  0,
                  in_bytes(JavaThread::last_Java_pc_offset()));

  // Always set last_Java_pc and flags first because once last_Java_sp is visible
  // has_last_Java_frame is true and users will look at the rest of the fields.
  // (Note: flags should always be zero before we get here so doesn't need to be set.)
#ifdef ASSERT
  // Verify that last_Java_pc was zeroed on return to Java
  Label PcOk;
  save_frame(0);                // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  tst(L0);
#ifdef _LP64
  brx(Assembler::zero, false, Assembler::pt, PcOk);
#else
  br(Assembler::zero, false, Assembler::pt, PcOk);
#endif // _LP64
  delayed() -> nop();
  stop("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed() -> restore();
  stop("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from calling out from Java mode the frame anchor's last_Java_pc
  // will always be set to NULL. It is set here so that if we are doing a call to
  // native (not VM) that we capture the known pc and don't have to rely on the
  // native call having a standard frame linkage where we can find the pc.

  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef _LP64
#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed() -> nop();
  stop("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add( last_java_sp, STACK_BIAS, G4_scratch );
  st_ptr(G4_scratch, Address(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset())));
#else
  st_ptr(last_java_sp, Address(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset())));
#endif // _LP64
}

void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset()));
  Address pc_addr(G2_thread,
                  0,
                  in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
  Address flags(G2_thread,
                0,
                in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));

#ifdef ASSERT
  // check that it WAS previously set
#ifdef CC_INTERP
  save_frame(0);
#else
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame for -Xprof
#endif /* CC_INTERP */
  ld_ptr(sp_addr, L0);
  tst(L0);
  breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}


void MacroAssembler::call_VM_base(
  Register        oop_result,
  Register        thread_cache,
  Register        last_java_sp,
  address         entry_point,
  int             number_of_arguments,
  bool            check_exceptions)
{
  assert_not_delayed();

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread)  mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  else
    delayed()->nop();               // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}

void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
  ld_ptr(exception_addr, scratch_reg);
  br_null(scratch_reg,false,pt,L);
  delayed()->nop();
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}


void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}


void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1,                "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}



// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one.  In that case, last_java_sp must be passed as FP
// instead of SP.


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1,                "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}



void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0,                "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}


void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
  ld_ptr(vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}


void MacroAssembler::get_vm_result_2(Register oop_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, 0, in_bytes(JavaThread::vm_result_2_offset()));
  ld_ptr(vm_result_addr_2, oop_result);
  st_ptr(G0, vm_result_addr_2);
  verify_oop(oop_result);
}


// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
  verify_oop(oop_result);

# ifdef ASSERT
    // Check that we are not overwriting any other oop.
#ifdef CC_INTERP
    save_frame(0);
#else
    save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod for -Xprof
#endif /* CC_INTERP */
    ld_ptr(vm_result_addr, L0);
    tst(L0);
    restore();
    breakpoint_trap(notZero, Assembler::ptr_cc);
    // }
# endif

  st_ptr(oop_result, vm_result_addr);
}


void MacroAssembler::card_table_write(jbyte* byte_map_base,
                                      Register tmp, Register obj) {
#ifdef _LP64
  srlx(obj, CardTableModRefBS::card_shift, obj);
#else
  srl(obj, CardTableModRefBS::card_shift, obj);
#endif
  assert( tmp != obj, "need separate temp reg");
  Address rs(tmp, (address)byte_map_base);
  load_address(rs);
  stb(G0, rs.base(), obj);
}

// %%% Note:  The following six instructions have been moved,
//            unchanged, from assembler_sparc.inline.hpp.
//            They will be refactored at a later date.

void MacroAssembler::sethi(intptr_t imm22a,
                           Register d,
                           bool ForceRelocatable,
                           RelocationHolder const& rspec) {
  Address adr( d, (address)imm22a, rspec );
  MacroAssembler::sethi( adr, ForceRelocatable );
}


void MacroAssembler::sethi(Address& a, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
  // if addr of local, do not need to load it
  assert(a.base() != FP  &&  a.base() != SP, "just use ld or st for locals");
#ifdef _LP64
# ifdef CHECK_DELAY
  assert_not_delayed( (char *)"cannot put two instructions in delay slot" );
# endif
  v9_dep();
//  ForceRelocatable = 1;
  save_pc = pc();
  if (a.hi32() == 0 && a.low32() >= 0) {
    Assembler::sethi(a.low32(), a.base(), a.rspec());
  }
  else if (a.hi32() == -1) {
    Assembler::sethi(~a.low32(), a.base(), a.rspec());
    xor3(a.base(), ~low10(~0), a.base());
  }
  else {
    Assembler::sethi(a.hi32(), a.base(), a.rspec() );   // 22
    if ( a.hi32() & 0x3ff )                             // Any bits?
      or3( a.base(), a.hi32() & 0x3ff, a.base() );      // High 32 bits are now in low 32
    if ( a.low32() & 0xFFFFFC00 ) {                     // done?
      if( (a.low32() >> 20) & 0xfff ) {                 // Any bits set?
        sllx(a.base(), 12, a.base());                        // Make room for next 12 bits
        or3( a.base(), (a.low32() >> 20) & 0xfff, a.base() ); // Or in next 12
        shiftcnt = 0;                                        // We already shifted
      }
      else
        shiftcnt = 12;
      if( (a.low32() >> 10) & 0x3ff ) {
        sllx(a.base(), shiftcnt+10, a.base());               // Make room for last 10 bits
        or3( a.base(), (a.low32() >> 10) & 0x3ff, a.base() ); // Or in next 10
        shiftcnt = 0;
      }
      else
        shiftcnt = 10;
      sllx(a.base(), shiftcnt+10 , a.base());                // Shift leaving disp field 0'd
    }
    else
      sllx( a.base(), 32, a.base() );
  }
  // Pad out the instruction sequence so it can be
  // patched later.
  if ( ForceRelocatable || (a.rtype() != relocInfo::none &&
                            a.rtype() != relocInfo::runtime_call_type) ) {
    while ( pc() < (save_pc + (7 * BytesPerInstWord )) )
      nop();
  }
#else
  Assembler::sethi(a.hi(), a.base(), a.rspec());
#endif

}

int MacroAssembler::size_of_sethi(address a, bool worst_case) {
#ifdef _LP64
  if (worst_case) return 7;
  intptr_t iaddr = (intptr_t)a;
  int hi32 = (int)(iaddr >> 32);
  int lo32 = (int)(iaddr);
  int inst_count;
  if (hi32 == 0 && lo32 >= 0)
    inst_count = 1;
  else if (hi32 == -1)
    inst_count = 2;
  else {
    inst_count = 2;
    if ( hi32 & 0x3ff )
      inst_count++;
    if ( lo32 & 0xFFFFFC00 ) {
      if( (lo32 >> 20) & 0xfff ) inst_count += 2;
      if( (lo32 >> 10) & 0x3ff ) inst_count += 2;
    }
  }
  return BytesPerInstWord * inst_count;
#else
  return BytesPerInstWord;
#endif
}

int MacroAssembler::worst_case_size_of_set() {
  return size_of_sethi(NULL, true) + 1;
}

void MacroAssembler::set(intptr_t value, Register d,
                         RelocationHolder const& rspec) {
  Address val( d, (address)value, rspec);

  if ( rspec.type() == relocInfo::none ) {
    // can optimize
    if (-4096 <= value  &&  value <= 4095) {
      or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(val);
      return;
    }
  }
  assert_not_delayed( (char *)"cannot put two instructions in delay slot" );
  sethi( val );
  if (rspec.type() != relocInfo::none || (value & 0x3ff) != 0) {
    add( d, value & 0x3ff, d, rspec);
  }
}

void MacroAssembler::setsw(int value, Register d,
                           RelocationHolder const& rspec) {
  Address val( d, (address)value, rspec);
  if ( rspec.type() == relocInfo::none ) {
    // can optimize
    if (-4096 <= value  &&  value <= 4095) {
      or3(G0, value, d);
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi( val );
#ifndef _LP64
      if ( value < 0 ) {
        assert_not_delayed();
        sra (d, G0, d);
      }
#endif
      return;
    }
  }
  assert_not_delayed();
  sethi( val );
  add( d, value & 0x3ff, d, rspec);

  // (A negative value could be loaded in 2 insns with sethi/xor,
  // but it would take a more complex relocation.)
#ifndef _LP64
  if ( value < 0)
    sra(d, G0, d);
#endif
}

// %%% End of moved six set instructions.


void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);   // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  }
  else if (hi == -1) {
    Assembler::sethi(~lo, d);  // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d);   // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  }
  else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo, d); // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3 (tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3 (  d, low10(lo),   d);
    sllx(tmp, 32, tmp);
    or3 (d, tmp, d);
  }
}

// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1) ++nWords; // round up to double-word

  return nWords * BytesPerWord;
}


// save_frame: given number of "extra" words in frame,
// issue approp. save instruction (p 200, v8 manual)

void MacroAssembler::save_frame(int extraWords = 0) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
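  // (Illustrative note, an inference spelled out rather than taken from the
  // original comments: address_in_saved_window() names the register's spill
  // slot in the frame's register-save area -- the same stack word a
  // window-overflow trap would flush the register to -- which is why the
  // value survives whether or not the window is flushed between the st_ptr
  // here and the reload after save_frame() below.)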
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}


Address MacroAssembler::allocate_oop_address(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->allocate_index(obj);
  return Address(d, address(obj), oop_Relocation::spec(oop_index));
}


Address MacroAssembler::constant_oop_address(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  return Address(d, address(obj), oop_Relocation::spec(oop_index));
}

void  MacroAssembler::set_narrow_oop(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(0x3fffff, d);
  emit_long( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff) );
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, 0x3ff, d);

}


void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}


void MacroAssembler::safepoint() {
  relocate(breakpoint_Relocation::spec(breakpoint_Relocation::safepoint));
}


void RegistersForDebugging::print(outputStream* s) {
  int j;
  for ( j = 0;  j < 8;  ++j )
    if ( j != 6 ) s->print_cr("i%d = 0x%.16lx", j, i[j]);
    else          s->print_cr( "fp = 0x%.16lx",    i[j]);
  s->cr();

  for ( j = 0;  j < 8;  ++j )
    s->print_cr("l%d = 0x%.16lx", j, l[j]);
  s->cr();

  for ( j = 0;  j < 8;  ++j )
    if ( j != 6 ) s->print_cr("o%d = 0x%.16lx", j, o[j]);
    else          s->print_cr( "sp = 0x%.16lx",    o[j]);
  s->cr();

  for ( j = 0;  j < 8;  ++j )
    s->print_cr("g%d = 0x%.16lx", j, g[j]);
  s->cr();

  // print out floats with compression
  for (j = 0; j < 32; ) {
    jfloat val = f[j];
    int last = j;
    for ( ;  last+1 < 32;  ++last ) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", f[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("f%d", j);
    if ( j != last )  s->print(" - f%d", last);
    s->print(" = %f", val);
    s->fill_to(25);
    s->print_cr(" (0x%x)", val);
    j = last + 1;
  }
  s->cr();

  // and doubles (evens only)
  for (j = 0; j < 32; ) {
    jdouble val = d[j];
    int last = j;
    for ( ;  last+1 < 32;  ++last ) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", d[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("d%d", 2 * j);
    if ( j != last )  s->print(" - d%d", last);
    s->print(" = %f", val);
    s->fill_to(30);
    s->print("(0x%x)", *(int*)&val);
    s->fill_to(42);
    s->print_cr("(0x%x)", *(1 + (int*)&val));
    j = last + 1;
  }
  s->cr();
}

void RegistersForDebugging::save_registers(MacroAssembler* a) {
  a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  a->flush_windows();
  int i;
  for (i = 0; i < 8; ++i) {
    a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, i_offset(i));
    a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, l_offset(i));
    a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
    a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
  }
  for (i = 0;  i < 32;  ++i) {
    a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  }
64 : 32); i += 2) { duke@435: a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i)); duke@435: } duke@435: } duke@435: duke@435: void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) { duke@435: for (int i = 1; i < 8; ++i) { duke@435: a->ld_ptr(r, g_offset(i), as_gRegister(i)); duke@435: } duke@435: for (int j = 0; j < 32; ++j) { duke@435: a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j)); duke@435: } duke@435: for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) { duke@435: a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k)); duke@435: } duke@435: } duke@435: duke@435: duke@435: // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack duke@435: void MacroAssembler::push_fTOS() { duke@435: // %%%%%% need to implement this duke@435: } duke@435: duke@435: // pops double TOS element from CPU stack and pushes on FPU stack duke@435: void MacroAssembler::pop_fTOS() { duke@435: // %%%%%% need to implement this duke@435: } duke@435: duke@435: void MacroAssembler::empty_FPU_stack() { duke@435: // %%%%%% need to implement this duke@435: } duke@435: duke@435: void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) { duke@435: // plausibility check for oops duke@435: if (!VerifyOops) return; duke@435: duke@435: if (reg == G0) return; // always NULL, which is always an oop duke@435: ysr@777: char buffer[64]; ysr@777: #ifdef COMPILER1 ysr@777: if (CommentedAssembly) { ysr@777: snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset()); ysr@777: block_comment(buffer); ysr@777: } ysr@777: #endif ysr@777: ysr@777: int len = strlen(file) + strlen(msg) + 1 + 4; duke@435: sprintf(buffer, "%d", line); ysr@777: len += strlen(buffer); ysr@777: sprintf(buffer, " at offset %d ", offset()); ysr@777: len += strlen(buffer); duke@435: char * real_msg = new char[len]; ysr@777: sprintf(real_msg, "%s%s(%s:%d)", msg, buffer, file, line); duke@435: duke@435: // Call indirectly to solve generation ordering problem duke@435: Address a(O7, (address)StubRoutines::verify_oop_subroutine_entry_address()); duke@435: duke@435: // Make some space on stack above the current register window. duke@435: // Enough to hold 8 64-bit registers. duke@435: add(SP,-8*8,SP); duke@435: duke@435: // Save some 64-bit registers; a normal 'save' chops the heads off duke@435: // of 64-bit longs in the 32-bit build. 
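duke@435: // Each Ox below lands in a full 64-bit slot at
duke@435: //   SP + frame::register_save_words*wordSize + STACK_BIAS + x*8,
duke@435: // i.e. in the scratch area just allocated above the register save area.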
duke@435: stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8); duke@435: stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8); duke@435: mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed duke@435: stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8); duke@435: duke@435: set((intptr_t)real_msg, O1); duke@435: // Load address to call to into O7 duke@435: load_ptr_contents(a, O7); duke@435: // Register call to verify_oop_subroutine duke@435: callr(O7, G0); duke@435: delayed()->nop(); duke@435: // recover frame size duke@435: add(SP, 8*8,SP); duke@435: } duke@435: duke@435: void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) { duke@435: // plausibility check for oops duke@435: if (!VerifyOops) return; duke@435: duke@435: char buffer[64]; duke@435: sprintf(buffer, "%d", line); duke@435: int len = strlen(file) + strlen(msg) + 1 + 4 + strlen(buffer); duke@435: sprintf(buffer, " at SP+%d ", addr.disp()); duke@435: len += strlen(buffer); duke@435: char * real_msg = new char[len]; duke@435: sprintf(real_msg, "%s at SP+%d (%s:%d)", msg, addr.disp(), file, line); duke@435: duke@435: // Call indirectly to solve generation ordering problem duke@435: Address a(O7, (address)StubRoutines::verify_oop_subroutine_entry_address()); duke@435: duke@435: // Make some space on stack above the current register window. duke@435: // Enough to hold 8 64-bit registers. duke@435: add(SP,-8*8,SP); duke@435: duke@435: // Save some 64-bit registers; a normal 'save' chops the heads off duke@435: // of 64-bit longs in the 32-bit build. duke@435: stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8); duke@435: stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8); duke@435: ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed duke@435: stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8); duke@435: duke@435: set((intptr_t)real_msg, O1); duke@435: // Load address to call to into O7 duke@435: load_ptr_contents(a, O7); duke@435: // Register call to verify_oop_subroutine duke@435: callr(O7, G0); duke@435: delayed()->nop(); duke@435: // recover frame size duke@435: add(SP, 8*8,SP); duke@435: } duke@435: duke@435: // side-door communication with signalHandler in os_solaris.cpp duke@435: address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL }; duke@435: duke@435: // This macro is expanded just once; it creates shared code. Contract: duke@435: // receives an oop in O0. Must restore O0 & O7 from TLS. Must not smash ANY duke@435: // registers, including flags. May not use a register 'save', as this blows duke@435: // the high bits of the O-regs if they contain Long values. Acts as a 'leaf' duke@435: // call. duke@435: void MacroAssembler::verify_oop_subroutine() { duke@435: assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" ); duke@435: duke@435: // Leaf call; no frame. duke@435: Label succeed, fail, null_or_fail; duke@435: duke@435: // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home). duke@435: // O0 is now the oop to be checked. O7 is the return address. duke@435: Register O0_obj = O0; duke@435: duke@435: // Save some more registers for temps. 
duke@435: stx(O2,SP,frame::register_save_words*wordSize+STACK_BIAS+2*8); duke@435: stx(O3,SP,frame::register_save_words*wordSize+STACK_BIAS+3*8); duke@435: stx(O4,SP,frame::register_save_words*wordSize+STACK_BIAS+4*8); duke@435: stx(O5,SP,frame::register_save_words*wordSize+STACK_BIAS+5*8); duke@435: duke@435: // Save flags duke@435: Register O5_save_flags = O5; duke@435: rdccr( O5_save_flags ); duke@435: duke@435: { // count number of verifies duke@435: Register O2_adr = O2; duke@435: Register O3_accum = O3; duke@435: Address count_addr( O2_adr, (address) StubRoutines::verify_oop_count_addr() ); duke@435: sethi(count_addr); duke@435: ld(count_addr, O3_accum); duke@435: inc(O3_accum); duke@435: st(O3_accum, count_addr); duke@435: } duke@435: duke@435: Register O2_mask = O2; duke@435: Register O3_bits = O3; duke@435: Register O4_temp = O4; duke@435: duke@435: // mark lower end of faulting range duke@435: assert(_verify_oop_implicit_branch[0] == NULL, "set once"); duke@435: _verify_oop_implicit_branch[0] = pc(); duke@435: duke@435: // We can't check the mark oop because it could be in the process of duke@435: // locking or unlocking while this is running. duke@435: set(Universe::verify_oop_mask (), O2_mask); duke@435: set(Universe::verify_oop_bits (), O3_bits); duke@435: duke@435: // assert((obj & oop_mask) == oop_bits); duke@435: and3(O0_obj, O2_mask, O4_temp); duke@435: cmp(O4_temp, O3_bits); duke@435: brx(notEqual, false, pn, null_or_fail); duke@435: delayed()->nop(); duke@435: duke@435: if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) { duke@435: // the null_or_fail case is useless; must test for null separately duke@435: br_null(O0_obj, false, pn, succeed); duke@435: delayed()->nop(); duke@435: } duke@435: duke@435: // Check the klassOop of this object for being in the right area of memory. 
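duke@435: // The same mask/bits scheme used for the oop above is reused for the
duke@435: // klass: O2_mask and O3_bits are reloaded below only when the klass
duke@435: // values differ from the oop values.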
duke@435: // Cannot do the load in the delay slot above in case O0 is null
coleenp@548: load_klass(O0_obj, O0_obj);
duke@435: // assert((klass & klass_mask) == klass_bits);
duke@435: if( Universe::verify_klass_mask() != Universe::verify_oop_mask() )
duke@435: set(Universe::verify_klass_mask(), O2_mask);
duke@435: if( Universe::verify_klass_bits() != Universe::verify_oop_bits() )
duke@435: set(Universe::verify_klass_bits(), O3_bits);
duke@435: and3(O0_obj, O2_mask, O4_temp);
duke@435: cmp(O4_temp, O3_bits);
duke@435: brx(notEqual, false, pn, fail);
coleenp@548: delayed()->nop();
duke@435: // Check the klass's klass
coleenp@548: load_klass(O0_obj, O0_obj);
duke@435: and3(O0_obj, O2_mask, O4_temp);
duke@435: cmp(O4_temp, O3_bits);
duke@435: brx(notEqual, false, pn, fail);
duke@435: delayed()->wrccr( O5_save_flags ); // Restore CCR's
duke@435: 
duke@435: // mark upper end of faulting range
duke@435: _verify_oop_implicit_branch[1] = pc();
duke@435: 
duke@435: //-----------------------
duke@435: // all tests pass
duke@435: bind(succeed);
duke@435: 
duke@435: // Restore prior 64-bit registers
duke@435: ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+0*8,O0);
duke@435: ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+1*8,O1);
duke@435: ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+2*8,O2);
duke@435: ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+3*8,O3);
duke@435: ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+4*8,O4);
duke@435: ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+5*8,O5);
duke@435: 
duke@435: retl(); // Leaf return; restore prior O7 in delay slot
duke@435: delayed()->ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+7*8,O7);
duke@435: 
duke@435: //-----------------------
duke@435: bind(null_or_fail); // nulls are less common but OK
duke@435: br_null(O0_obj, false, pt, succeed);
duke@435: delayed()->wrccr( O5_save_flags ); // Restore CCR's
duke@435: 
duke@435: //-----------------------
duke@435: // report failure:
duke@435: bind(fail);
duke@435: _verify_oop_implicit_branch[2] = pc();
duke@435: 
duke@435: wrccr( O5_save_flags ); // Restore CCR's
duke@435: 
duke@435: save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
duke@435: 
duke@435: // stop_subroutine expects message pointer in I1.
duke@435: mov(I1, O1);
duke@435: 
duke@435: // Restore prior 64-bit registers
duke@435: ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+0*8,I0);
duke@435: ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+1*8,I1);
duke@435: ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+2*8,I2);
duke@435: ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+3*8,I3);
duke@435: ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+4*8,I4);
duke@435: ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+5*8,I5);
duke@435: 
duke@435: // factor long stop-sequence into subroutine to save space
duke@435: assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
duke@435: 
duke@435: // call indirectly to solve generation ordering problem
duke@435: Address a(O5, (address)StubRoutines::Sparc::stop_subroutine_entry_address());
duke@435: load_ptr_contents(a, O5);
duke@435: jmpl(O5, 0, O7);
duke@435: delayed()->nop();
duke@435: }
duke@435: 
duke@435: 
duke@435: void MacroAssembler::stop(const char* msg) {
duke@435: // save frame first to get O7 for return address
duke@435: // add one word to size in case the struct is an odd number of words long
duke@435: // It must be doubleword-aligned for storing doubles into it.
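duke@435: // For example (sizes here are illustrative only): a 103-word struct
duke@435: // would round_to() up to 104 words, keeping the frame doubleword-aligned.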
duke@435: duke@435: save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2)); duke@435: duke@435: // stop_subroutine expects message pointer in I1. duke@435: set((intptr_t)msg, O1); duke@435: duke@435: // factor long stop-sequence into subroutine to save space duke@435: assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet"); duke@435: duke@435: // call indirectly to solve generation ordering problem duke@435: Address a(O5, (address)StubRoutines::Sparc::stop_subroutine_entry_address()); duke@435: load_ptr_contents(a, O5); duke@435: jmpl(O5, 0, O7); duke@435: delayed()->nop(); duke@435: duke@435: breakpoint_trap(); // make stop actually stop rather than writing duke@435: // unnoticeable results in the output files. duke@435: duke@435: // restore(); done in callee to save space! duke@435: } duke@435: duke@435: duke@435: void MacroAssembler::warn(const char* msg) { duke@435: save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2)); duke@435: RegistersForDebugging::save_registers(this); duke@435: mov(O0, L0); duke@435: set((intptr_t)msg, O0); duke@435: call( CAST_FROM_FN_PTR(address, warning) ); duke@435: delayed()->nop(); duke@435: // ret(); duke@435: // delayed()->restore(); duke@435: RegistersForDebugging::restore_registers(this, L0); duke@435: restore(); duke@435: } duke@435: duke@435: duke@435: void MacroAssembler::untested(const char* what) { duke@435: // We must be able to turn interactive prompting off duke@435: // in order to run automated test scripts on the VM duke@435: // Use the flag ShowMessageBoxOnError duke@435: duke@435: char* b = new char[1024]; duke@435: sprintf(b, "untested: %s", what); duke@435: duke@435: if ( ShowMessageBoxOnError ) stop(b); duke@435: else warn(b); duke@435: } duke@435: duke@435: duke@435: void MacroAssembler::stop_subroutine() { duke@435: RegistersForDebugging::save_registers(this); duke@435: duke@435: // for the sake of the debugger, stick a PC on the current frame duke@435: // (this assumes that the caller has performed an extra "save") duke@435: mov(I7, L7); duke@435: add(O7, -7 * BytesPerInt, I7); duke@435: duke@435: save_frame(); // one more save to free up another O7 register duke@435: mov(I0, O1); // addr of reg save area duke@435: duke@435: // We expect pointer to message in I1. 
Caller must set it up in O1
duke@435: mov(I1, O0); // get msg
duke@435: call (CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
duke@435: delayed()->nop();
duke@435: 
duke@435: restore();
duke@435: 
duke@435: RegistersForDebugging::restore_registers(this, O0);
duke@435: 
duke@435: save_frame(0);
duke@435: call(CAST_FROM_FN_PTR(address,breakpoint));
duke@435: delayed()->nop();
duke@435: restore();
duke@435: 
duke@435: mov(L7, I7);
duke@435: retl();
duke@435: delayed()->restore(); // see stop above
duke@435: }
duke@435: 
duke@435: 
duke@435: void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
duke@435: if ( ShowMessageBoxOnError ) {
duke@435: JavaThreadState saved_state = JavaThread::current()->thread_state();
duke@435: JavaThread::current()->set_thread_state(_thread_in_vm);
duke@435: {
duke@435: // In order to make locks work, we need to fake an in_VM state
duke@435: ttyLocker ttyl;
duke@435: ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
duke@435: if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
duke@435: ::tty->print_cr("Interpreter::bytecode_counter = %d", BytecodeCounter::counter_value());
duke@435: }
duke@435: if (os::message_box(msg, "Execution stopped, print registers?"))
duke@435: regs->print(::tty);
duke@435: }
duke@435: ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
duke@435: }
duke@435: else
duke@435: ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
duke@435: assert(false, "error");
duke@435: }
duke@435: 
duke@435: 
duke@435: #ifndef PRODUCT
duke@435: void MacroAssembler::test() {
duke@435: ResourceMark rm;
duke@435: 
duke@435: CodeBuffer cb("test", 10000, 10000);
duke@435: MacroAssembler* a = new MacroAssembler(&cb);
duke@435: VM_Version::allow_all();
duke@435: a->test_v9();
duke@435: a->test_v8_onlys();
duke@435: VM_Version::revert();
duke@435: 
duke@435: StubRoutines::Sparc::test_stop_entry()();
duke@435: }
duke@435: #endif
duke@435: 
duke@435: 
duke@435: void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
duke@435: subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
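duke@435: // e.g. with Argument::n_register_parameters == 6 (the SPARC out registers),
duke@435: // 8 parameter words leave Rresult = 2; fewer than 6 would go negative, and
duke@435: // the annulled branch below resets Rresult to 0 in that case.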
duke@435: Label no_extras;
duke@435: br( negative, true, pt, no_extras ); // if neg, clear reg
duke@435: delayed()->set( 0, Rresult); // annulled, so only if taken
duke@435: bind( no_extras );
duke@435: }
duke@435: 
duke@435: 
duke@435: void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
duke@435: #ifdef _LP64
duke@435: add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
duke@435: #else
duke@435: add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
duke@435: #endif
duke@435: bclr(1, Rresult);
duke@435: sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
duke@435: }
duke@435: 
duke@435: 
duke@435: void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
duke@435: calc_frame_size(Rextra_words, Rresult);
duke@435: neg(Rresult);
duke@435: save(SP, Rresult, SP);
duke@435: }
duke@435: 
duke@435: 
duke@435: // ---------------------------------------------------------
duke@435: Assembler::RCondition cond2rcond(Assembler::Condition c) {
duke@435: switch (c) {
duke@435: /*case zero: */
duke@435: case Assembler::equal: return Assembler::rc_z;
duke@435: case Assembler::lessEqual: return Assembler::rc_lez;
duke@435: case Assembler::less: return Assembler::rc_lz;
duke@435: /*case notZero:*/
duke@435: case Assembler::notEqual: return Assembler::rc_nz;
duke@435: case Assembler::greater: return Assembler::rc_gz;
duke@435: case Assembler::greaterEqual: return Assembler::rc_gez;
duke@435: }
duke@435: ShouldNotReachHere();
duke@435: return Assembler::rc_z;
duke@435: }
duke@435: 
duke@435: // compares register with zero and branches. NOT FOR USE WITH 64-bit POINTERS
duke@435: void MacroAssembler::br_zero( Condition c, bool a, Predict p, Register s1, Label& L) {
duke@435: tst(s1);
duke@435: br (c, a, p, L);
duke@435: }
duke@435: 
duke@435: 
duke@435: // Compares a pointer register with zero and branches on null.
duke@435: // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
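duke@435: // Typical (illustrative) use:
duke@435: //   br_null(Roop, false, pn, is_null);
duke@435: //   delayed()->nop();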
duke@435: void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) { duke@435: assert_not_delayed(); duke@435: #ifdef _LP64 duke@435: bpr( rc_z, a, p, s1, L ); duke@435: #else duke@435: tst(s1); duke@435: br ( zero, a, p, L ); duke@435: #endif duke@435: } duke@435: duke@435: void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) { duke@435: assert_not_delayed(); duke@435: #ifdef _LP64 duke@435: bpr( rc_nz, a, p, s1, L ); duke@435: #else duke@435: tst(s1); duke@435: br ( notZero, a, p, L ); duke@435: #endif duke@435: } duke@435: ysr@777: void MacroAssembler::br_on_reg_cond( RCondition rc, bool a, Predict p, ysr@777: Register s1, address d, ysr@777: relocInfo::relocType rt ) { ysr@777: if (VM_Version::v9_instructions_work()) { ysr@777: bpr(rc, a, p, s1, d, rt); ysr@777: } else { ysr@777: tst(s1); ysr@777: br(reg_cond_to_cc_cond(rc), a, p, d, rt); ysr@777: } ysr@777: } ysr@777: ysr@777: void MacroAssembler::br_on_reg_cond( RCondition rc, bool a, Predict p, ysr@777: Register s1, Label& L ) { ysr@777: if (VM_Version::v9_instructions_work()) { ysr@777: bpr(rc, a, p, s1, L); ysr@777: } else { ysr@777: tst(s1); ysr@777: br(reg_cond_to_cc_cond(rc), a, p, L); ysr@777: } ysr@777: } ysr@777: duke@435: duke@435: // instruction sequences factored across compiler & interpreter duke@435: duke@435: duke@435: void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low, duke@435: Register Rb_hi, Register Rb_low, duke@435: Register Rresult) { duke@435: duke@435: Label check_low_parts, done; duke@435: duke@435: cmp(Ra_hi, Rb_hi ); // compare hi parts duke@435: br(equal, true, pt, check_low_parts); duke@435: delayed()->cmp(Ra_low, Rb_low); // test low parts duke@435: duke@435: // And, with an unsigned comparison, it does not matter if the numbers duke@435: // are negative or not. duke@435: // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff. duke@435: // The second one is bigger (unsignedly). duke@435: duke@435: // Other notes: The first move in each triplet can be unconditional duke@435: // (and therefore probably prefetchable). duke@435: // And the equals case for the high part does not need testing, duke@435: // since that triplet is reached only after finding the high halves differ. 
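duke@435: // Each triplet starts from an unconditional -1 and lets movcc/set
duke@435: // overwrite it with 0 or 1, so Rresult always ends up in {-1, 0, 1}.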
duke@435: duke@435: if (VM_Version::v9_instructions_work()) { duke@435: duke@435: mov ( -1, Rresult); duke@435: ba( false, done ); delayed()-> movcc(greater, false, icc, 1, Rresult); duke@435: } duke@435: else { duke@435: br(less, true, pt, done); delayed()-> set(-1, Rresult); duke@435: br(greater, true, pt, done); delayed()-> set( 1, Rresult); duke@435: } duke@435: duke@435: bind( check_low_parts ); duke@435: duke@435: if (VM_Version::v9_instructions_work()) { duke@435: mov( -1, Rresult); duke@435: movcc(equal, false, icc, 0, Rresult); duke@435: movcc(greaterUnsigned, false, icc, 1, Rresult); duke@435: } duke@435: else { duke@435: set(-1, Rresult); duke@435: br(equal, true, pt, done); delayed()->set( 0, Rresult); duke@435: br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult); duke@435: } duke@435: bind( done ); duke@435: } duke@435: duke@435: void MacroAssembler::lneg( Register Rhi, Register Rlow ) { duke@435: subcc( G0, Rlow, Rlow ); duke@435: subc( G0, Rhi, Rhi ); duke@435: } duke@435: duke@435: void MacroAssembler::lshl( Register Rin_high, Register Rin_low, duke@435: Register Rcount, duke@435: Register Rout_high, Register Rout_low, duke@435: Register Rtemp ) { duke@435: duke@435: duke@435: Register Ralt_count = Rtemp; duke@435: Register Rxfer_bits = Rtemp; duke@435: duke@435: assert( Ralt_count != Rin_high duke@435: && Ralt_count != Rin_low duke@435: && Ralt_count != Rcount duke@435: && Rxfer_bits != Rin_low duke@435: && Rxfer_bits != Rin_high duke@435: && Rxfer_bits != Rcount duke@435: && Rxfer_bits != Rout_low duke@435: && Rout_low != Rin_high, duke@435: "register alias checks"); duke@435: duke@435: Label big_shift, done; duke@435: duke@435: // This code can be optimized to use the 64 bit shifts in V9. duke@435: // Here we use the 32 bit shifts. duke@435: duke@435: and3( Rcount, 0x3f, Rcount); // take least significant 6 bits duke@435: subcc(Rcount, 31, Ralt_count); duke@435: br(greater, true, pn, big_shift); duke@435: delayed()-> duke@435: dec(Ralt_count); duke@435: duke@435: // shift < 32 bits, Ralt_count = Rcount-31 duke@435: duke@435: // We get the transfer bits by shifting right by 32-count the low duke@435: // register. This is done by shifting right by 31-count and then by one duke@435: // more to take care of the special (rare) case where count is zero duke@435: // (shifting by 32 would not work). duke@435: duke@435: neg( Ralt_count ); duke@435: duke@435: // The order of the next two instructions is critical in the case where duke@435: // Rin and Rout are the same and should not be reversed. 
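duke@435: // Example (count = 8): after the neg(), Ralt_count is 31-8 = 23, so
duke@435: //   Rxfer_bits = (Rin_low >> 23) >> 1 = Rin_low >> 24,
duke@435: // exactly the 8 bits that migrate from the low half into the high half.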
duke@435: duke@435: srl( Rin_low, Ralt_count, Rxfer_bits ); // shift right by 31-count duke@435: if (Rcount != Rout_low) { duke@435: sll( Rin_low, Rcount, Rout_low ); // low half duke@435: } duke@435: sll( Rin_high, Rcount, Rout_high ); duke@435: if (Rcount == Rout_low) { duke@435: sll( Rin_low, Rcount, Rout_low ); // low half duke@435: } duke@435: srl( Rxfer_bits, 1, Rxfer_bits ); // shift right by one more duke@435: ba (false, done); duke@435: delayed()-> duke@435: or3( Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low duke@435: duke@435: // shift >= 32 bits, Ralt_count = Rcount-32 duke@435: bind(big_shift); duke@435: sll( Rin_low, Ralt_count, Rout_high ); duke@435: clr( Rout_low ); duke@435: duke@435: bind(done); duke@435: } duke@435: duke@435: duke@435: void MacroAssembler::lshr( Register Rin_high, Register Rin_low, duke@435: Register Rcount, duke@435: Register Rout_high, Register Rout_low, duke@435: Register Rtemp ) { duke@435: duke@435: Register Ralt_count = Rtemp; duke@435: Register Rxfer_bits = Rtemp; duke@435: duke@435: assert( Ralt_count != Rin_high duke@435: && Ralt_count != Rin_low duke@435: && Ralt_count != Rcount duke@435: && Rxfer_bits != Rin_low duke@435: && Rxfer_bits != Rin_high duke@435: && Rxfer_bits != Rcount duke@435: && Rxfer_bits != Rout_high duke@435: && Rout_high != Rin_low, duke@435: "register alias checks"); duke@435: duke@435: Label big_shift, done; duke@435: duke@435: // This code can be optimized to use the 64 bit shifts in V9. duke@435: // Here we use the 32 bit shifts. duke@435: duke@435: and3( Rcount, 0x3f, Rcount); // take least significant 6 bits duke@435: subcc(Rcount, 31, Ralt_count); duke@435: br(greater, true, pn, big_shift); duke@435: delayed()->dec(Ralt_count); duke@435: duke@435: // shift < 32 bits, Ralt_count = Rcount-31 duke@435: duke@435: // We get the transfer bits by shifting left by 32-count the high duke@435: // register. This is done by shifting left by 31-count and then by one duke@435: // more to take care of the special (rare) case where count is zero duke@435: // (shifting by 32 would not work). duke@435: duke@435: neg( Ralt_count ); duke@435: if (Rcount != Rout_low) { duke@435: srl( Rin_low, Rcount, Rout_low ); duke@435: } duke@435: duke@435: // The order of the next two instructions is critical in the case where duke@435: // Rin and Rout are the same and should not be reversed. 
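duke@435: // Example (count = 8): Rxfer_bits = (Rin_high << 23) << 1 = Rin_high << 24,
duke@435: // i.e. the low 8 bits of the high half, moved to the top of Rout_low.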
duke@435: duke@435: sll( Rin_high, Ralt_count, Rxfer_bits ); // shift left by 31-count duke@435: sra( Rin_high, Rcount, Rout_high ); // high half duke@435: sll( Rxfer_bits, 1, Rxfer_bits ); // shift left by one more duke@435: if (Rcount == Rout_low) { duke@435: srl( Rin_low, Rcount, Rout_low ); duke@435: } duke@435: ba (false, done); duke@435: delayed()-> duke@435: or3( Rout_low, Rxfer_bits, Rout_low ); // new low value: or shifted old low part and xfer from high duke@435: duke@435: // shift >= 32 bits, Ralt_count = Rcount-32 duke@435: bind(big_shift); duke@435: duke@435: sra( Rin_high, Ralt_count, Rout_low ); duke@435: sra( Rin_high, 31, Rout_high ); // sign into hi duke@435: duke@435: bind( done ); duke@435: } duke@435: duke@435: duke@435: duke@435: void MacroAssembler::lushr( Register Rin_high, Register Rin_low, duke@435: Register Rcount, duke@435: Register Rout_high, Register Rout_low, duke@435: Register Rtemp ) { duke@435: duke@435: Register Ralt_count = Rtemp; duke@435: Register Rxfer_bits = Rtemp; duke@435: duke@435: assert( Ralt_count != Rin_high duke@435: && Ralt_count != Rin_low duke@435: && Ralt_count != Rcount duke@435: && Rxfer_bits != Rin_low duke@435: && Rxfer_bits != Rin_high duke@435: && Rxfer_bits != Rcount duke@435: && Rxfer_bits != Rout_high duke@435: && Rout_high != Rin_low, duke@435: "register alias checks"); duke@435: duke@435: Label big_shift, done; duke@435: duke@435: // This code can be optimized to use the 64 bit shifts in V9. duke@435: // Here we use the 32 bit shifts. duke@435: duke@435: and3( Rcount, 0x3f, Rcount); // take least significant 6 bits duke@435: subcc(Rcount, 31, Ralt_count); duke@435: br(greater, true, pn, big_shift); duke@435: delayed()->dec(Ralt_count); duke@435: duke@435: // shift < 32 bits, Ralt_count = Rcount-31 duke@435: duke@435: // We get the transfer bits by shifting left by 32-count the high duke@435: // register. This is done by shifting left by 31-count and then by one duke@435: // more to take care of the special (rare) case where count is zero duke@435: // (shifting by 32 would not work). duke@435: duke@435: neg( Ralt_count ); duke@435: if (Rcount != Rout_low) { duke@435: srl( Rin_low, Rcount, Rout_low ); duke@435: } duke@435: duke@435: // The order of the next two instructions is critical in the case where duke@435: // Rin and Rout are the same and should not be reversed. 
duke@435: duke@435: sll( Rin_high, Ralt_count, Rxfer_bits ); // shift left by 31-count duke@435: srl( Rin_high, Rcount, Rout_high ); // high half duke@435: sll( Rxfer_bits, 1, Rxfer_bits ); // shift left by one more duke@435: if (Rcount == Rout_low) { duke@435: srl( Rin_low, Rcount, Rout_low ); duke@435: } duke@435: ba (false, done); duke@435: delayed()-> duke@435: or3( Rout_low, Rxfer_bits, Rout_low ); // new low value: or shifted old low part and xfer from high duke@435: duke@435: // shift >= 32 bits, Ralt_count = Rcount-32 duke@435: bind(big_shift); duke@435: duke@435: srl( Rin_high, Ralt_count, Rout_low ); duke@435: clr( Rout_high ); duke@435: duke@435: bind( done ); duke@435: } duke@435: duke@435: #ifdef _LP64 duke@435: void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) { duke@435: cmp(Ra, Rb); duke@435: mov( -1, Rresult); duke@435: movcc(equal, false, xcc, 0, Rresult); duke@435: movcc(greater, false, xcc, 1, Rresult); duke@435: } duke@435: #endif duke@435: duke@435: duke@435: void MacroAssembler::float_cmp( bool is_float, int unordered_result, duke@435: FloatRegister Fa, FloatRegister Fb, duke@435: Register Rresult) { duke@435: duke@435: fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb); duke@435: duke@435: Condition lt = unordered_result == -1 ? f_unorderedOrLess : f_less; duke@435: Condition eq = f_equal; duke@435: Condition gt = unordered_result == 1 ? f_unorderedOrGreater : f_greater; duke@435: duke@435: if (VM_Version::v9_instructions_work()) { duke@435: duke@435: mov( -1, Rresult ); duke@435: movcc( eq, true, fcc0, 0, Rresult ); duke@435: movcc( gt, true, fcc0, 1, Rresult ); duke@435: duke@435: } else { duke@435: Label done; duke@435: duke@435: set( -1, Rresult ); duke@435: //fb(lt, true, pn, done); delayed()->set( -1, Rresult ); duke@435: fb( eq, true, pn, done); delayed()->set( 0, Rresult ); duke@435: fb( gt, true, pn, done); delayed()->set( 1, Rresult ); duke@435: duke@435: bind (done); duke@435: } duke@435: } duke@435: duke@435: duke@435: void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) duke@435: { duke@435: if (VM_Version::v9_instructions_work()) { duke@435: Assembler::fneg(w, s, d); duke@435: } else { duke@435: if (w == FloatRegisterImpl::S) { duke@435: Assembler::fneg(w, s, d); duke@435: } else if (w == FloatRegisterImpl::D) { duke@435: // number() does a sanity check on the alignment. duke@435: assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) && duke@435: ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check"); duke@435: duke@435: Assembler::fneg(FloatRegisterImpl::S, s, d); duke@435: Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); duke@435: } else { duke@435: assert(w == FloatRegisterImpl::Q, "Invalid float register width"); duke@435: duke@435: // number() does a sanity check on the alignment. 
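duke@435: // (A quad value occupies four consecutive single registers, so the
duke@435: // encodings of both s and d must be multiples of 4, as asserted below.)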
duke@435: assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) && duke@435: ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check"); duke@435: duke@435: Assembler::fneg(FloatRegisterImpl::S, s, d); duke@435: Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); duke@435: Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor()); duke@435: Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor()); duke@435: } duke@435: } duke@435: } duke@435: duke@435: void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) duke@435: { duke@435: if (VM_Version::v9_instructions_work()) { duke@435: Assembler::fmov(w, s, d); duke@435: } else { duke@435: if (w == FloatRegisterImpl::S) { duke@435: Assembler::fmov(w, s, d); duke@435: } else if (w == FloatRegisterImpl::D) { duke@435: // number() does a sanity check on the alignment. duke@435: assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) && duke@435: ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check"); duke@435: duke@435: Assembler::fmov(FloatRegisterImpl::S, s, d); duke@435: Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); duke@435: } else { duke@435: assert(w == FloatRegisterImpl::Q, "Invalid float register width"); duke@435: duke@435: // number() does a sanity check on the alignment. duke@435: assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) && duke@435: ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check"); duke@435: duke@435: Assembler::fmov(FloatRegisterImpl::S, s, d); duke@435: Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); duke@435: Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor()); duke@435: Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor()); duke@435: } duke@435: } duke@435: } duke@435: duke@435: void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) duke@435: { duke@435: if (VM_Version::v9_instructions_work()) { duke@435: Assembler::fabs(w, s, d); duke@435: } else { duke@435: if (w == FloatRegisterImpl::S) { duke@435: Assembler::fabs(w, s, d); duke@435: } else if (w == FloatRegisterImpl::D) { duke@435: // number() does a sanity check on the alignment. duke@435: assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) && duke@435: ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check"); duke@435: duke@435: Assembler::fabs(FloatRegisterImpl::S, s, d); duke@435: Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); duke@435: } else { duke@435: assert(w == FloatRegisterImpl::Q, "Invalid float register width"); duke@435: duke@435: // number() does a sanity check on the alignment. 
duke@435: assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) && duke@435: ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check"); duke@435: duke@435: Assembler::fabs(FloatRegisterImpl::S, s, d); duke@435: Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); duke@435: Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor()); duke@435: Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor()); duke@435: } duke@435: } duke@435: } duke@435: duke@435: void MacroAssembler::save_all_globals_into_locals() { duke@435: mov(G1,L1); duke@435: mov(G2,L2); duke@435: mov(G3,L3); duke@435: mov(G4,L4); duke@435: mov(G5,L5); duke@435: mov(G6,L6); duke@435: mov(G7,L7); duke@435: } duke@435: duke@435: void MacroAssembler::restore_globals_from_locals() { duke@435: mov(L1,G1); duke@435: mov(L2,G2); duke@435: mov(L3,G3); duke@435: mov(L4,G4); duke@435: mov(L5,G5); duke@435: mov(L6,G6); duke@435: mov(L7,G7); duke@435: } duke@435: duke@435: // Use for 64 bit operation. duke@435: void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm) duke@435: { duke@435: // store ptr_reg as the new top value duke@435: #ifdef _LP64 duke@435: casx(top_ptr_reg, top_reg, ptr_reg); duke@435: #else duke@435: cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm); duke@435: #endif // _LP64 duke@435: } duke@435: duke@435: // [RGV] This routine does not handle 64 bit operations. duke@435: // use casx_under_lock() or casx directly!!! duke@435: void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm) duke@435: { duke@435: // store ptr_reg as the new top value duke@435: if (VM_Version::v9_instructions_work()) { duke@435: cas(top_ptr_reg, top_reg, ptr_reg); duke@435: } else { duke@435: duke@435: // If the register is not an out nor global, it is not visible duke@435: // after the save. Allocate a register for it, save its duke@435: // value in the register save area (the save may not flush duke@435: // registers to the save area). 
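duke@435: // E.g. if top_reg is one of the caller's in/local registers, it is
duke@435: // spilled to its saved-window slot here and reloaded into L1 of the
duke@435: // new window after the save_frame() below.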
duke@435: 
duke@435: Register top_ptr_reg_after_save;
duke@435: Register top_reg_after_save;
duke@435: Register ptr_reg_after_save;
duke@435: 
duke@435: if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) {
duke@435: top_ptr_reg_after_save = top_ptr_reg->after_save();
duke@435: } else {
duke@435: Address reg_save_addr = top_ptr_reg->address_in_saved_window();
duke@435: top_ptr_reg_after_save = L0;
duke@435: st(top_ptr_reg, reg_save_addr);
duke@435: }
duke@435: 
duke@435: if (top_reg->is_out() || top_reg->is_global()) {
duke@435: top_reg_after_save = top_reg->after_save();
duke@435: } else {
duke@435: Address reg_save_addr = top_reg->address_in_saved_window();
duke@435: top_reg_after_save = L1;
duke@435: st(top_reg, reg_save_addr);
duke@435: }
duke@435: 
duke@435: if (ptr_reg->is_out() || ptr_reg->is_global()) {
duke@435: ptr_reg_after_save = ptr_reg->after_save();
duke@435: } else {
duke@435: Address reg_save_addr = ptr_reg->address_in_saved_window();
duke@435: ptr_reg_after_save = L2;
duke@435: st(ptr_reg, reg_save_addr);
duke@435: }
duke@435: 
duke@435: const Register& lock_reg = L3;
duke@435: const Register& lock_ptr_reg = L4;
duke@435: const Register& value_reg = L5;
duke@435: const Register& yield_reg = L6;
duke@435: const Register& yieldall_reg = L7;
duke@435: 
duke@435: save_frame();
duke@435: 
duke@435: if (top_ptr_reg_after_save == L0) {
duke@435: ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save);
duke@435: }
duke@435: 
duke@435: if (top_reg_after_save == L1) {
duke@435: ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save);
duke@435: }
duke@435: 
duke@435: if (ptr_reg_after_save == L2) {
duke@435: ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save);
duke@435: }
duke@435: 
duke@435: Label retry_get_lock;
duke@435: Label not_same;
duke@435: Label dont_yield;
duke@435: 
duke@435: assert(lock_addr, "lock_address should be non null for v8");
duke@435: set((intptr_t)lock_addr, lock_ptr_reg);
duke@435: // Initialize yield counter
duke@435: mov(G0,yield_reg);
duke@435: mov(G0, yieldall_reg);
duke@435: set(StubRoutines::Sparc::locked, lock_reg);
duke@435: 
duke@435: bind(retry_get_lock);
duke@435: cmp(yield_reg, V8AtomicOperationUnderLockSpinCount);
duke@435: br(Assembler::less, false, Assembler::pt, dont_yield);
duke@435: delayed()->nop();
duke@435: 
duke@435: if (use_call_vm) {
duke@435: Untested("Need to verify global reg consistency");
duke@435: call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg);
duke@435: } else {
duke@435: // Save the regs and make space for a C call
duke@435: save(SP, -96, SP);
duke@435: save_all_globals_into_locals();
duke@435: call(CAST_FROM_FN_PTR(address,os::yield_all));
duke@435: delayed()->mov(yieldall_reg, O0);
duke@435: restore_globals_from_locals();
duke@435: restore();
duke@435: }
duke@435: 
duke@435: // reset the counter
duke@435: mov(G0,yield_reg);
duke@435: add(yieldall_reg, 1, yieldall_reg);
duke@435: 
duke@435: bind(dont_yield);
duke@435: // try to get lock
duke@435: swap(lock_ptr_reg, 0, lock_reg);
duke@435: 
duke@435: // did we get the lock?
duke@435: cmp(lock_reg, StubRoutines::Sparc::unlocked);
duke@435: br(Assembler::notEqual, true, Assembler::pn, retry_get_lock);
duke@435: delayed()->add(yield_reg,1,yield_reg);
duke@435: 
duke@435: // yes, got lock. do we have the same top?
duke@435: ld(top_ptr_reg_after_save, 0, value_reg); duke@435: cmp(value_reg, top_reg_after_save); duke@435: br(Assembler::notEqual, false, Assembler::pn, not_same); duke@435: delayed()->nop(); duke@435: duke@435: // yes, same top. duke@435: st(ptr_reg_after_save, top_ptr_reg_after_save, 0); duke@435: membar(Assembler::StoreStore); duke@435: duke@435: bind(not_same); duke@435: mov(value_reg, ptr_reg_after_save); duke@435: st(lock_reg, lock_ptr_reg, 0); // unlock duke@435: duke@435: restore(); duke@435: } duke@435: } duke@435: duke@435: void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg, duke@435: Label& done, Label* slow_case, duke@435: BiasedLockingCounters* counters) { duke@435: assert(UseBiasedLocking, "why call this otherwise?"); duke@435: duke@435: if (PrintBiasedLockingStatistics) { duke@435: assert_different_registers(obj_reg, mark_reg, temp_reg, O7); duke@435: if (counters == NULL) duke@435: counters = BiasedLocking::counters(); duke@435: } duke@435: duke@435: Label cas_label; duke@435: duke@435: // Biased locking duke@435: // See whether the lock is currently biased toward our thread and duke@435: // whether the epoch is still valid duke@435: // Note that the runtime guarantees sufficient alignment of JavaThread duke@435: // pointers to allow age to be placed into low bits duke@435: assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); duke@435: and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); duke@435: cmp(temp_reg, markOopDesc::biased_lock_pattern); duke@435: brx(Assembler::notEqual, false, Assembler::pn, cas_label); coleenp@548: delayed()->nop(); coleenp@548: coleenp@548: load_klass(obj_reg, temp_reg); duke@435: ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg); duke@435: or3(G2_thread, temp_reg, temp_reg); duke@435: xor3(mark_reg, temp_reg, temp_reg); duke@435: andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg); duke@435: if (counters != NULL) { duke@435: cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg); duke@435: // Reload mark_reg as we may need it later duke@435: ld_ptr(Address(obj_reg, 0, oopDesc::mark_offset_in_bytes()), mark_reg); duke@435: } duke@435: brx(Assembler::equal, true, Assembler::pt, done); duke@435: delayed()->nop(); duke@435: duke@435: Label try_revoke_bias; duke@435: Label try_rebias; duke@435: Address mark_addr = Address(obj_reg, 0, oopDesc::mark_offset_in_bytes()); duke@435: assert(mark_addr.disp() == 0, "cas must take a zero displacement"); duke@435: duke@435: // At this point we know that the header has the bias pattern and duke@435: // that we are not the bias owner in the current epoch. We need to duke@435: // figure out more details about the state of the header in order to duke@435: // know what operations can be legally performed on the object's duke@435: // header. duke@435: duke@435: // If the low three bits in the xor result aren't clear, that means duke@435: // the prototype header is no longer biased and we have to revoke duke@435: // the bias on this object. duke@435: btst(markOopDesc::biased_lock_mask_in_place, temp_reg); duke@435: brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias); duke@435: duke@435: // Biasing is still enabled for this data type. 
See whether the duke@435: // epoch of the current bias is still valid, meaning that the epoch duke@435: // bits of the mark word are equal to the epoch bits of the duke@435: // prototype header. (Note that the prototype header's epoch bits duke@435: // only change at a safepoint.) If not, attempt to rebias the object duke@435: // toward the current thread. Note that we must be absolutely sure duke@435: // that the current epoch is invalid in order to do this because duke@435: // otherwise the manipulations it performs on the mark word are duke@435: // illegal. duke@435: delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg); duke@435: brx(Assembler::notZero, false, Assembler::pn, try_rebias); duke@435: duke@435: // The epoch of the current bias is still valid but we know nothing duke@435: // about the owner; it might be set or it might be clear. Try to duke@435: // acquire the bias of the object using an atomic operation. If this duke@435: // fails we will go in to the runtime to revoke the object's bias. duke@435: // Note that we first construct the presumed unbiased header so we duke@435: // don't accidentally blow away another thread's valid bias. duke@435: delayed()->and3(mark_reg, duke@435: markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place, duke@435: mark_reg); duke@435: or3(G2_thread, mark_reg, temp_reg); duke@435: casx_under_lock(mark_addr.base(), mark_reg, temp_reg, duke@435: (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); duke@435: // If the biasing toward our thread failed, this means that duke@435: // another thread succeeded in biasing it toward itself and we duke@435: // need to revoke that bias. The revocation will occur in the duke@435: // interpreter runtime in the slow case. duke@435: cmp(mark_reg, temp_reg); duke@435: if (counters != NULL) { duke@435: cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg); duke@435: } duke@435: if (slow_case != NULL) { duke@435: brx(Assembler::notEqual, true, Assembler::pn, *slow_case); duke@435: delayed()->nop(); duke@435: } duke@435: br(Assembler::always, false, Assembler::pt, done); duke@435: delayed()->nop(); duke@435: duke@435: bind(try_rebias); duke@435: // At this point we know the epoch has expired, meaning that the duke@435: // current "bias owner", if any, is actually invalid. Under these duke@435: // circumstances _only_, we are allowed to use the current header's duke@435: // value as the comparison value when doing the cas to acquire the duke@435: // bias in the current epoch. In other words, we allow transfer of duke@435: // the bias from one thread to another directly in this situation. duke@435: // duke@435: // FIXME: due to a lack of registers we currently blow away the age duke@435: // bits in this situation. Should attempt to preserve them. coleenp@548: load_klass(obj_reg, temp_reg); duke@435: ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg); duke@435: or3(G2_thread, temp_reg, temp_reg); duke@435: casx_under_lock(mark_addr.base(), mark_reg, temp_reg, duke@435: (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); duke@435: // If the biasing toward our thread failed, this means that duke@435: // another thread succeeded in biasing it toward itself and we duke@435: // need to revoke that bias. The revocation will occur in the duke@435: // interpreter runtime in the slow case. 
duke@435: cmp(mark_reg, temp_reg); duke@435: if (counters != NULL) { duke@435: cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg); duke@435: } duke@435: if (slow_case != NULL) { duke@435: brx(Assembler::notEqual, true, Assembler::pn, *slow_case); duke@435: delayed()->nop(); duke@435: } duke@435: br(Assembler::always, false, Assembler::pt, done); duke@435: delayed()->nop(); duke@435: duke@435: bind(try_revoke_bias); duke@435: // The prototype mark in the klass doesn't have the bias bit set any duke@435: // more, indicating that objects of this data type are not supposed duke@435: // to be biased any more. We are going to try to reset the mark of duke@435: // this object to the prototype value and fall through to the duke@435: // CAS-based locking scheme. Note that if our CAS fails, it means duke@435: // that another thread raced us for the privilege of revoking the duke@435: // bias of this particular object, so it's okay to continue in the duke@435: // normal locking code. duke@435: // duke@435: // FIXME: due to a lack of registers we currently blow away the age duke@435: // bits in this situation. Should attempt to preserve them. coleenp@548: load_klass(obj_reg, temp_reg); duke@435: ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg); duke@435: casx_under_lock(mark_addr.base(), mark_reg, temp_reg, duke@435: (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); duke@435: // Fall through to the normal CAS-based lock, because no matter what duke@435: // the result of the above CAS, some thread must have succeeded in duke@435: // removing the bias bit from the object's header. duke@435: if (counters != NULL) { duke@435: cmp(mark_reg, temp_reg); duke@435: cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg); duke@435: } duke@435: duke@435: bind(cas_label); duke@435: } duke@435: duke@435: void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done, duke@435: bool allow_delay_slot_filling) { duke@435: // Check for biased locking unlock case, which is a no-op duke@435: // Note: we do not have to check the thread ID for two reasons. duke@435: // First, the interpreter checks for IllegalMonitorStateException at duke@435: // a higher level. Second, if the bias was revoked while we held the duke@435: // lock, the object could not be rebiased toward another thread, so duke@435: // the bias bit would be clear. duke@435: ld_ptr(mark_addr, temp_reg); duke@435: and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); duke@435: cmp(temp_reg, markOopDesc::biased_lock_pattern); duke@435: brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done); duke@435: delayed(); duke@435: if (!allow_delay_slot_filling) { duke@435: nop(); duke@435: } duke@435: } duke@435: duke@435: duke@435: // CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by duke@435: // Solaris/SPARC's "as". Another apt name would be cas_ptr() duke@435: duke@435: void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) { duke@435: casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()) ; duke@435: } duke@435: duke@435: duke@435: duke@435: // compiler_lock_object() and compiler_unlock_object() are direct transliterations duke@435: // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments. 
duke@435: // The code could be tightened up considerably.
duke@435: //
duke@435: // box->dhw disposition - post-conditions at DONE_LABEL.
duke@435: // - Successful inflated lock: box->dhw != 0.
duke@435: // Any non-zero value suffices.
duke@435: // Consider G2_thread, rsp, boxReg, or unused_mark()
duke@435: // - Successful Stack-lock: box->dhw == mark.
duke@435: // box->dhw must contain the displaced mark word value
duke@435: // - Failure -- icc.ZFlag == 0 and box->dhw is undefined.
duke@435: // The slow-path fast_enter() and slow_enter() operators
duke@435: // are responsible for setting box->dhw = NonZero (typically ::unused_mark).
duke@435: // - Biased: box->dhw is undefined
duke@435: //
duke@435: // SPARC refworkload performance - specifically jetstream and scimark - is
duke@435: // extremely sensitive to the size of the code emitted by compiler_lock_object
duke@435: // and compiler_unlock_object. Critically, the key factor is code size, not path
duke@435: // length. (Simple experiments to pad CLO with unexecuted NOPs demonstrate the
duke@435: // effect).
duke@435: 
duke@435: 
duke@435: void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark, Register Rbox, Register Rscratch,
duke@435: BiasedLockingCounters* counters) {
duke@435: Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes());
duke@435: 
duke@435: verify_oop(Roop);
duke@435: Label done ;
duke@435: 
duke@435: if (counters != NULL) {
duke@435: inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
duke@435: }
duke@435: 
duke@435: if (EmitSync & 1) {
duke@435: mov (3, Rscratch) ;
duke@435: st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
duke@435: cmp (SP, G0) ;
duke@435: return ;
duke@435: }
duke@435: 
duke@435: if (EmitSync & 2) {
duke@435: 
duke@435: // Fetch object's markword
duke@435: ld_ptr(mark_addr, Rmark);
duke@435: 
duke@435: if (UseBiasedLocking) {
duke@435: biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
duke@435: }
duke@435: 
duke@435: // Save Rbox in Rscratch to be used for the cas operation
duke@435: mov(Rbox, Rscratch);
duke@435: 
duke@435: // set Rmark to markOop | markOopDesc::unlocked_value
duke@435: or3(Rmark, markOopDesc::unlocked_value, Rmark);
duke@435: 
duke@435: // Initialize the box. (Must happen before we update the object mark!)
duke@435: st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); duke@435: duke@435: // compare object markOop with Rmark and if equal exchange Rscratch with object markOop duke@435: assert(mark_addr.disp() == 0, "cas must take a zero displacement"); duke@435: casx_under_lock(mark_addr.base(), Rmark, Rscratch, duke@435: (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); duke@435: duke@435: // if compare/exchange succeeded we found an unlocked object and we now have locked it duke@435: // hence we are done duke@435: cmp(Rmark, Rscratch); duke@435: #ifdef _LP64 duke@435: sub(Rscratch, STACK_BIAS, Rscratch); duke@435: #endif duke@435: brx(Assembler::equal, false, Assembler::pt, done); duke@435: delayed()->sub(Rscratch, SP, Rscratch); //pull next instruction into delay slot duke@435: duke@435: // we did not find an unlocked object so see if this is a recursive case duke@435: // sub(Rscratch, SP, Rscratch); duke@435: assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); duke@435: andcc(Rscratch, 0xfffff003, Rscratch); duke@435: st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); duke@435: bind (done) ; duke@435: return ; duke@435: } duke@435: duke@435: Label Egress ; duke@435: duke@435: if (EmitSync & 256) { duke@435: Label IsInflated ; duke@435: duke@435: ld_ptr (mark_addr, Rmark); // fetch obj->mark duke@435: // Triage: biased, stack-locked, neutral, inflated duke@435: if (UseBiasedLocking) { duke@435: biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); duke@435: // Invariant: if control reaches this point in the emitted stream duke@435: // then Rmark has not been modified. duke@435: } duke@435: duke@435: // Store mark into displaced mark field in the on-stack basic-lock "box" duke@435: // Critically, this must happen before the CAS duke@435: // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty. duke@435: st_ptr (Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); duke@435: andcc (Rmark, 2, G0) ; duke@435: brx (Assembler::notZero, false, Assembler::pn, IsInflated) ; duke@435: delayed() -> duke@435: duke@435: // Try stack-lock acquisition. duke@435: // Beware: the 1st instruction is in a delay slot duke@435: mov (Rbox, Rscratch); duke@435: or3 (Rmark, markOopDesc::unlocked_value, Rmark); duke@435: assert (mark_addr.disp() == 0, "cas must take a zero displacement"); duke@435: casn (mark_addr.base(), Rmark, Rscratch) ; duke@435: cmp (Rmark, Rscratch); duke@435: brx (Assembler::equal, false, Assembler::pt, done); duke@435: delayed()->sub(Rscratch, SP, Rscratch); duke@435: duke@435: // Stack-lock attempt failed - check for recursive stack-lock. duke@435: // See the comments below about how we might remove this case. duke@435: #ifdef _LP64 duke@435: sub (Rscratch, STACK_BIAS, Rscratch); duke@435: #endif duke@435: assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); duke@435: andcc (Rscratch, 0xfffff003, Rscratch); duke@435: br (Assembler::always, false, Assembler::pt, done) ; duke@435: delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); duke@435: duke@435: bind (IsInflated) ; duke@435: if (EmitSync & 64) { duke@435: // If m->owner != null goto IsLocked duke@435: // Pessimistic form: Test-and-CAS vs CAS duke@435: // The optimistic form avoids RTS->RTO cache line upgrades. 
duke@435: ld_ptr (Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ;
duke@435: andcc (Rscratch, Rscratch, G0) ;
duke@435: brx (Assembler::notZero, false, Assembler::pn, done) ;
duke@435: delayed()->nop() ;
duke@435: // m->owner == null : it's unlocked.
duke@435: }
duke@435: 
duke@435: // Try to CAS m->owner from null to Self
duke@435: // Invariant: if we acquire the lock then _recursions should be 0.
duke@435: add (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ;
duke@435: mov (G2_thread, Rscratch) ;
duke@435: casn (Rmark, G0, Rscratch) ;
duke@435: cmp (Rscratch, G0) ;
duke@435: // Intentional fall-through into done
duke@435: } else {
duke@435: // Aggressively avoid the Store-before-CAS penalty
duke@435: // Defer the store into box->dhw until after the CAS
duke@435: Label IsInflated, Recursive ;
duke@435: 
duke@435: // Anticipate CAS -- Avoid RTS->RTO upgrade
duke@435: // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ;
duke@435: 
duke@435: ld_ptr (mark_addr, Rmark); // fetch obj->mark
duke@435: // Triage: biased, stack-locked, neutral, inflated
duke@435: 
duke@435: if (UseBiasedLocking) {
duke@435: biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
duke@435: // Invariant: if control reaches this point in the emitted stream
duke@435: // then Rmark has not been modified.
duke@435: }
duke@435: andcc (Rmark, 2, G0) ;
duke@435: brx (Assembler::notZero, false, Assembler::pn, IsInflated) ;
duke@435: delayed()-> // Beware - dangling delay-slot
duke@435: 
duke@435: // Try stack-lock acquisition.
duke@435: // Transiently install BUSY (0) encoding in the mark word.
duke@435: // if the CAS of 0 into the mark was successful then we execute:
duke@435: // ST box->dhw = mark -- save fetched mark in on-stack basiclock box
duke@435: // ST obj->mark = box -- overwrite transient 0 value
duke@435: // This presumes TSO, of course.
duke@435: 
duke@435: mov (0, Rscratch) ;
duke@435: or3 (Rmark, markOopDesc::unlocked_value, Rmark);
duke@435: assert (mark_addr.disp() == 0, "cas must take a zero displacement");
duke@435: casn (mark_addr.base(), Rmark, Rscratch) ;
duke@435: // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ;
duke@435: cmp (Rscratch, Rmark) ;
duke@435: brx (Assembler::notZero, false, Assembler::pn, Recursive) ;
duke@435: delayed() ->
duke@435: st_ptr (Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
duke@435: if (counters != NULL) {
duke@435: cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
duke@435: }
duke@435: br (Assembler::always, false, Assembler::pt, done);
duke@435: delayed() ->
duke@435: st_ptr (Rbox, mark_addr) ;
duke@435: 
duke@435: bind (Recursive) ;
duke@435: // Stack-lock attempt failed - check for recursive stack-lock.
duke@435: // Tests show that we can remove the recursive case with no impact
duke@435: // on refworkload 0.83. If we need to reduce the size of the code
duke@435: // emitted by compiler_lock_object() the recursive case is a perfect
duke@435: // candidate.
duke@435: //
duke@435: // A more extreme idea is to always inflate on stack-lock recursion.
duke@435: // This lets us eliminate the recursive checks in compiler_lock_object
duke@435: // and compiler_unlock_object and the (box->dhw == 0) encoding.
duke@435: // A brief experiment - requiring changes to synchronizer.cpp and the
duke@435: // interpreter - showed a performance *increase*. 
In the same experiment I eliminated duke@435: // the fast-path stack-lock code from the interpreter and always passed duke@435: // control to the "slow" operators in synchronizer.cpp. duke@435: duke@435: // RScratch contains the fetched obj->mark value from the failed CASN. duke@435: #ifdef _LP64 duke@435: sub (Rscratch, STACK_BIAS, Rscratch); duke@435: #endif duke@435: sub(Rscratch, SP, Rscratch); duke@435: assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); duke@435: andcc (Rscratch, 0xfffff003, Rscratch); duke@435: if (counters != NULL) { duke@435: // Accounting needs the Rscratch register duke@435: st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); duke@435: cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); duke@435: br (Assembler::always, false, Assembler::pt, done) ; duke@435: delayed()->nop() ; duke@435: } else { duke@435: br (Assembler::always, false, Assembler::pt, done) ; duke@435: delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); duke@435: } duke@435: duke@435: bind (IsInflated) ; duke@435: if (EmitSync & 64) { duke@435: // If m->owner != null goto IsLocked duke@435: // Test-and-CAS vs CAS duke@435: // Pessimistic form avoids futile (doomed) CAS attempts duke@435: // The optimistic form avoids RTS->RTO cache line upgrades. duke@435: ld_ptr (Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ; duke@435: andcc (Rscratch, Rscratch, G0) ; duke@435: brx (Assembler::notZero, false, Assembler::pn, done) ; duke@435: delayed()->nop() ; duke@435: // m->owner == null : it's unlocked. duke@435: } duke@435: duke@435: // Try to CAS m->owner from null to Self duke@435: // Invariant: if we acquire the lock then _recursions should be 0. duke@435: add (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ; duke@435: mov (G2_thread, Rscratch) ; duke@435: casn (Rmark, G0, Rscratch) ; duke@435: cmp (Rscratch, G0) ; duke@435: // ST box->displaced_header = NonZero. duke@435: // Any non-zero value suffices: duke@435: // unused_mark(), G2_thread, RBox, RScratch, rsp, etc. 
duke@435: st_ptr (Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
duke@435: // Intentional fall-through into done
duke@435: }
duke@435:
duke@435: bind (done) ;
duke@435: }
duke@435:
duke@435: void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark, Register Rbox, Register Rscratch) {
duke@435: Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes());
duke@435:
duke@435: Label done ;
duke@435:
duke@435: if (EmitSync & 4) {
duke@435: cmp (SP, G0) ;
duke@435: return ;
duke@435: }
duke@435:
duke@435: if (EmitSync & 8) {
duke@435: if (UseBiasedLocking) {
duke@435: biased_locking_exit(mark_addr, Rscratch, done);
duke@435: }
duke@435:
duke@435: // Test first if it is a fast recursive unlock
duke@435: ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
duke@435: cmp(Rmark, G0);
duke@435: brx(Assembler::equal, false, Assembler::pt, done);
duke@435: delayed()->nop();
duke@435:
duke@435: // Check if it is still a lightweight lock; this is true if we see
duke@435: // the stack address of the basicLock in the markOop of the object
duke@435: assert(mark_addr.disp() == 0, "cas must take a zero displacement");
duke@435: casx_under_lock(mark_addr.base(), Rbox, Rmark,
duke@435: (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
duke@435: br (Assembler::always, false, Assembler::pt, done);
duke@435: delayed()->cmp(Rbox, Rmark);
duke@435: bind (done) ;
duke@435: return ;
duke@435: }
duke@435:
duke@435: // Beware ... If the aggregate size of the code emitted by CLO and CUO
duke@435: // is too large, performance rolls abruptly off a cliff.
duke@435: // This could be related to inlining policies, code cache management, or
duke@435: // I$ effects.
duke@435: Label LStacked ;
duke@435:
duke@435: if (UseBiasedLocking) {
duke@435: // TODO: eliminate redundant LDs of obj->mark
duke@435: biased_locking_exit(mark_addr, Rscratch, done);
duke@435: }
duke@435:
duke@435: ld_ptr (Roop, oopDesc::mark_offset_in_bytes(), Rmark) ;
duke@435: ld_ptr (Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch);
duke@435: andcc (Rscratch, Rscratch, G0);
duke@435: brx (Assembler::zero, false, Assembler::pn, done);
duke@435: delayed()-> nop() ; // consider: relocate fetch of mark, above, into this DS
duke@435: andcc (Rmark, 2, G0) ;
duke@435: brx (Assembler::zero, false, Assembler::pt, LStacked) ;
duke@435: delayed()-> nop() ;
duke@435:
duke@435: // It's inflated
duke@435: // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
duke@435: // the ST of 0 into _owner which releases the lock. This prevents loads
duke@435: // and stores within the critical section from reordering (floating)
duke@435: // past the store that releases the lock. But TSO is a strong memory model
duke@435: // and that particular flavor of barrier is a noop, so we can safely elide it.
duke@435: // Note that we use 1-0 locking by default for the inflated case. We
duke@435: // close the resultant (and rare) race by having contended threads in
duke@435: // monitorenter periodically poll _owner.
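duke@435: // The test emitted below amounts to (illustrative pseudocode only):
duke@435: // if (m->owner != Self || m->recursions != 0) goto done ;
duke@435: // The XOR/ORcc pair folds both comparisons into one conditional branch.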
duke@435: ld_ptr (Address(Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ;
duke@435: ld_ptr (Address(Rmark, 0, ObjectMonitor::recursions_offset_in_bytes()-2), Rbox) ;
duke@435: xor3 (Rscratch, G2_thread, Rscratch) ;
duke@435: orcc (Rbox, Rscratch, Rbox) ;
duke@435: brx (Assembler::notZero, false, Assembler::pn, done) ;
duke@435: delayed()->
duke@435: ld_ptr (Address (Rmark, 0, ObjectMonitor::EntryList_offset_in_bytes()-2), Rscratch) ;
duke@435: ld_ptr (Address (Rmark, 0, ObjectMonitor::cxq_offset_in_bytes()-2), Rbox) ;
duke@435: orcc (Rbox, Rscratch, G0) ;
duke@435: if (EmitSync & 65536) {
duke@435: Label LSucc ;
duke@435: brx (Assembler::notZero, false, Assembler::pn, LSucc) ;
duke@435: delayed()->nop() ;
duke@435: br (Assembler::always, false, Assembler::pt, done) ;
duke@435: delayed()->
duke@435: st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ;
duke@435:
duke@435: bind (LSucc) ;
duke@435: st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ;
duke@435: if (os::is_MP()) { membar (StoreLoad) ; }
duke@435: ld_ptr (Address (Rmark, 0, ObjectMonitor::succ_offset_in_bytes()-2), Rscratch) ;
duke@435: andcc (Rscratch, Rscratch, G0) ;
duke@435: brx (Assembler::notZero, false, Assembler::pt, done) ;
duke@435: delayed()-> andcc (G0, G0, G0) ;
duke@435: add (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ;
duke@435: mov (G2_thread, Rscratch) ;
duke@435: casn (Rmark, G0, Rscratch) ;
duke@435: cmp (Rscratch, G0) ;
duke@435: // invert icc.zf and goto done
duke@435: brx (Assembler::notZero, false, Assembler::pt, done) ;
duke@435: delayed() -> cmp (G0, G0) ;
duke@435: br (Assembler::always, false, Assembler::pt, done);
duke@435: delayed() -> cmp (G0, 1) ;
duke@435: } else {
duke@435: brx (Assembler::notZero, false, Assembler::pn, done) ;
duke@435: delayed()->nop() ;
duke@435: br (Assembler::always, false, Assembler::pt, done) ;
duke@435: delayed()->
duke@435: st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ;
duke@435: }
duke@435:
duke@435: bind (LStacked) ;
duke@435: // Consider: we could replace the expensive CAS in the exit
duke@435: // path with a simple ST of the displaced mark value fetched from
duke@435: // the on-stack basiclock box. That admits a race where a thread T2
duke@435: // in the slow lock path -- inflating with monitor M -- could race a
duke@435: // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
duke@435: // More precisely T1 in the stack-lock unlock path could "stomp" the
duke@435: // inflated mark value M installed by T2, resulting in an orphan
duke@435: // object monitor M and T2 becoming stranded. We can remedy that situation
duke@435: // by having T2 periodically poll the object's mark word using timed wait
duke@435: // operations. If T2 discovers that a stomp has occurred, it vacates
duke@435: // the monitor M and wakes any other threads stranded on the now-orphan M.
duke@435: // In addition the monitor scavenger, which performs deflation,
duke@435: // would also need to check for orphan monitors and stranded threads.
duke@435: //
duke@435: // Finally, inflation is also used when T2 needs to assign a hashCode
duke@435: // to O and O is stack-locked by T1. The "stomp" race could cause
duke@435: // an assigned hashCode value to be lost. We can avoid that condition
duke@435: // and provide the necessary hashCode stability invariants by ensuring
duke@435: // that hashCode generation is idempotent between copying GCs.
duke@435: // For example we could compute the hashCode of an object O as duke@435: // O's heap address XOR some high quality RNG value that is refreshed duke@435: // at GC-time. The monitor scavenger would install the hashCode duke@435: // found in any orphan monitors. Again, the mechanism admits a duke@435: // lost-update "stomp" WAW race but detects and recovers as needed. duke@435: // duke@435: // A prototype implementation showed excellent results, although duke@435: // the scavenger and timeout code was rather involved. duke@435: duke@435: casn (mark_addr.base(), Rbox, Rscratch) ; duke@435: cmp (Rbox, Rscratch); duke@435: // Intentional fall through into done ... duke@435: duke@435: bind (done) ; duke@435: } duke@435: duke@435: duke@435: duke@435: void MacroAssembler::print_CPU_state() { duke@435: // %%%%% need to implement this duke@435: } duke@435: duke@435: void MacroAssembler::verify_FPU(int stack_depth, const char* s) { duke@435: // %%%%% need to implement this duke@435: } duke@435: duke@435: void MacroAssembler::push_IU_state() { duke@435: // %%%%% need to implement this duke@435: } duke@435: duke@435: duke@435: void MacroAssembler::pop_IU_state() { duke@435: // %%%%% need to implement this duke@435: } duke@435: duke@435: duke@435: void MacroAssembler::push_FPU_state() { duke@435: // %%%%% need to implement this duke@435: } duke@435: duke@435: duke@435: void MacroAssembler::pop_FPU_state() { duke@435: // %%%%% need to implement this duke@435: } duke@435: duke@435: duke@435: void MacroAssembler::push_CPU_state() { duke@435: // %%%%% need to implement this duke@435: } duke@435: duke@435: duke@435: void MacroAssembler::pop_CPU_state() { duke@435: // %%%%% need to implement this duke@435: } duke@435: duke@435: duke@435: duke@435: void MacroAssembler::verify_tlab() { duke@435: #ifdef ASSERT duke@435: if (UseTLAB && VerifyOops) { duke@435: Label next, next2, ok; duke@435: Register t1 = L0; duke@435: Register t2 = L1; duke@435: Register t3 = L2; duke@435: duke@435: save_frame(0); duke@435: ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); duke@435: ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); duke@435: or3(t1, t2, t3); duke@435: cmp(t1, t2); duke@435: br(Assembler::greaterEqual, false, Assembler::pn, next); duke@435: delayed()->nop(); duke@435: stop("assert(top >= start)"); duke@435: should_not_reach_here(); duke@435: duke@435: bind(next); duke@435: ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); duke@435: ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); duke@435: or3(t3, t2, t3); duke@435: cmp(t1, t2); duke@435: br(Assembler::lessEqual, false, Assembler::pn, next2); duke@435: delayed()->nop(); duke@435: stop("assert(top <= end)"); duke@435: should_not_reach_here(); duke@435: duke@435: bind(next2); duke@435: and3(t3, MinObjAlignmentInBytesMask, t3); duke@435: cmp(t3, 0); duke@435: br(Assembler::lessEqual, false, Assembler::pn, ok); duke@435: delayed()->nop(); duke@435: stop("assert(aligned)"); duke@435: should_not_reach_here(); duke@435: duke@435: bind(ok); duke@435: restore(); duke@435: } duke@435: #endif duke@435: } duke@435: duke@435: duke@435: void MacroAssembler::eden_allocate( duke@435: Register obj, // result: pointer to object after successful allocation duke@435: Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise duke@435: int con_size_in_bytes, // object size in bytes if known at compile time duke@435: Register t1, // temp register duke@435: Register t2, // temp 
register
duke@435: Label& slow_case // continuation point if fast allocation fails
duke@435: ){
duke@435: // make sure arguments make sense
duke@435: assert_different_registers(obj, var_size_in_bytes, t1, t2);
duke@435: assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
duke@435: assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
duke@435:
ysr@777: if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
ysr@777: // No allocation in the shared eden.
ysr@777: br(Assembler::always, false, Assembler::pt, slow_case);
ysr@777: delayed()->nop();
ysr@777: } else {
ysr@777: // get eden boundaries
ysr@777: // note: we need both top & top_addr!
ysr@777: const Register top_addr = t1;
ysr@777: const Register end = t2;
ysr@777:
ysr@777: CollectedHeap* ch = Universe::heap();
ysr@777: set((intx)ch->top_addr(), top_addr);
ysr@777: intx delta = (intx)ch->end_addr() - (intx)ch->top_addr();
ysr@777: ld_ptr(top_addr, delta, end);
ysr@777: ld_ptr(top_addr, 0, obj);
ysr@777:
ysr@777: // try to allocate
ysr@777: Label retry;
ysr@777: bind(retry);
duke@435: #ifdef ASSERT
ysr@777: // make sure eden top is properly aligned
ysr@777: {
ysr@777: Label L;
ysr@777: btst(MinObjAlignmentInBytesMask, obj);
ysr@777: br(Assembler::zero, false, Assembler::pt, L);
ysr@777: delayed()->nop();
ysr@777: stop("eden top is not properly aligned");
ysr@777: bind(L);
ysr@777: }
ysr@777: #endif // ASSERT
ysr@777: const Register free = end;
ysr@777: sub(end, obj, free); // compute amount of free space
ysr@777: if (var_size_in_bytes->is_valid()) {
ysr@777: // size is unknown at compile time
ysr@777: cmp(free, var_size_in_bytes);
ysr@777: br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case
ysr@777: delayed()->add(obj, var_size_in_bytes, end);
ysr@777: } else {
ysr@777: // size is known at compile time
ysr@777: cmp(free, con_size_in_bytes);
ysr@777: br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case
ysr@777: delayed()->add(obj, con_size_in_bytes, end);
ysr@777: }
ysr@777: // Compare obj with the value at top_addr; if still equal, swap the value of
ysr@777: // end with the value at top_addr. If not equal, read the value at top_addr
ysr@777: // into end.
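ysr@777: // A sketch of those semantics (illustrative pseudocode, not emitted code):
ysr@777: // old = *top_addr ;
ysr@777: // if (old == obj) *top_addr = end ; // bump top: allocation succeeded
ysr@777: // end = old ;
ysr@777: // On success end == obj, so the cmp below falls through and the delayed
ysr@777: // mov is effectively a nop.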
ysr@777: casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
ysr@777: // if someone beat us on the allocation, try again, otherwise continue
ysr@777: cmp(obj, end);
ysr@777: brx(Assembler::notEqual, false, Assembler::pn, retry);
ysr@777: delayed()->mov(end, obj); // nop if successful since obj == end
ysr@777:
ysr@777: #ifdef ASSERT
ysr@777: // make sure eden top is properly aligned
ysr@777: {
ysr@777: Label L;
ysr@777: const Register top_addr = t1;
ysr@777:
ysr@777: set((intx)ch->top_addr(), top_addr);
ysr@777: ld_ptr(top_addr, 0, top_addr);
ysr@777: btst(MinObjAlignmentInBytesMask, top_addr);
ysr@777: br(Assembler::zero, false, Assembler::pt, L);
ysr@777: delayed()->nop();
ysr@777: stop("eden top is not properly aligned");
ysr@777: bind(L);
ysr@777: }
ysr@777: #endif // ASSERT
duke@435: }
duke@435: }
duke@435:
duke@435:
duke@435: void MacroAssembler::tlab_allocate(
duke@435: Register obj, // result: pointer to object after successful allocation
duke@435: Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
duke@435: int con_size_in_bytes, // object size in bytes if known at compile time
duke@435: Register t1, // temp register
duke@435: Label& slow_case // continuation point if fast allocation fails
duke@435: ){
duke@435: // make sure arguments make sense
duke@435: assert_different_registers(obj, var_size_in_bytes, t1);
duke@435: assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size");
duke@435: assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
duke@435:
duke@435: const Register free = t1;
duke@435:
duke@435: verify_tlab();
duke@435:
duke@435: ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj);
duke@435:
duke@435: // calculate amount of free space
duke@435: ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free);
duke@435: sub(free, obj, free);
duke@435:
duke@435: Label done;
duke@435: if (var_size_in_bytes == noreg) {
duke@435: cmp(free, con_size_in_bytes);
duke@435: } else {
duke@435: cmp(free, var_size_in_bytes);
duke@435: }
duke@435: br(Assembler::less, false, Assembler::pn, slow_case);
duke@435: // calculate the new top pointer
duke@435: if (var_size_in_bytes == noreg) {
duke@435: delayed()->add(obj, con_size_in_bytes, free);
duke@435: } else {
duke@435: delayed()->add(obj, var_size_in_bytes, free);
duke@435: }
duke@435:
duke@435: bind(done);
duke@435:
duke@435: #ifdef ASSERT
duke@435: // make sure new free pointer is properly aligned
duke@435: {
duke@435: Label L;
duke@435: btst(MinObjAlignmentInBytesMask, free);
duke@435: br(Assembler::zero, false, Assembler::pt, L);
duke@435: delayed()->nop();
duke@435: stop("updated TLAB free is not properly aligned");
duke@435: bind(L);
duke@435: }
duke@435: #endif // ASSERT
duke@435:
duke@435: // update the tlab top pointer
duke@435: st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
duke@435: verify_tlab();
duke@435: }
duke@435:
duke@435:
duke@435: void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
duke@435: Register top = O0;
duke@435: Register t1 = G1;
duke@435: Register t2 = G3;
duke@435: Register t3 = O1;
duke@435: assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
duke@435: Label do_refill, discard_tlab;
duke@435:
duke@435: if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
duke@435: // No allocation in the shared eden.
duke@435: br(Assembler::always, false, Assembler::pt, slow_case);
duke@435: delayed()->nop();
duke@435: }
duke@435:
duke@435: ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
duke@435: ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1);
duke@435: ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2);
duke@435:
duke@435: // calculate amount of free space
duke@435: sub(t1, top, t1);
duke@435: srl_ptr(t1, LogHeapWordSize, t1);
duke@435:
duke@435: // Retain tlab and allocate object in shared space if
duke@435: // the amount free in the tlab is too large to discard.
duke@435: cmp(t1, t2);
duke@435: brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);
duke@435:
duke@435: // increment waste limit to prevent getting stuck on this slow path
duke@435: delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
duke@435: st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
duke@435: if (TLABStats) {
duke@435: // increment number of slow_allocations
duke@435: ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2);
duke@435: add(t2, 1, t2);
duke@435: stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
duke@435: }
duke@435: br(Assembler::always, false, Assembler::pt, try_eden);
duke@435: delayed()->nop();
duke@435:
duke@435: bind(discard_tlab);
duke@435: if (TLABStats) {
duke@435: // increment number of refills
duke@435: ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2);
duke@435: add(t2, 1, t2);
duke@435: stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()));
duke@435: // accumulate wastage
duke@435: ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2);
duke@435: add(t2, t1, t2);
duke@435: stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
duke@435: }
duke@435:
duke@435: // if tlab is currently allocated (top or end != null) then
duke@435: // fill [top, end + alignment_reserve) with array object
duke@435: br_null(top, false, Assembler::pn, do_refill);
duke@435: delayed()->nop();
duke@435:
duke@435: set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
duke@435: st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
duke@435: // set klass to intArrayKlass
duke@435: sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
duke@435: add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
duke@435: sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
duke@435: st(t1, top, arrayOopDesc::length_offset_in_bytes());
coleenp@602: set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
coleenp@602: ld_ptr(t2, 0, t2);
coleenp@602: // store klass last. concurrent gcs assume the klass length is valid if
coleenp@602: // the klass field is not null.
coleenp@602: store_klass(t2, top);
duke@435: verify_oop(top);
duke@435:
duke@435: // refill the tlab with an eden allocation
duke@435: bind(do_refill);
duke@435: ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1);
duke@435: sll_ptr(t1, LogHeapWordSize, t1);
duke@435: // add object_size ??
duke@435: eden_allocate(top, t1, 0, t2, t3, slow_case); duke@435: duke@435: st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset())); duke@435: st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset())); duke@435: #ifdef ASSERT duke@435: // check that tlab_size (t1) is still valid duke@435: { duke@435: Label ok; duke@435: ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2); duke@435: sll_ptr(t2, LogHeapWordSize, t2); duke@435: cmp(t1, t2); duke@435: br(Assembler::equal, false, Assembler::pt, ok); duke@435: delayed()->nop(); duke@435: stop("assert(t1 == tlab_size)"); duke@435: should_not_reach_here(); duke@435: duke@435: bind(ok); duke@435: } duke@435: #endif // ASSERT duke@435: add(top, t1, top); // t1 is tlab_size duke@435: sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top); duke@435: st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset())); duke@435: verify_tlab(); duke@435: br(Assembler::always, false, Assembler::pt, retry); duke@435: delayed()->nop(); duke@435: } duke@435: duke@435: Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { duke@435: switch (cond) { duke@435: // Note some conditions are synonyms for others duke@435: case Assembler::never: return Assembler::always; duke@435: case Assembler::zero: return Assembler::notZero; duke@435: case Assembler::lessEqual: return Assembler::greater; duke@435: case Assembler::less: return Assembler::greaterEqual; duke@435: case Assembler::lessEqualUnsigned: return Assembler::greaterUnsigned; duke@435: case Assembler::lessUnsigned: return Assembler::greaterEqualUnsigned; duke@435: case Assembler::negative: return Assembler::positive; duke@435: case Assembler::overflowSet: return Assembler::overflowClear; duke@435: case Assembler::always: return Assembler::never; duke@435: case Assembler::notZero: return Assembler::zero; duke@435: case Assembler::greater: return Assembler::lessEqual; duke@435: case Assembler::greaterEqual: return Assembler::less; duke@435: case Assembler::greaterUnsigned: return Assembler::lessEqualUnsigned; duke@435: case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned; duke@435: case Assembler::positive: return Assembler::negative; duke@435: case Assembler::overflowClear: return Assembler::overflowSet; duke@435: } duke@435: duke@435: ShouldNotReachHere(); return Assembler::overflowClear; duke@435: } duke@435: duke@435: void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr, duke@435: Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) { duke@435: Condition negated_cond = negate_condition(cond); duke@435: Label L; duke@435: brx(negated_cond, false, Assembler::pt, L); duke@435: delayed()->nop(); duke@435: inc_counter(counter_ptr, Rtmp1, Rtmp2); duke@435: bind(L); duke@435: } duke@435: duke@435: void MacroAssembler::inc_counter(address counter_ptr, Register Rtmp1, Register Rtmp2) { duke@435: Address counter_addr(Rtmp1, counter_ptr); duke@435: load_contents(counter_addr, Rtmp2); duke@435: inc(Rtmp2); duke@435: store_contents(Rtmp2, counter_addr); duke@435: } duke@435: duke@435: SkipIfEqual::SkipIfEqual( duke@435: MacroAssembler* masm, Register temp, const bool* flag_addr, duke@435: Assembler::Condition condition) { duke@435: _masm = masm; duke@435: Address flag(temp, (address)flag_addr, relocInfo::none); duke@435: _masm->sethi(flag); duke@435: _masm->ldub(flag, temp); duke@435: _masm->tst(temp); duke@435: _masm->br(condition, false, Assembler::pt, _label); duke@435: _masm->delayed()->nop(); 
duke@435: }
duke@435:
duke@435: SkipIfEqual::~SkipIfEqual() {
duke@435: _masm->bind(_label);
duke@435: }
duke@435:
duke@435:
duke@435: // Writes to successive stack pages, until the given size is reached, to check
duke@435: // for stack overflow + shadow pages. This clobbers Rtsp and Rscratch.
duke@435: void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
duke@435: Register Rscratch) {
duke@435: // Make a copy of the stack pointer to use as the temp stack pointer
duke@435: mov(SP, Rtsp);
duke@435:
duke@435: // Bang stack for total size given plus stack shadow page size.
duke@435: // Bang one page at a time because a large size can overflow yellow and
duke@435: // red zones (the bang will fail but stack overflow handling can't tell that
duke@435: // it was a stack overflow bang vs a regular segv).
duke@435: int offset = os::vm_page_size();
duke@435: Register Roffset = Rscratch;
duke@435:
duke@435: Label loop;
duke@435: bind(loop);
duke@435: set((-offset)+STACK_BIAS, Rscratch);
duke@435: st(G0, Rtsp, Rscratch);
duke@435: set(offset, Roffset);
duke@435: sub(Rsize, Roffset, Rsize);
duke@435: cmp(Rsize, G0);
duke@435: br(Assembler::greater, false, Assembler::pn, loop);
duke@435: delayed()->sub(Rtsp, Roffset, Rtsp);
duke@435:
duke@435: // Bang down shadow pages too.
duke@435: // The -1 because we already subtracted 1 page.
duke@435: for (int i = 0; i < StackShadowPages-1; i++) {
duke@435: set((-i*offset)+STACK_BIAS, Rscratch);
duke@435: st(G0, Rtsp, Rscratch);
duke@435: }
duke@435: }
coleenp@548:
ysr@777: ///////////////////////////////////////////////////////////////////////////////////
ysr@777: #ifndef SERIALGC
ysr@777:
ysr@777: static uint num_stores = 0;
ysr@777: static uint num_null_pre_stores = 0;
ysr@777:
ysr@777: static void count_null_pre_vals(void* pre_val) {
ysr@777: num_stores++;
ysr@777: if (pre_val == NULL) num_null_pre_stores++;
ysr@777: if ((num_stores % 1000000) == 0) {
ysr@777: tty->print_cr(UINT32_FORMAT " stores, " UINT32_FORMAT " (%5.2f%%) with null pre-vals.",
ysr@777: num_stores, num_null_pre_stores,
ysr@777: 100.0*(float)num_null_pre_stores/(float)num_stores);
ysr@777: }
ysr@777: }
ysr@777:
ysr@777: static address satb_log_enqueue_with_frame = 0;
ysr@777: static u_char* satb_log_enqueue_with_frame_end = 0;
ysr@777:
ysr@777: static address satb_log_enqueue_frameless = 0;
ysr@777: static u_char* satb_log_enqueue_frameless_end = 0;
ysr@777:
ysr@777: static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
ysr@777:
ysr@777: // The calls to this don't work. We'd need to do a fair amount of work to
ysr@777: // make it work.
ysr@777: static void check_index(int ind) {
ysr@777: assert(0 <= ind && ind <= 64*K && ((ind % oopSize) == 0),
ysr@777: "Invariants.");
ysr@777: }
ysr@777:
ysr@777: static void generate_satb_log_enqueue(bool with_frame) {
ysr@777: BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
ysr@777: CodeBuffer buf(bb->instructions_begin(), bb->instructions_size());
ysr@777: MacroAssembler masm(&buf);
ysr@777: address start = masm.pc();
ysr@777: Register pre_val;
ysr@777:
ysr@777: Label refill, restart;
ysr@777: if (with_frame) {
ysr@777: masm.save_frame(0);
ysr@777: pre_val = I0; // Was O0 before the save.
ysr@777: } else {
ysr@777: pre_val = O0;
ysr@777: }
ysr@777: int satb_q_index_byte_offset =
ysr@777: in_bytes(JavaThread::satb_mark_queue_offset() +
ysr@777: PtrQueue::byte_offset_of_index());
ysr@777: int satb_q_buf_byte_offset =
ysr@777: in_bytes(JavaThread::satb_mark_queue_offset() +
ysr@777: PtrQueue::byte_offset_of_buf());
ysr@777: assert(in_bytes(PtrQueue::byte_width_of_index()) == sizeof(intptr_t) &&
ysr@777: in_bytes(PtrQueue::byte_width_of_buf()) == sizeof(intptr_t),
ysr@777: "check sizes in assembly below");
ysr@777:
ysr@777: masm.bind(restart);
ysr@777: masm.ld_ptr(G2_thread, satb_q_index_byte_offset, L0);
ysr@777:
ysr@777: masm.br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn, L0, refill);
ysr@777: // If the branch is taken, no harm in executing this in the delay slot.
ysr@777: masm.delayed()->ld_ptr(G2_thread, satb_q_buf_byte_offset, L1);
ysr@777: masm.sub(L0, oopSize, L0);
ysr@777:
ysr@777: masm.st_ptr(pre_val, L1, L0); // [_buf + index] := pre_val
ysr@777: if (!with_frame) {
ysr@777: // Use return-from-leaf
ysr@777: masm.retl();
ysr@777: masm.delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset);
ysr@777: } else {
ysr@777: // Not delayed.
ysr@777: masm.st_ptr(L0, G2_thread, satb_q_index_byte_offset);
ysr@777: }
ysr@777: if (with_frame) {
ysr@777: masm.ret();
ysr@777: masm.delayed()->restore();
ysr@777: }
ysr@777: masm.bind(refill);
ysr@777:
ysr@777: address handle_zero =
ysr@777: CAST_FROM_FN_PTR(address,
ysr@777: &SATBMarkQueueSet::handle_zero_index_for_thread);
ysr@777: // This should be rare enough that we can afford to save all the
ysr@777: // scratch registers that the calling context might be using.
ysr@777: masm.mov(G1_scratch, L0);
ysr@777: masm.mov(G3_scratch, L1);
ysr@777: masm.mov(G4, L2);
ysr@777: // We need the value of O0 above (for the write into the buffer), so we
ysr@777: // save and restore it.
ysr@777: masm.mov(O0, L3);
ysr@777: // Since the call will overwrite O7, we save and restore that, as well.
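ysr@777: // (On SPARC a call deposits its own PC in O7 and the callee returns
ysr@777: // through O7+8, so any nested call clobbers O7.)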
ysr@777: masm.mov(O7, L4); ysr@777: masm.call_VM_leaf(L5, handle_zero, G2_thread); ysr@777: masm.mov(L0, G1_scratch); ysr@777: masm.mov(L1, G3_scratch); ysr@777: masm.mov(L2, G4); ysr@777: masm.mov(L3, O0); ysr@777: masm.br(Assembler::always, /*annul*/false, Assembler::pt, restart); ysr@777: masm.delayed()->mov(L4, O7); ysr@777: ysr@777: if (with_frame) { ysr@777: satb_log_enqueue_with_frame = start; ysr@777: satb_log_enqueue_with_frame_end = masm.pc(); ysr@777: } else { ysr@777: satb_log_enqueue_frameless = start; ysr@777: satb_log_enqueue_frameless_end = masm.pc(); ysr@777: } ysr@777: } ysr@777: ysr@777: static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) { ysr@777: if (with_frame) { ysr@777: if (satb_log_enqueue_with_frame == 0) { ysr@777: generate_satb_log_enqueue(with_frame); ysr@777: assert(satb_log_enqueue_with_frame != 0, "postcondition."); ysr@777: if (G1SATBPrintStubs) { ysr@777: tty->print_cr("Generated with-frame satb enqueue:"); ysr@777: Disassembler::decode((u_char*)satb_log_enqueue_with_frame, ysr@777: satb_log_enqueue_with_frame_end, ysr@777: tty); ysr@777: } ysr@777: } ysr@777: } else { ysr@777: if (satb_log_enqueue_frameless == 0) { ysr@777: generate_satb_log_enqueue(with_frame); ysr@777: assert(satb_log_enqueue_frameless != 0, "postcondition."); ysr@777: if (G1SATBPrintStubs) { ysr@777: tty->print_cr("Generated frameless satb enqueue:"); ysr@777: Disassembler::decode((u_char*)satb_log_enqueue_frameless, ysr@777: satb_log_enqueue_frameless_end, ysr@777: tty); ysr@777: } ysr@777: } ysr@777: } ysr@777: } ysr@777: ysr@777: void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs) { ysr@777: assert(offset == 0 || index == noreg, "choose one"); ysr@777: ysr@777: if (G1DisablePreBarrier) return; ysr@777: // satb_log_barrier(tmp, obj, offset, preserve_o_regs); ysr@777: Label filtered; ysr@777: // satb_log_barrier_work0(tmp, filtered); ysr@777: if (in_bytes(PtrQueue::byte_width_of_active()) == 4) { ysr@777: ld(G2, ysr@777: in_bytes(JavaThread::satb_mark_queue_offset() + ysr@777: PtrQueue::byte_offset_of_active()), ysr@777: tmp); ysr@777: } else { ysr@777: guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, ysr@777: "Assumption"); ysr@777: ldsb(G2, ysr@777: in_bytes(JavaThread::satb_mark_queue_offset() + ysr@777: PtrQueue::byte_offset_of_active()), ysr@777: tmp); ysr@777: } ysr@777: // Check on whether to annul. ysr@777: br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered); ysr@777: delayed() -> nop(); ysr@777: ysr@777: // satb_log_barrier_work1(tmp, offset); ysr@777: if (index == noreg) { ysr@777: if (Assembler::is_simm13(offset)) { ysr@777: ld_ptr(obj, offset, tmp); ysr@777: } else { ysr@777: set(offset, tmp); ysr@777: ld_ptr(obj, tmp, tmp); ysr@777: } ysr@777: } else { ysr@777: ld_ptr(obj, index, tmp); ysr@777: } ysr@777: ysr@777: // satb_log_barrier_work2(obj, tmp, offset); ysr@777: ysr@777: // satb_log_barrier_work3(tmp, filtered, preserve_o_regs); ysr@777: ysr@777: const Register pre_val = tmp; ysr@777: ysr@777: if (G1SATBBarrierPrintNullPreVals) { ysr@777: save_frame(0); ysr@777: mov(pre_val, O0); ysr@777: // Save G-regs that target may use. ysr@777: mov(G1, L1); ysr@777: mov(G2, L2); ysr@777: mov(G3, L3); ysr@777: mov(G4, L4); ysr@777: mov(G5, L5); ysr@777: call(CAST_FROM_FN_PTR(address, &count_null_pre_vals)); ysr@777: delayed()->nop(); ysr@777: // Restore G-regs that target may have used. 
ysr@777: mov(L1, G1);
ysr@777: mov(L2, G2);
ysr@777: mov(L3, G3);
ysr@777: mov(L4, G4);
ysr@777: mov(L5, G5);
ysr@777: restore(G0, G0, G0);
ysr@777: }
ysr@777:
ysr@777: // Check on whether to annul.
ysr@777: br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, pre_val, filtered);
ysr@777: delayed() -> nop();
ysr@777:
ysr@777: // OK, it's not filtered, so we'll need to call enqueue. In the normal
ysr@777: // case, pre_val will be a scratch G-reg, but there are some cases in which
ysr@777: // it's an O-reg. In the former case, do a normal call. In the latter,
ysr@777: // do a save here and call the frameless version.
ysr@777:
ysr@777: guarantee(pre_val->is_global() || pre_val->is_out(),
ysr@777: "Or we need to think harder.");
ysr@777: if (pre_val->is_global() && !preserve_o_regs) {
ysr@777: generate_satb_log_enqueue_if_necessary(true); // with frame.
ysr@777: call(satb_log_enqueue_with_frame);
ysr@777: delayed()->mov(pre_val, O0);
ysr@777: } else {
ysr@777: generate_satb_log_enqueue_if_necessary(false); // frameless.
ysr@777: save_frame(0);
ysr@777: call(satb_log_enqueue_frameless);
ysr@777: delayed()->mov(pre_val->after_save(), O0);
ysr@777: restore();
ysr@777: }
ysr@777:
ysr@777: bind(filtered);
ysr@777: }
ysr@777:
ysr@777: static jint num_ct_writes = 0;
ysr@777: static jint num_ct_writes_filtered_in_hr = 0;
ysr@777: static jint num_ct_writes_filtered_null = 0;
ysr@777: static jint num_ct_writes_filtered_pop = 0;
ysr@777: static G1CollectedHeap* g1 = NULL;
ysr@777:
ysr@777: static Thread* count_ct_writes(void* filter_val, void* new_val) {
ysr@777: Atomic::inc(&num_ct_writes);
ysr@777: if (filter_val == NULL) {
ysr@777: Atomic::inc(&num_ct_writes_filtered_in_hr);
ysr@777: } else if (new_val == NULL) {
ysr@777: Atomic::inc(&num_ct_writes_filtered_null);
ysr@777: } else {
ysr@777: if (g1 == NULL) {
ysr@777: g1 = G1CollectedHeap::heap();
ysr@777: }
ysr@777: if ((HeapWord*)new_val < g1->popular_object_boundary()) {
ysr@777: Atomic::inc(&num_ct_writes_filtered_pop);
ysr@777: }
ysr@777: }
ysr@777: if ((num_ct_writes % 1000000) == 0) {
ysr@777: jint num_ct_writes_filtered =
ysr@777: num_ct_writes_filtered_in_hr +
ysr@777: num_ct_writes_filtered_null +
ysr@777: num_ct_writes_filtered_pop;
ysr@777:
ysr@777: tty->print_cr("%d potential CT writes: %5.2f%% filtered\n"
ysr@777: " (%5.2f%% intra-HR, %5.2f%% null, %5.2f%% popular).",
ysr@777: num_ct_writes,
ysr@777: 100.0*(float)num_ct_writes_filtered/(float)num_ct_writes,
ysr@777: 100.0*(float)num_ct_writes_filtered_in_hr/
ysr@777: (float)num_ct_writes,
ysr@777: 100.0*(float)num_ct_writes_filtered_null/
ysr@777: (float)num_ct_writes,
ysr@777: 100.0*(float)num_ct_writes_filtered_pop/
ysr@777: (float)num_ct_writes);
ysr@777: }
ysr@777: return Thread::current();
ysr@777: }
ysr@777:
ysr@777: static address dirty_card_log_enqueue = 0;
ysr@777: static u_char* dirty_card_log_enqueue_end = 0;
ysr@777:
ysr@777: // This code assumes that O0 contains the object address.
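ysr@777: // The generated stub effectively performs (illustrative pseudocode;
ysr@777: // dirty == 0 for this card table):
ysr@777: // card_ptr = byte_map_base + (O0 >> card_shift) ;
ysr@777: // if (*card_ptr != 0) { *card_ptr = 0 ; enqueue(card_ptr) ; }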
ysr@777: static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
ysr@777: BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
ysr@777: CodeBuffer buf(bb->instructions_begin(), bb->instructions_size());
ysr@777: MacroAssembler masm(&buf);
ysr@777: address start = masm.pc();
ysr@777:
ysr@777: Label not_already_dirty, restart, refill;
ysr@777:
ysr@777: #ifdef _LP64
ysr@777: masm.srlx(O0, CardTableModRefBS::card_shift, O0);
ysr@777: #else
ysr@777: masm.srl(O0, CardTableModRefBS::card_shift, O0);
ysr@777: #endif
ysr@777: Address rs(O1, (address)byte_map_base);
ysr@777: masm.load_address(rs); // O1 := byte_map_base
ysr@777: masm.ldub(O0, O1, O2); // O2 := [O0 + O1]
ysr@777:
ysr@777: masm.br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt,
ysr@777: O2, not_already_dirty);
ysr@777: // Get O1 + O2 into a reg by itself -- useful in the take-the-branch
ysr@777: // case, harmless if not.
ysr@777: masm.delayed()->add(O0, O1, O3);
ysr@777:
ysr@777: // We didn't take the branch, so we're already dirty: return.
ysr@777: // Use return-from-leaf
ysr@777: masm.retl();
ysr@777: masm.delayed()->nop();
ysr@777:
ysr@777: // Not dirty.
ysr@777: masm.bind(not_already_dirty);
ysr@777: // First, dirty it.
ysr@777: masm.stb(G0, O3, G0); // [cardPtr] := 0 (i.e., dirty).
ysr@777: int dirty_card_q_index_byte_offset =
ysr@777: in_bytes(JavaThread::dirty_card_queue_offset() +
ysr@777: PtrQueue::byte_offset_of_index());
ysr@777: int dirty_card_q_buf_byte_offset =
ysr@777: in_bytes(JavaThread::dirty_card_queue_offset() +
ysr@777: PtrQueue::byte_offset_of_buf());
ysr@777: masm.bind(restart);
ysr@777: masm.ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);
ysr@777:
ysr@777: masm.br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn,
ysr@777: L0, refill);
ysr@777: // If the branch is taken, no harm in executing this in the delay slot.
ysr@777: masm.delayed()->ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1);
ysr@777: masm.sub(L0, oopSize, L0);
ysr@777:
ysr@777: masm.st_ptr(O3, L1, L0); // [_buf + index] := O3 (the card address)
ysr@777: // Use return-from-leaf
ysr@777: masm.retl();
ysr@777: masm.delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset);
ysr@777:
ysr@777: masm.bind(refill);
ysr@777: address handle_zero =
ysr@777: CAST_FROM_FN_PTR(address,
ysr@777: &DirtyCardQueueSet::handle_zero_index_for_thread);
ysr@777: // This should be rare enough that we can afford to save all the
ysr@777: // scratch registers that the calling context might be using.
ysr@777: masm.mov(G1_scratch, L3);
ysr@777: masm.mov(G3_scratch, L5);
ysr@777: // We need the value of O3 above (for the write into the buffer), so we
ysr@777: // save and restore it.
ysr@777: masm.mov(O3, L6);
ysr@777: // Since the call will overwrite O7, we save and restore that, as well.
ysr@777: masm.mov(O7, L4);
ysr@777:
ysr@777: masm.call_VM_leaf(L7_thread_cache, handle_zero, G2_thread);
ysr@777: masm.mov(L3, G1_scratch);
ysr@777: masm.mov(L5, G3_scratch);
ysr@777: masm.mov(L6, O3);
ysr@777: masm.br(Assembler::always, /*annul*/false, Assembler::pt, restart);
ysr@777: masm.delayed()->mov(L4, O7);
ysr@777:
ysr@777: dirty_card_log_enqueue = start;
ysr@777: dirty_card_log_enqueue_end = masm.pc();
ysr@777: // XXX Should have a guarantee here about not going off the end!
ysr@777: // Does it already do so? Do an experiment...
ysr@777: }
ysr@777:
ysr@777: static inline void
ysr@777: generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) {
ysr@777: if (dirty_card_log_enqueue == 0) {
ysr@777: generate_dirty_card_log_enqueue(byte_map_base);
ysr@777: assert(dirty_card_log_enqueue != 0, "postcondition.");
ysr@777: if (G1SATBPrintStubs) {
ysr@777: tty->print_cr("Generated dirty_card enqueue:");
ysr@777: Disassembler::decode((u_char*)dirty_card_log_enqueue,
ysr@777: dirty_card_log_enqueue_end,
ysr@777: tty);
ysr@777: }
ysr@777: }
ysr@777: }
ysr@777:
ysr@777:
ysr@777: void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
ysr@777:
ysr@777: Label filtered;
ysr@777: MacroAssembler* post_filter_masm = this;
ysr@777:
ysr@777: if (new_val == G0) return;
ysr@777: if (G1DisablePostBarrier) return;
ysr@777:
ysr@777: G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
ysr@777: assert(bs->kind() == BarrierSet::G1SATBCT ||
ysr@777: bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
ysr@777: if (G1RSBarrierRegionFilter) {
ysr@777: xor3(store_addr, new_val, tmp);
ysr@777: #ifdef _LP64
ysr@777: srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
ysr@777: #else
ysr@777: srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
ysr@777: #endif
ysr@777: if (G1PrintCTFilterStats) {
ysr@777: guarantee(tmp->is_global(), "Or stats won't work...");
ysr@777: // This is a sleazy hack: I'm temporarily hijacking G2, which I
ysr@777: // promise to restore.
ysr@777: mov(new_val, G2);
ysr@777: save_frame(0);
ysr@777: mov(tmp, O0);
ysr@777: mov(G2, O1);
ysr@777: // Save G-regs that target may use.
ysr@777: mov(G1, L1);
ysr@777: mov(G2, L2);
ysr@777: mov(G3, L3);
ysr@777: mov(G4, L4);
ysr@777: mov(G5, L5);
ysr@777: call(CAST_FROM_FN_PTR(address, &count_ct_writes));
ysr@777: delayed()->nop();
ysr@777: mov(O0, G2);
ysr@777: // Restore G-regs that target may have used.
ysr@777: mov(L1, G1);
ysr@777: mov(L3, G3);
ysr@777: mov(L4, G4);
ysr@777: mov(L5, G5);
ysr@777: restore(G0, G0, G0);
ysr@777: }
ysr@777: // XXX Should I predict this taken or not? Does it matter?
ysr@777: br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
ysr@777: delayed()->nop();
ysr@777: }
ysr@777:
ysr@777: // Now we decide how to generate the card table write. If we're
ysr@777: // enqueueing, we call out to a generated function. Otherwise, we do it
ysr@777: // inline here.
ysr@777:
ysr@777: if (G1RSBarrierUseQueue) {
ysr@777: // If the "store_addr" register is an "in" or "local" register, move it to
ysr@777: // a scratch reg so we can pass it as an argument.
ysr@777: bool use_scr = !(store_addr->is_global() || store_addr->is_out());
ysr@777: // Pick a scratch register different from "tmp".
ysr@777: Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch);
ysr@777: // Make sure we use up the delay slot!
ysr@777: if (use_scr) { ysr@777: post_filter_masm->mov(store_addr, scr); ysr@777: } else { ysr@777: post_filter_masm->nop(); ysr@777: } ysr@777: generate_dirty_card_log_enqueue_if_necessary(bs->byte_map_base); ysr@777: save_frame(0); ysr@777: call(dirty_card_log_enqueue); ysr@777: if (use_scr) { ysr@777: delayed()->mov(scr, O0); ysr@777: } else { ysr@777: delayed()->mov(store_addr->after_save(), O0); ysr@777: } ysr@777: restore(); ysr@777: ysr@777: } else { ysr@777: ysr@777: #ifdef _LP64 ysr@777: post_filter_masm->srlx(store_addr, CardTableModRefBS::card_shift, store_addr); ysr@777: #else ysr@777: post_filter_masm->srl(store_addr, CardTableModRefBS::card_shift, store_addr); ysr@777: #endif ysr@777: assert( tmp != store_addr, "need separate temp reg"); ysr@777: Address rs(tmp, (address)bs->byte_map_base); ysr@777: load_address(rs); ysr@777: stb(G0, rs.base(), store_addr); ysr@777: } ysr@777: ysr@777: bind(filtered); ysr@777: ysr@777: } ysr@777: ysr@777: #endif // SERIALGC ysr@777: /////////////////////////////////////////////////////////////////////////////////// ysr@777: ysr@777: void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) { ysr@777: // If we're writing constant NULL, we can skip the write barrier. ysr@777: if (new_val == G0) return; ysr@777: CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set(); ysr@777: assert(bs->kind() == BarrierSet::CardTableModRef || ysr@777: bs->kind() == BarrierSet::CardTableExtension, "wrong barrier"); ysr@777: card_table_write(bs->byte_map_base, tmp, store_addr); ysr@777: } ysr@777: kvn@599: void MacroAssembler::load_klass(Register src_oop, Register klass) { coleenp@548: // The number of bytes in this code is used by coleenp@548: // MachCallDynamicJavaNode::ret_addr_offset() coleenp@548: // if this changes, change that. 
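coleenp@548: // With compressed oops the klass field holds a 32-bit narrow oop: load
coleenp@548: // it with lduw and decode it (shift + add of G6_heapbase); otherwise it
coleenp@548: // is a full-width pointer loaded with ld_ptr.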
coleenp@548: if (UseCompressedOops) {
kvn@599: lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
kvn@599: decode_heap_oop_not_null(klass);
coleenp@548: } else {
kvn@599: ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass);
coleenp@548: }
coleenp@548: }
coleenp@548:
kvn@599: void MacroAssembler::store_klass(Register klass, Register dst_oop) {
coleenp@548: if (UseCompressedOops) {
kvn@599: assert(dst_oop != klass, "not enough registers");
kvn@599: encode_heap_oop_not_null(klass);
coleenp@602: st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
coleenp@548: } else {
kvn@599: st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes());
kvn@559: }
kvn@559: }
kvn@559:
coleenp@602: void MacroAssembler::store_klass_gap(Register s, Register d) {
coleenp@602: if (UseCompressedOops) {
coleenp@602: assert(s != d, "not enough registers");
coleenp@602: st(s, d, oopDesc::klass_gap_offset_in_bytes());
coleenp@548: }
coleenp@548: }
coleenp@548:
coleenp@548: void MacroAssembler::load_heap_oop(const Address& s, Register d, int offset) {
coleenp@548: if (UseCompressedOops) {
coleenp@548: lduw(s, d, offset);
coleenp@548: decode_heap_oop(d);
coleenp@548: } else {
coleenp@548: ld_ptr(s, d, offset);
coleenp@548: }
coleenp@548: }
coleenp@548:
coleenp@548: void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
coleenp@548: if (UseCompressedOops) {
coleenp@548: lduw(s1, s2, d);
coleenp@548: decode_heap_oop(d, d);
coleenp@548: } else {
coleenp@548: ld_ptr(s1, s2, d);
coleenp@548: }
coleenp@548: }
coleenp@548:
coleenp@548: void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
coleenp@548: if (UseCompressedOops) {
coleenp@548: lduw(s1, simm13a, d);
coleenp@548: decode_heap_oop(d, d);
coleenp@548: } else {
coleenp@548: ld_ptr(s1, simm13a, d);
coleenp@548: }
coleenp@548: }
coleenp@548:
coleenp@548: void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
coleenp@548: if (UseCompressedOops) {
coleenp@548: assert(s1 != d && s2 != d, "not enough registers");
coleenp@548: encode_heap_oop(d);
coleenp@548: st(d, s1, s2);
coleenp@548: } else {
coleenp@548: st_ptr(d, s1, s2);
coleenp@548: }
coleenp@548: }
coleenp@548:
coleenp@548: void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
coleenp@548: if (UseCompressedOops) {
coleenp@548: assert(s1 != d, "not enough registers");
coleenp@548: encode_heap_oop(d);
coleenp@548: st(d, s1, simm13a);
coleenp@548: } else {
coleenp@548: st_ptr(d, s1, simm13a);
coleenp@548: }
coleenp@548: }
coleenp@548:
coleenp@548: void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
coleenp@548: if (UseCompressedOops) {
coleenp@548: assert(a.base() != d, "not enough registers");
coleenp@548: encode_heap_oop(d);
coleenp@548: st(d, a, offset);
coleenp@548: } else {
coleenp@548: st_ptr(d, a, offset);
coleenp@548: }
coleenp@548: }
coleenp@548:
coleenp@548:
coleenp@548: void MacroAssembler::encode_heap_oop(Register src, Register dst) {
coleenp@548: assert (UseCompressedOops, "must be compressed");
coleenp@613: verify_oop(src);
coleenp@548: Label done;
coleenp@548: if (src == dst) {
coleenp@548: // optimize for frequent case src == dst
coleenp@548: bpr(rc_nz, true, Assembler::pt, src, done);
coleenp@548: delayed() -> sub(src, G6_heapbase, dst); // annulled if not taken
coleenp@548: bind(done);
coleenp@548: srlx(src, LogMinObjAlignmentInBytes, dst);
coleenp@548: } else {
coleenp@548: bpr(rc_z, false, Assembler::pn, src, done);
coleenp@548: delayed() -> mov(G0, dst);
coleenp@548: // could be moved before the branch, annulling the delay slot,
coleenp@548: // but that may add some unneeded work decoding null
coleenp@548: sub(src, G6_heapbase, dst);
coleenp@548: srlx(dst, LogMinObjAlignmentInBytes, dst);
coleenp@548: bind(done);
coleenp@548: }
coleenp@548: }
coleenp@548:
coleenp@548:
coleenp@548: void MacroAssembler::encode_heap_oop_not_null(Register r) {
coleenp@548: assert (UseCompressedOops, "must be compressed");
coleenp@613: verify_oop(r);
coleenp@548: sub(r, G6_heapbase, r);
coleenp@548: srlx(r, LogMinObjAlignmentInBytes, r);
coleenp@548: }
coleenp@548:
kvn@559: void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
kvn@559: assert (UseCompressedOops, "must be compressed");
coleenp@613: verify_oop(src);
kvn@559: sub(src, G6_heapbase, dst);
kvn@559: srlx(dst, LogMinObjAlignmentInBytes, dst);
kvn@559: }
kvn@559:
coleenp@548: // Same algorithm as oops.inline.hpp decode_heap_oop.
coleenp@548: void MacroAssembler::decode_heap_oop(Register src, Register dst) {
coleenp@548: assert (UseCompressedOops, "must be compressed");
coleenp@548: Label done;
coleenp@548: sllx(src, LogMinObjAlignmentInBytes, dst);
coleenp@548: bpr(rc_nz, true, Assembler::pt, dst, done);
coleenp@548: delayed() -> add(dst, G6_heapbase, dst); // annulled if not taken
coleenp@548: bind(done);
coleenp@613: verify_oop(dst);
coleenp@548: }
coleenp@548:
coleenp@548: void MacroAssembler::decode_heap_oop_not_null(Register r) {
coleenp@548: // Do not add assert code to this unless you change vtableStubs_sparc.cpp
coleenp@548: // pd_code_size_limit.
coleenp@613: // Also do not verify_oop as this is called by verify_oop.
coleenp@548: assert (UseCompressedOops, "must be compressed");
coleenp@548: sllx(r, LogMinObjAlignmentInBytes, r);
coleenp@548: add(r, G6_heapbase, r);
coleenp@548: }
coleenp@548:
kvn@559: void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
kvn@559: // Do not add assert code to this unless you change vtableStubs_sparc.cpp
kvn@559: // pd_code_size_limit.
coleenp@613: // Also do not verify_oop as this is called by verify_oop.
kvn@559: assert (UseCompressedOops, "must be compressed");
kvn@559: sllx(src, LogMinObjAlignmentInBytes, dst);
kvn@559: add(dst, G6_heapbase, dst);
kvn@559: }
kvn@559:
coleenp@548: void MacroAssembler::reinit_heapbase() {
coleenp@548: if (UseCompressedOops) {
coleenp@548: // call indirectly to solve generation ordering problem
coleenp@548: Address base(G6_heapbase, (address)Universe::heap_base_addr());
coleenp@548: load_ptr_contents(base, G6_heapbase);
coleenp@548: }
coleenp@548: }
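coleenp@548:
coleenp@548: // For reference, the compressed-oop arithmetic used above:
coleenp@548: // encode: narrow = (oop - G6_heapbase) >> LogMinObjAlignmentInBytes
coleenp@548: // decode: oop = (narrow << LogMinObjAlignmentInBytes) + G6_heapbase
coleenp@548: // with NULL mapping to narrow 0 in the null-checked variants.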