src/cpu/sparc/vm/assembler_sparc.cpp

author       never
date         Tue, 14 Jun 2011 14:41:33 -0700
changeset    2954:f8c9417e3571
parent       2950:cba7b5c2d53f
child        3037:3d42f82cd811
permissions  -rw-r--r--

7052219: JSR 292: Crash in ~BufferBlob::MethodHandles adapters
Reviewed-by: twisti, kvn, jrose

     1 /*
     2  * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "asm/assembler.hpp"
    27 #include "assembler_sparc.inline.hpp"
    28 #include "gc_interface/collectedHeap.inline.hpp"
    29 #include "interpreter/interpreter.hpp"
    30 #include "memory/cardTableModRefBS.hpp"
    31 #include "memory/resourceArea.hpp"
    32 #include "prims/methodHandles.hpp"
    33 #include "runtime/biasedLocking.hpp"
    34 #include "runtime/interfaceSupport.hpp"
    35 #include "runtime/objectMonitor.hpp"
    36 #include "runtime/os.hpp"
    37 #include "runtime/sharedRuntime.hpp"
    38 #include "runtime/stubRoutines.hpp"
    39 #ifndef SERIALGC
    40 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    41 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
    42 #include "gc_implementation/g1/heapRegion.hpp"
    43 #endif
    45 #ifdef PRODUCT
    46 #define BLOCK_COMMENT(str) /* nothing */
    47 #else
    48 #define BLOCK_COMMENT(str) block_comment(str)
    49 #endif
    51 // Convert the raw encoding form into the form expected by the
    52 // constructor for Address.
    53 Address Address::make_raw(int base, int index, int scale, int disp, bool disp_is_oop) {
    54   assert(scale == 0, "not supported");
    55   RelocationHolder rspec;
    56   if (disp_is_oop) {
    57     rspec = Relocation::spec_simple(relocInfo::oop_type);
    58   }
    60   Register rindex = as_Register(index);
    61   if (rindex != G0) {
    62     Address madr(as_Register(base), rindex);
    63     madr._rspec = rspec;
    64     return madr;
    65   } else {
    66     Address madr(as_Register(base), disp);
    67     madr._rspec = rspec;
    68     return madr;
    69   }
    70 }
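       // Illustration of the two shapes make_raw() can return (scale must always
       // be 0 on SPARC): an index of G0 collapses to the register+displacement
       // form, any other index selects the register+register form. Using the raw
       // register encodings for base and index, roughly:
       //   make_raw(L7->encoding(), G0->encoding(), 0, 0x20, false)  ~ Address(L7, 0x20)
       //   make_raw(L7->encoding(), O3->encoding(), 0, 0,    false)  ~ Address(L7, O3)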
    72 Address Argument::address_in_frame() const {
    73   // Warning: In LP64 mode disp will occupy more than 10 bits, but
    74   //          op codes such as ld or ldx, only access disp() to get
    75   //          their simm13 argument.
    76   int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
    77   if (is_in())
    78     return Address(FP, disp); // In argument.
    79   else
    80     return Address(SP, disp); // Out argument.
    81 }
    83 static const char* argumentNames[][2] = {
    84   {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
    85   {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
    86   {"A(n>9)","P(n>9)"}
    87 };
    89 const char* Argument::name() const {
    90   int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
    91   int num = number();
    92   if (num >= nofArgs)  num = nofArgs - 1;
    93   return argumentNames[num][is_in() ? 1 : 0];
    94 }
    96 void Assembler::print_instruction(int inst) {
    97   const char* s;
    98   switch (inv_op(inst)) {
    99   default:         s = "????"; break;
   100   case call_op:    s = "call"; break;
   101   case branch_op:
   102     switch (inv_op2(inst)) {
   103       case bpr_op2:    s = "bpr";  break;
   104       case fb_op2:     s = "fb";   break;
   105       case fbp_op2:    s = "fbp";  break;
   106       case br_op2:     s = "br";   break;
   107       case bp_op2:     s = "bp";   break;
   108       case cb_op2:     s = "cb";   break;
   109       default:         s = "????"; break;
   110     }
   111   }
   112   ::tty->print("%s", s);
   113 }
   116 // Patch instruction inst at offset inst_pos to refer to dest_pos
   117 // and return the resulting instruction.
   118 // We should have pcs, not offsets, but since all is relative, it will work out
   119 // OK.
   120 int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) {
   122   int m; // mask for displacement field
   123   int v; // new value for displacement field
   124   const int word_aligned_ones = -4;
   125   switch (inv_op(inst)) {
   126   default: ShouldNotReachHere();
   127   case call_op:    m = wdisp(word_aligned_ones, 0, 30);  v = wdisp(dest_pos, inst_pos, 30); break;
   128   case branch_op:
   129     switch (inv_op2(inst)) {
   130       case bpr_op2:    m = wdisp16(word_aligned_ones, 0);      v = wdisp16(dest_pos, inst_pos);     break;
   131       case fbp_op2:    m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
   132       case bp_op2:     m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
   133       case fb_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
   134       case br_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
   135       case cb_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
   136       default: ShouldNotReachHere();
   137     }
   138   }
   139   return  inst & ~m  |  v;
   140 }
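       // Worked example (illustrative): for a V9 "bp" the 19-bit displacement is a
       // word offset, so m = wdisp(word_aligned_ones, 0, 19) masks the disp field
       // and v = wdisp(dest_pos, inst_pos, 19) encodes (dest_pos - inst_pos) >> 2.
       // Retargeting a bp at offset 0x100 to offset 0x180 therefore splices the
       // word displacement 0x20 into the instruction:
       //   int patched = inst & ~m | v;   // with dest_pos = 0x180, inst_pos = 0x100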
   142 // Return the offset of the branch destination of instruction inst
   143 // at offset pos.
   144 // Should have pcs, but since all is relative, it works out.
   145 int Assembler::branch_destination(int inst, int pos) {
   146   int r;
   147   switch (inv_op(inst)) {
   148   default: ShouldNotReachHere();
   149   case call_op:        r = inv_wdisp(inst, pos, 30);  break;
   150   case branch_op:
   151     switch (inv_op2(inst)) {
   152       case bpr_op2:    r = inv_wdisp16(inst, pos);    break;
   153       case fbp_op2:    r = inv_wdisp(  inst, pos, 19);  break;
   154       case bp_op2:     r = inv_wdisp(  inst, pos, 19);  break;
   155       case fb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
   156       case br_op2:     r = inv_wdisp(  inst, pos, 22);  break;
   157       case cb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
   158       default: ShouldNotReachHere();
   159     }
   160   }
   161   return r;
   162 }
   164 int AbstractAssembler::code_fill_byte() {
   165   return 0x00;                  // illegal instruction 0x00000000
   166 }
   168 Assembler::Condition Assembler::reg_cond_to_cc_cond(Assembler::RCondition in) {
   169   switch (in) {
   170   case rc_z:   return equal;
   171   case rc_lez: return lessEqual;
   172   case rc_lz:  return less;
   173   case rc_nz:  return notEqual;
   174   case rc_gz:  return greater;
   175   case rc_gez: return greaterEqual;
   176   default:
   177     ShouldNotReachHere();
   178   }
   179   return equal;
   180 }
   182 // Generate a bunch 'o stuff (including v9's)
   183 #ifndef PRODUCT
   184 void Assembler::test_v9() {
   185   add(    G0, G1, G2 );
   186   add(    G3,  0, G4 );
   188   addcc(  G5, G6, G7 );
   189   addcc(  I0,  1, I1 );
   190   addc(   I2, I3, I4 );
   191   addc(   I5, -1, I6 );
   192   addccc( I7, L0, L1 );
   193   addccc( L2, (1 << 12) - 2, L3 );
   195   Label lbl1, lbl2, lbl3;
   197   bind(lbl1);
   199   bpr( rc_z,    true, pn, L4, pc(),  relocInfo::oop_type );
   200   delayed()->nop();
   201   bpr( rc_lez, false, pt, L5, lbl1);
   202   delayed()->nop();
   204   fb( f_never,     true, pc() + 4,  relocInfo::none);
   205   delayed()->nop();
   206   fb( f_notEqual, false, lbl2 );
   207   delayed()->nop();
   209   fbp( f_notZero,        true, fcc0, pn, pc() - 4,  relocInfo::none);
   210   delayed()->nop();
   211   fbp( f_lessOrGreater, false, fcc1, pt, lbl3 );
   212   delayed()->nop();
   214   br( equal,  true, pc() + 1024, relocInfo::none);
   215   delayed()->nop();
   216   br( lessEqual, false, lbl1 );
   217   delayed()->nop();
   218   br( never, false, lbl1 );
   219   delayed()->nop();
   221   bp( less,               true, icc, pn, pc(), relocInfo::none);
   222   delayed()->nop();
   223   bp( lessEqualUnsigned, false, xcc, pt, lbl2 );
   224   delayed()->nop();
   226   call( pc(), relocInfo::none);
   227   delayed()->nop();
   228   call( lbl3 );
   229   delayed()->nop();
   232   casa(  L6, L7, O0 );
   233   casxa( O1, O2, O3, 0 );
   235   udiv(   O4, O5, O7 );
   236   udiv(   G0, (1 << 12) - 1, G1 );
   237   sdiv(   G1, G2, G3 );
   238   sdiv(   G4, -((1 << 12) - 1), G5 );
   239   udivcc( G6, G7, I0 );
   240   udivcc( I1, -((1 << 12) - 2), I2 );
   241   sdivcc( I3, I4, I5 );
   242   sdivcc( I6, -((1 << 12) - 0), I7 );
   244   done();
   245   retry();
   247   fadd( FloatRegisterImpl::S, F0,  F1, F2 );
   248   fsub( FloatRegisterImpl::D, F34, F0, F62 );
   250   fcmp(  FloatRegisterImpl::Q, fcc0, F0, F60);
   251   fcmpe( FloatRegisterImpl::S, fcc1, F31, F30);
   253   ftox( FloatRegisterImpl::D, F2, F4 );
   254   ftoi( FloatRegisterImpl::Q, F4, F8 );
   256   ftof( FloatRegisterImpl::S, FloatRegisterImpl::Q, F3, F12 );
   258   fxtof( FloatRegisterImpl::S, F4, F5 );
   259   fitof( FloatRegisterImpl::D, F6, F8 );
   261   fmov( FloatRegisterImpl::Q, F16, F20 );
   262   fneg( FloatRegisterImpl::S, F6, F7 );
   263   fabs( FloatRegisterImpl::D, F10, F12 );
   265   fmul( FloatRegisterImpl::Q,  F24, F28, F32 );
   266   fmul( FloatRegisterImpl::S,  FloatRegisterImpl::D,  F8, F9, F14 );
   267   fdiv( FloatRegisterImpl::S,  F10, F11, F12 );
   269   fsqrt( FloatRegisterImpl::S, F13, F14 );
   271   flush( L0, L1 );
   272   flush( L2, -1 );
   274   flushw();
   276   illtrap( (1 << 22) - 2);
   278   impdep1( 17, (1 << 19) - 1 );
   279   impdep2( 3,  0 );
   281   jmpl( L3, L4, L5 );
   282   delayed()->nop();
   283   jmpl( L6, -1, L7, Relocation::spec_simple(relocInfo::none));
   284   delayed()->nop();
   287   ldf(    FloatRegisterImpl::S, O0, O1, F15 );
   288   ldf(    FloatRegisterImpl::D, O2, -1, F14 );
   291   ldfsr(  O3, O4 );
   292   ldfsr(  O5, -1 );
   293   ldxfsr( O6, O7 );
   294   ldxfsr( I0, -1 );
   296   ldfa(  FloatRegisterImpl::D, I1, I2, 1, F16 );
   297   ldfa(  FloatRegisterImpl::Q, I3, -1,    F36 );
   299   ldsb(  I4, I5, I6 );
   300   ldsb(  I7, -1, G0 );
   301   ldsh(  G1, G3, G4 );
   302   ldsh(  G5, -1, G6 );
   303   ldsw(  G7, L0, L1 );
   304   ldsw(  L2, -1, L3 );
   305   ldub(  L4, L5, L6 );
   306   ldub(  L7, -1, O0 );
   307   lduh(  O1, O2, O3 );
   308   lduh(  O4, -1, O5 );
   309   lduw(  O6, O7, G0 );
   310   lduw(  G1, -1, G2 );
   311   ldx(   G3, G4, G5 );
   312   ldx(   G6, -1, G7 );
   313   ldd(   I0, I1, I2 );
   314   ldd(   I3, -1, I4 );
   316   ldsba(  I5, I6, 2, I7 );
   317   ldsba(  L0, -1, L1 );
   318   ldsha(  L2, L3, 3, L4 );
   319   ldsha(  L5, -1, L6 );
   320   ldswa(  L7, O0, (1 << 8) - 1, O1 );
   321   ldswa(  O2, -1, O3 );
   322   lduba(  O4, O5, 0, O6 );
   323   lduba(  O7, -1, I0 );
   324   lduha(  I1, I2, 1, I3 );
   325   lduha(  I4, -1, I5 );
   326   lduwa(  I6, I7, 2, L0 );
   327   lduwa(  L1, -1, L2 );
   328   ldxa(   L3, L4, 3, L5 );
   329   ldxa(   L6, -1, L7 );
   330   ldda(   G0, G1, 4, G2 );
   331   ldda(   G3, -1, G4 );
   333   ldstub(  G5, G6, G7 );
   334   ldstub(  O0, -1, O1 );
   336   ldstuba( O2, O3, 5, O4 );
   337   ldstuba( O5, -1, O6 );
   339   and3(    I0, L0, O0 );
   340   and3(    G7, -1, O7 );
   341   andcc(   L2, I2, G2 );
   342   andcc(   L4, -1, G4 );
   343   andn(    I5, I6, I7 );
   344   andn(    I6, -1, I7 );
   345   andncc(  I5, I6, I7 );
   346   andncc(  I7, -1, I6 );
   347   or3(     I5, I6, I7 );
   348   or3(     I7, -1, I6 );
   349   orcc(    I5, I6, I7 );
   350   orcc(    I7, -1, I6 );
   351   orn(     I5, I6, I7 );
   352   orn(     I7, -1, I6 );
   353   orncc(   I5, I6, I7 );
   354   orncc(   I7, -1, I6 );
   355   xor3(    I5, I6, I7 );
   356   xor3(    I7, -1, I6 );
   357   xorcc(   I5, I6, I7 );
   358   xorcc(   I7, -1, I6 );
   359   xnor(    I5, I6, I7 );
   360   xnor(    I7, -1, I6 );
   361   xnorcc(  I5, I6, I7 );
   362   xnorcc(  I7, -1, I6 );
   364   membar( Membar_mask_bits(StoreStore | LoadStore | StoreLoad | LoadLoad | Sync | MemIssue | Lookaside ) );
   365   membar( StoreStore );
   366   membar( LoadStore );
   367   membar( StoreLoad );
   368   membar( LoadLoad );
   369   membar( Sync );
   370   membar( MemIssue );
   371   membar( Lookaside );
   373   fmov( FloatRegisterImpl::S, f_ordered,  true, fcc2, F16, F17 );
   374   fmov( FloatRegisterImpl::D, rc_lz, L5, F18, F20 );
   376   movcc( overflowClear,  false, icc, I6, L4 );
   377   movcc( f_unorderedOrEqual, true, fcc2, (1 << 10) - 1, O0 );
   379   movr( rc_nz, I5, I6, I7 );
   380   movr( rc_gz, L1, -1,  L2 );
   382   mulx(  I5, I6, I7 );
   383   mulx(  I7, -1, I6 );
   384   sdivx( I5, I6, I7 );
   385   sdivx( I7, -1, I6 );
   386   udivx( I5, I6, I7 );
   387   udivx( I7, -1, I6 );
   389   umul(   I5, I6, I7 );
   390   umul(   I7, -1, I6 );
   391   smul(   I5, I6, I7 );
   392   smul(   I7, -1, I6 );
   393   umulcc( I5, I6, I7 );
   394   umulcc( I7, -1, I6 );
   395   smulcc( I5, I6, I7 );
   396   smulcc( I7, -1, I6 );
   398   mulscc(   I5, I6, I7 );
   399   mulscc(   I7, -1, I6 );
   401   nop();
   404   popc( G0,  G1);
   405   popc( -1, G2);
   407   prefetch(   L1, L2,    severalReads );
   408   prefetch(   L3, -1,    oneRead );
   409   prefetcha(  O3, O2, 6, severalWritesAndPossiblyReads );
   410   prefetcha(  G2, -1,    oneWrite );
   412   rett( I7, I7);
   413   delayed()->nop();
   414   rett( G0, -1, relocInfo::none);
   415   delayed()->nop();
   417   save(    I5, I6, I7 );
   418   save(    I7, -1, I6 );
   419   restore( I5, I6, I7 );
   420   restore( I7, -1, I6 );
   422   saved();
   423   restored();
   425   sethi( 0xaaaaaaaa, I3, Relocation::spec_simple(relocInfo::none));
   427   sll(  I5, I6, I7 );
   428   sll(  I7, 31, I6 );
   429   srl(  I5, I6, I7 );
   430   srl(  I7,  0, I6 );
   431   sra(  I5, I6, I7 );
   432   sra(  I7, 30, I6 );
   433   sllx( I5, I6, I7 );
   434   sllx( I7, 63, I6 );
   435   srlx( I5, I6, I7 );
   436   srlx( I7,  0, I6 );
   437   srax( I5, I6, I7 );
   438   srax( I7, 62, I6 );
   440   sir( -1 );
   442   stbar();
   444   stf(    FloatRegisterImpl::Q, F40, G0, I7 );
   445   stf(    FloatRegisterImpl::S, F18, I3, -1 );
   447   stfsr(  L1, L2 );
   448   stfsr(  I7, -1 );
   449   stxfsr( I6, I5 );
   450   stxfsr( L4, -1 );
   452   stfa(  FloatRegisterImpl::D, F22, I6, I7, 7 );
   453   stfa(  FloatRegisterImpl::Q, F44, G0, -1 );
   455   stb(  L5, O2, I7 );
   456   stb(  I7, I6, -1 );
   457   sth(  L5, O2, I7 );
   458   sth(  I7, I6, -1 );
   459   stw(  L5, O2, I7 );
   460   stw(  I7, I6, -1 );
   461   stx(  L5, O2, I7 );
   462   stx(  I7, I6, -1 );
   463   std(  L5, O2, I7 );
   464   std(  I7, I6, -1 );
   466   stba(  L5, O2, I7, 8 );
   467   stba(  I7, I6, -1    );
   468   stha(  L5, O2, I7, 9 );
   469   stha(  I7, I6, -1    );
   470   stwa(  L5, O2, I7, 0 );
   471   stwa(  I7, I6, -1    );
   472   stxa(  L5, O2, I7, 11 );
   473   stxa(  I7, I6, -1     );
   474   stda(  L5, O2, I7, 12 );
   475   stda(  I7, I6, -1     );
   477   sub(    I5, I6, I7 );
   478   sub(    I7, -1, I6 );
   479   subcc(  I5, I6, I7 );
   480   subcc(  I7, -1, I6 );
   481   subc(   I5, I6, I7 );
   482   subc(   I7, -1, I6 );
   483   subccc( I5, I6, I7 );
   484   subccc( I7, -1, I6 );
   486   swap( I5, I6, I7 );
   487   swap( I7, -1, I6 );
   489   swapa(   G0, G1, 13, G2 );
   490   swapa(   I7, -1,     I6 );
   492   taddcc(    I5, I6, I7 );
   493   taddcc(    I7, -1, I6 );
   494   taddcctv(  I5, I6, I7 );
   495   taddcctv(  I7, -1, I6 );
   497   tsubcc(    I5, I6, I7 );
   498   tsubcc(    I7, -1, I6 );
   499   tsubcctv(  I5, I6, I7 );
   500   tsubcctv(  I7, -1, I6 );
   502   trap( overflowClear, xcc, G0, G1 );
   503   trap( lessEqual,     icc, I7, 17 );
   505   bind(lbl2);
   506   bind(lbl3);
   508   code()->decode();
   509 }
   511 // Generate a bunch 'o stuff unique to V8
   512 void Assembler::test_v8_onlys() {
   513   Label lbl1;
   515   cb( cp_0or1or2, false, pc() - 4, relocInfo::none);
   516   delayed()->nop();
   517   cb( cp_never,    true, lbl1);
   518   delayed()->nop();
   520   cpop1(1, 2, 3, 4);
   521   cpop2(5, 6, 7, 8);
   523   ldc( I0, I1, 31);
   524   ldc( I2, -1,  0);
   526   lddc( I4, I4, 30);
   527   lddc( I6,  0, 1 );
   529   ldcsr( L0, L1, 0);
   530   ldcsr( L1, (1 << 12) - 1, 17 );
   532   stc( 31, L4, L5);
   533   stc( 30, L6, -(1 << 12) );
   535   stdc( 0, L7, G0);
   536   stdc( 1, G1, 0 );
   538   stcsr( 16, G2, G3);
   539   stcsr( 17, G4, 1 );
   541   stdcq( 4, G5, G6);
   542   stdcq( 5, G7, -1 );
   544   bind(lbl1);
   546   code()->decode();
   547 }
   548 #endif
   550 // Implementation of MacroAssembler
   552 void MacroAssembler::null_check(Register reg, int offset) {
   553   if (needs_explicit_null_check((intptr_t)offset)) {
   554     // provoke OS NULL exception if reg = NULL by
   555     // accessing M[reg] w/o changing any registers
   556     ld_ptr(reg, 0, G0);
   557   }
   558   else {
   559     // nothing to do, (later) access of M[reg + offset]
   560     // will provoke OS NULL exception if reg = NULL
   561   }
   562 }
   564 // Ring buffer jumps
   566 #ifndef PRODUCT
   567 void MacroAssembler::ret(  bool trace )   { if (trace) {
   568                                                     mov(I7, O7); // traceable register
   569                                                     JMP(O7, 2 * BytesPerInstWord);
   570                                                   } else {
   571                                                     jmpl( I7, 2 * BytesPerInstWord, G0 );
   572                                                   }
   573                                                 }
   575 void MacroAssembler::retl( bool trace )  { if (trace) JMP(O7, 2 * BytesPerInstWord);
   576                                                  else jmpl( O7, 2 * BytesPerInstWord, G0 ); }
   577 #endif /* PRODUCT */
   580 void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line ) {
   581   assert_not_delayed();
   582   // This can only be traceable if r1 & r2 are visible after a window save
   583   if (TraceJumps) {
   584 #ifndef PRODUCT
   585     save_frame(0);
   586     verify_thread();
   587     ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
   588     add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
   589     sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
   590     add(O2, O1, O1);
   592     add(r1->after_save(), r2->after_save(), O2);
   593     set((intptr_t)file, O3);
   594     set(line, O4);
   595     Label L;
   596     // get nearby pc, store jmp target
   597     call(L, relocInfo::none);  // No relocation for call to pc+0x8
   598     delayed()->st(O2, O1, 0);
   599     bind(L);
   601     // store nearby pc
   602     st(O7, O1, sizeof(intptr_t));
   603     // store file
   604     st(O3, O1, 2*sizeof(intptr_t));
   605     // store line
   606     st(O4, O1, 3*sizeof(intptr_t));
   607     add(O0, 1, O0);
   608     and3(O0, JavaThread::jump_ring_buffer_size  - 1, O0);
   609     st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
   610     restore();
   611 #endif /* PRODUCT */
   612   }
   613   jmpl(r1, r2, G0);
   614 }
   615 void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
   616   assert_not_delayed();
   617   // This can only be traceable if r1 is visible after a window save
   618   if (TraceJumps) {
   619 #ifndef PRODUCT
   620     save_frame(0);
   621     verify_thread();
   622     ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
   623     add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
   624     sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
   625     add(O2, O1, O1);
   627     add(r1->after_save(), offset, O2);
   628     set((intptr_t)file, O3);
   629     set(line, O4);
   630     Label L;
   631     // get nearby pc, store jmp target
   632     call(L, relocInfo::none);  // No relocation for call to pc+0x8
   633     delayed()->st(O2, O1, 0);
   634     bind(L);
   636     // store nearby pc
   637     st(O7, O1, sizeof(intptr_t));
   638     // store file
   639     st(O3, O1, 2*sizeof(intptr_t));
   640     // store line
   641     st(O4, O1, 3*sizeof(intptr_t));
   642     add(O0, 1, O0);
   643     and3(O0, JavaThread::jump_ring_buffer_size  - 1, O0);
   644     st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
   645     restore();
   646 #endif /* PRODUCT */
   647   }
   648   jmp(r1, offset);
   649 }
   651 // This code sequence is relocatable to any address, even on LP64.
   652 void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
   653   assert_not_delayed();
   654   // Force fixed length sethi because NativeJump and NativeFarCall don't handle
   655   // variable length instruction streams.
   656   patchable_sethi(addrlit, temp);
   657   Address a(temp, addrlit.low10() + offset);  // Add the offset to the displacement.
   658   if (TraceJumps) {
   659 #ifndef PRODUCT
   660     // Must do the add here so relocation can find the remainder of the
   661     // value to be relocated.
   662     add(a.base(), a.disp(), a.base(), addrlit.rspec(offset));
   663     save_frame(0);
   664     verify_thread();
   665     ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
   666     add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
   667     sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
   668     add(O2, O1, O1);
   670     set((intptr_t)file, O3);
   671     set(line, O4);
   672     Label L;
   674     // get nearby pc, store jmp target
   675     call(L, relocInfo::none);  // No relocation for call to pc+0x8
   676     delayed()->st(a.base()->after_save(), O1, 0);
   677     bind(L);
   679     // store nearby pc
   680     st(O7, O1, sizeof(intptr_t));
   681     // store file
   682     st(O3, O1, 2*sizeof(intptr_t));
   683     // store line
   684     st(O4, O1, 3*sizeof(intptr_t));
   685     add(O0, 1, O0);
   686     and3(O0, JavaThread::jump_ring_buffer_size  - 1, O0);
   687     st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
   688     restore();
   689     jmpl(a.base(), G0, d);
   690 #else
   691     jmpl(a.base(), a.disp(), d);
   692 #endif /* PRODUCT */
   693   } else {
   694     jmpl(a.base(), a.disp(), d);
   695   }
   696 }
   698 void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
   699   jumpl(addrlit, temp, G0, offset, file, line);
   700 }
   703 // Convert to C varargs format
   704 void MacroAssembler::set_varargs( Argument inArg, Register d ) {
   705   // spill register-resident args to their memory slots
   706   // (SPARC calling convention requires callers to have already preallocated these)
   707   // Note that the inArg might in fact be an outgoing argument,
   708   // if a leaf routine or stub does some tricky argument shuffling.
   709   // This routine must work even though one of the saved arguments
   710   // is in the d register (e.g., set_varargs(Argument(0, false), O0)).
   711   for (Argument savePtr = inArg;
   712        savePtr.is_register();
   713        savePtr = savePtr.successor()) {
   714     st_ptr(savePtr.as_register(), savePtr.address_in_frame());
   715   }
   716   // return the address of the first memory slot
   717   Address a = inArg.address_in_frame();
   718   add(a.base(), a.disp(), d);
   719 }
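       // Hypothetical use from a stub, following the comment above: spill the
       // register-resident outgoing arguments and leave the address of the first
       // memory slot in O0:
       //   set_varargs(Argument(0, false), O0);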
   721 // Conditional breakpoint (for assertion checks in assembly code)
   722 void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
   723   trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
   724 }
   726 // We want to use ST_BREAKPOINT here, but the debugger is confused by it.
   727 void MacroAssembler::breakpoint_trap() {
   728   trap(ST_RESERVED_FOR_USER_0);
   729 }
   731 // flush windows (except current) using flushw instruction if avail.
   732 void MacroAssembler::flush_windows() {
   733   if (VM_Version::v9_instructions_work())  flushw();
   734   else                                     flush_windows_trap();
   735 }
   737 // Write serialization page so VM thread can do a pseudo remote membar
   738 // We use the current thread pointer to calculate a thread specific
   739 // offset to write to within the page. This minimizes bus traffic
   740 // due to cache line collision.
   741 void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
   742   srl(thread, os::get_serialize_page_shift_count(), tmp2);
   743   if (Assembler::is_simm13(os::vm_page_size())) {
   744     and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
   745   }
   746   else {
   747     set((os::vm_page_size() - sizeof(int)), tmp1);
   748     and3(tmp2, tmp1, tmp2);
   749   }
   750   set(os::get_memory_serialize_page(), tmp1);
   751   st(G0, tmp1, tmp2);
   752 }
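       // A sketch of the store performed above (page size and shift count come
       // from the os:: helpers; the exact values are platform dependent):
       //   offset = (thread >> os::get_serialize_page_shift_count())
       //            & (os::vm_page_size() - sizeof(int));
       //   *(int*) (os::get_memory_serialize_page() + offset) = 0;   // the st(G0, ...)
       // Each thread hashes to its own word, so the serializing stores seldom
       // share a cache line.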
   756 void MacroAssembler::enter() {
   757   Unimplemented();
   758 }
   760 void MacroAssembler::leave() {
   761   Unimplemented();
   762 }
   764 void MacroAssembler::mult(Register s1, Register s2, Register d) {
   765   if(VM_Version::v9_instructions_work()) {
   766     mulx (s1, s2, d);
   767   } else {
   768     smul (s1, s2, d);
   769   }
   770 }
   772 void MacroAssembler::mult(Register s1, int simm13a, Register d) {
   773   if(VM_Version::v9_instructions_work()) {
   774     mulx (s1, simm13a, d);
   775   } else {
   776     smul (s1, simm13a, d);
   777   }
   778 }
   781 #ifdef ASSERT
   782 void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
   783   const Register s1 = G3_scratch;
   784   const Register s2 = G4_scratch;
   785   Label get_psr_test;
   786   // Get the condition codes the V8 way.
   787   read_ccr_trap(s1);
   788   mov(ccr_save, s2);
   789   // This is a test of V8 which has icc but not xcc
   790   // so mask off the xcc bits
   791   and3(s2, 0xf, s2);
   792   // Compare condition codes from the V8 and V9 ways.
   793   subcc(s2, s1, G0);
   794   br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
   795   delayed()->breakpoint_trap();
   796   bind(get_psr_test);
   797 }
   799 void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
   800   const Register s1 = G3_scratch;
   801   const Register s2 = G4_scratch;
   802   Label set_psr_test;
   803   // Write out the saved condition codes the V8 way
   804   write_ccr_trap(ccr_save, s1, s2);
   805   // Read back the condition codes using the V9 instruction
   806   rdccr(s1);
   807   mov(ccr_save, s2);
   808   // This is a test of V8 which has icc but not xcc
   809   // so mask off the xcc bits
   810   and3(s2, 0xf, s2);
   811   and3(s1, 0xf, s1);
   812   // Compare the V8 way with the V9 way.
   813   subcc(s2, s1, G0);
   814   br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
   815   delayed()->breakpoint_trap();
   816   bind(set_psr_test);
   817 }
   818 #else
   819 #define read_ccr_v8_assert(x)
   820 #define write_ccr_v8_assert(x)
   821 #endif // ASSERT
   823 void MacroAssembler::read_ccr(Register ccr_save) {
   824   if (VM_Version::v9_instructions_work()) {
   825     rdccr(ccr_save);
   826     // Test code sequence used on V8.  Do not move above rdccr.
   827     read_ccr_v8_assert(ccr_save);
   828   } else {
   829     read_ccr_trap(ccr_save);
   830   }
   831 }
   833 void MacroAssembler::write_ccr(Register ccr_save) {
   834   if (VM_Version::v9_instructions_work()) {
   835     // Test code sequence used on V8.  Do not move below wrccr.
   836     write_ccr_v8_assert(ccr_save);
   837     wrccr(ccr_save);
   838   } else {
   839     const Register temp_reg1 = G3_scratch;
   840     const Register temp_reg2 = G4_scratch;
   841     write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
   842   }
   843 }
   846 // Calls to C land
   848 #ifdef ASSERT
   849 // a hook for debugging
   850 static Thread* reinitialize_thread() {
   851   return ThreadLocalStorage::thread();
   852 }
   853 #else
   854 #define reinitialize_thread ThreadLocalStorage::thread
   855 #endif
   857 #ifdef ASSERT
   858 address last_get_thread = NULL;
   859 #endif
   861 // call this when G2_thread is not known to be valid
   862 void MacroAssembler::get_thread() {
   863   save_frame(0);                // to avoid clobbering O0
   864   mov(G1, L0);                  // avoid clobbering G1
   865   mov(G5_method, L1);           // avoid clobbering G5
   866   mov(G3, L2);                  // avoid clobbering G3 also
   867   mov(G4, L5);                  // avoid clobbering G4
   868 #ifdef ASSERT
   869   AddressLiteral last_get_thread_addrlit(&last_get_thread);
   870   set(last_get_thread_addrlit, L3);
   871   inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
   872   st_ptr(L4, L3, 0);
   873 #endif
   874   call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
   875   delayed()->nop();
   876   mov(L0, G1);
   877   mov(L1, G5_method);
   878   mov(L2, G3);
   879   mov(L5, G4);
   880   restore(O0, 0, G2_thread);
   881 }
   883 static Thread* verify_thread_subroutine(Thread* gthread_value) {
   884   Thread* correct_value = ThreadLocalStorage::thread();
   885   guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
   886   return correct_value;
   887 }
   889 void MacroAssembler::verify_thread() {
   890   if (VerifyThread) {
   891     // NOTE: this chops off the heads of the 64-bit O registers.
   892 #ifdef CC_INTERP
   893     save_frame(0);
   894 #else
   895     // make sure G2_thread contains the right value
   896     save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
   897     mov(G1, L1);                // avoid clobbering G1
   898     // G2 saved below
   899     mov(G3, L3);                // avoid clobbering G3
   900     mov(G4, L4);                // avoid clobbering G4
   901     mov(G5_method, L5);         // avoid clobbering G5_method
   902 #endif /* CC_INTERP */
   903 #if defined(COMPILER2) && !defined(_LP64)
   904     // Save & restore possible 64-bit Long arguments in G-regs
   905     srlx(G1,32,L0);
   906     srlx(G4,32,L6);
   907 #endif
   908     call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
   909     delayed()->mov(G2_thread, O0);
   911     mov(L1, G1);                // Restore G1
   912     // G2 restored below
   913     mov(L3, G3);                // restore G3
   914     mov(L4, G4);                // restore G4
   915     mov(L5, G5_method);         // restore G5_method
   916 #if defined(COMPILER2) && !defined(_LP64)
   917     // Save & restore possible 64-bit Long arguments in G-regs
   918     sllx(L0,32,G2);             // Move old high G1 bits high in G2
   919     srl(G1, 0,G1);              // Clear current high G1 bits
   920     or3 (G1,G2,G1);             // Recover 64-bit G1
   921     sllx(L6,32,G2);             // Move old high G4 bits high in G2
   922     srl(G4, 0,G4);              // Clear current high G4 bits
   923     or3 (G4,G2,G4);             // Recover 64-bit G4
   924 #endif
   925     restore(O0, 0, G2_thread);
   926   }
   927 }
   930 void MacroAssembler::save_thread(const Register thread_cache) {
   931   verify_thread();
   932   if (thread_cache->is_valid()) {
   933     assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
   934     mov(G2_thread, thread_cache);
   935   }
   936   if (VerifyThread) {
   937     // smash G2_thread, as if the VM were about to anyway
   938     set(0x67676767, G2_thread);
   939   }
   940 }
   943 void MacroAssembler::restore_thread(const Register thread_cache) {
   944   if (thread_cache->is_valid()) {
   945     assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
   946     mov(thread_cache, G2_thread);
   947     verify_thread();
   948   } else {
   949     // do it the slow way
   950     get_thread();
   951   }
   952 }
   955 // %%% maybe get rid of [re]set_last_Java_frame
   956 void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
   957   assert_not_delayed();
   958   Address flags(G2_thread, JavaThread::frame_anchor_offset() +
   959                            JavaFrameAnchor::flags_offset());
   960   Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());
   962   // Always set last_Java_pc and flags first because once last_Java_sp is visible
   963   // has_last_Java_frame is true and users will look at the rest of the fields.
   964   // (Note: flags should always be zero before we get here so doesn't need to be set.)
   966 #ifdef ASSERT
   967   // Verify that last_Java_pc was zeroed on return to Java
   968   Label PcOk;
   969   save_frame(0);                // to avoid clobbering O0
   970   ld_ptr(pc_addr, L0);
   971   tst(L0);
   972 #ifdef _LP64
   973   brx(Assembler::zero, false, Assembler::pt, PcOk);
   974 #else
   975   br(Assembler::zero, false, Assembler::pt, PcOk);
   976 #endif // _LP64
   977   delayed() -> nop();
   978   stop("last_Java_pc not zeroed before leaving Java");
   979   bind(PcOk);
   981   // Verify that flags was zeroed on return to Java
   982   Label FlagsOk;
   983   ld(flags, L0);
   984   tst(L0);
   985   br(Assembler::zero, false, Assembler::pt, FlagsOk);
   986   delayed() -> restore();
   987   stop("flags not zeroed before leaving Java");
   988   bind(FlagsOk);
   989 #endif /* ASSERT */
   990   //
   991   // When returning from calling out from Java mode the frame anchor's last_Java_pc
   992   // will always be set to NULL. It is set here so that if we are doing a call to
   993   // native (not VM) that we capture the known pc and don't have to rely on the
   994   // native call having a standard frame linkage where we can find the pc.
   996   if (last_Java_pc->is_valid()) {
   997     st_ptr(last_Java_pc, pc_addr);
   998   }
  1000 #ifdef _LP64
  1001 #ifdef ASSERT
  1002   // Make sure that we have an odd stack
  1003   Label StackOk;
  1004   andcc(last_java_sp, 0x01, G0);
  1005   br(Assembler::notZero, false, Assembler::pt, StackOk);
  1006   delayed() -> nop();
  1007   stop("Stack Not Biased in set_last_Java_frame");
  1008   bind(StackOk);
  1009 #endif // ASSERT
  1010   assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  1011   add( last_java_sp, STACK_BIAS, G4_scratch );
  1012   st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
  1013 #else
  1014   st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
  1015 #endif // _LP64
  1016 }
  1018 void MacroAssembler::reset_last_Java_frame(void) {
  1019   assert_not_delayed();
  1021   Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
  1022   Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
  1023   Address flags  (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
  1025 #ifdef ASSERT
  1026   // check that it WAS previously set
  1027 #ifdef CC_INTERP
  1028     save_frame(0);
  1029 #else
  1030     save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame for -Xprof
  1031 #endif /* CC_INTERP */
  1032     ld_ptr(sp_addr, L0);
  1033     tst(L0);
  1034     breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  1035     restore();
  1036 #endif // ASSERT
  1038   st_ptr(G0, sp_addr);
  1039   // Always return last_Java_pc to zero
  1040   st_ptr(G0, pc_addr);
  1041   // Always null flags after return to Java
  1042   st(G0, flags);
  1043 }
  1046 void MacroAssembler::call_VM_base(
  1047   Register        oop_result,
  1048   Register        thread_cache,
  1049   Register        last_java_sp,
  1050   address         entry_point,
  1051   int             number_of_arguments,
  1052   bool            check_exceptions)
  1053 {
  1054   assert_not_delayed();
  1056   // determine last_java_sp register
  1057   if (!last_java_sp->is_valid()) {
  1058     last_java_sp = SP;
  1059   }
  1060   // debugging support
  1061   assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
  1063   // 64-bit last_java_sp is biased!
  1064   set_last_Java_frame(last_java_sp, noreg);
  1065   if (VerifyThread)  mov(G2_thread, O0); // about to be smashed; pass early
  1066   save_thread(thread_cache);
  1067   // do the call
  1068   call(entry_point, relocInfo::runtime_call_type);
  1069   if (!VerifyThread)
  1070     delayed()->mov(G2_thread, O0);  // pass thread as first argument
  1071   else
  1072     delayed()->nop();             // (thread already passed)
  1073   restore_thread(thread_cache);
  1074   reset_last_Java_frame();
  1076   // check for pending exceptions. use Gtemp as scratch register.
  1077   if (check_exceptions) {
  1078     check_and_forward_exception(Gtemp);
  1079   }
  1081 #ifdef ASSERT
  1082   set(badHeapWordVal, G3);
  1083   set(badHeapWordVal, G4);
  1084   set(badHeapWordVal, G5);
  1085 #endif
  1087   // get oop result if there is one and reset the value in the thread
  1088   if (oop_result->is_valid()) {
  1089     get_vm_result(oop_result);
  1090   }
  1091 }
  1093 void MacroAssembler::check_and_forward_exception(Register scratch_reg)
  1094 {
  1095   Label L;
  1097   check_and_handle_popframe(scratch_reg);
  1098   check_and_handle_earlyret(scratch_reg);
  1100   Address exception_addr(G2_thread, Thread::pending_exception_offset());
  1101   ld_ptr(exception_addr, scratch_reg);
  1102   br_null(scratch_reg,false,pt,L);
  1103   delayed()->nop();
  1104   // we use O7 linkage so that forward_exception_entry has the issuing PC
  1105   call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  1106   delayed()->nop();
  1107   bind(L);
  1108 }
  1111 void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
  1112 }
  1115 void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
  1116 }
  1119 void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  1120   call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
  1121 }
  1124 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  1125   // O0 is reserved for the thread
  1126   mov(arg_1, O1);
  1127   call_VM(oop_result, entry_point, 1, check_exceptions);
  1128 }
  1131 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  1132   // O0 is reserved for the thread
  1133   mov(arg_1, O1);
  1134   mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  1135   call_VM(oop_result, entry_point, 2, check_exceptions);
  1136 }
  1139 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  1140   // O0 is reserved for the thread
  1141   mov(arg_1, O1);
  1142   mov(arg_2, O2); assert(arg_2 != O1,                "smashed argument");
  1143   mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  1144   call_VM(oop_result, entry_point, 3, check_exceptions);
  1145 }
  1149 // Note: The following call_VM overloadings are useful when a "save"
  1150 // has already been performed by a stub, and the last Java frame is
  1151 // the previous one.  In that case, last_java_sp must be passed as FP
  1152 // instead of SP.
  1155 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  1156   call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
  1157 }
  1160 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  1161   // O0 is reserved for the thread
  1162   mov(arg_1, O1);
  1163   call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
  1164 }
  1167 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  1168   // O0 is reserved for the thread
  1169   mov(arg_1, O1);
  1170   mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  1171   call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
  1172 }
  1175 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  1176   // O0 is reserved for the thread
  1177   mov(arg_1, O1);
  1178   mov(arg_2, O2); assert(arg_2 != O1,                "smashed argument");
  1179   mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  1180   call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
  1181 }
  1185 void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  1186   assert_not_delayed();
  1187   save_thread(thread_cache);
  1188   // do the call
  1189   call(entry_point, relocInfo::runtime_call_type);
  1190   delayed()->nop();
  1191   restore_thread(thread_cache);
  1192 #ifdef ASSERT
  1193   set(badHeapWordVal, G3);
  1194   set(badHeapWordVal, G4);
  1195   set(badHeapWordVal, G5);
  1196 #endif
  1197 }
  1200 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  1201   call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
  1202 }
  1205 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  1206   mov(arg_1, O0);
  1207   call_VM_leaf(thread_cache, entry_point, 1);
  1208 }
  1211 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  1212   mov(arg_1, O0);
  1213   mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  1214   call_VM_leaf(thread_cache, entry_point, 2);
  1215 }
  1218 void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  1219   mov(arg_1, O0);
  1220   mov(arg_2, O1); assert(arg_2 != O0,                "smashed argument");
  1221   mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  1222   call_VM_leaf(thread_cache, entry_point, 3);
  1223 }
  1226 void MacroAssembler::get_vm_result(Register oop_result) {
  1227   verify_thread();
  1228   Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  1229   ld_ptr(    vm_result_addr, oop_result);
  1230   st_ptr(G0, vm_result_addr);
  1231   verify_oop(oop_result);
  1232 }
  1235 void MacroAssembler::get_vm_result_2(Register oop_result) {
  1236   verify_thread();
  1237   Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
  1238   ld_ptr(vm_result_addr_2, oop_result);
  1239   st_ptr(G0, vm_result_addr_2);
  1240   verify_oop(oop_result);
  1241 }
  1244 // We require that C code which does not return a value in vm_result will
  1245 // leave it undisturbed.
  1246 void MacroAssembler::set_vm_result(Register oop_result) {
  1247   verify_thread();
  1248   Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  1249   verify_oop(oop_result);
  1251 # ifdef ASSERT
  1252     // Check that we are not overwriting any other oop.
  1253 #ifdef CC_INTERP
  1254     save_frame(0);
  1255 #else
  1256     save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod for -Xprof
  1257 #endif /* CC_INTERP */
  1258     ld_ptr(vm_result_addr, L0);
  1259     tst(L0);
  1260     restore();
  1261     breakpoint_trap(notZero, Assembler::ptr_cc);
  1262     // }
  1263 # endif
  1265   st_ptr(oop_result, vm_result_addr);
  1266 }
  1269 void MacroAssembler::card_table_write(jbyte* byte_map_base,
  1270                                       Register tmp, Register obj) {
  1271 #ifdef _LP64
  1272   srlx(obj, CardTableModRefBS::card_shift, obj);
  1273 #else
  1274   srl(obj, CardTableModRefBS::card_shift, obj);
  1275 #endif
  1276   assert(tmp != obj, "need separate temp reg");
  1277   set((address) byte_map_base, tmp);
  1278   stb(G0, tmp, obj);
  1279 }
  1282 void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  1283   address save_pc;
  1284   int shiftcnt;
  1285 #ifdef _LP64
  1286 # ifdef CHECK_DELAY
  1287   assert_not_delayed((char*) "cannot put two instructions in delay slot");
  1288 # endif
  1289   v9_dep();
  1290   save_pc = pc();
  1292   int msb32 = (int) (addrlit.value() >> 32);
  1293   int lsb32 = (int) (addrlit.value());
  1295   if (msb32 == 0 && lsb32 >= 0) {
  1296     Assembler::sethi(lsb32, d, addrlit.rspec());
  1297   }
  1298   else if (msb32 == -1) {
  1299     Assembler::sethi(~lsb32, d, addrlit.rspec());
  1300     xor3(d, ~low10(~0), d);
  1301   }
  1302   else {
  1303     Assembler::sethi(msb32, d, addrlit.rspec());  // msb 22-bits
  1304     if (msb32 & 0x3ff)                            // Any bits?
  1305       or3(d, msb32 & 0x3ff, d);                   // msb 32-bits are now in lsb 32
  1306     if (lsb32 & 0xFFFFFC00) {                     // done?
  1307       if ((lsb32 >> 20) & 0xfff) {                // Any bits set?
  1308         sllx(d, 12, d);                           // Make room for next 12 bits
  1309         or3(d, (lsb32 >> 20) & 0xfff, d);         // Or in next 12
  1310         shiftcnt = 0;                             // We already shifted
  1311       }
  1312       else
  1313         shiftcnt = 12;
  1314       if ((lsb32 >> 10) & 0x3ff) {
  1315         sllx(d, shiftcnt + 10, d);                // Make room for last 10 bits
  1316         or3(d, (lsb32 >> 10) & 0x3ff, d);         // Or in next 10
  1317         shiftcnt = 0;
  1318       }
  1319       else
  1320         shiftcnt = 10;
  1321       sllx(d, shiftcnt + 10, d);                  // Shift leaving disp field 0'd
  1322     }
  1323     else
  1324       sllx(d, 32, d);
  1325   }
  1326   // Pad out the instruction sequence so it can be patched later.
  1327   if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
  1328                            addrlit.rtype() != relocInfo::runtime_call_type)) {
  1329     while (pc() < (save_pc + (7 * BytesPerInstWord)))
  1330       nop();
  1331   }
  1332 #else
  1333   Assembler::sethi(addrlit.value(), d, addrlit.rspec());
  1334 #endif
  1335 }
  1338 void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
  1339   internal_sethi(addrlit, d, false);
  1340 }
  1343 void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
  1344   internal_sethi(addrlit, d, true);
  1345 }
  1348 int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
  1349 #ifdef _LP64
  1350   if (worst_case)  return 7;
  1351   intptr_t iaddr = (intptr_t) a;
  1352   int msb32 = (int) (iaddr >> 32);
  1353   int lsb32 = (int) (iaddr);
  1354   int count;
  1355   if (msb32 == 0 && lsb32 >= 0)
  1356     count = 1;
  1357   else if (msb32 == -1)
  1358     count = 2;
  1359   else {
  1360     count = 2;
  1361     if (msb32 & 0x3ff)
  1362       count++;
  1363     if (lsb32 & 0xFFFFFC00 ) {
  1364       if ((lsb32 >> 20) & 0xfff)  count += 2;
  1365       if ((lsb32 >> 10) & 0x3ff)  count += 2;
  1366     }
  1367   }
  1368   return count;
  1369 #else
  1370   return 1;
  1371 #endif
  1372 }
  1374 int MacroAssembler::worst_case_insts_for_set() {
  1375   return insts_for_sethi(NULL, true) + 1;
  1376 }
  1379 // Keep in sync with MacroAssembler::insts_for_internal_set
  1380 void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  1381   intptr_t value = addrlit.value();
  1383   if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
  1384     // can optimize
  1385     if (-4096 <= value && value <= 4095) {
  1386       or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
  1387       return;
  1388     }
  1389     if (inv_hi22(hi22(value)) == value) {
  1390       sethi(addrlit, d);
  1391       return;
  1392     }
  1393   }
  1394   assert_not_delayed((char*) "cannot put two instructions in delay slot");
  1395   internal_sethi(addrlit, d, ForceRelocatable);
  1396   if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
  1397     add(d, addrlit.low10(), d, addrlit.rspec());
  1398   }
  1399 }
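       // Rough shape of what internal_set() emits for a few sample values (a
       // sketch based only on the cases above, ignoring relocations):
       //   set(0x123,      d)  ->  or3(G0, 0x123, d)         // fits in simm13
       //   set(0x12345000, d)  ->  sethi(0x12345000, d)      // low 10 bits are 0
       //   set(0x12345123, d)  ->  sethi(...); add(d, 0x123, d)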
  1401 // Keep in sync with MacroAssembler::internal_set
  1402 int MacroAssembler::insts_for_internal_set(intptr_t value) {
  1403   // can optimize
  1404   if (-4096 <= value && value <= 4095) {
  1405     return 1;
  1406   }
  1407   if (inv_hi22(hi22(value)) == value) {
  1408     return insts_for_sethi((address) value);
  1409   }
  1410   int count = insts_for_sethi((address) value);
  1411   AddressLiteral al(value);
  1412   if (al.low10() != 0) {
  1413     count++;
  1414   }
  1415   return count;
  1416 }
  1418 void MacroAssembler::set(const AddressLiteral& al, Register d) {
  1419   internal_set(al, d, false);
  1420 }
  1422 void MacroAssembler::set(intptr_t value, Register d) {
  1423   AddressLiteral al(value);
  1424   internal_set(al, d, false);
  1425 }
  1427 void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
  1428   AddressLiteral al(addr, rspec);
  1429   internal_set(al, d, false);
  1430 }
  1432 void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
  1433   internal_set(al, d, true);
  1434 }
  1436 void MacroAssembler::patchable_set(intptr_t value, Register d) {
  1437   AddressLiteral al(value);
  1438   internal_set(al, d, true);
  1439 }
  1442 void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  1443   assert_not_delayed();
  1444   v9_dep();
  1446   int hi = (int)(value >> 32);
  1447   int lo = (int)(value & ~0);
  1448   // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  1449   if (Assembler::is_simm13(lo) && value == lo) {
  1450     or3(G0, lo, d);
  1451   } else if (hi == 0) {
  1452     Assembler::sethi(lo, d);   // hardware version zero-extends to upper 32
  1453     if (low10(lo) != 0)
  1454       or3(d, low10(lo), d);
  1455   }
  1456   else if (hi == -1) {
  1457     Assembler::sethi(~lo, d);  // hardware version zero-extends to upper 32
  1458     xor3(d, low10(lo) ^ ~low10(~0), d);
  1459   }
  1460   else if (lo == 0) {
  1461     if (Assembler::is_simm13(hi)) {
  1462       or3(G0, hi, d);
  1463     } else {
  1464       Assembler::sethi(hi, d);   // hardware version zero-extends to upper 32
  1465       if (low10(hi) != 0)
  1466         or3(d, low10(hi), d);
  1467     }
  1468     sllx(d, 32, d);
  1469   }
  1470   else {
  1471     Assembler::sethi(hi, tmp);
  1472     Assembler::sethi(lo,   d); // macro assembler version sign-extends
  1473     if (low10(hi) != 0)
  1474       or3 (tmp, low10(hi), tmp);
  1475     if (low10(lo) != 0)
  1476       or3 (  d, low10(lo),   d);
  1477     sllx(tmp, 32, tmp);
  1478     or3 (d, tmp, d);
  1479   }
  1480 }
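       // Worked example (illustrative): set64(CONST64(0x100000000), d, tmp) falls
       // into the "lo == 0, simm13 hi" case and emits just
       //   or3(G0, 1, d);  sllx(d, 32, d);
       // while set64(0x123, d, tmp) collapses to a single or3(G0, 0x123, d).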
  1482 int MacroAssembler::insts_for_set64(jlong value) {
  1483   v9_dep();
  1485   int hi = (int) (value >> 32);
  1486   int lo = (int) (value & ~0);
  1487   int count = 0;
  1489   // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  1490   if (Assembler::is_simm13(lo) && value == lo) {
  1491     count++;
  1492   } else if (hi == 0) {
  1493     count++;
  1494     if (low10(lo) != 0)
  1495       count++;
  1496   }
  1497   else if (hi == -1) {
  1498     count += 2;
  1499   }
  1500   else if (lo == 0) {
  1501     if (Assembler::is_simm13(hi)) {
  1502       count++;
  1503     } else {
  1504       count++;
  1505       if (low10(hi) != 0)
  1506         count++;
  1507     }
  1508     count++;
  1509   }
  1510   else {
  1511     count += 2;
  1512     if (low10(hi) != 0)
  1513       count++;
  1514     if (low10(lo) != 0)
  1515       count++;
  1516     count += 2;
  1517   }
  1518   return count;
  1519 }
  1521 // compute size in bytes of sparc frame, given
  1522 // number of extraWords
  1523 int MacroAssembler::total_frame_size_in_bytes(int extraWords) {
  1525   int nWords = frame::memory_parameter_word_sp_offset;
  1527   nWords += extraWords;
  1529   if (nWords & 1) ++nWords; // round up to double-word
  1531   return nWords * BytesPerWord;
  1532 }
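       // Worked example (purely illustrative; the real minimum comes from
       // frame::memory_parameter_word_sp_offset): with a minimum of, say, 23 words
       // and extraWords == 2, nWords is 25, rounded up to the even 26, giving
       // 26 * BytesPerWord bytes (208 under LP64, 104 on 32-bit SPARC).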
  1535 // save_frame: given number of "extra" words in frame,
  1536 // issue approp. save instruction (p 200, v8 manual)
  1538 void MacroAssembler::save_frame(int extraWords) {
  1539   int delta = -total_frame_size_in_bytes(extraWords);
  1540   if (is_simm13(delta)) {
  1541     save(SP, delta, SP);
  1542   } else {
  1543     set(delta, G3_scratch);
  1544     save(SP, G3_scratch, SP);
  1545   }
  1546 }
  1549 void MacroAssembler::save_frame_c1(int size_in_bytes) {
  1550   if (is_simm13(-size_in_bytes)) {
  1551     save(SP, -size_in_bytes, SP);
  1552   } else {
  1553     set(-size_in_bytes, G3_scratch);
  1554     save(SP, G3_scratch, SP);
  1555   }
  1556 }
  1559 void MacroAssembler::save_frame_and_mov(int extraWords,
  1560                                         Register s1, Register d1,
  1561                                         Register s2, Register d2) {
  1562   assert_not_delayed();
  1564   // The trick here is to use precisely the same memory word
  1565   // that trap handlers also use to save the register.
  1566   // This word cannot be used for any other purpose, but
  1567   // it works fine to save the register's value, whether or not
  1568   // an interrupt flushes register windows at any given moment!
  1569   Address s1_addr;
  1570   if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
  1571     s1_addr = s1->address_in_saved_window();
  1572     st_ptr(s1, s1_addr);
  1573   }
  1575   Address s2_addr;
  1576   if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
  1577     s2_addr = s2->address_in_saved_window();
  1578     st_ptr(s2, s2_addr);
  1579   }
  1581   save_frame(extraWords);
  1583   if (s1_addr.base() == SP) {
  1584     ld_ptr(s1_addr.after_save(), d1);
  1585   } else if (s1->is_valid()) {
  1586     mov(s1->after_save(), d1);
  1587   }
  1589   if (s2_addr.base() == SP) {
  1590     ld_ptr(s2_addr.after_save(), d2);
  1591   } else if (s2->is_valid()) {
  1592     mov(s2->after_save(), d2);
  1593   }
  1594 }
  1597 AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
  1598   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  1599   int oop_index = oop_recorder()->allocate_index(obj);
  1600   return AddressLiteral(obj, oop_Relocation::spec(oop_index));
  1601 }
  1604 AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  1605   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  1606   int oop_index = oop_recorder()->find_index(obj);
  1607   return AddressLiteral(obj, oop_Relocation::spec(oop_index));
  1608 }
  1610 void  MacroAssembler::set_narrow_oop(jobject obj, Register d) {
  1611   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  1612   int oop_index = oop_recorder()->find_index(obj);
  1613   RelocationHolder rspec = oop_Relocation::spec(oop_index);
  1615   assert_not_delayed();
  1616   // Relocation with special format (see relocInfo_sparc.hpp).
  1617   relocate(rspec, 1);
  1618   // Assembler::sethi(0x3fffff, d);
  1619   emit_long( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff) );
  1620   // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  1621   add(d, 0x3ff, d);
  1622 }
  1626 void MacroAssembler::align(int modulus) {
  1627   while (offset() % modulus != 0) nop();
  1628 }
  1631 void MacroAssembler::safepoint() {
  1632   relocate(breakpoint_Relocation::spec(breakpoint_Relocation::safepoint));
  1633 }
  1636 void RegistersForDebugging::print(outputStream* s) {
  1637   int j;
  1638   for ( j = 0;  j < 8;  ++j )
  1639     if ( j != 6 ) s->print_cr("i%d = 0x%.16lx", j, i[j]);
  1640     else          s->print_cr( "fp = 0x%.16lx",    i[j]);
  1641   s->cr();
  1643   for ( j = 0;  j < 8;  ++j )
  1644     s->print_cr("l%d = 0x%.16lx", j, l[j]);
  1645   s->cr();
  1647   for ( j = 0;  j < 8;  ++j )
  1648     if ( j != 6 ) s->print_cr("o%d = 0x%.16lx", j, o[j]);
  1649     else          s->print_cr( "sp = 0x%.16lx",    o[j]);
  1650   s->cr();
  1652   for ( j = 0;  j < 8;  ++j )
  1653     s->print_cr("g%d = 0x%.16lx", j, g[j]);
  1654   s->cr();
  1656   // print out floats with compression
  1657   for (j = 0; j < 32; ) {
  1658     jfloat val = f[j];
  1659     int last = j;
  1660     for ( ;  last+1 < 32;  ++last ) {
  1661       char b1[1024], b2[1024];
  1662       sprintf(b1, "%f", val);
  1663       sprintf(b2, "%f", f[last+1]);
  1664       if (strcmp(b1, b2))
  1665         break;
  1667     s->print("f%d", j);
  1668     if ( j != last )  s->print(" - f%d", last);
  1669     s->print(" = %f", val);
  1670     s->fill_to(25);
   1671     s->print_cr(" (0x%x)", *(int*)&val);
  1672     j = last + 1;
  1674   s->cr();
  1676   // and doubles (evens only)
  1677   for (j = 0; j < 32; ) {
  1678     jdouble val = d[j];
  1679     int last = j;
  1680     for ( ;  last+1 < 32;  ++last ) {
  1681       char b1[1024], b2[1024];
  1682       sprintf(b1, "%f", val);
  1683       sprintf(b2, "%f", d[last+1]);
  1684       if (strcmp(b1, b2))
  1685         break;
  1687     s->print("d%d", 2 * j);
   1688     if ( j != last )  s->print(" - d%d", 2 * last);
  1689     s->print(" = %f", val);
  1690     s->fill_to(30);
  1691     s->print("(0x%x)", *(int*)&val);
  1692     s->fill_to(42);
  1693     s->print_cr("(0x%x)", *(1 + (int*)&val));
  1694     j = last + 1;
  1696   s->cr();
  1699 void RegistersForDebugging::save_registers(MacroAssembler* a) {
  1700   a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  1701   a->flush_windows();
  1702   int i;
  1703   for (i = 0; i < 8; ++i) {
  1704     a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, i_offset(i));
  1705     a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, l_offset(i));
  1706     a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
  1707     a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
  1709   for (i = 0;  i < 32; ++i) {
  1710     a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  1712   for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
  1713     a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
  1717 void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
  1718   for (int i = 1; i < 8;  ++i) {
  1719     a->ld_ptr(r, g_offset(i), as_gRegister(i));
  1721   for (int j = 0; j < 32; ++j) {
  1722     a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
  1724   for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) {
  1725     a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
  1730 // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
  1731 void MacroAssembler::push_fTOS() {
  1732   // %%%%%% need to implement this
  1735 // pops double TOS element from CPU stack and pushes on FPU stack
  1736 void MacroAssembler::pop_fTOS() {
  1737   // %%%%%% need to implement this
  1740 void MacroAssembler::empty_FPU_stack() {
  1741   // %%%%%% need to implement this
  1744 void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) {
  1745   // plausibility check for oops
  1746   if (!VerifyOops) return;
  1748   if (reg == G0)  return;       // always NULL, which is always an oop
  1750   BLOCK_COMMENT("verify_oop {");
  1751   char buffer[64];
  1752 #ifdef COMPILER1
  1753   if (CommentedAssembly) {
  1754     snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
  1755     block_comment(buffer);
  1757 #endif
  1759   int len = strlen(file) + strlen(msg) + 1 + 4;
  1760   sprintf(buffer, "%d", line);
  1761   len += strlen(buffer);
  1762   sprintf(buffer, " at offset %d ", offset());
  1763   len += strlen(buffer);
  1764   char * real_msg = new char[len];
  1765   sprintf(real_msg, "%s%s(%s:%d)", msg, buffer, file, line);
  1767   // Call indirectly to solve generation ordering problem
  1768   AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());
  1770   // Make some space on stack above the current register window.
  1771   // Enough to hold 8 64-bit registers.
  1772   add(SP,-8*8,SP);
  1774   // Save some 64-bit registers; a normal 'save' chops the heads off
  1775   // of 64-bit longs in the 32-bit build.
  1776   stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
  1777   stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
  1778   mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed
  1779   stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
  1781   set((intptr_t)real_msg, O1);
  1782   // Load address to call to into O7
  1783   load_ptr_contents(a, O7);
  1784   // Register call to verify_oop_subroutine
  1785   callr(O7, G0);
  1786   delayed()->nop();
  1787   // recover frame size
  1788   add(SP, 8*8,SP);
  1789   BLOCK_COMMENT("} verify_oop");
  1792 void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) {
  1793   // plausibility check for oops
  1794   if (!VerifyOops) return;
  1796   char buffer[64];
  1797   sprintf(buffer, "%d", line);
  1798   int len = strlen(file) + strlen(msg) + 1 + 4 + strlen(buffer);
  1799   sprintf(buffer, " at SP+%d ", addr.disp());
  1800   len += strlen(buffer);
  1801   char * real_msg = new char[len];
  1802   sprintf(real_msg, "%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
  1804   // Call indirectly to solve generation ordering problem
  1805   AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());
  1807   // Make some space on stack above the current register window.
  1808   // Enough to hold 8 64-bit registers.
  1809   add(SP,-8*8,SP);
  1811   // Save some 64-bit registers; a normal 'save' chops the heads off
  1812   // of 64-bit longs in the 32-bit build.
  1813   stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
  1814   stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
  1815   ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
  1816   stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
  1818   set((intptr_t)real_msg, O1);
  1819   // Load address to call to into O7
  1820   load_ptr_contents(a, O7);
  1821   // Register call to verify_oop_subroutine
  1822   callr(O7, G0);
  1823   delayed()->nop();
  1824   // recover frame size
  1825   add(SP, 8*8,SP);
  1828 // side-door communication with signalHandler in os_solaris.cpp
  1829 address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };
  1831 // This macro is expanded just once; it creates shared code.  Contract:
  1832 // receives an oop in O0.  Must restore O0 & O7 from TLS.  Must not smash ANY
  1833 // registers, including flags.  May not use a register 'save', as this blows
  1834 // the high bits of the O-regs if they contain Long values.  Acts as a 'leaf'
  1835 // call.
  1836 void MacroAssembler::verify_oop_subroutine() {
  1837   assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" );
  1839   // Leaf call; no frame.
  1840   Label succeed, fail, null_or_fail;
  1842   // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
  1843   // O0 is now the oop to be checked.  O7 is the return address.
  1844   Register O0_obj = O0;
  1846   // Save some more registers for temps.
  1847   stx(O2,SP,frame::register_save_words*wordSize+STACK_BIAS+2*8);
  1848   stx(O3,SP,frame::register_save_words*wordSize+STACK_BIAS+3*8);
  1849   stx(O4,SP,frame::register_save_words*wordSize+STACK_BIAS+4*8);
  1850   stx(O5,SP,frame::register_save_words*wordSize+STACK_BIAS+5*8);
  1852   // Save flags
  1853   Register O5_save_flags = O5;
  1854   rdccr( O5_save_flags );
  1856   { // count number of verifies
  1857     Register O2_adr   = O2;
  1858     Register O3_accum = O3;
  1859     inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
  1862   Register O2_mask = O2;
  1863   Register O3_bits = O3;
  1864   Register O4_temp = O4;
  1866   // mark lower end of faulting range
  1867   assert(_verify_oop_implicit_branch[0] == NULL, "set once");
  1868   _verify_oop_implicit_branch[0] = pc();
  1870   // We can't check the mark oop because it could be in the process of
  1871   // locking or unlocking while this is running.
  1872   set(Universe::verify_oop_mask (), O2_mask);
  1873   set(Universe::verify_oop_bits (), O3_bits);
  1875   // assert((obj & oop_mask) == oop_bits);
  1876   and3(O0_obj, O2_mask, O4_temp);
  1877   cmp(O4_temp, O3_bits);
  1878   brx(notEqual, false, pn, null_or_fail);
  1879   delayed()->nop();
  1881   if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
  1882     // the null_or_fail case is useless; must test for null separately
  1883     br_null(O0_obj, false, pn, succeed);
  1884     delayed()->nop();
  1887   // Check the klassOop of this object for being in the right area of memory.
   1888   // Cannot do the load in the delay slot above in case O0 is null
  1889   load_klass(O0_obj, O0_obj);
  1890   // assert((klass & klass_mask) == klass_bits);
  1891   if( Universe::verify_klass_mask() != Universe::verify_oop_mask() )
  1892     set(Universe::verify_klass_mask(), O2_mask);
  1893   if( Universe::verify_klass_bits() != Universe::verify_oop_bits() )
  1894     set(Universe::verify_klass_bits(), O3_bits);
  1895   and3(O0_obj, O2_mask, O4_temp);
  1896   cmp(O4_temp, O3_bits);
  1897   brx(notEqual, false, pn, fail);
  1898   delayed()->nop();
  1899   // Check the klass's klass
  1900   load_klass(O0_obj, O0_obj);
  1901   and3(O0_obj, O2_mask, O4_temp);
  1902   cmp(O4_temp, O3_bits);
  1903   brx(notEqual, false, pn, fail);
  1904   delayed()->wrccr( O5_save_flags ); // Restore CCR's
  1906   // mark upper end of faulting range
  1907   _verify_oop_implicit_branch[1] = pc();
  1909   //-----------------------
  1910   // all tests pass
  1911   bind(succeed);
  1913   // Restore prior 64-bit registers
  1914   ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+0*8,O0);
  1915   ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+1*8,O1);
  1916   ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+2*8,O2);
  1917   ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+3*8,O3);
  1918   ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+4*8,O4);
  1919   ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+5*8,O5);
  1921   retl();                       // Leaf return; restore prior O7 in delay slot
  1922   delayed()->ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+7*8,O7);
  1924   //-----------------------
  1925   bind(null_or_fail);           // nulls are less common but OK
  1926   br_null(O0_obj, false, pt, succeed);
  1927   delayed()->wrccr( O5_save_flags ); // Restore CCR's
  1929   //-----------------------
  1930   // report failure:
  1931   bind(fail);
  1932   _verify_oop_implicit_branch[2] = pc();
  1934   wrccr( O5_save_flags ); // Restore CCR's
  1936   save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
  1938   // stop_subroutine expects message pointer in I1.
  1939   mov(I1, O1);
  1941   // Restore prior 64-bit registers
  1942   ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+0*8,I0);
  1943   ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+1*8,I1);
  1944   ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+2*8,I2);
  1945   ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+3*8,I3);
  1946   ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+4*8,I4);
  1947   ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+5*8,I5);
  1949   // factor long stop-sequence into subroutine to save space
  1950   assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
  1952   // call indirectly to solve generation ordering problem
  1953   AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
  1954   load_ptr_contents(al, O5);
  1955   jmpl(O5, 0, O7);
  1956   delayed()->nop();
  1960 void MacroAssembler::stop(const char* msg) {
  1961   // save frame first to get O7 for return address
   1962   // add one word to the size in case the struct is an odd number of words long
  1963   // It must be doubleword-aligned for storing doubles into it.
  1965     save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
  1967     // stop_subroutine expects message pointer in I1.
  1968     set((intptr_t)msg, O1);
  1970     // factor long stop-sequence into subroutine to save space
  1971     assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
  1973     // call indirectly to solve generation ordering problem
  1974     AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
  1975     load_ptr_contents(a, O5);
  1976     jmpl(O5, 0, O7);
  1977     delayed()->nop();
  1979     breakpoint_trap();   // make stop actually stop rather than writing
  1980                          // unnoticeable results in the output files.
  1982     // restore(); done in callee to save space!
  1986 void MacroAssembler::warn(const char* msg) {
  1987   save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
  1988   RegistersForDebugging::save_registers(this);
  1989   mov(O0, L0);
  1990   set((intptr_t)msg, O0);
  1991   call( CAST_FROM_FN_PTR(address, warning) );
  1992   delayed()->nop();
  1993 //  ret();
  1994 //  delayed()->restore();
  1995   RegistersForDebugging::restore_registers(this, L0);
  1996   restore();
  2000 void MacroAssembler::untested(const char* what) {
  2001   // We must be able to turn interactive prompting off
  2002   // in order to run automated test scripts on the VM
  2003   // Use the flag ShowMessageBoxOnError
  2005   char* b = new char[1024];
  2006   sprintf(b, "untested: %s", what);
  2008   if ( ShowMessageBoxOnError )   stop(b);
  2009   else                           warn(b);
  2013 void MacroAssembler::stop_subroutine() {
  2014   RegistersForDebugging::save_registers(this);
  2016   // for the sake of the debugger, stick a PC on the current frame
  2017   // (this assumes that the caller has performed an extra "save")
  2018   mov(I7, L7);
  2019   add(O7, -7 * BytesPerInt, I7);
  2021   save_frame(); // one more save to free up another O7 register
  2022   mov(I0, O1); // addr of reg save area
  2024   // We expect pointer to message in I1. Caller must set it up in O1
  2025   mov(I1, O0); // get msg
  2026   call (CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
  2027   delayed()->nop();
  2029   restore();
  2031   RegistersForDebugging::restore_registers(this, O0);
  2033   save_frame(0);
  2034   call(CAST_FROM_FN_PTR(address,breakpoint));
  2035   delayed()->nop();
  2036   restore();
  2038   mov(L7, I7);
  2039   retl();
  2040   delayed()->restore(); // see stop above
  2044 void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
  2045   if ( ShowMessageBoxOnError ) {
  2046       JavaThreadState saved_state = JavaThread::current()->thread_state();
  2047       JavaThread::current()->set_thread_state(_thread_in_vm);
   2049         // In order to make locks work, we need to fake an in_VM state
  2050         ttyLocker ttyl;
  2051         ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
  2052         if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
  2053           ::tty->print_cr("Interpreter::bytecode_counter = %d", BytecodeCounter::counter_value());
  2055         if (os::message_box(msg, "Execution stopped, print registers?"))
  2056           regs->print(::tty);
  2058       ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
  2060   else
  2061      ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  2062   assert(false, err_msg("DEBUG MESSAGE: %s", msg));
  2066 #ifndef PRODUCT
  2067 void MacroAssembler::test() {
  2068   ResourceMark rm;
  2070   CodeBuffer cb("test", 10000, 10000);
  2071   MacroAssembler* a = new MacroAssembler(&cb);
  2072   VM_Version::allow_all();
  2073   a->test_v9();
  2074   a->test_v8_onlys();
  2075   VM_Version::revert();
  2077   StubRoutines::Sparc::test_stop_entry()();
  2079 #endif
  2082 void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
  2083   subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
  2084   Label no_extras;
  2085   br( negative, true, pt, no_extras ); // if neg, clear reg
   2086   delayed()->set(0, Rresult);          // annulled, so only executed if taken
  2087   bind( no_extras );
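          // Example (sketch, assuming the usual six SPARC register parameters):
          //   10 parameter words -> Rresult = 4 memory parameter words;
          //    3 parameter words -> branch taken, Rresult = 0.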
  2091 void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
  2092 #ifdef _LP64
  2093   add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
  2094 #else
  2095   add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
  2096 #endif
  2097   bclr(1, Rresult);
  2098   sll(Rresult, LogBytesPerWord, Rresult);  // Rresult has total frame bytes
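          // Worked example (illustrative; assumes memory_parameter_word_sp_offset
          // is 23, as on 32-bit SPARC): with Rextra_words = 3, we get 3 + 23 + 1 = 27,
          // bclr(1) gives 26 words (a doubleword-aligned count), and
          // 26 << LogBytesPerWord = 104 total frame bytes.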
  2102 void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
  2103   calc_frame_size(Rextra_words, Rresult);
  2104   neg(Rresult);
  2105   save(SP, Rresult, SP);
  2109 // ---------------------------------------------------------
  2110 Assembler::RCondition cond2rcond(Assembler::Condition c) {
  2111   switch (c) {
  2112     /*case zero: */
  2113     case Assembler::equal:        return Assembler::rc_z;
  2114     case Assembler::lessEqual:    return Assembler::rc_lez;
  2115     case Assembler::less:         return Assembler::rc_lz;
  2116     /*case notZero:*/
  2117     case Assembler::notEqual:     return Assembler::rc_nz;
  2118     case Assembler::greater:      return Assembler::rc_gz;
  2119     case Assembler::greaterEqual: return Assembler::rc_gez;
  2121   ShouldNotReachHere();
  2122   return Assembler::rc_z;
  2125 // compares register with zero and branches.  NOT FOR USE WITH 64-bit POINTERS
  2126 void MacroAssembler::br_zero( Condition c, bool a, Predict p, Register s1, Label& L) {
  2127   tst(s1);
  2128   br (c, a, p, L);
  2132 // Compares a pointer register with zero and branches on null.
  2133 // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
  2134 void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) {
  2135   assert_not_delayed();
  2136 #ifdef _LP64
  2137   bpr( rc_z, a, p, s1, L );
  2138 #else
  2139   tst(s1);
  2140   br ( zero, a, p, L );
  2141 #endif
  2144 void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) {
  2145   assert_not_delayed();
  2146 #ifdef _LP64
  2147   bpr( rc_nz, a, p, s1, L );
  2148 #else
  2149   tst(s1);
  2150   br ( notZero, a, p, L );
  2151 #endif
  2154 void MacroAssembler::br_on_reg_cond( RCondition rc, bool a, Predict p,
  2155                                      Register s1, address d,
  2156                                      relocInfo::relocType rt ) {
  2157   if (VM_Version::v9_instructions_work()) {
  2158     bpr(rc, a, p, s1, d, rt);
  2159   } else {
  2160     tst(s1);
  2161     br(reg_cond_to_cc_cond(rc), a, p, d, rt);
  2165 void MacroAssembler::br_on_reg_cond( RCondition rc, bool a, Predict p,
  2166                                      Register s1, Label& L ) {
  2167   if (VM_Version::v9_instructions_work()) {
  2168     bpr(rc, a, p, s1, L);
  2169   } else {
  2170     tst(s1);
  2171     br(reg_cond_to_cc_cond(rc), a, p, L);
  2176 // instruction sequences factored across compiler & interpreter
  2179 void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low,
  2180                            Register Rb_hi, Register Rb_low,
  2181                            Register Rresult) {
  2183   Label check_low_parts, done;
  2185   cmp(Ra_hi, Rb_hi );  // compare hi parts
  2186   br(equal, true, pt, check_low_parts);
  2187   delayed()->cmp(Ra_low, Rb_low); // test low parts
  2189   // And, with an unsigned comparison, it does not matter if the numbers
  2190   // are negative or not.
  2191   // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
  2192   // The second one is bigger (unsignedly).
  2194   // Other notes:  The first move in each triplet can be unconditional
  2195   // (and therefore probably prefetchable).
  2196   // And the equals case for the high part does not need testing,
  2197   // since that triplet is reached only after finding the high halves differ.
  2199   if (VM_Version::v9_instructions_work()) {
  2201                                     mov  (                     -1, Rresult);
  2202     ba( false, done );  delayed()-> movcc(greater, false, icc,  1, Rresult);
  2204   else {
  2205     br(less,    true, pt, done); delayed()-> set(-1, Rresult);
  2206     br(greater, true, pt, done); delayed()-> set( 1, Rresult);
  2209   bind( check_low_parts );
  2211   if (VM_Version::v9_instructions_work()) {
  2212     mov(                               -1, Rresult);
  2213     movcc(equal,           false, icc,  0, Rresult);
  2214     movcc(greaterUnsigned, false, icc,  1, Rresult);
  2216   else {
  2217                                                     set(-1, Rresult);
  2218     br(equal,           true, pt, done); delayed()->set( 0, Rresult);
  2219     br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult);
  2221   bind( done );
  2224 void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
  2225   subcc(  G0, Rlow, Rlow );
  2226   subc(   G0, Rhi,  Rhi  );
  2229 void MacroAssembler::lshl( Register Rin_high,  Register Rin_low,
  2230                            Register Rcount,
  2231                            Register Rout_high, Register Rout_low,
  2232                            Register Rtemp ) {
  2235   Register Ralt_count = Rtemp;
  2236   Register Rxfer_bits = Rtemp;
  2238   assert( Ralt_count != Rin_high
  2239       &&  Ralt_count != Rin_low
  2240       &&  Ralt_count != Rcount
  2241       &&  Rxfer_bits != Rin_low
  2242       &&  Rxfer_bits != Rin_high
  2243       &&  Rxfer_bits != Rcount
  2244       &&  Rxfer_bits != Rout_low
  2245       &&  Rout_low   != Rin_high,
  2246         "register alias checks");
  2248   Label big_shift, done;
  2250   // This code can be optimized to use the 64 bit shifts in V9.
  2251   // Here we use the 32 bit shifts.
  2253   and3( Rcount,         0x3f,           Rcount);     // take least significant 6 bits
  2254   subcc(Rcount,         31,             Ralt_count);
  2255   br(greater, true, pn, big_shift);
  2256   delayed()->
  2257   dec(Ralt_count);
  2259   // shift < 32 bits, Ralt_count = Rcount-31
  2261   // We get the transfer bits by shifting right by 32-count the low
  2262   // register. This is done by shifting right by 31-count and then by one
  2263   // more to take care of the special (rare) case where count is zero
  2264   // (shifting by 32 would not work).
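          // Worked example (added for clarity): for count = 5, Ralt_count holds
          // 5 - 31 = -26 and is negated to 26 below; srl by 26 and then by 1 more
          // yields Rin_low >> 27, i.e. the top 5 bits of the low word, which are
          // or'ed into Rout_high after the two 32-bit shifts.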
  2266   neg(  Ralt_count                                 );
  2268   // The order of the next two instructions is critical in the case where
  2269   // Rin and Rout are the same and should not be reversed.
  2271   srl(  Rin_low,        Ralt_count,     Rxfer_bits ); // shift right by 31-count
  2272   if (Rcount != Rout_low) {
  2273     sll(        Rin_low,        Rcount,         Rout_low   ); // low half
  2275   sll(  Rin_high,       Rcount,         Rout_high  );
  2276   if (Rcount == Rout_low) {
  2277     sll(        Rin_low,        Rcount,         Rout_low   ); // low half
  2279   srl(  Rxfer_bits,     1,              Rxfer_bits ); // shift right by one more
  2280   ba (false, done);
  2281   delayed()->
  2282   or3(  Rout_high,      Rxfer_bits,     Rout_high);   // new hi value: or in shifted old hi part and xfer from low
  2284   // shift >= 32 bits, Ralt_count = Rcount-32
  2285   bind(big_shift);
  2286   sll(  Rin_low,        Ralt_count,     Rout_high  );
  2287   clr(  Rout_low                                   );
  2289   bind(done);
  2293 void MacroAssembler::lshr( Register Rin_high,  Register Rin_low,
  2294                            Register Rcount,
  2295                            Register Rout_high, Register Rout_low,
  2296                            Register Rtemp ) {
  2298   Register Ralt_count = Rtemp;
  2299   Register Rxfer_bits = Rtemp;
  2301   assert( Ralt_count != Rin_high
  2302       &&  Ralt_count != Rin_low
  2303       &&  Ralt_count != Rcount
  2304       &&  Rxfer_bits != Rin_low
  2305       &&  Rxfer_bits != Rin_high
  2306       &&  Rxfer_bits != Rcount
  2307       &&  Rxfer_bits != Rout_high
  2308       &&  Rout_high  != Rin_low,
  2309         "register alias checks");
  2311   Label big_shift, done;
  2313   // This code can be optimized to use the 64 bit shifts in V9.
  2314   // Here we use the 32 bit shifts.
  2316   and3( Rcount,         0x3f,           Rcount);     // take least significant 6 bits
  2317   subcc(Rcount,         31,             Ralt_count);
  2318   br(greater, true, pn, big_shift);
  2319   delayed()->dec(Ralt_count);
  2321   // shift < 32 bits, Ralt_count = Rcount-31
  2323   // We get the transfer bits by shifting left by 32-count the high
  2324   // register. This is done by shifting left by 31-count and then by one
  2325   // more to take care of the special (rare) case where count is zero
  2326   // (shifting by 32 would not work).
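          // Worked example (added for clarity): for count = 5 the transfer bits are
          // (Rin_high << 26) << 1 = Rin_high << 27, i.e. the low 5 bits of the high
          // word, which become the top 5 bits of Rout_low.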
  2328   neg(  Ralt_count                                  );
  2329   if (Rcount != Rout_low) {
  2330     srl(        Rin_low,        Rcount,         Rout_low    );
  2333   // The order of the next two instructions is critical in the case where
  2334   // Rin and Rout are the same and should not be reversed.
  2336   sll(  Rin_high,       Ralt_count,     Rxfer_bits  ); // shift left by 31-count
  2337   sra(  Rin_high,       Rcount,         Rout_high   ); // high half
  2338   sll(  Rxfer_bits,     1,              Rxfer_bits  ); // shift left by one more
  2339   if (Rcount == Rout_low) {
  2340     srl(        Rin_low,        Rcount,         Rout_low    );
  2342   ba (false, done);
  2343   delayed()->
  2344   or3(  Rout_low,       Rxfer_bits,     Rout_low    ); // new low value: or shifted old low part and xfer from high
  2346   // shift >= 32 bits, Ralt_count = Rcount-32
  2347   bind(big_shift);
  2349   sra(  Rin_high,       Ralt_count,     Rout_low    );
  2350   sra(  Rin_high,       31,             Rout_high   ); // sign into hi
  2352   bind( done );
  2357 void MacroAssembler::lushr( Register Rin_high,  Register Rin_low,
  2358                             Register Rcount,
  2359                             Register Rout_high, Register Rout_low,
  2360                             Register Rtemp ) {
  2362   Register Ralt_count = Rtemp;
  2363   Register Rxfer_bits = Rtemp;
  2365   assert( Ralt_count != Rin_high
  2366       &&  Ralt_count != Rin_low
  2367       &&  Ralt_count != Rcount
  2368       &&  Rxfer_bits != Rin_low
  2369       &&  Rxfer_bits != Rin_high
  2370       &&  Rxfer_bits != Rcount
  2371       &&  Rxfer_bits != Rout_high
  2372       &&  Rout_high  != Rin_low,
  2373         "register alias checks");
  2375   Label big_shift, done;
  2377   // This code can be optimized to use the 64 bit shifts in V9.
  2378   // Here we use the 32 bit shifts.
  2380   and3( Rcount,         0x3f,           Rcount);     // take least significant 6 bits
  2381   subcc(Rcount,         31,             Ralt_count);
  2382   br(greater, true, pn, big_shift);
  2383   delayed()->dec(Ralt_count);
  2385   // shift < 32 bits, Ralt_count = Rcount-31
  2387   // We get the transfer bits by shifting left by 32-count the high
  2388   // register. This is done by shifting left by 31-count and then by one
  2389   // more to take care of the special (rare) case where count is zero
  2390   // (shifting by 32 would not work).
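          // Note: lushr mirrors lshr above; the only difference is that the high
          // half is filled with zeros (srl / clr) rather than sign bits (sra).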
  2392   neg(  Ralt_count                                  );
  2393   if (Rcount != Rout_low) {
  2394     srl(        Rin_low,        Rcount,         Rout_low    );
  2397   // The order of the next two instructions is critical in the case where
  2398   // Rin and Rout are the same and should not be reversed.
  2400   sll(  Rin_high,       Ralt_count,     Rxfer_bits  ); // shift left by 31-count
  2401   srl(  Rin_high,       Rcount,         Rout_high   ); // high half
  2402   sll(  Rxfer_bits,     1,              Rxfer_bits  ); // shift left by one more
  2403   if (Rcount == Rout_low) {
  2404     srl(        Rin_low,        Rcount,         Rout_low    );
  2406   ba (false, done);
  2407   delayed()->
  2408   or3(  Rout_low,       Rxfer_bits,     Rout_low    ); // new low value: or shifted old low part and xfer from high
  2410   // shift >= 32 bits, Ralt_count = Rcount-32
  2411   bind(big_shift);
  2413   srl(  Rin_high,       Ralt_count,     Rout_low    );
  2414   clr(  Rout_high                                   );
  2416   bind( done );
  2419 #ifdef _LP64
  2420 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
  2421   cmp(Ra, Rb);
  2422   mov(                       -1, Rresult);
  2423   movcc(equal,   false, xcc,  0, Rresult);
  2424   movcc(greater, false, xcc,  1, Rresult);
  2426 #endif
  2429 void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) {
  2430   switch (size_in_bytes) {
  2431   case  8:  ld_long(src, dst); break;
  2432   case  4:  ld(     src, dst); break;
  2433   case  2:  is_signed ? ldsh(src, dst) : lduh(src, dst); break;
  2434   case  1:  is_signed ? ldsb(src, dst) : ldub(src, dst); break;
  2435   default:  ShouldNotReachHere();
  2439 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
  2440   switch (size_in_bytes) {
  2441   case  8:  st_long(src, dst); break;
  2442   case  4:  st(     src, dst); break;
  2443   case  2:  sth(    src, dst); break;
  2444   case  1:  stb(    src, dst); break;
  2445   default:  ShouldNotReachHere();
  2450 void MacroAssembler::float_cmp( bool is_float, int unordered_result,
  2451                                 FloatRegister Fa, FloatRegister Fb,
  2452                                 Register Rresult) {
  2454   fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb);
  2456   Condition lt = unordered_result == -1 ? f_unorderedOrLess    : f_less;
  2457   Condition eq =                          f_equal;
  2458   Condition gt = unordered_result ==  1 ? f_unorderedOrGreater : f_greater;
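          // Result convention: Rresult is -1, 0 or +1 for less, equal, greater;
          // an unordered compare collapses onto -1 or +1 as chosen by unordered_result.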
  2460   if (VM_Version::v9_instructions_work()) {
  2462     mov(                   -1, Rresult );
  2463     movcc( eq, true, fcc0,  0, Rresult );
  2464     movcc( gt, true, fcc0,  1, Rresult );
  2466   } else {
  2467     Label done;
  2469                                          set( -1, Rresult );
  2470     //fb(lt, true, pn, done); delayed()->set( -1, Rresult );
  2471     fb( eq, true, pn, done);  delayed()->set(  0, Rresult );
  2472     fb( gt, true, pn, done);  delayed()->set(  1, Rresult );
  2474     bind (done);
  2479 void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
  2481   if (VM_Version::v9_instructions_work()) {
  2482     Assembler::fneg(w, s, d);
  2483   } else {
  2484     if (w == FloatRegisterImpl::S) {
  2485       Assembler::fneg(w, s, d);
  2486     } else if (w == FloatRegisterImpl::D) {
  2487       // number() does a sanity check on the alignment.
  2488       assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
  2489         ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
  2491       Assembler::fneg(FloatRegisterImpl::S, s, d);
  2492       Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
  2493     } else {
  2494       assert(w == FloatRegisterImpl::Q, "Invalid float register width");
  2496       // number() does a sanity check on the alignment.
  2497       assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
  2498         ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
  2500       Assembler::fneg(FloatRegisterImpl::S, s, d);
  2501       Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
  2502       Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
  2503       Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
  2508 void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
  2510   if (VM_Version::v9_instructions_work()) {
  2511     Assembler::fmov(w, s, d);
  2512   } else {
  2513     if (w == FloatRegisterImpl::S) {
  2514       Assembler::fmov(w, s, d);
  2515     } else if (w == FloatRegisterImpl::D) {
  2516       // number() does a sanity check on the alignment.
  2517       assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
  2518         ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
  2520       Assembler::fmov(FloatRegisterImpl::S, s, d);
  2521       Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
  2522     } else {
  2523       assert(w == FloatRegisterImpl::Q, "Invalid float register width");
  2525       // number() does a sanity check on the alignment.
  2526       assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
  2527         ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
  2529       Assembler::fmov(FloatRegisterImpl::S, s, d);
  2530       Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
  2531       Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
  2532       Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
  2537 void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
  2539   if (VM_Version::v9_instructions_work()) {
  2540     Assembler::fabs(w, s, d);
  2541   } else {
  2542     if (w == FloatRegisterImpl::S) {
  2543       Assembler::fabs(w, s, d);
  2544     } else if (w == FloatRegisterImpl::D) {
  2545       // number() does a sanity check on the alignment.
  2546       assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
  2547         ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
  2549       Assembler::fabs(FloatRegisterImpl::S, s, d);
  2550       Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
  2551     } else {
  2552       assert(w == FloatRegisterImpl::Q, "Invalid float register width");
  2554       // number() does a sanity check on the alignment.
  2555       assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
  2556        ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
  2558       Assembler::fabs(FloatRegisterImpl::S, s, d);
  2559       Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
  2560       Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
  2561       Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
  2566 void MacroAssembler::save_all_globals_into_locals() {
  2567   mov(G1,L1);
  2568   mov(G2,L2);
  2569   mov(G3,L3);
  2570   mov(G4,L4);
  2571   mov(G5,L5);
  2572   mov(G6,L6);
  2573   mov(G7,L7);
  2576 void MacroAssembler::restore_globals_from_locals() {
  2577   mov(L1,G1);
  2578   mov(L2,G2);
  2579   mov(L3,G3);
  2580   mov(L4,G4);
  2581   mov(L5,G5);
  2582   mov(L6,G6);
  2583   mov(L7,G7);
  2586 // Use for 64 bit operation.
  2587 void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
  2589   // store ptr_reg as the new top value
  2590 #ifdef _LP64
  2591   casx(top_ptr_reg, top_reg, ptr_reg);
  2592 #else
  2593   cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm);
  2594 #endif // _LP64
  2597 // [RGV] This routine does not handle 64 bit operations.
  2598 //       use casx_under_lock() or casx directly!!!
  2599 void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
  2601   // store ptr_reg as the new top value
  2602   if (VM_Version::v9_instructions_work()) {
  2603     cas(top_ptr_reg, top_reg, ptr_reg);
  2604   } else {
   2606     // If the register is neither an out nor a global, it is not visible
  2607     // after the save.  Allocate a register for it, save its
  2608     // value in the register save area (the save may not flush
  2609     // registers to the save area).
  2611     Register top_ptr_reg_after_save;
  2612     Register top_reg_after_save;
  2613     Register ptr_reg_after_save;
  2615     if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) {
  2616       top_ptr_reg_after_save = top_ptr_reg->after_save();
  2617     } else {
  2618       Address reg_save_addr = top_ptr_reg->address_in_saved_window();
  2619       top_ptr_reg_after_save = L0;
  2620       st(top_ptr_reg, reg_save_addr);
  2623     if (top_reg->is_out() || top_reg->is_global()) {
  2624       top_reg_after_save = top_reg->after_save();
  2625     } else {
  2626       Address reg_save_addr = top_reg->address_in_saved_window();
  2627       top_reg_after_save = L1;
  2628       st(top_reg, reg_save_addr);
  2631     if (ptr_reg->is_out() || ptr_reg->is_global()) {
  2632       ptr_reg_after_save = ptr_reg->after_save();
  2633     } else {
  2634       Address reg_save_addr = ptr_reg->address_in_saved_window();
  2635       ptr_reg_after_save = L2;
  2636       st(ptr_reg, reg_save_addr);
  2639     const Register& lock_reg = L3;
  2640     const Register& lock_ptr_reg = L4;
  2641     const Register& value_reg = L5;
  2642     const Register& yield_reg = L6;
  2643     const Register& yieldall_reg = L7;
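            // Sketch of the V8 locking protocol emitted below (summary comment only):
            //   1. swap() the word at lock_addr with 'locked'; the previous value
            //      comes back in lock_reg.
            //   2. If it was not 'unlocked', someone else holds the lock: retry,
            //      yielding every V8AtomicOperationUnderLockSpinCount iterations.
            //   3. With the lock held, compare *top_ptr with the expected top value;
            //      only on a match is ptr_reg stored as the new top (followed by a
            //      StoreStore membar); in either case the old top is handed back.
            //   4. Store the swapped-out 'unlocked' value back to release the lock.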
  2645     save_frame();
  2647     if (top_ptr_reg_after_save == L0) {
  2648       ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save);
  2651     if (top_reg_after_save == L1) {
  2652       ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save);
  2655     if (ptr_reg_after_save == L2) {
  2656       ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save);
   2659     Label retry_get_lock;
   2660     Label not_same;
   2661     Label dont_yield;
  2663     assert(lock_addr, "lock_address should be non null for v8");
  2664     set((intptr_t)lock_addr, lock_ptr_reg);
  2665     // Initialize yield counter
  2666     mov(G0,yield_reg);
  2667     mov(G0, yieldall_reg);
  2668     set(StubRoutines::Sparc::locked, lock_reg);
  2670     bind(retry_get_lock);
  2671     cmp(yield_reg, V8AtomicOperationUnderLockSpinCount);
  2672     br(Assembler::less, false, Assembler::pt, dont_yield);
  2673     delayed()->nop();
  2675     if(use_call_vm) {
   2676       Untested("Need to verify global reg consistency");
  2677       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg);
  2678     } else {
  2679       // Save the regs and make space for a C call
  2680       save(SP, -96, SP);
  2681       save_all_globals_into_locals();
  2682       call(CAST_FROM_FN_PTR(address,os::yield_all));
  2683       delayed()->mov(yieldall_reg, O0);
  2684       restore_globals_from_locals();
  2685       restore();
  2688     // reset the counter
  2689     mov(G0,yield_reg);
  2690     add(yieldall_reg, 1, yieldall_reg);
  2692     bind(dont_yield);
  2693     // try to get lock
  2694     swap(lock_ptr_reg, 0, lock_reg);
  2696     // did we get the lock?
  2697     cmp(lock_reg, StubRoutines::Sparc::unlocked);
  2698     br(Assembler::notEqual, true, Assembler::pn, retry_get_lock);
  2699     delayed()->add(yield_reg,1,yield_reg);
  2701     // yes, got lock.  do we have the same top?
  2702     ld(top_ptr_reg_after_save, 0, value_reg);
  2703     cmp(value_reg, top_reg_after_save);
  2704     br(Assembler::notEqual, false, Assembler::pn, not_same);
  2705     delayed()->nop();
  2707     // yes, same top.
  2708     st(ptr_reg_after_save, top_ptr_reg_after_save, 0);
  2709     membar(Assembler::StoreStore);
  2711     bind(not_same);
  2712     mov(value_reg, ptr_reg_after_save);
  2713     st(lock_reg, lock_ptr_reg, 0); // unlock
  2715     restore();
  2719 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
  2720                                                       Register tmp,
  2721                                                       int offset) {
  2722   intptr_t value = *delayed_value_addr;
  2723   if (value != 0)
  2724     return RegisterOrConstant(value + offset);
  2726   // load indirectly to solve generation ordering problem
  2727   AddressLiteral a(delayed_value_addr);
  2728   load_ptr_contents(a, tmp);
  2730 #ifdef ASSERT
  2731   tst(tmp);
  2732   breakpoint_trap(zero, xcc);
  2733 #endif
  2735   if (offset != 0)
  2736     add(tmp, offset, tmp);
  2738   return RegisterOrConstant(tmp);
  2742 RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
  2743   assert(d.register_or_noreg() != G0, "lost side effect");
  2744   if ((s2.is_constant() && s2.as_constant() == 0) ||
  2745       (s2.is_register() && s2.as_register() == G0)) {
  2746     // Do nothing, just move value.
  2747     if (s1.is_register()) {
  2748       if (d.is_constant())  d = temp;
  2749       mov(s1.as_register(), d.as_register());
  2750       return d;
  2751     } else {
  2752       return s1;
  2756   if (s1.is_register()) {
  2757     assert_different_registers(s1.as_register(), temp);
  2758     if (d.is_constant())  d = temp;
  2759     andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
  2760     return d;
  2761   } else {
  2762     if (s2.is_register()) {
  2763       assert_different_registers(s2.as_register(), temp);
  2764       if (d.is_constant())  d = temp;
  2765       set(s1.as_constant(), temp);
  2766       andn(temp, s2.as_register(), d.as_register());
  2767       return d;
  2768     } else {
  2769       intptr_t res = s1.as_constant() & ~s2.as_constant();
  2770       return res;
  2775 RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
  2776   assert(d.register_or_noreg() != G0, "lost side effect");
  2777   if ((s2.is_constant() && s2.as_constant() == 0) ||
  2778       (s2.is_register() && s2.as_register() == G0)) {
  2779     // Do nothing, just move value.
  2780     if (s1.is_register()) {
  2781       if (d.is_constant())  d = temp;
  2782       mov(s1.as_register(), d.as_register());
  2783       return d;
  2784     } else {
  2785       return s1;
  2789   if (s1.is_register()) {
  2790     assert_different_registers(s1.as_register(), temp);
  2791     if (d.is_constant())  d = temp;
  2792     add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
  2793     return d;
  2794   } else {
  2795     if (s2.is_register()) {
  2796       assert_different_registers(s2.as_register(), temp);
  2797       if (d.is_constant())  d = temp;
  2798       add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register());
  2799       return d;
  2800     } else {
  2801       intptr_t res = s1.as_constant() + s2.as_constant();
  2802       return res;
  2807 RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
  2808   assert(d.register_or_noreg() != G0, "lost side effect");
  2809   if (!is_simm13(s2.constant_or_zero()))
  2810     s2 = (s2.as_constant() & 0xFF);
  2811   if ((s2.is_constant() && s2.as_constant() == 0) ||
  2812       (s2.is_register() && s2.as_register() == G0)) {
  2813     // Do nothing, just move value.
  2814     if (s1.is_register()) {
  2815       if (d.is_constant())  d = temp;
  2816       mov(s1.as_register(), d.as_register());
  2817       return d;
  2818     } else {
  2819       return s1;
  2823   if (s1.is_register()) {
  2824     assert_different_registers(s1.as_register(), temp);
  2825     if (d.is_constant())  d = temp;
  2826     sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
  2827     return d;
  2828   } else {
  2829     if (s2.is_register()) {
  2830       assert_different_registers(s2.as_register(), temp);
  2831       if (d.is_constant())  d = temp;
  2832       set(s1.as_constant(), temp);
  2833       sll_ptr(temp, s2.as_register(), d.as_register());
  2834       return d;
  2835     } else {
  2836       intptr_t res = s1.as_constant() << s2.as_constant();
  2837       return res;
  2843 // Look up the method for a megamorphic invokeinterface call.
  2844 // The target method is determined by <intf_klass, itable_index>.
  2845 // The receiver klass is in recv_klass.
  2846 // On success, the result will be in method_result, and execution falls through.
  2847 // On failure, execution transfers to the given label.
  2848 void MacroAssembler::lookup_interface_method(Register recv_klass,
  2849                                              Register intf_klass,
  2850                                              RegisterOrConstant itable_index,
  2851                                              Register method_result,
  2852                                              Register scan_temp,
  2853                                              Register sethi_temp,
  2854                                              Label& L_no_such_interface) {
  2855   assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
  2856   assert(itable_index.is_constant() || itable_index.as_register() == method_result,
  2857          "caller must use same register for non-constant itable index as for method");
  2859   // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  2860   int vtable_base = instanceKlass::vtable_start_offset() * wordSize;
  2861   int scan_step   = itableOffsetEntry::size() * wordSize;
  2862   int vte_size    = vtableEntry::size() * wordSize;
  2864   lduw(recv_klass, instanceKlass::vtable_length_offset() * wordSize, scan_temp);
   2865   // %%% We should store the aligned, prescaled offset in the klassOop.
  2866   // Then the next several instructions would fold away.
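          // Layout walked by the scan below (sketch of the standard itable layout):
          //   recv_klass + vtable_start + vtable_length * vtable_entry_size:
          //     itableOffsetEntry { interface klass, offset } ...  (a null interface
          //     klass terminates the list)
          //   recv_klass + offset:
          //     itableMethodEntry { method } for each method of that interface,
          //     indexed here by itable_index.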
  2868   int round_to_unit = ((HeapWordsPerLong > 1) ? BytesPerLong : 0);
  2869   int itb_offset = vtable_base;
  2870   if (round_to_unit != 0) {
  2871     // hoist first instruction of round_to(scan_temp, BytesPerLong):
  2872     itb_offset += round_to_unit - wordSize;
  2874   int itb_scale = exact_log2(vtableEntry::size() * wordSize);
  2875   sll(scan_temp, itb_scale,  scan_temp);
  2876   add(scan_temp, itb_offset, scan_temp);
  2877   if (round_to_unit != 0) {
  2878     // Round up to align_object_offset boundary
  2879     // see code for instanceKlass::start_of_itable!
  2880     // Was: round_to(scan_temp, BytesPerLong);
  2881     // Hoisted: add(scan_temp, BytesPerLong-1, scan_temp);
  2882     and3(scan_temp, -round_to_unit, scan_temp);
  2884   add(recv_klass, scan_temp, scan_temp);
  2886   // Adjust recv_klass by scaled itable_index, so we can free itable_index.
  2887   RegisterOrConstant itable_offset = itable_index;
  2888   itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
  2889   itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
  2890   add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);
  2892   // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
  2893   //   if (scan->interface() == intf) {
  2894   //     result = (klass + scan->offset() + itable_index);
  2895   //   }
  2896   // }
  2897   Label search, found_method;
  2899   for (int peel = 1; peel >= 0; peel--) {
  2900     // %%%% Could load both offset and interface in one ldx, if they were
  2901     // in the opposite order.  This would save a load.
  2902     ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result);
  2904     // Check that this entry is non-null.  A null entry means that
  2905     // the receiver class doesn't implement the interface, and wasn't the
  2906     // same as when the caller was compiled.
  2907     bpr(Assembler::rc_z, false, Assembler::pn, method_result, L_no_such_interface);
  2908     delayed()->cmp(method_result, intf_klass);
  2910     if (peel) {
  2911       brx(Assembler::equal,    false, Assembler::pt, found_method);
  2912     } else {
  2913       brx(Assembler::notEqual, false, Assembler::pn, search);
  2914       // (invert the test to fall through to found_method...)
  2916     delayed()->add(scan_temp, scan_step, scan_temp);
  2918     if (!peel)  break;
  2920     bind(search);
  2923   bind(found_method);
  2925   // Got a hit.
  2926   int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
  2927   // scan_temp[-scan_step] points to the vtable offset we need
  2928   ito_offset -= scan_step;
  2929   lduw(scan_temp, ito_offset, scan_temp);
  2930   ld_ptr(recv_klass, scan_temp, method_result);
  2934 void MacroAssembler::check_klass_subtype(Register sub_klass,
  2935                                          Register super_klass,
  2936                                          Register temp_reg,
  2937                                          Register temp2_reg,
  2938                                          Label& L_success) {
  2939   Label L_failure, L_pop_to_failure;
  2940   check_klass_subtype_fast_path(sub_klass, super_klass,
  2941                                 temp_reg, temp2_reg,
  2942                                 &L_success, &L_failure, NULL);
  2943   Register sub_2 = sub_klass;
  2944   Register sup_2 = super_klass;
  2945   if (!sub_2->is_global())  sub_2 = L0;
  2946   if (!sup_2->is_global())  sup_2 = L1;
  2948   save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
  2949   check_klass_subtype_slow_path(sub_2, sup_2,
  2950                                 L2, L3, L4, L5,
  2951                                 NULL, &L_pop_to_failure);
  2953   // on success:
  2954   restore();
  2955   ba(false, L_success);
  2956   delayed()->nop();
  2958   // on failure:
  2959   bind(L_pop_to_failure);
  2960   restore();
  2961   bind(L_failure);
  2965 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
  2966                                                    Register super_klass,
  2967                                                    Register temp_reg,
  2968                                                    Register temp2_reg,
  2969                                                    Label* L_success,
  2970                                                    Label* L_failure,
  2971                                                    Label* L_slow_path,
  2972                                         RegisterOrConstant super_check_offset,
  2973                                         Register instanceof_hack) {
  2974   int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
  2975                    Klass::secondary_super_cache_offset_in_bytes());
  2976   int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
  2977                     Klass::super_check_offset_offset_in_bytes());
  2979   bool must_load_sco  = (super_check_offset.constant_or_zero() == -1);
  2980   bool need_slow_path = (must_load_sco ||
  2981                          super_check_offset.constant_or_zero() == sco_offset);
  2983   assert_different_registers(sub_klass, super_klass, temp_reg);
  2984   if (super_check_offset.is_register()) {
  2985     assert_different_registers(sub_klass, super_klass, temp_reg,
  2986                                super_check_offset.as_register());
  2987   } else if (must_load_sco) {
  2988     assert(temp2_reg != noreg, "supply either a temp or a register offset");
  2991   Label L_fallthrough;
  2992   int label_nulls = 0;
  2993   if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  2994   if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  2995   if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
  2996   assert(label_nulls <= 1 || instanceof_hack != noreg ||
  2997          (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
  2998          "at most one NULL in the batch, usually");
  3000   // Support for the instanceof hack, which uses delay slots to
  3001   // set a destination register to zero or one.
  3002   bool do_bool_sets = (instanceof_hack != noreg);
  3003 #define BOOL_SET(bool_value)                            \
  3004   if (do_bool_sets && bool_value >= 0)                  \
  3005     set(bool_value, instanceof_hack)
  3006 #define DELAYED_BOOL_SET(bool_value)                    \
  3007   if (do_bool_sets && bool_value >= 0)                  \
  3008     delayed()->set(bool_value, instanceof_hack);        \
  3009   else delayed()->nop()
  3010   // Hacked ba(), which may only be used just before L_fallthrough.
  3011 #define FINAL_JUMP(label, bool_value)                   \
  3012   if (&(label) == &L_fallthrough) {                     \
  3013     BOOL_SET(bool_value);                               \
  3014   } else {                                              \
  3015     ba((do_bool_sets && bool_value >= 0), label);       \
  3016     DELAYED_BOOL_SET(bool_value);                       \
  3019   // If the pointers are equal, we are done (e.g., String[] elements).
  3020   // This self-check enables sharing of secondary supertype arrays among
  3021   // non-primary types such as array-of-interface.  Otherwise, each such
  3022   // type would need its own customized SSA.
  3023   // We move this check to the front of the fast path because many
  3024   // type checks are in fact trivially successful in this manner,
  3025   // so we get a nicely predicted branch right at the start of the check.
  3026   cmp(super_klass, sub_klass);
  3027   brx(Assembler::equal, do_bool_sets, Assembler::pn, *L_success);
  3028   DELAYED_BOOL_SET(1);
  3030   // Check the supertype display:
  3031   if (must_load_sco) {
  3032     // The super check offset is always positive...
  3033     lduw(super_klass, sco_offset, temp2_reg);
  3034     super_check_offset = RegisterOrConstant(temp2_reg);
  3035     // super_check_offset is register.
  3036     assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register());
  3038   ld_ptr(sub_klass, super_check_offset, temp_reg);
  3039   cmp(super_klass, temp_reg);
  3041   // This check has worked decisively for primary supers.
  3042   // Secondary supers are sought in the super_cache ('super_cache_addr').
  3043   // (Secondary supers are interfaces and very deeply nested subtypes.)
  3044   // This works in the same check above because of a tricky aliasing
  3045   // between the super_cache and the primary super display elements.
  3046   // (The 'super_check_addr' can address either, as the case requires.)
  3047   // Note that the cache is updated below if it does not help us find
  3048   // what we need immediately.
  3049   // So if it was a primary super, we can just fail immediately.
  3050   // Otherwise, it's the slow path for us (no success at this point).
  3052   if (super_check_offset.is_register()) {
  3053     brx(Assembler::equal, do_bool_sets, Assembler::pn, *L_success);
  3054     delayed(); if (do_bool_sets)  BOOL_SET(1);
  3055     // if !do_bool_sets, sneak the next cmp into the delay slot:
  3056     cmp(super_check_offset.as_register(), sc_offset);
  3058     if (L_failure == &L_fallthrough) {
  3059       brx(Assembler::equal, do_bool_sets, Assembler::pt, *L_slow_path);
  3060       delayed()->nop();
  3061       BOOL_SET(0);  // fallthrough on failure
  3062     } else {
  3063       brx(Assembler::notEqual, do_bool_sets, Assembler::pn, *L_failure);
  3064       DELAYED_BOOL_SET(0);
  3065       FINAL_JUMP(*L_slow_path, -1);  // -1 => vanilla delay slot
  3067   } else if (super_check_offset.as_constant() == sc_offset) {
  3068     // Need a slow path; fast failure is impossible.
  3069     if (L_slow_path == &L_fallthrough) {
  3070       brx(Assembler::equal, do_bool_sets, Assembler::pt, *L_success);
  3071       DELAYED_BOOL_SET(1);
  3072     } else {
  3073       brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path);
  3074       delayed()->nop();
  3075       FINAL_JUMP(*L_success, 1);
  3077   } else {
  3078     // No slow path; it's a fast decision.
  3079     if (L_failure == &L_fallthrough) {
  3080       brx(Assembler::equal, do_bool_sets, Assembler::pt, *L_success);
  3081       DELAYED_BOOL_SET(1);
  3082       BOOL_SET(0);
  3083     } else {
  3084       brx(Assembler::notEqual, do_bool_sets, Assembler::pn, *L_failure);
  3085       DELAYED_BOOL_SET(0);
  3086       FINAL_JUMP(*L_success, 1);
  3090   bind(L_fallthrough);
   3092 #undef FINAL_JUMP
   3093 #undef BOOL_SET
   3094 #undef DELAYED_BOOL_SET
  3099 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
  3100                                                    Register super_klass,
  3101                                                    Register count_temp,
  3102                                                    Register scan_temp,
  3103                                                    Register scratch_reg,
  3104                                                    Register coop_reg,
  3105                                                    Label* L_success,
  3106                                                    Label* L_failure) {
  3107   assert_different_registers(sub_klass, super_klass,
  3108                              count_temp, scan_temp, scratch_reg, coop_reg);
  3110   Label L_fallthrough, L_loop;
  3111   int label_nulls = 0;
  3112   if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  3113   if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  3114   assert(label_nulls <= 1, "at most one NULL in the batch");
  3116   // a couple of useful fields in sub_klass:
  3117   int ss_offset = (klassOopDesc::header_size() * HeapWordSize +
  3118                    Klass::secondary_supers_offset_in_bytes());
  3119   int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
  3120                    Klass::secondary_super_cache_offset_in_bytes());
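          // (ss_offset addresses the secondary_supers array -- an objArrayOop of
          //  klassOops -- and sc_offset the one-element secondary_super_cache slot,
          //  both measured from the klassOop base: header words plus Klass offsets.)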
  3122   // Do a linear scan of the secondary super-klass chain.
  3123   // This code is rarely used, so simplicity is a virtue here.
  3125 #ifndef PRODUCT
  3126   int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
  3127   inc_counter((address) pst_counter, count_temp, scan_temp);
  3128 #endif
  3130   // We will consult the secondary-super array.
  3131   ld_ptr(sub_klass, ss_offset, scan_temp);
  3133   // Compress superclass if necessary.
  3134   Register search_key = super_klass;
  3135   bool decode_super_klass = false;
  3136   if (UseCompressedOops) {
  3137     if (coop_reg != noreg) {
  3138       encode_heap_oop_not_null(super_klass, coop_reg);
  3139       search_key = coop_reg;
  3140     } else {
  3141       encode_heap_oop_not_null(super_klass);
  3142       decode_super_klass = true; // scarce temps!
  3144     // The superclass is never null; it would be a basic system error if a null
  3145     // pointer were to sneak in here.  Note that we have already loaded the
  3146     // Klass::super_check_offset from the super_klass in the fast path,
  3147     // so if there is a null in that register, we are already in the afterlife.
   3150   // Load the array length.  (lduw zero-extends, so the positive length does the right thing on LP64.)
  3151   lduw(scan_temp, arrayOopDesc::length_offset_in_bytes(), count_temp);
  3153   // Check for empty secondary super list
  3154   tst(count_temp);
  3156   // Top of search loop
  3157   bind(L_loop);
  3158   br(Assembler::equal, false, Assembler::pn, *L_failure);
  3159   delayed()->add(scan_temp, heapOopSize, scan_temp);
  3160   assert(heapOopSize != 0, "heapOopSize should be initialized");
  3162   // Skip the array header in all array accesses.
  3163   int elem_offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
  3164   elem_offset -= heapOopSize;   // the scan pointer was pre-incremented also
  3166   // Load next super to check
  3167   if (UseCompressedOops) {
  3168     // Don't use load_heap_oop; we don't want to decode the element.
  3169     lduw(   scan_temp, elem_offset, scratch_reg );
  3170   } else {
  3171     ld_ptr( scan_temp, elem_offset, scratch_reg );
  3174   // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
  3175   cmp(scratch_reg, search_key);
  3177   // A miss means we are NOT a subtype and need to keep looping
  3178   brx(Assembler::notEqual, false, Assembler::pn, L_loop);
  3179   delayed()->deccc(count_temp); // decrement trip counter in delay slot
  3181   // Falling out the bottom means we found a hit; we ARE a subtype
  3182   if (decode_super_klass) decode_heap_oop(super_klass);
  3184   // Success.  Cache the super we found and proceed in triumph.
  3185   st_ptr(super_klass, sub_klass, sc_offset);
  3187   if (L_success != &L_fallthrough) {
  3188     ba(false, *L_success);
  3189     delayed()->nop();
  3192   bind(L_fallthrough);
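        // In outline the scan above amounts to the following (a sketch only; the
        // real code threads the loop through branch delay slots and may compare
        // compressed oops rather than full-width klass pointers):
        //
        //   objArrayOop ss = sub_klass->secondary_supers();
        //   for (int i = 0; i < ss->length(); i++) {
        //     if (ss->obj_at(i) == super_klass) {
        //       // cache the hit (the st_ptr above) so the fast path wins next time
        //       *(klassOop*)((address)sub_klass + sc_offset) = super_klass;
        //       goto L_success;
        //     }
        //   }
        //   goto L_failure;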
  3196 void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
  3197                                               Register temp_reg,
  3198                                               Label& wrong_method_type) {
  3199   assert_different_registers(mtype_reg, mh_reg, temp_reg);
  3200   // compare method type against that of the receiver
  3201   RegisterOrConstant mhtype_offset = delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg);
  3202   load_heap_oop(mh_reg, mhtype_offset, temp_reg);
  3203   cmp(temp_reg, mtype_reg);
  3204   br(Assembler::notEqual, false, Assembler::pn, wrong_method_type);
  3205   delayed()->nop();
  3209 // A method handle has a "vmslots" field which gives the size of its
  3210 // argument list in JVM stack slots.  This field is either located directly
  3211 // in every method handle, or else is indirectly accessed through the
  3212 // method handle's MethodType.  This macro hides the distinction.
  3213 void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
  3214                                                 Register temp_reg) {
  3215   assert_different_registers(vmslots_reg, mh_reg, temp_reg);
  3216   // load mh.type.form.vmslots
  3217   if (java_lang_invoke_MethodHandle::vmslots_offset_in_bytes() != 0) {
  3218     // hoist vmslots into every mh to avoid dependent load chain
  3219     ld(           Address(mh_reg,    delayed_value(java_lang_invoke_MethodHandle::vmslots_offset_in_bytes, temp_reg)),   vmslots_reg);
  3220   } else {
  3221     Register temp2_reg = vmslots_reg;
  3222     load_heap_oop(Address(mh_reg,    delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg)),      temp2_reg);
  3223     load_heap_oop(Address(temp2_reg, delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, temp_reg)),        temp2_reg);
  3224     ld(           Address(temp2_reg, delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
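            // (Roughly: vmslots = mh->type()->form()->vmslots().  Each delayed_value()
            //  above yields a RegisterOrConstant read from a cell that is patched with
            //  the real java.lang.invoke field offset once it becomes known at runtime.)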
  3229 void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop) {
  3230   assert(mh_reg == G3_method_handle, "caller must put MH object in G3");
  3231   assert_different_registers(mh_reg, temp_reg);
  3233   // pick out the interpreted side of the handler
  3234   // NOTE: vmentry is not an oop!
  3235   ld_ptr(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg);
  3237   // off we go...
  3238   ld_ptr(temp_reg, MethodHandleEntry::from_interpreted_entry_offset_in_bytes(), temp_reg);
  3239   jmp(temp_reg, 0);
  3241   // for the various stubs which take control at this point,
  3242   // see MethodHandles::generate_method_handle_stub
  3244   // Some callers can fill the delay slot.
  3245   if (emit_delayed_nop) {
  3246     delayed()->nop();
  3251 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
  3252                                                    Register temp_reg,
  3253                                                    int extra_slot_offset) {
  3254   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
  3255   int stackElementSize = Interpreter::stackElementSize;
  3256   int offset = extra_slot_offset * stackElementSize;
  3257   if (arg_slot.is_constant()) {
  3258     offset += arg_slot.as_constant() * stackElementSize;
  3259     return offset;
  3260   } else {
  3261     assert(temp_reg != noreg, "must specify");
  3262     sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg);
  3263     if (offset != 0)
  3264       add(temp_reg, offset, temp_reg);
  3265     return temp_reg;
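            // Worked example (assuming stackElementSize == 8): a constant arg_slot of 2
            // with extra_slot_offset 1 yields a constant offset of (1 + 2) * 8 = 24 bytes,
            // while a register arg_slot becomes (arg_slot << 3) + 8 computed into temp_reg;
            // argument_address() below adds the Gargs base.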
  3270 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
  3271                                          Register temp_reg,
  3272                                          int extra_slot_offset) {
  3273   return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset));
  3277 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
  3278                                           Register temp_reg,
  3279                                           Label& done, Label* slow_case,
  3280                                           BiasedLockingCounters* counters) {
  3281   assert(UseBiasedLocking, "why call this otherwise?");
  3283   if (PrintBiasedLockingStatistics) {
  3284     assert_different_registers(obj_reg, mark_reg, temp_reg, O7);
  3285     if (counters == NULL)
  3286       counters = BiasedLocking::counters();
  3289   Label cas_label;
  3291   // Biased locking
  3292   // See whether the lock is currently biased toward our thread and
  3293   // whether the epoch is still valid
  3294   // Note that the runtime guarantees sufficient alignment of JavaThread
  3295   // pointers to allow age to be placed into low bits
  3296   assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
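          // (Mark word low bits in the biased case, most to least significant:
          //  [thread | epoch | age | biased_lock:1 | lock:2]; biased_lock_mask_in_place
          //  covers the low three bits and biased_lock_pattern is 0b101.)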
  3297   and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
  3298   cmp(temp_reg, markOopDesc::biased_lock_pattern);
  3299   brx(Assembler::notEqual, false, Assembler::pn, cas_label);
  3300   delayed()->nop();
  3302   load_klass(obj_reg, temp_reg);
  3303   ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
  3304   or3(G2_thread, temp_reg, temp_reg);
  3305   xor3(mark_reg, temp_reg, temp_reg);
  3306   andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
  3307   if (counters != NULL) {
  3308     cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
  3309     // Reload mark_reg as we may need it later
  3310     ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg);
  3312   brx(Assembler::equal, true, Assembler::pt, done);
  3313   delayed()->nop();
  3315   Label try_revoke_bias;
  3316   Label try_rebias;
  3317   Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
  3318   assert(mark_addr.disp() == 0, "cas must take a zero displacement");
  3320   // At this point we know that the header has the bias pattern and
  3321   // that we are not the bias owner in the current epoch. We need to
  3322   // figure out more details about the state of the header in order to
  3323   // know what operations can be legally performed on the object's
  3324   // header.
  3326   // If the low three bits in the xor result aren't clear, that means
  3327   // the prototype header is no longer biased and we have to revoke
  3328   // the bias on this object.
  3329   btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
  3330   brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);
  3332   // Biasing is still enabled for this data type. See whether the
  3333   // epoch of the current bias is still valid, meaning that the epoch
  3334   // bits of the mark word are equal to the epoch bits of the
  3335   // prototype header. (Note that the prototype header's epoch bits
  3336   // only change at a safepoint.) If not, attempt to rebias the object
  3337   // toward the current thread. Note that we must be absolutely sure
  3338   // that the current epoch is invalid in order to do this because
  3339   // otherwise the manipulations it performs on the mark word are
  3340   // illegal.
  3341   delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
  3342   brx(Assembler::notZero, false, Assembler::pn, try_rebias);
  3344   // The epoch of the current bias is still valid but we know nothing
  3345   // about the owner; it might be set or it might be clear. Try to
  3346   // acquire the bias of the object using an atomic operation. If this
  3347   // fails we will go in to the runtime to revoke the object's bias.
  3348   // Note that we first construct the presumed unbiased header so we
  3349   // don't accidentally blow away another thread's valid bias.
  3350   delayed()->and3(mark_reg,
  3351                   markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
  3352                   mark_reg);
  3353   or3(G2_thread, mark_reg, temp_reg);
  3354   casn(mark_addr.base(), mark_reg, temp_reg);
  3355   // If the biasing toward our thread failed, this means that
  3356   // another thread succeeded in biasing it toward itself and we
  3357   // need to revoke that bias. The revocation will occur in the
  3358   // interpreter runtime in the slow case.
  3359   cmp(mark_reg, temp_reg);
  3360   if (counters != NULL) {
  3361     cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg);
  3363   if (slow_case != NULL) {
  3364     brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
  3365     delayed()->nop();
  3367   br(Assembler::always, false, Assembler::pt, done);
  3368   delayed()->nop();
  3370   bind(try_rebias);
  3371   // At this point we know the epoch has expired, meaning that the
  3372   // current "bias owner", if any, is actually invalid. Under these
  3373   // circumstances _only_, we are allowed to use the current header's
  3374   // value as the comparison value when doing the cas to acquire the
  3375   // bias in the current epoch. In other words, we allow transfer of
  3376   // the bias from one thread to another directly in this situation.
  3377   //
  3378   // FIXME: due to a lack of registers we currently blow away the age
  3379   // bits in this situation. Should attempt to preserve them.
  3380   load_klass(obj_reg, temp_reg);
  3381   ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
  3382   or3(G2_thread, temp_reg, temp_reg);
  3383   casn(mark_addr.base(), mark_reg, temp_reg);
  3384   // If the biasing toward our thread failed, this means that
  3385   // another thread succeeded in biasing it toward itself and we
  3386   // need to revoke that bias. The revocation will occur in the
  3387   // interpreter runtime in the slow case.
  3388   cmp(mark_reg, temp_reg);
  3389   if (counters != NULL) {
  3390     cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg);
  3392   if (slow_case != NULL) {
  3393     brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
  3394     delayed()->nop();
  3396   br(Assembler::always, false, Assembler::pt, done);
  3397   delayed()->nop();
  3399   bind(try_revoke_bias);
  3400   // The prototype mark in the klass doesn't have the bias bit set any
  3401   // more, indicating that objects of this data type are not supposed
  3402   // to be biased any more. We are going to try to reset the mark of
  3403   // this object to the prototype value and fall through to the
  3404   // CAS-based locking scheme. Note that if our CAS fails, it means
  3405   // that another thread raced us for the privilege of revoking the
  3406   // bias of this particular object, so it's okay to continue in the
  3407   // normal locking code.
  3408   //
  3409   // FIXME: due to a lack of registers we currently blow away the age
  3410   // bits in this situation. Should attempt to preserve them.
  3411   load_klass(obj_reg, temp_reg);
  3412   ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
  3413   casn(mark_addr.base(), mark_reg, temp_reg);
  3414   // Fall through to the normal CAS-based lock, because no matter what
  3415   // the result of the above CAS, some thread must have succeeded in
  3416   // removing the bias bit from the object's header.
  3417   if (counters != NULL) {
  3418     cmp(mark_reg, temp_reg);
  3419     cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg);
  3422   bind(cas_label);
  3425 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done,
  3426                                           bool allow_delay_slot_filling) {
  3427   // Check for biased locking unlock case, which is a no-op
  3428   // Note: we do not have to check the thread ID for two reasons.
  3429   // First, the interpreter checks for IllegalMonitorStateException at
  3430   // a higher level. Second, if the bias was revoked while we held the
  3431   // lock, the object could not be rebiased toward another thread, so
  3432   // the bias bit would be clear.
  3433   ld_ptr(mark_addr, temp_reg);
  3434   and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
  3435   cmp(temp_reg, markOopDesc::biased_lock_pattern);
  3436   brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done);
  3437   delayed();
  3438   if (!allow_delay_slot_filling) {
  3439     nop();
  3444 // CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by
  3445 // Solaris/SPARC's "as".  Another apt name would be cas_ptr()
  3447 void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) {
  3448   casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()) ;
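        // On success the compared value and set_reg come back equal (set_reg always
        // receives the old memory contents), which is why callers follow casn() with
        // cmp() of the two registers to test the outcome.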
  3453 // compiler_lock_object() and compiler_unlock_object() are direct transliterations
  3454 // of i486.ad fast_lock() and fast_unlock().  See those methods for detailed comments.
  3455 // The code could be tightened up considerably.
  3456 //
  3457 // box->dhw disposition - post-conditions at DONE_LABEL.
  3458 // -   Successful inflated lock:  box->dhw != 0.
  3459 //     Any non-zero value suffices.
  3460 //     Consider G2_thread, rsp, boxReg, or unused_mark()
  3461 // -   Successful Stack-lock: box->dhw == mark.
  3462 //     box->dhw must contain the displaced mark word value
  3463 // -   Failure -- icc.ZFlag == 0 and box->dhw is undefined.
  3464 //     The slow-path fast_enter() and slow_enter() operators
  3465 //     are responsible for setting box->dhw = NonZero (typically ::unused_mark).
  3466 // -   Biased: box->dhw is undefined
  3467 //
   3468 // SPARC refworkload performance - specifically jetstream and scimark - is
  3469 // extremely sensitive to the size of the code emitted by compiler_lock_object
  3470 // and compiler_unlock_object.  Critically, the key factor is code size, not path
   3471 // length.  (Simple experiments that pad CLO with unexecuted NOPs demonstrate the
   3472 // effect.)
  3475 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
  3476                                           Register Rbox, Register Rscratch,
  3477                                           BiasedLockingCounters* counters,
  3478                                           bool try_bias) {
  3479    Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
  3481    verify_oop(Roop);
  3482    Label done ;
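           // The EmitSync bits tested below select alternative, mostly experimental
           // code shapes for this fast path; the final else arm is the default emission.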
  3484    if (counters != NULL) {
  3485      inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
  3488    if (EmitSync & 1) {
  3489      mov    (3, Rscratch) ;
  3490      st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
  3491      cmp    (SP, G0) ;
  3492      return ;
  3495    if (EmitSync & 2) {
  3497      // Fetch object's markword
  3498      ld_ptr(mark_addr, Rmark);
  3500      if (try_bias) {
  3501         biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
  3504      // Save Rbox in Rscratch to be used for the cas operation
  3505      mov(Rbox, Rscratch);
  3507      // set Rmark to markOop | markOopDesc::unlocked_value
  3508      or3(Rmark, markOopDesc::unlocked_value, Rmark);
  3510      // Initialize the box.  (Must happen before we update the object mark!)
  3511      st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
  3513      // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
  3514      assert(mark_addr.disp() == 0, "cas must take a zero displacement");
  3515      casx_under_lock(mark_addr.base(), Rmark, Rscratch,
  3516         (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
  3518      // if compare/exchange succeeded we found an unlocked object and we now have locked it
  3519      // hence we are done
  3520      cmp(Rmark, Rscratch);
  3521 #ifdef _LP64
  3522      sub(Rscratch, STACK_BIAS, Rscratch);
  3523 #endif
  3524      brx(Assembler::equal, false, Assembler::pt, done);
  3525      delayed()->sub(Rscratch, SP, Rscratch);  //pull next instruction into delay slot
  3527      // we did not find an unlocked object so see if this is a recursive case
  3528      // sub(Rscratch, SP, Rscratch);
  3529      assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
  3530      andcc(Rscratch, 0xfffff003, Rscratch);
  3531      st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
  3532      bind (done) ;
  3533      return ;
  3536    Label Egress ;
  3538    if (EmitSync & 256) {
  3539       Label IsInflated ;
  3541       ld_ptr (mark_addr, Rmark);           // fetch obj->mark
  3542       // Triage: biased, stack-locked, neutral, inflated
  3543       if (try_bias) {
  3544         biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
  3545         // Invariant: if control reaches this point in the emitted stream
  3546         // then Rmark has not been modified.
  3549       // Store mark into displaced mark field in the on-stack basic-lock "box"
  3550       // Critically, this must happen before the CAS
  3551       // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
  3552       st_ptr (Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
  3553       andcc  (Rmark, 2, G0) ;
  3554       brx    (Assembler::notZero, false, Assembler::pn, IsInflated) ;
  3555       delayed() ->
  3557       // Try stack-lock acquisition.
  3558       // Beware: the 1st instruction is in a delay slot
  3559       mov    (Rbox,  Rscratch);
  3560       or3    (Rmark, markOopDesc::unlocked_value, Rmark);
  3561       assert (mark_addr.disp() == 0, "cas must take a zero displacement");
  3562       casn   (mark_addr.base(), Rmark, Rscratch) ;
  3563       cmp    (Rmark, Rscratch);
  3564       brx    (Assembler::equal, false, Assembler::pt, done);
  3565       delayed()->sub(Rscratch, SP, Rscratch);
  3567       // Stack-lock attempt failed - check for recursive stack-lock.
  3568       // See the comments below about how we might remove this case.
  3569 #ifdef _LP64
  3570       sub    (Rscratch, STACK_BIAS, Rscratch);
  3571 #endif
  3572       assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
  3573       andcc  (Rscratch, 0xfffff003, Rscratch);
  3574       br     (Assembler::always, false, Assembler::pt, done) ;
  3575       delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
  3577       bind   (IsInflated) ;
  3578       if (EmitSync & 64) {
  3579          // If m->owner != null goto IsLocked
  3580          // Pessimistic form: Test-and-CAS vs CAS
  3581          // The optimistic form avoids RTS->RTO cache line upgrades.
  3582          ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
  3583          andcc  (Rscratch, Rscratch, G0) ;
  3584          brx    (Assembler::notZero, false, Assembler::pn, done) ;
  3585          delayed()->nop() ;
  3586          // m->owner == null : it's unlocked.
  3589       // Try to CAS m->owner from null to Self
  3590       // Invariant: if we acquire the lock then _recursions should be 0.
  3591       add    (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ;
  3592       mov    (G2_thread, Rscratch) ;
  3593       casn   (Rmark, G0, Rscratch) ;
  3594       cmp    (Rscratch, G0) ;
  3595       // Intentional fall-through into done
  3596    } else {
  3597       // Aggressively avoid the Store-before-CAS penalty
  3598       // Defer the store into box->dhw until after the CAS
  3599       Label IsInflated, Recursive ;
  3601 // Anticipate CAS -- Avoid RTS->RTO upgrade
  3602 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ;
  3604       ld_ptr (mark_addr, Rmark);           // fetch obj->mark
  3605       // Triage: biased, stack-locked, neutral, inflated
  3607       if (try_bias) {
  3608         biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
  3609         // Invariant: if control reaches this point in the emitted stream
  3610         // then Rmark has not been modified.
  3612       andcc  (Rmark, 2, G0) ;
  3613       brx    (Assembler::notZero, false, Assembler::pn, IsInflated) ;
  3614       delayed()->                         // Beware - dangling delay-slot
  3616       // Try stack-lock acquisition.
  3617       // Transiently install BUSY (0) encoding in the mark word.
  3618       // if the CAS of 0 into the mark was successful then we execute:
  3619       //   ST box->dhw  = mark   -- save fetched mark in on-stack basiclock box
  3620       //   ST obj->mark = box    -- overwrite transient 0 value
  3621       // This presumes TSO, of course.
  3623       mov    (0, Rscratch) ;
  3624       or3    (Rmark, markOopDesc::unlocked_value, Rmark);
  3625       assert (mark_addr.disp() == 0, "cas must take a zero displacement");
  3626       casn   (mark_addr.base(), Rmark, Rscratch) ;
  3627 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ;
  3628       cmp    (Rscratch, Rmark) ;
  3629       brx    (Assembler::notZero, false, Assembler::pn, Recursive) ;
  3630       delayed() ->
  3631         st_ptr (Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
  3632       if (counters != NULL) {
  3633         cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
  3635       br     (Assembler::always, false, Assembler::pt, done);
  3636       delayed() ->
  3637         st_ptr (Rbox, mark_addr) ;
  3639       bind   (Recursive) ;
  3640       // Stack-lock attempt failed - check for recursive stack-lock.
  3641       // Tests show that we can remove the recursive case with no impact
  3642       // on refworkload 0.83.  If we need to reduce the size of the code
   3643       // emitted by compiler_lock_object(), the recursive case is a perfect
   3644       // candidate.
  3645       //
  3646       // A more extreme idea is to always inflate on stack-lock recursion.
  3647       // This lets us eliminate the recursive checks in compiler_lock_object
  3648       // and compiler_unlock_object and the (box->dhw == 0) encoding.
   3649       // A brief experiment - requiring changes to synchronizer.cpp and the interpreter -
   3650       // showed a performance *increase*.  In the same experiment I eliminated
  3651       // the fast-path stack-lock code from the interpreter and always passed
  3652       // control to the "slow" operators in synchronizer.cpp.
  3654       // RScratch contains the fetched obj->mark value from the failed CASN.
  3655 #ifdef _LP64
  3656       sub    (Rscratch, STACK_BIAS, Rscratch);
  3657 #endif
  3658       sub(Rscratch, SP, Rscratch);
  3659       assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
  3660       andcc  (Rscratch, 0xfffff003, Rscratch);
  3661       if (counters != NULL) {
  3662         // Accounting needs the Rscratch register
  3663         st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
  3664         cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
  3665         br     (Assembler::always, false, Assembler::pt, done) ;
  3666         delayed()->nop() ;
  3667       } else {
  3668         br     (Assembler::always, false, Assembler::pt, done) ;
  3669         delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
  3672       bind   (IsInflated) ;
  3673       if (EmitSync & 64) {
  3674          // If m->owner != null goto IsLocked
  3675          // Test-and-CAS vs CAS
  3676          // Pessimistic form avoids futile (doomed) CAS attempts
  3677          // The optimistic form avoids RTS->RTO cache line upgrades.
  3678          ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
  3679          andcc  (Rscratch, Rscratch, G0) ;
  3680          brx    (Assembler::notZero, false, Assembler::pn, done) ;
  3681          delayed()->nop() ;
  3682          // m->owner == null : it's unlocked.
  3685       // Try to CAS m->owner from null to Self
  3686       // Invariant: if we acquire the lock then _recursions should be 0.
  3687       add    (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ;
  3688       mov    (G2_thread, Rscratch) ;
  3689       casn   (Rmark, G0, Rscratch) ;
  3690       cmp    (Rscratch, G0) ;
  3691       // ST box->displaced_header = NonZero.
  3692       // Any non-zero value suffices:
  3693       //    unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
  3694       st_ptr (Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
  3695       // Intentional fall-through into done
  3698    bind   (done) ;
  3701 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
  3702                                             Register Rbox, Register Rscratch,
  3703                                             bool try_bias) {
  3704    Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
  3706    Label done ;
  3708    if (EmitSync & 4) {
  3709      cmp  (SP, G0) ;
  3710      return ;
  3713    if (EmitSync & 8) {
  3714      if (try_bias) {
  3715         biased_locking_exit(mark_addr, Rscratch, done);
  3718      // Test first if it is a fast recursive unlock
  3719      ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
  3720      cmp(Rmark, G0);
  3721      brx(Assembler::equal, false, Assembler::pt, done);
  3722      delayed()->nop();
   3724      // Check if it is still a lightweight lock; this is true if we see
  3725      // the stack address of the basicLock in the markOop of the object
  3726      assert(mark_addr.disp() == 0, "cas must take a zero displacement");
  3727      casx_under_lock(mark_addr.base(), Rbox, Rmark,
  3728        (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
  3729      br (Assembler::always, false, Assembler::pt, done);
  3730      delayed()->cmp(Rbox, Rmark);
  3731      bind (done) ;
  3732      return ;
   3735    // Beware ... If the aggregate size of the code emitted by CLO and CUO
   3736    // is too large, performance rolls abruptly off a cliff.
  3737    // This could be related to inlining policies, code cache management, or
  3738    // I$ effects.
  3739    Label LStacked ;
  3741    if (try_bias) {
  3742       // TODO: eliminate redundant LDs of obj->mark
  3743       biased_locking_exit(mark_addr, Rscratch, done);
  3746    ld_ptr (Roop, oopDesc::mark_offset_in_bytes(), Rmark) ;
  3747    ld_ptr (Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch);
  3748    andcc  (Rscratch, Rscratch, G0);
  3749    brx    (Assembler::zero, false, Assembler::pn, done);
  3750    delayed()-> nop() ;      // consider: relocate fetch of mark, above, into this DS
  3751    andcc  (Rmark, 2, G0) ;
  3752    brx    (Assembler::zero, false, Assembler::pt, LStacked) ;
  3753    delayed()-> nop() ;
  3755    // It's inflated
  3756    // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
  3757    // the ST of 0 into _owner which releases the lock.  This prevents loads
  3758    // and stores within the critical section from reordering (floating)
  3759    // past the store that releases the lock.  But TSO is a strong memory model
  3760    // and that particular flavor of barrier is a noop, so we can safely elide it.
  3761    // Note that we use 1-0 locking by default for the inflated case.  We
   3762    // close the resultant (and rare) race by having contended threads in
  3763    // monitorenter periodically poll _owner.
  3764    ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
  3765    ld_ptr (Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox);
  3766    xor3   (Rscratch, G2_thread, Rscratch) ;
  3767    orcc   (Rbox, Rscratch, Rbox) ;
  3768    brx    (Assembler::notZero, false, Assembler::pn, done) ;
  3769    delayed()->
  3770    ld_ptr (Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch);
  3771    ld_ptr (Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox);
  3772    orcc   (Rbox, Rscratch, G0) ;
  3773    if (EmitSync & 65536) {
  3774       Label LSucc ;
  3775       brx    (Assembler::notZero, false, Assembler::pn, LSucc) ;
  3776       delayed()->nop() ;
  3777       br     (Assembler::always, false, Assembler::pt, done) ;
  3778       delayed()->
  3779       st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
  3781       bind   (LSucc) ;
  3782       st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
  3783       if (os::is_MP()) { membar (StoreLoad) ; }
  3784       ld_ptr (Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch);
  3785       andcc  (Rscratch, Rscratch, G0) ;
  3786       brx    (Assembler::notZero, false, Assembler::pt, done) ;
  3787       delayed()-> andcc (G0, G0, G0) ;
  3788       add    (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ;
  3789       mov    (G2_thread, Rscratch) ;
  3790       casn   (Rmark, G0, Rscratch) ;
  3791       cmp    (Rscratch, G0) ;
  3792       // invert icc.zf and goto done
  3793       brx    (Assembler::notZero, false, Assembler::pt, done) ;
  3794       delayed() -> cmp (G0, G0) ;
  3795       br     (Assembler::always, false, Assembler::pt, done);
  3796       delayed() -> cmp (G0, 1) ;
  3797    } else {
  3798       brx    (Assembler::notZero, false, Assembler::pn, done) ;
  3799       delayed()->nop() ;
  3800       br     (Assembler::always, false, Assembler::pt, done) ;
  3801       delayed()->
  3802       st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
  3805    bind   (LStacked) ;
  3806    // Consider: we could replace the expensive CAS in the exit
  3807    // path with a simple ST of the displaced mark value fetched from
  3808    // the on-stack basiclock box.  That admits a race where a thread T2
  3809    // in the slow lock path -- inflating with monitor M -- could race a
  3810    // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
  3811    // More precisely T1 in the stack-lock unlock path could "stomp" the
  3812    // inflated mark value M installed by T2, resulting in an orphan
  3813    // object monitor M and T2 becoming stranded.  We can remedy that situation
  3814    // by having T2 periodically poll the object's mark word using timed wait
  3815    // operations.  If T2 discovers that a stomp has occurred it vacates
  3816    // the monitor M and wakes any other threads stranded on the now-orphan M.
  3817    // In addition the monitor scavenger, which performs deflation,
   3818    // would also need to check for orphan monitors and stranded threads.
  3819    //
  3820    // Finally, inflation is also used when T2 needs to assign a hashCode
  3821    // to O and O is stack-locked by T1.  The "stomp" race could cause
  3822    // an assigned hashCode value to be lost.  We can avoid that condition
  3823    // and provide the necessary hashCode stability invariants by ensuring
  3824    // that hashCode generation is idempotent between copying GCs.
  3825    // For example we could compute the hashCode of an object O as
  3826    // O's heap address XOR some high quality RNG value that is refreshed
  3827    // at GC-time.  The monitor scavenger would install the hashCode
  3828    // found in any orphan monitors.  Again, the mechanism admits a
  3829    // lost-update "stomp" WAW race but detects and recovers as needed.
  3830    //
  3831    // A prototype implementation showed excellent results, although
  3832    // the scavenger and timeout code was rather involved.
  3834    casn   (mark_addr.base(), Rbox, Rscratch) ;
  3835    cmp    (Rbox, Rscratch);
  3836    // Intentional fall through into done ...
  3838    bind   (done) ;
  3843 void MacroAssembler::print_CPU_state() {
  3844   // %%%%% need to implement this
  3847 void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
  3848   // %%%%% need to implement this
  3851 void MacroAssembler::push_IU_state() {
  3852   // %%%%% need to implement this
  3856 void MacroAssembler::pop_IU_state() {
  3857   // %%%%% need to implement this
  3861 void MacroAssembler::push_FPU_state() {
  3862   // %%%%% need to implement this
  3866 void MacroAssembler::pop_FPU_state() {
  3867   // %%%%% need to implement this
  3871 void MacroAssembler::push_CPU_state() {
  3872   // %%%%% need to implement this
  3876 void MacroAssembler::pop_CPU_state() {
  3877   // %%%%% need to implement this
  3882 void MacroAssembler::verify_tlab() {
  3883 #ifdef ASSERT
  3884   if (UseTLAB && VerifyOops) {
  3885     Label next, next2, ok;
  3886     Register t1 = L0;
  3887     Register t2 = L1;
  3888     Register t3 = L2;
  3890     save_frame(0);
  3891     ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
  3892     ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2);
  3893     or3(t1, t2, t3);
  3894     cmp(t1, t2);
  3895     br(Assembler::greaterEqual, false, Assembler::pn, next);
  3896     delayed()->nop();
  3897     stop("assert(top >= start)");
  3898     should_not_reach_here();
  3900     bind(next);
  3901     ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
  3902     ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2);
  3903     or3(t3, t2, t3);
  3904     cmp(t1, t2);
  3905     br(Assembler::lessEqual, false, Assembler::pn, next2);
  3906     delayed()->nop();
  3907     stop("assert(top <= end)");
  3908     should_not_reach_here();
  3910     bind(next2);
  3911     and3(t3, MinObjAlignmentInBytesMask, t3);
  3912     cmp(t3, 0);
  3913     br(Assembler::lessEqual, false, Assembler::pn, ok);
  3914     delayed()->nop();
  3915     stop("assert(aligned)");
  3916     should_not_reach_here();
  3918     bind(ok);
  3919     restore();
  3921 #endif
  3925 void MacroAssembler::eden_allocate(
  3926   Register obj,                        // result: pointer to object after successful allocation
  3927   Register var_size_in_bytes,          // object size in bytes if unknown at compile time; invalid otherwise
  3928   int      con_size_in_bytes,          // object size in bytes if   known at compile time
  3929   Register t1,                         // temp register
  3930   Register t2,                         // temp register
  3931   Label&   slow_case                   // continuation point if fast allocation fails
  3932 ){
  3933   // make sure arguments make sense
  3934   assert_different_registers(obj, var_size_in_bytes, t1, t2);
  3935   assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
  3936   assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
  3938   if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
  3939     // No allocation in the shared eden.
  3940     br(Assembler::always, false, Assembler::pt, slow_case);
  3941     delayed()->nop();
  3942   } else {
  3943     // get eden boundaries
  3944     // note: we need both top & top_addr!
  3945     const Register top_addr = t1;
  3946     const Register end      = t2;
  3948     CollectedHeap* ch = Universe::heap();
  3949     set((intx)ch->top_addr(), top_addr);
  3950     intx delta = (intx)ch->end_addr() - (intx)ch->top_addr();
  3951     ld_ptr(top_addr, delta, end);
  3952     ld_ptr(top_addr, 0, obj);
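            // (end_addr is reached as a fixed byte delta from top_addr, so a single
            //  set() of the top address serves both loads.)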
  3954     // try to allocate
  3955     Label retry;
  3956     bind(retry);
  3957 #ifdef ASSERT
  3958     // make sure eden top is properly aligned
  3960       Label L;
  3961       btst(MinObjAlignmentInBytesMask, obj);
  3962       br(Assembler::zero, false, Assembler::pt, L);
  3963       delayed()->nop();
  3964       stop("eden top is not properly aligned");
  3965       bind(L);
  3967 #endif // ASSERT
  3968     const Register free = end;
  3969     sub(end, obj, free);                                   // compute amount of free space
  3970     if (var_size_in_bytes->is_valid()) {
  3971       // size is unknown at compile time
  3972       cmp(free, var_size_in_bytes);
   3973       br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space, go to the slow case
  3974       delayed()->add(obj, var_size_in_bytes, end);
  3975     } else {
  3976       // size is known at compile time
  3977       cmp(free, con_size_in_bytes);
   3978       br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space, go to the slow case
  3979       delayed()->add(obj, con_size_in_bytes, end);
  3981     // Compare obj with the value at top_addr; if still equal, swap the value of
  3982     // end with the value at top_addr. If not equal, read the value at top_addr
  3983     // into end.
  3984     casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
  3985     // if someone beat us on the allocation, try again, otherwise continue
  3986     cmp(obj, end);
  3987     brx(Assembler::notEqual, false, Assembler::pn, retry);
   3988     delayed()->mov(end, obj);                              // nop if successful since obj == end
  3990 #ifdef ASSERT
  3991     // make sure eden top is properly aligned
  3993       Label L;
  3994       const Register top_addr = t1;
  3996       set((intx)ch->top_addr(), top_addr);
  3997       ld_ptr(top_addr, 0, top_addr);
  3998       btst(MinObjAlignmentInBytesMask, top_addr);
  3999       br(Assembler::zero, false, Assembler::pt, L);
  4000       delayed()->nop();
  4001       stop("eden top is not properly aligned");
  4002       bind(L);
  4004 #endif // ASSERT
  4009 void MacroAssembler::tlab_allocate(
  4010   Register obj,                        // result: pointer to object after successful allocation
  4011   Register var_size_in_bytes,          // object size in bytes if unknown at compile time; invalid otherwise
  4012   int      con_size_in_bytes,          // object size in bytes if   known at compile time
  4013   Register t1,                         // temp register
  4014   Label&   slow_case                   // continuation point if fast allocation fails
  4015 ){
  4016   // make sure arguments make sense
  4017   assert_different_registers(obj, var_size_in_bytes, t1);
  4018   assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size");
  4019   assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
  4021   const Register free  = t1;
  4023   verify_tlab();
  4025   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj);
  4027   // calculate amount of free space
  4028   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free);
  4029   sub(free, obj, free);
  4031   Label done;
  4032   if (var_size_in_bytes == noreg) {
  4033     cmp(free, con_size_in_bytes);
  4034   } else {
  4035     cmp(free, var_size_in_bytes);
  4037   br(Assembler::less, false, Assembler::pn, slow_case);
  4038   // calculate the new top pointer
  4039   if (var_size_in_bytes == noreg) {
  4040     delayed()->add(obj, con_size_in_bytes, free);
  4041   } else {
  4042     delayed()->add(obj, var_size_in_bytes, free);
  4045   bind(done);
  4047 #ifdef ASSERT
  4048   // make sure new free pointer is properly aligned
  4050     Label L;
  4051     btst(MinObjAlignmentInBytesMask, free);
  4052     br(Assembler::zero, false, Assembler::pt, L);
  4053     delayed()->nop();
  4054     stop("updated TLAB free is not properly aligned");
  4055     bind(L);
  4057 #endif // ASSERT
  4059   // update the tlab top pointer
  4060   st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
  4061   verify_tlab();
  4065 void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
  4066   Register top = O0;
  4067   Register t1 = G1;
  4068   Register t2 = G3;
  4069   Register t3 = O1;
  4070   assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
  4071   Label do_refill, discard_tlab;
  4073   if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
  4074     // No allocation in the shared eden.
  4075     br(Assembler::always, false, Assembler::pt, slow_case);
  4076     delayed()->nop();
  4079   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
  4080   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1);
  4081   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2);
  4083   // calculate amount of free space
  4084   sub(t1, top, t1);
  4085   srl_ptr(t1, LogHeapWordSize, t1);
  4087   // Retain tlab and allocate object in shared space if
  4088   // the amount free in the tlab is too large to discard.
  4089   cmp(t1, t2);
  4090   brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);
  4092   // increment waste limit to prevent getting stuck on this slow path
  4093   delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
  4094   st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
  4095   if (TLABStats) {
  4096     // increment number of slow_allocations
  4097     ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2);
  4098     add(t2, 1, t2);
  4099     stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
  4101   br(Assembler::always, false, Assembler::pt, try_eden);
  4102   delayed()->nop();
  4104   bind(discard_tlab);
  4105   if (TLABStats) {
  4106     // increment number of refills
  4107     ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2);
  4108     add(t2, 1, t2);
  4109     stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()));
  4110     // accumulate wastage
  4111     ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2);
  4112     add(t2, t1, t2);
  4113     stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
  4116   // if tlab is currently allocated (top or end != null) then
  4117   // fill [top, end + alignment_reserve) with array object
  4118   br_null(top, false, Assembler::pn, do_refill);
  4119   delayed()->nop();
  4121   set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
  4122   st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
  4123   // set klass to intArrayKlass
  4124   sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
  4125   add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
  4126   sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
  4127   st(t1, top, arrayOopDesc::length_offset_in_bytes());
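          // t1 -- the free space in words computed above, less the int[] header and
          // plus the alignment reserve -- is scaled to a jint element count so the
          // dead TLAB tail parses as a valid int array.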
  4128   set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
  4129   ld_ptr(t2, 0, t2);
   4130   // store klass last.  concurrent gcs assume the length is valid if the
   4131   // klass field is not null.
  4132   store_klass(t2, top);
  4133   verify_oop(top);
  4135   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1);
  4136   sub(top, t1, t1); // size of tlab's allocated portion
  4137   incr_allocated_bytes(t1, t2, t3);
  4139   // refill the tlab with an eden allocation
  4140   bind(do_refill);
  4141   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1);
  4142   sll_ptr(t1, LogHeapWordSize, t1);
  4143   // allocate new tlab, address returned in top
  4144   eden_allocate(top, t1, 0, t2, t3, slow_case);
  4146   st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset()));
  4147   st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
  4148 #ifdef ASSERT
  4149   // check that tlab_size (t1) is still valid
  4151     Label ok;
  4152     ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
  4153     sll_ptr(t2, LogHeapWordSize, t2);
  4154     cmp(t1, t2);
  4155     br(Assembler::equal, false, Assembler::pt, ok);
  4156     delayed()->nop();
  4157     stop("assert(t1 == tlab_size)");
  4158     should_not_reach_here();
  4160     bind(ok);
  4162 #endif // ASSERT
  4163   add(top, t1, top); // t1 is tlab_size
  4164   sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
  4165   st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));
  4166   verify_tlab();
  4167   br(Assembler::always, false, Assembler::pt, retry);
  4168   delayed()->nop();
  4171 void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
  4172                                           Register t1, Register t2) {
  4173   // Bump total bytes allocated by this thread
  4174   assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch
  4175   assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2);
  4176   // v8 support has gone the way of the dodo
  4177   ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1);
  4178   add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1);
  4179   stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset()));
  4182 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
  4183   switch (cond) {
  4184     // Note some conditions are synonyms for others
  4185     case Assembler::never:                return Assembler::always;
  4186     case Assembler::zero:                 return Assembler::notZero;
  4187     case Assembler::lessEqual:            return Assembler::greater;
  4188     case Assembler::less:                 return Assembler::greaterEqual;
  4189     case Assembler::lessEqualUnsigned:    return Assembler::greaterUnsigned;
  4190     case Assembler::lessUnsigned:         return Assembler::greaterEqualUnsigned;
  4191     case Assembler::negative:             return Assembler::positive;
  4192     case Assembler::overflowSet:          return Assembler::overflowClear;
  4193     case Assembler::always:               return Assembler::never;
  4194     case Assembler::notZero:              return Assembler::zero;
  4195     case Assembler::greater:              return Assembler::lessEqual;
  4196     case Assembler::greaterEqual:         return Assembler::less;
  4197     case Assembler::greaterUnsigned:      return Assembler::lessEqualUnsigned;
  4198     case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned;
  4199     case Assembler::positive:             return Assembler::negative;
  4200     case Assembler::overflowClear:        return Assembler::overflowSet;
  4203   ShouldNotReachHere(); return Assembler::overflowClear;
  4206 void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
  4207                               Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) {
  4208   Condition negated_cond = negate_condition(cond);
  4209   Label L;
  4210   brx(negated_cond, false, Assembler::pt, L);
  4211   delayed()->nop();
  4212   inc_counter(counter_ptr, Rtmp1, Rtmp2);
  4213   bind(L);
  4216 void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) {
  4217   AddressLiteral addrlit(counter_addr);
  4218   sethi(addrlit, Rtmp1);                 // Move hi22 bits into temporary register.
  4219   Address addr(Rtmp1, addrlit.low10());  // Build an address with low10 bits.
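          // Classic SPARC absolute addressing: sethi materializes the high 22 bits of
          // the counter address and the low 10 bits ride as the load/store displacement.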
  4220   ld(addr, Rtmp2);
  4221   inc(Rtmp2);
  4222   st(Rtmp2, addr);
  4225 void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) {
  4226   inc_counter((address) counter_addr, Rtmp1, Rtmp2);
  4229 SkipIfEqual::SkipIfEqual(
  4230     MacroAssembler* masm, Register temp, const bool* flag_addr,
  4231     Assembler::Condition condition) {
  4232   _masm = masm;
  4233   AddressLiteral flag(flag_addr);
  4234   _masm->sethi(flag, temp);
  4235   _masm->ldub(temp, flag.low10(), temp);
  4236   _masm->tst(temp);
  4237   _masm->br(condition, false, Assembler::pt, _label);
  4238   _masm->delayed()->nop();
  4241 SkipIfEqual::~SkipIfEqual() {
  4242   _masm->bind(_label);
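        // Typical use (names illustrative only):
        //   { SkipIfEqual skip(this, Gtemp, &SomeBoolFlag, Assembler::zero);
        //     ... code emitted in this scope executes only when SomeBoolFlag is true ...
        //   }  // the destructor binds the skip-over label here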
  4246 // Writes to stack successive pages until offset reached to check for
  4247 // stack overflow + shadow pages.  This clobbers tsp and scratch.
  4248 void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
  4249                                      Register Rscratch) {
  4250   // Use stack pointer in temp stack pointer
  4251   mov(SP, Rtsp);
  4253   // Bang stack for total size given plus stack shadow page size.
  4254   // Bang one page at a time because a large size can overflow yellow and
  4255   // red zones (the bang will fail but stack overflow handling can't tell that
  4256   // it was a stack overflow bang vs a regular segv).
  4257   int offset = os::vm_page_size();
  4258   Register Roffset = Rscratch;
  4260   Label loop;
  4261   bind(loop);
  4262   set((-offset)+STACK_BIAS, Rscratch);
  4263   st(G0, Rtsp, Rscratch);
  4264   set(offset, Roffset);
  4265   sub(Rsize, Roffset, Rsize);
  4266   cmp(Rsize, G0);
  4267   br(Assembler::greater, false, Assembler::pn, loop);
  4268   delayed()->sub(Rtsp, Roffset, Rtsp);
  4270   // Bang down shadow pages too.
  4271   // The -1 because we already subtracted 1 page.
  4272   for (int i = 0; i< StackShadowPages-1; i++) {
  4273     set((-i*offset)+STACK_BIAS, Rscratch);
  4274     st(G0, Rtsp, Rscratch);
  4278 ///////////////////////////////////////////////////////////////////////////////////
  4279 #ifndef SERIALGC
  4281 static address satb_log_enqueue_with_frame = NULL;
  4282 static u_char* satb_log_enqueue_with_frame_end = NULL;
  4284 static address satb_log_enqueue_frameless = NULL;
  4285 static u_char* satb_log_enqueue_frameless_end = NULL;
  4287 static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
  4289 static void generate_satb_log_enqueue(bool with_frame) {
  4290   BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
  4291   CodeBuffer buf(bb);
  4292   MacroAssembler masm(&buf);
  4293   address start = masm.pc();
  4294   Register pre_val;
  4296   Label refill, restart;
  4297   if (with_frame) {
  4298     masm.save_frame(0);
  4299     pre_val = I0;  // Was O0 before the save.
  4300   } else {
  4301     pre_val = O0;
  4303   int satb_q_index_byte_offset =
  4304     in_bytes(JavaThread::satb_mark_queue_offset() +
  4305              PtrQueue::byte_offset_of_index());
  4306   int satb_q_buf_byte_offset =
  4307     in_bytes(JavaThread::satb_mark_queue_offset() +
  4308              PtrQueue::byte_offset_of_buf());
  4309   assert(in_bytes(PtrQueue::byte_width_of_index()) == sizeof(intptr_t) &&
  4310          in_bytes(PtrQueue::byte_width_of_buf()) == sizeof(intptr_t),
  4311          "check sizes in assembly below");
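          // The SATB queue index counts down within its buffer: a non-zero index means
          // there is room, so the code below decrements it by oopSize, stores pre_val at
          // buf + index, and writes the index back; a zero index takes the refill path.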
  4313   masm.bind(restart);
  4314   masm.ld_ptr(G2_thread, satb_q_index_byte_offset, L0);
  4316   masm.br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn, L0, refill);
  4317   // If the branch is taken, no harm in executing this in the delay slot.
  4318   masm.delayed()->ld_ptr(G2_thread, satb_q_buf_byte_offset, L1);
  4319   masm.sub(L0, oopSize, L0);
  4321   masm.st_ptr(pre_val, L1, L0);  // [_buf + index] := pre_val
  4322   if (!with_frame) {
  4323     // Use return-from-leaf
  4324     masm.retl();
  4325     masm.delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset);
  4326   } else {
  4327     // Not delayed.
  4328     masm.st_ptr(L0, G2_thread, satb_q_index_byte_offset);
  4329   }
  4330   if (with_frame) {
  4331     masm.ret();
  4332     masm.delayed()->restore();
  4333   }
  4334   masm.bind(refill);
  4336   address handle_zero =
  4337     CAST_FROM_FN_PTR(address,
  4338                      &SATBMarkQueueSet::handle_zero_index_for_thread);
  4339   // This should be rare enough that we can afford to save all the
  4340   // scratch registers that the calling context might be using.
  4341   masm.mov(G1_scratch, L0);
  4342   masm.mov(G3_scratch, L1);
  4343   masm.mov(G4, L2);
  4344   // We need the value of O0 above (for the write into the buffer), so we
  4345   // save and restore it.
  4346   masm.mov(O0, L3);
  4347   // Since the call will overwrite O7, we save and restore that, as well.
  4348   masm.mov(O7, L4);
  4349   masm.call_VM_leaf(L5, handle_zero, G2_thread);
  4350   masm.mov(L0, G1_scratch);
  4351   masm.mov(L1, G3_scratch);
  4352   masm.mov(L2, G4);
  4353   masm.mov(L3, O0);
  4354   masm.br(Assembler::always, /*annul*/false, Assembler::pt, restart);
  4355   masm.delayed()->mov(L4, O7);
  4357   if (with_frame) {
  4358     satb_log_enqueue_with_frame = start;
  4359     satb_log_enqueue_with_frame_end = masm.pc();
  4360   } else {
  4361     satb_log_enqueue_frameless = start;
  4362     satb_log_enqueue_frameless_end = masm.pc();
  4363   }
  4364 }
  4366 static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) {
  4367   if (with_frame) {
  4368     if (satb_log_enqueue_with_frame == 0) {
  4369       generate_satb_log_enqueue(with_frame);
  4370       assert(satb_log_enqueue_with_frame != 0, "postcondition.");
  4371       if (G1SATBPrintStubs) {
  4372         tty->print_cr("Generated with-frame satb enqueue:");
  4373         Disassembler::decode((u_char*)satb_log_enqueue_with_frame,
  4374                              satb_log_enqueue_with_frame_end,
  4375                              tty);
  4376       }
  4377     }
  4378   } else {
  4379     if (satb_log_enqueue_frameless == 0) {
  4380       generate_satb_log_enqueue(with_frame);
  4381       assert(satb_log_enqueue_frameless != 0, "postcondition.");
  4382       if (G1SATBPrintStubs) {
  4383         tty->print_cr("Generated frameless satb enqueue:");
  4384         Disassembler::decode((u_char*)satb_log_enqueue_frameless,
  4385                              satb_log_enqueue_frameless_end,
  4386                              tty);
  4387       }
  4388     }
  4389   }
  4390 }
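       // The pre-barrier itself: it is filtered out entirely when concurrent marking
       // is not active (the SATB queue's "active" flag is zero) and when the previous
       // value turns out to be null; otherwise the previous value is handed to one of
       // the enqueue stubs generated above.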
  4392 void MacroAssembler::g1_write_barrier_pre(Register obj,
  4393                                           Register index,
  4394                                           int offset,
  4395                                           Register pre_val,
  4396                                           Register tmp,
  4397                                           bool preserve_o_regs) {
  4398   Label filtered;
  4400   if (obj == noreg) {
  4401     // We are not loading the previous value so make
  4402     // sure that we don't trash the value in pre_val
  4403     // with the code below.
  4404     assert_different_registers(pre_val, tmp);
  4405   } else {
  4406     // We will be loading the previous value
  4407     // in this code so...
  4408     assert(offset == 0 || index == noreg, "choose one");
  4409     assert(pre_val == noreg, "check this code");
  4410   }
  4412   // Is marking active?
  4413   if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
  4414     ld(G2,
  4415        in_bytes(JavaThread::satb_mark_queue_offset() +
  4416                 PtrQueue::byte_offset_of_active()),
  4417        tmp);
  4418   } else {
  4419     guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
  4420               "Assumption");
  4421     ldsb(G2,
  4422          in_bytes(JavaThread::satb_mark_queue_offset() +
  4423                   PtrQueue::byte_offset_of_active()),
  4424          tmp);
  4425   }
  4427   // Check on whether to annul.
  4428   br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
  4429   delayed() -> nop();
  4431   // Do we need to load the previous value?
  4432   if (obj != noreg) {
  4433     // Load the previous value...
  4434     if (index == noreg) {
  4435       if (Assembler::is_simm13(offset)) {
  4436         load_heap_oop(obj, offset, tmp);
  4437       } else {
  4438         set(offset, tmp);
  4439         load_heap_oop(obj, tmp, tmp);
  4440       }
  4441     } else {
  4442       load_heap_oop(obj, index, tmp);
  4443     }
  4444     // Previous value has been loaded into tmp
  4445     pre_val = tmp;
  4446   }
  4448   assert(pre_val != noreg, "must have a real register");
  4450   // Is the previous value null?
  4451   // Check on whether to annul.
  4452   br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, pre_val, filtered);
  4453   delayed() -> nop();
  4455   // OK, it's not filtered, so we'll need to call enqueue.  In the normal
  4456   // case, pre_val will be a scratch G-reg, but there are some cases in
  4457   // which it's an O-reg.  In the first case, do a normal call.  In the
  4458   // latter, do a save here and call the frameless version.
  4460   guarantee(pre_val->is_global() || pre_val->is_out(),
  4461             "Or we need to think harder.");
  4463   if (pre_val->is_global() && !preserve_o_regs) {
  4464     generate_satb_log_enqueue_if_necessary(true); // with frame
  4466     call(satb_log_enqueue_with_frame);
  4467     delayed()->mov(pre_val, O0);
  4468   } else {
  4469     generate_satb_log_enqueue_if_necessary(false); // frameless
  4471     save_frame(0);
  4472     call(satb_log_enqueue_frameless);
  4473     delayed()->mov(pre_val->after_save(), O0);
  4474     restore();
  4475   }
  4477   bind(filtered);
  4478 }
  4480 static jint num_ct_writes = 0;
  4481 static jint num_ct_writes_filtered_in_hr = 0;
  4482 static jint num_ct_writes_filtered_null = 0;
  4483 static G1CollectedHeap* g1 = NULL;
  4485 static Thread* count_ct_writes(void* filter_val, void* new_val) {
  4486   Atomic::inc(&num_ct_writes);
  4487   if (filter_val == NULL) {
  4488     Atomic::inc(&num_ct_writes_filtered_in_hr);
  4489   } else if (new_val == NULL) {
  4490     Atomic::inc(&num_ct_writes_filtered_null);
  4491   } else {
  4492     if (g1 == NULL) {
  4493       g1 = G1CollectedHeap::heap();
  4494     }
  4495   }
  4496   if ((num_ct_writes % 1000000) == 0) {
  4497     jint num_ct_writes_filtered =
  4498       num_ct_writes_filtered_in_hr +
  4499       num_ct_writes_filtered_null;
  4501     tty->print_cr("%d potential CT writes: %5.2f%% filtered\n"
  4502                   "   (%5.2f%% intra-HR, %5.2f%% null).",
  4503                   num_ct_writes,
  4504                   100.0*(float)num_ct_writes_filtered/(float)num_ct_writes,
  4505                   100.0*(float)num_ct_writes_filtered_in_hr/
  4506                   (float)num_ct_writes,
  4507                   100.0*(float)num_ct_writes_filtered_null/
  4508                   (float)num_ct_writes);
  4509   }
  4510   return Thread::current();
  4511 }
  4513 static address dirty_card_log_enqueue = 0;
  4514 static u_char* dirty_card_log_enqueue_end = 0;
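       // Out-of-line slow path of G1's post-barrier: compute the card for the store
       // address, dirty it if it is not already dirty, and push the card address onto
       // the current thread's dirty card queue so that concurrent refinement can
       // update the remembered sets.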
  4516 // This gets to assume that O0 contains the address being written to (the store address).
  4517 static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
  4518   BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
  4519   CodeBuffer buf(bb);
  4520   MacroAssembler masm(&buf);
  4521   address start = masm.pc();
  4523   Label not_already_dirty, restart, refill;
  4525 #ifdef _LP64
  4526   masm.srlx(O0, CardTableModRefBS::card_shift, O0);
  4527 #else
  4528   masm.srl(O0, CardTableModRefBS::card_shift, O0);
  4529 #endif
  4530   AddressLiteral addrlit(byte_map_base);
  4531   masm.set(addrlit, O1); // O1 := <card table base>
  4532   masm.ldub(O0, O1, O2); // O2 := [O0 + O1]
  4534   masm.br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt,
  4535                       O2, not_already_dirty);
  4536   // Get O1 + O2 into a reg by itself -- useful in the take-the-branch
  4537   // case, harmless if not.
  4538   masm.delayed()->add(O0, O1, O3);
  4540   // We didn't take the branch, so we're already dirty: return.
  4541   // Use return-from-leaf
  4542   masm.retl();
  4543   masm.delayed()->nop();
  4545   // Not dirty.
  4546   masm.bind(not_already_dirty);
  4547   // First, dirty it.
  4548   masm.stb(G0, O3, G0);  // [cardPtr] := 0  (i.e., dirty).
  4549   int dirty_card_q_index_byte_offset =
  4550     in_bytes(JavaThread::dirty_card_queue_offset() +
  4551              PtrQueue::byte_offset_of_index());
  4552   int dirty_card_q_buf_byte_offset =
  4553     in_bytes(JavaThread::dirty_card_queue_offset() +
  4554              PtrQueue::byte_offset_of_buf());
  4555   masm.bind(restart);
  4556   masm.ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);
  4558   masm.br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn,
  4559                       L0, refill);
  4560   // If the branch is taken, no harm in executing this in the delay slot.
  4561   masm.delayed()->ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1);
  4562   masm.sub(L0, oopSize, L0);
  4564   masm.st_ptr(O3, L1, L0);  // [_buf + index] := O3 (the card address)
  4565   // Use return-from-leaf
  4566   masm.retl();
  4567   masm.delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset);
  4569   masm.bind(refill);
  4570   address handle_zero =
  4571     CAST_FROM_FN_PTR(address,
  4572                      &DirtyCardQueueSet::handle_zero_index_for_thread);
  4573   // This should be rare enough that we can afford to save all the
  4574   // scratch registers that the calling context might be using.
  4575   masm.mov(G1_scratch, L3);
  4576   masm.mov(G3_scratch, L5);
  4577   // We need the value of O3 above (for the write into the buffer), so we
  4578   // save and restore it.
  4579   masm.mov(O3, L6);
  4580   // Since the call will overwrite O7, we save and restore that, as well.
  4581   masm.mov(O7, L4);
  4583   masm.call_VM_leaf(L7_thread_cache, handle_zero, G2_thread);
  4584   masm.mov(L3, G1_scratch);
  4585   masm.mov(L5, G3_scratch);
  4586   masm.mov(L6, O3);
  4587   masm.br(Assembler::always, /*annul*/false, Assembler::pt, restart);
  4588   masm.delayed()->mov(L4, O7);
  4590   dirty_card_log_enqueue = start;
  4591   dirty_card_log_enqueue_end = masm.pc();
  4592   // XXX Should have a guarantee here about not going off the end!
  4593   // Does it already do so?  Do an experiment...
  4594 }
  4596 static inline void
  4597 generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) {
  4598   if (dirty_card_log_enqueue == 0) {
  4599     generate_dirty_card_log_enqueue(byte_map_base);
  4600     assert(dirty_card_log_enqueue != 0, "postcondition.");
  4601     if (G1SATBPrintStubs) {
  4602       tty->print_cr("Generated dirty_card enqueue:");
  4603       Disassembler::decode((u_char*)dirty_card_log_enqueue,
  4604                            dirty_card_log_enqueue_end,
  4605                            tty);
  4606     }
  4607   }
  4608 }
  4611 void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
  4613   Label filtered;
  4614   MacroAssembler* post_filter_masm = this;
  4616   if (new_val == G0) return;
  4618   G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
  4619   assert(bs->kind() == BarrierSet::G1SATBCT ||
  4620          bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
  4621   if (G1RSBarrierRegionFilter) {
  4622     xor3(store_addr, new_val, tmp);
  4623 #ifdef _LP64
  4624     srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
  4625 #else
  4626     srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
  4627 #endif
  4629     if (G1PrintCTFilterStats) {
  4630       guarantee(tmp->is_global(), "Or stats won't work...");
  4631       // This is a sleazy hack: I'm temporarily hijacking G2, which I
  4632       // promise to restore.
  4633       mov(new_val, G2);
  4634       save_frame(0);
  4635       mov(tmp, O0);
  4636       mov(G2, O1);
  4637       // Save G-regs that target may use.
  4638       mov(G1, L1);
  4639       mov(G2, L2);
  4640       mov(G3, L3);
  4641       mov(G4, L4);
  4642       mov(G5, L5);
  4643       call(CAST_FROM_FN_PTR(address, &count_ct_writes));
  4644       delayed()->nop();
  4645       mov(O0, G2);
  4646       // Restore G-regs that target may have used.
  4647       mov(L1, G1);
  4648       mov(L3, G3);
  4649       mov(L4, G4);
  4650       mov(L5, G5);
  4651       restore(G0, G0, G0);
  4652     }
  4653     // XXX Should I predict this taken or not?  Does it matter?
  4654     br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
  4655     delayed()->nop();
  4656   }
  4658   // If the "store_addr" register is an "in" or "local" register, move it to
  4659   // a scratch reg so we can pass it as an argument.
  4660   bool use_scr = !(store_addr->is_global() || store_addr->is_out());
  4661   // Pick a scratch register different from "tmp".
  4662   Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch);
  4663   // Make sure we use up the delay slot!
  4664   if (use_scr) {
  4665     post_filter_masm->mov(store_addr, scr);
  4666   } else {
  4667     post_filter_masm->nop();
  4668   }
  4669   generate_dirty_card_log_enqueue_if_necessary(bs->byte_map_base);
  4670   save_frame(0);
  4671   call(dirty_card_log_enqueue);
  4672   if (use_scr) {
  4673     delayed()->mov(scr, O0);
  4674   } else {
  4675     delayed()->mov(store_addr->after_save(), O0);
  4676   }
  4677   restore();
  4679   bind(filtered);
  4680 }
  4683 #endif  // SERIALGC
  4684 ///////////////////////////////////////////////////////////////////////////////////
  4686 void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
  4687   // If we're writing constant NULL, we can skip the write barrier.
  4688   if (new_val == G0) return;
  4689   CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
  4690   assert(bs->kind() == BarrierSet::CardTableModRef ||
  4691          bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
  4692   card_table_write(bs->byte_map_base, tmp, store_addr);
  4693 }
  4695 void MacroAssembler::load_klass(Register src_oop, Register klass) {
  4696   // The number of bytes in this code is used by
  4697   // MachCallDynamicJavaNode::ret_addr_offset()
  4698   // if this changes, change that.
  4699   if (UseCompressedOops) {
  4700     lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
  4701     decode_heap_oop_not_null(klass);
  4702   } else {
  4703     ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass);
  4704   }
  4705 }
  4707 void MacroAssembler::store_klass(Register klass, Register dst_oop) {
  4708   if (UseCompressedOops) {
  4709     assert(dst_oop != klass, "not enough registers");
  4710     encode_heap_oop_not_null(klass);
  4711     st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
  4712   } else {
  4713     st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes());
  4714   }
  4715 }
  4717 void MacroAssembler::store_klass_gap(Register s, Register d) {
  4718   if (UseCompressedOops) {
  4719     assert(s != d, "not enough registers");
  4720     st(s, d, oopDesc::klass_gap_offset_in_bytes());
  4721   }
  4722 }
  4724 void MacroAssembler::load_heap_oop(const Address& s, Register d) {
  4725   if (UseCompressedOops) {
  4726     lduw(s, d);
  4727     decode_heap_oop(d);
  4728   } else {
  4729     ld_ptr(s, d);
  4730   }
  4731 }
  4733 void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
  4734   if (UseCompressedOops) {
  4735     lduw(s1, s2, d);
  4736     decode_heap_oop(d, d);
  4737   } else {
  4738     ld_ptr(s1, s2, d);
  4739   }
  4740 }
  4742 void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
  4743   if (UseCompressedOops) {
  4744     lduw(s1, simm13a, d);
  4745     decode_heap_oop(d, d);
  4746   } else {
  4747     ld_ptr(s1, simm13a, d);
  4748   }
  4749 }
  4751 void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) {
  4752   if (s2.is_constant())  load_heap_oop(s1, s2.as_constant(), d);
  4753   else                   load_heap_oop(s1, s2.as_register(), d);
  4754 }
  4756 void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
  4757   if (UseCompressedOops) {
  4758     assert(s1 != d && s2 != d, "not enough registers");
  4759     encode_heap_oop(d);
  4760     st(d, s1, s2);
  4761   } else {
  4762     st_ptr(d, s1, s2);
  4763   }
  4764 }
  4766 void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
  4767   if (UseCompressedOops) {
  4768     assert(s1 != d, "not enough registers");
  4769     encode_heap_oop(d);
  4770     st(d, s1, simm13a);
  4771   } else {
  4772     st_ptr(d, s1, simm13a);
  4773   }
  4774 }
  4776 void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
  4777   if (UseCompressedOops) {
  4778     assert(a.base() != d, "not enough registers");
  4779     encode_heap_oop(d);
  4780     st(d, a, offset);
  4781   } else {
  4782     st_ptr(d, a, offset);
  4783   }
  4784 }
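       // Compressed oops: a narrow oop is computed as
       // (oop - narrow_oop_base) >> LogMinObjAlignmentInBytes, with null mapping to 0.
       // When the heap is mapped so that narrow_oop_base is NULL, the subtraction is
       // skipped and only the shift is performed; decode_heap_oop reverses this.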
  4787 void MacroAssembler::encode_heap_oop(Register src, Register dst) {
  4788   assert (UseCompressedOops, "must be compressed");
  4789   assert (Universe::heap() != NULL, "java heap should be initialized");
  4790   assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  4791   verify_oop(src);
  4792   if (Universe::narrow_oop_base() == NULL) {
  4793     srlx(src, LogMinObjAlignmentInBytes, dst);
  4794     return;
  4795   }
  4796   Label done;
  4797   if (src == dst) {
  4798     // optimize for frequent case src == dst
  4799     bpr(rc_nz, true, Assembler::pt, src, done);
  4800     delayed()->sub(src, G6_heapbase, dst); // annulled if not taken
  4801     bind(done);
  4802     srlx(src, LogMinObjAlignmentInBytes, dst);
  4803   } else {
  4804     bpr(rc_z, false, Assembler::pn, src, done);
  4805     delayed() -> mov(G0, dst);
  4806     // could be moved before the branch and annul the delay slot,
  4807     // but that may add some unneeded work decoding null
  4808     sub(src, G6_heapbase, dst);
  4809     srlx(dst, LogMinObjAlignmentInBytes, dst);
  4810     bind(done);
  4811   }
  4812 }
  4815 void MacroAssembler::encode_heap_oop_not_null(Register r) {
  4816   assert (UseCompressedOops, "must be compressed");
  4817   assert (Universe::heap() != NULL, "java heap should be initialized");
  4818   assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  4819   verify_oop(r);
  4820   if (Universe::narrow_oop_base() != NULL)
  4821     sub(r, G6_heapbase, r);
  4822   srlx(r, LogMinObjAlignmentInBytes, r);
  4823 }
  4825 void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
  4826   assert (UseCompressedOops, "must be compressed");
  4827   assert (Universe::heap() != NULL, "java heap should be initialized");
  4828   assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  4829   verify_oop(src);
  4830   if (Universe::narrow_oop_base() == NULL) {
  4831     srlx(src, LogMinObjAlignmentInBytes, dst);
  4832   } else {
  4833     sub(src, G6_heapbase, dst);
  4834     srlx(dst, LogMinObjAlignmentInBytes, dst);
  4835   }
  4836 }
  4838 // Same algorithm as oops.inline.hpp decode_heap_oop.
  4839 void  MacroAssembler::decode_heap_oop(Register src, Register dst) {
  4840   assert (UseCompressedOops, "must be compressed");
  4841   assert (Universe::heap() != NULL, "java heap should be initialized");
  4842   assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  4843   sllx(src, LogMinObjAlignmentInBytes, dst);
  4844   if (Universe::narrow_oop_base() != NULL) {
  4845     Label done;
  4846     bpr(rc_nz, true, Assembler::pt, dst, done);
  4847     delayed()->add(dst, G6_heapbase, dst); // annulled if not taken
  4848     bind(done);
  4849   }
  4850   verify_oop(dst);
  4851 }
  4853 void  MacroAssembler::decode_heap_oop_not_null(Register r) {
  4854   // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  4855   // pd_code_size_limit.
  4856   // Also do not verify_oop as this is called by verify_oop.
  4857   assert (UseCompressedOops, "must be compressed");
  4858   assert (Universe::heap() != NULL, "java heap should be initialized");
  4859   assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  4860   sllx(r, LogMinObjAlignmentInBytes, r);
  4861   if (Universe::narrow_oop_base() != NULL)
  4862     add(r, G6_heapbase, r);
  4863 }
  4865 void  MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
  4866   // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  4867   // pd_code_size_limit.
  4868   // Also do not verify_oop as this is called by verify_oop.
  4869   assert (UseCompressedOops, "must be compressed");
  4870   assert (Universe::heap() != NULL, "java heap should be initialized");
  4871   assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  4872   sllx(src, LogMinObjAlignmentInBytes, dst);
  4873   if (Universe::narrow_oop_base() != NULL)
  4874     add(dst, G6_heapbase, dst);
  4875 }
  4877 void MacroAssembler::reinit_heapbase() {
  4878   if (UseCompressedOops) {
  4879     // call indirectly to solve generation ordering problem
  4880     AddressLiteral base(Universe::narrow_oop_base_addr());
  4881     load_ptr_contents(base, G6_heapbase);
  4882   }
  4883 }
  4885 // Compare char[] arrays aligned to 4 bytes.
  4886 void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
  4887                                         Register limit, Register result,
  4888                                         Register chr1, Register chr2, Label& Ldone) {
  4889   Label Lvector, Lloop;
  4890   assert(chr1 == result, "should be the same");
  4892   // Note: limit contains number of bytes (2*char_elements) != 0.
  4893   andcc(limit, 0x2, chr1); // trailing character ?
  4894   br(Assembler::zero, false, Assembler::pt, Lvector);
  4895   delayed()->nop();
  4897   // compare the trailing char
  4898   sub(limit, sizeof(jchar), limit);
  4899   lduh(ary1, limit, chr1);
  4900   lduh(ary2, limit, chr2);
  4901   cmp(chr1, chr2);
  4902   br(Assembler::notEqual, true, Assembler::pt, Ldone);
  4903   delayed()->mov(G0, result);     // not equal
  4905   // only one char ?
  4906   br_on_reg_cond(rc_z, true, Assembler::pn, limit, Ldone);
  4907   delayed()->add(G0, 1, result); // zero-length arrays are equal
  4909   // word-by-word compare, don't need alignment check
  4910   bind(Lvector);
  4911   // Shift ary1 and ary2 to the end of the arrays, negate limit
  4912   add(ary1, limit, ary1);
  4913   add(ary2, limit, ary2);
  4914   neg(limit, limit);
  4916   lduw(ary1, limit, chr1);
  4917   bind(Lloop);
  4918   lduw(ary2, limit, chr2);
  4919   cmp(chr1, chr2);
  4920   br(Assembler::notEqual, true, Assembler::pt, Ldone);
  4921   delayed()->mov(G0, result);     // not equal
  4922   inccc(limit, 2*sizeof(jchar));
  4923   // annul LDUW if branch is not taken to prevent access past end of array
  4924   br(Assembler::notZero, true, Assembler::pt, Lloop);
  4925   delayed()->lduw(ary1, limit, chr1); // hoisted
  4927   // Caller should set it:
  4928   // add(G0, 1, result); // equals
  4929 }
