src/cpu/sparc/vm/templateTable_sparc.cpp

author:     coleenp
date:       Wed, 28 May 2008 21:06:24 -0700
changeset:  602:feeb96a45707

6696264: assert("narrow oop can never be zero") for GCBasher & ParNewGC
Summary: decouple set_klass() from zeroing the gap when compressed.
Reviewed-by: kvn, ysr, jrose

/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
#include "incls/_precompiled.incl"
#include "incls/_templateTable_sparc.cpp.incl"

#ifndef CC_INTERP
#define __ _masm->

//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // (none)
}


//----------------------------------------------------------------------------------------------------
// Condition conversion

Assembler::Condition ccNot(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
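
// ccNot supplies the negated condition to InterpreterMacroAssembler::if_cmp
// below: the generated code branches past the taken-branch sequence when the
// Java condition fails, e.g. (sketch) if_icmpeq tests with Assembler::notEqual.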

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines


Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address( Lbcp, 0, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register Rbyte_code,
                                   Register Rscratch,
                                   bool load_bc_into_scratch /*=true*/) {
  // With sharing on, may need to test methodOop flag.
  if (!RewriteBytecodes) return;
  if (load_bc_into_scratch) __ set(bc, Rbyte_code);
  Label patch_done;
  if (JvmtiExport::can_post_breakpoint()) {
    Label fast_patch;
    __ ldub(at_bcp(0), Rscratch);
    __ cmp(Rscratch, Bytecodes::_breakpoint);
    __ br(Assembler::notEqual, false, Assembler::pt, fast_patch);
    __ delayed()->nop();  // don't bother to hoist the stb here
    // perform the quickening, slowly, in the bowels of the breakpoint table
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, Rbyte_code);
    __ ba(false, patch_done);
    __ delayed()->nop();
    __ bind(fast_patch);
  }
#ifdef ASSERT
  Bytecodes::Code orig_bytecode =  Bytecodes::java_code(bc);
  Label okay;
  __ ldub(at_bcp(0), Rscratch);
  __ cmp(Rscratch, orig_bytecode);
  __ br(Assembler::equal, false, Assembler::pt, okay);
  __ delayed() ->cmp(Rscratch, Rbyte_code);
  __ br(Assembler::equal, false, Assembler::pt, okay);
  __ delayed()->nop();
  __ stop("Rewriting wrong bytecode location");
  __ bind(okay);
#endif
  __ stb(Rbyte_code, at_bcp(0));
  __ bind(patch_done);
}
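
// Quickening example (sketch): iload() below uses patch_bytecode to overwrite
// the bytecode at Lbcp in place, so a stream such as
//   iload 1        (checked slow form)
// becomes
//   fast_iload 1   (no rewrite checks on later executions)
// after its first execution.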

//----------------------------------------------------------------------------------------------------
// Individual instructions


void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clr(Otos_i);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ set(value, Otos_i);
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= 0, "check this code");
#ifdef _LP64
  __ set(value, Otos_l);
#else
  __ set(value, Otos_l2);
  __ clr( Otos_l1);
#endif
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0, one = 1.0, two = 2.0;
  float* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
   case 2:  p = &two;   break;
  }
  Address a(G3_scratch, (address)p);
  __ sethi(a);
  __ ldf(FloatRegisterImpl::S, a, Ftos_f);
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0, one = 1.0;
  double* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
  }
  Address a(G3_scratch, (address)p);
  __ sethi(a);
  __ ldf(FloatRegisterImpl::D, a, Ftos_d);
}


// %%%%% Should factor most snippet templates across platforms

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldsb( at_bcp(1), Otos_i );
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notInt, notString, notClass, exit;

  if (wide) {
    __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ ldub(Lbcp, 1, O1);
  }
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);
  __ cmp(O2, JVM_CONSTANT_UnresolvedString);    // unresolved string? If so, must resolve
  __ brx(Assembler::equal, true, Assembler::pt, call_ldc);
  __ delayed()->nop();

  __ cmp(O2, JVM_CONSTANT_UnresolvedClass);     // unresolved class? If so, must resolve
  __ brx(Assembler::equal, true, Assembler::pt, call_ldc);
  __ delayed()->nop();

  __ cmp(O2, JVM_CONSTANT_UnresolvedClassInError);     // unresolved class in error state
  __ brx(Assembler::equal, true, Assembler::pn, call_ldc);
  __ delayed()->nop();

  __ cmp(O2, JVM_CONSTANT_Class);      // need to call vm to get java mirror of the class
  __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
  __ delayed()->add(O0, base_offset, O0);

  __ bind(call_ldc);
  __ set(wide, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
  __ push(atos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(notClass);
 // __ add(O0, base_offset, O0);
  __ sll(O1, LogBytesPerWord, O1);
  __ cmp(O2, JVM_CONSTANT_Integer);
  __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
  __ delayed()->cmp(O2, JVM_CONSTANT_String);
  __ ld(O0, O1, Otos_i);
  __ push(itos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(notInt);
 // __ cmp(O2, JVM_CONSTANT_String);
  __ brx(Assembler::notEqual, true, Assembler::pt, notString);
  __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ ld_ptr(O0, O1, Otos_i);
  __ verify_oop(Otos_i);
  __ push(atos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(notString);
 // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ push(ftos);

  __ bind(exit);
}
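
// Resolved-constant dispatch example (sketch): for "ldc #4" with constant
// pool slot 4 tagged JVM_CONSTANT_Integer, the code above loads the 32-bit
// value at cpool + base_offset + 4*wordSize and pushes it as itos; any
// Unresolved* tag takes the call_ldc slow path through InterpreterRuntime.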

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label retry, resolved, Long, exit;

  __ bind(retry);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  __ sll(O1, LogBytesPerWord, O1);
  __ add(O0, O1, G3_scratch);

  __ cmp(O2, JVM_CONSTANT_Double);
  __ brx(Assembler::notEqual, false, Assembler::pt, Long);
  __ delayed()->nop();
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also constantPoolOopDesc::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool.  SG, 11/7/97
#ifdef _LP64
  __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
#else
  FloatRegister f = Ftos_d;
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
         f->successor());
#endif
  __ push(dtos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(Long);
#ifdef _LP64
  __ ldx(G3_scratch, base_offset, Otos_l);
#else
  __ ld(G3_scratch, base_offset, Otos_l);
  __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
#endif
  __ push(ltos);

  __ bind(exit);
}


void TemplateTable::locals_index(Register reg, int offset) {
  __ ldub( at_bcp(offset), reg );
}


void TemplateTable::locals_index_wide(Register reg) {
  // offset is 2, not 1, because Lbcp points to wide prefix code
  __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::iload() {
  transition(vtos, itos);
  // Rewrite iload,iload  pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);

    // if _iload, wait to rewrite to iload2.  We only want to rewrite the
    // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmp(G3_scratch, (int)Bytecodes::_iload);
    __ br(Assembler::equal, false, Assembler::pn, done);
    __ delayed()->nop();

    __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);

    __ cmp(G3_scratch, (int)Bytecodes::_caload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);

    __ set(Bytecodes::_fast_iload, G4_scratch);  // don't check again
    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}
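
// Pair-rewriting example (sketch) for the stream "iload 1; iload 2; iadd":
//   1st pass: "iload 1" sees _iload next and waits; "iload 2" sees _iadd
//             and is patched to fast_iload.
//   2nd pass: "iload 1" now sees _fast_iload next and is patched to
//             fast_iload2, which loads both locals in a single template.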

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ push_i();
  locals_index(G3_scratch, 3);  // get next bytecode's local index.
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i);
}


void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i );
  __ verify_oop(Otos_i);
}


void TemplateTable::iaload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
}


void TemplateTable::laload() {
  transition(itos, ltos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
}


void TemplateTable::aaload() {
  transition(itos, atos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
  __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
  __ verify_oop(Otos_i);
}


void TemplateTable::baload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, 0, G3_scratch, O3);
  __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}

void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // Otos_i: index
  // tos: array
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
}


void TemplateTable::iload(int n) {
  transition(vtos, itos);
  debug_only(__ verify_local_tag(frame::TagValue, Llocals, Otos_i, n));
  __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  assert(n+1 < Argument::n_register_parameters, "would need more code");
  debug_only(__ verify_local_tag(frame::TagCategory2, Llocals, Otos_l, n));
  __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  assert(n < Argument::n_register_parameters, "would need more code");
  debug_only(__ verify_local_tag(frame::TagValue, Llocals, G3_scratch, n));
  __ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n),     Ftos_f );
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  FloatRegister dst = Ftos_d;
  debug_only(__ verify_local_tag(frame::TagCategory2, Llocals, G3_scratch, n));
  __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  debug_only(__ verify_local_tag(frame::TagReference, Llocals, Otos_i, n));
  __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}


void TemplateTable::aload_0() {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield (itos)
  // _aload_0, _fast_agetfield (atos)
  // _aload_0, _fast_fgetfield (ftos)
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks the next bytecode and then rewrites the current
  // bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
  //
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmp(G3_scratch, (int)Bytecodes::_getfield);
    __ br(Assembler::equal, false, Assembler::pn, done);
    __ delayed()->nop();

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ set(Bytecodes::_fast_aload_0, G4_scratch);

    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
    __ bind(done);
  } else {
    aload(0);
  }
}
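
// Rewriting example (sketch): once "getfield #n" (an int field) has been
// quickened to fast_igetfield, the preceding aload_0 is patched to
// _fast_iaccess_0, folding the receiver load and the field fetch into one
// template dispatch.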

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::astore() {
  transition(vtos, vtos);
  // astore tos can also be a returnAddress, so load and store the tag too
  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
  __ inc(Lesp, Interpreter::stackElementSize());
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index(G3_scratch);
  __ store_local_ptr( G3_scratch, Otos_i, Otos_l2 );
}


void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  // astore tos can also be a returnAddress, so load and store the tag too
  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
  __ inc(Lesp, Interpreter::stackElementSize());
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index_wide(G3_scratch);
  __ store_local_ptr( G3_scratch, Otos_i, Otos_l2 );
}


void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(O2); // index
  // Otos_l: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(O2); // index
  // Ftos_f: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(O2); // index
  // Ftos_d: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
}


void TemplateTable::aastore() {
  Label store_ok, is_null, done;
  transition(vtos, vtos);
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
  __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2);         // get index
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3);     // get array
  // Otos_i: val
  // O2: index
  // O3: array
  __ verify_oop(Otos_i);
  __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);

  // do array store check - check for NULL value first
  __ br_null( Otos_i, false, Assembler::pn, is_null );
  __ delayed()->nop();

  __ load_klass(O3, O4); // get array klass
  __ load_klass(Otos_i, O5); // get value klass

  // do fast instanceof cache test

  __ ld_ptr(O4,     sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes(),  O4);

  assert(Otos_i == O0, "just checking");

  // Otos_i:    value
  // O1:        addr - offset
  // O2:        index
  // O3:        array
  // O4:        array element klass
  // O5:        value klass

  // Generate a fast subtype check.  Branch to store_ok if no
  // failure.  Throw if failure.
  __ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok );

  // Not a subtype; so must throw exception
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch );

  // Store is OK.
  __ bind(store_ok);
  __ store_heap_oop(Otos_i, O1, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
  // Quote from rememberedSet.hpp: For objArrays, the precise card
  // corresponding to the pointer store is dirtied so we don't need to
  // scavenge the entire array.
  Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
  __ add(element, O1);              // address the element precisely
  __ store_check(G3_scratch, O1);
  __ ba(false,done);
  __ delayed()->inc(Lesp, 3* Interpreter::stackElementSize()); // adj sp (pops array, index and value)

  __ bind(is_null);
  __ store_heap_oop(Otos_i, element);
  __ profile_null_seen(G3_scratch);
  __ inc(Lesp, 3* Interpreter::stackElementSize());     // adj sp (pops array, index and value)
  __ bind(done);
}
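
// The subtype check above implements the covariant-array store rule, e.g.
// (sketch in Java):
//   Object[] a = new String[1];
//   a[0] = Integer.valueOf(0);   // ArrayStoreException raised here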

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, 0, G3_scratch, O2);
  __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}


void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
  __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
}


void TemplateTable::sastore() {
  // %%%%% Factor across platform
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ tag_local(frame::TagValue, Llocals, Otos_i, n);
  __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  assert(n+1 < Argument::n_register_parameters, "only handle register cases");
  __ tag_local(frame::TagCategory2, Llocals, Otos_l, n);
  __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  assert(n < Argument::n_register_parameters, "only handle register cases");
  __ tag_local(frame::TagValue, Llocals, Otos_l, n);
  __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  FloatRegister src = Ftos_d;
  __ tag_local(frame::TagCategory2, Llocals, Otos_l, n);
  __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  // astore tos can also be a returnAddress, so load and store the tag too
  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
  __ inc(Lesp, Interpreter::stackElementSize());
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  __ store_local_ptr( n, Otos_i, Otos_l2 );
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ inc(Lesp, Interpreter::stackElementSize());
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ inc(Lesp, 2 * Interpreter::stackElementSize());
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  // load a and tag
  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
  __ push_ptr(Otos_i, Otos_l2);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr_and_tag(1, G3_scratch, G4_scratch);   // get a
  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);         // get b
  __ store_ptr_and_tag(1, Otos_l1, Otos_l2);        // put b
  __ store_ptr_and_tag(0, G3_scratch, G4_scratch);  // put a - like swap
  __ push_ptr(Otos_l1, Otos_l2);                    // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  // get c and push on stack, reuse registers
  __ load_ptr_and_tag(0, G3_scratch, G4_scratch);     // get c
  __ push_ptr(G3_scratch, G4_scratch);               // push c with tag
  // stack: ..., a, b, c, c  (c in reg)  (Lesp - 4)
  // (stack offsets n+1 now)
  __ load_ptr_and_tag(3, Otos_l1, Otos_l2);          // get a
  __ store_ptr_and_tag(3, G3_scratch, G4_scratch);   // put c at 3
  // stack: ..., c, b, c, c  (a in reg)
  __ load_ptr_and_tag(2, G3_scratch, G4_scratch);    // get b
  __ store_ptr_and_tag(2, Otos_l1, Otos_l2);         // put a at 2
  // stack: ..., c, a, c, c  (b in reg)
  __ store_ptr_and_tag(1, G3_scratch, G4_scratch);   // put b at 1
  // stack: ..., c, a, b, c
}


void TemplateTable::dup2() {
  transition(vtos, vtos);
  __ load_ptr_and_tag(1, G3_scratch, G4_scratch);     // get a
  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);           // get b
  __ push_ptr(G3_scratch, G4_scratch);                // push a
  __ push_ptr(Otos_l1, Otos_l2);                      // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr_and_tag(1, Lscratch, G1_scratch);       // get b
  __ load_ptr_and_tag(2, Otos_l1, Otos_l2);           // get a
  __ store_ptr_and_tag(2, Lscratch, G1_scratch);      // put b at a
  // stack: ..., b, b, c
  __ load_ptr_and_tag(0, G3_scratch, G4_scratch);     // get c
  __ store_ptr_and_tag(1, G3_scratch, G4_scratch);    // put c at b
  // stack: ..., b, c, c
  __ store_ptr_and_tag(0, Otos_l1, Otos_l2);          // put a at c
  // stack: ..., b, c, a
  __ push_ptr(Lscratch, G1_scratch);                  // push b
  __ push_ptr(G3_scratch, G4_scratch);                // push c
  // stack: ..., b, c, a, b, c
}


// The spec says that these types can be a mixture of category 1 (1 word)
// types and/or category 2 types (long and doubles)
void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr_and_tag(1, Lscratch, G1_scratch);       // get c
  __ load_ptr_and_tag(3, Otos_l1, Otos_l2);           // get a
  __ store_ptr_and_tag(3, Lscratch, G1_scratch);      // put c at 3
  __ store_ptr_and_tag(1, Otos_l1, Otos_l2);          // put a at 1
  // stack: ..., c, b, a, d
  __ load_ptr_and_tag(2, G3_scratch, G4_scratch);     // get b
  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);           // get d
  __ store_ptr_and_tag(0, G3_scratch, G4_scratch);    // put b at 0
  __ store_ptr_and_tag(2, Otos_l1, Otos_l2);          // put d at 2
  // stack: ..., c, d, a, b
  __ push_ptr(Lscratch, G1_scratch);                  // push c
  __ push_ptr(Otos_l1, Otos_l2);                      // push d
  // stack: ..., c, d, a, b, c, d
}
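
// Because the interpreter stack is slot-based, the single shuffle above
// covers all four dup2_x2 forms in the JVM spec; e.g. (sketch) a long on
// top of two ints is simply two slots moved below two slots.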

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr_and_tag(1, G3_scratch, G4_scratch);     // get a
  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);           // get b
  __ store_ptr_and_tag(0, G3_scratch, G4_scratch);    // put b
  __ store_ptr_and_tag(1, Otos_l1, Otos_l2);          // put a
  // stack: ..., b, a
}


void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  __ pop_i(O1);
  switch (op) {
   case  add:  __  add(O1, Otos_i, Otos_i);  break;
   case  sub:  __  sub(O1, Otos_i, Otos_i);  break;
     // %%%%% Mul may not exist: better to call .mul?
   case  mul:  __ smul(O1, Otos_i, Otos_i);  break;
   case _and:  __  and3(O1, Otos_i, Otos_i);  break;
   case  _or:  __   or3(O1, Otos_i, Otos_i);  break;
   case _xor:  __  xor3(O1, Otos_i, Otos_i);  break;
   case  shl:  __  sll(O1, Otos_i, Otos_i);  break;
   case  shr:  __  sra(O1, Otos_i, Otos_i);  break;
   case ushr:  __  srl(O1, Otos_i, Otos_i);  break;
   default: ShouldNotReachHere();
  }
}


void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(O2);
  switch (op) {
#ifdef _LP64
   case  add:  __ add(O2, Otos_l, Otos_l);  break;
   case  sub:  __ sub(O2, Otos_l, Otos_l);  break;
   case _and:  __ and3( O2, Otos_l, Otos_l);  break;
   case  _or:  __  or3( O2, Otos_l, Otos_l);  break;
   case _xor:  __ xor3( O2, Otos_l, Otos_l);  break;
#else
   case  add:  __ addcc(O3, Otos_l2, Otos_l2);  __ addc(O2, Otos_l1, Otos_l1);  break;
   case  sub:  __ subcc(O3, Otos_l2, Otos_l2);  __ subc(O2, Otos_l1, Otos_l1);  break;
   case _and:  __ and3(  O3, Otos_l2, Otos_l2);  __ and3( O2, Otos_l1, Otos_l1);  break;
   case  _or:  __  or3(  O3, Otos_l2, Otos_l2);  __  or3( O2, Otos_l1, Otos_l1);  break;
   case _xor:  __ xor3(  O3, Otos_l2, Otos_l2);  __ xor3( O2, Otos_l1, Otos_l1);  break;
#endif
   default: ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  // %%%%% Later: ForSPARC/V7 call .sdiv library routine,
  // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.

  transition(itos, itos);
  __ pop_i(O1); // get 1st op

  // Y contains upper 32 bits of result, set it to 0 or all ones
  __ wry(G0);
  __ mov(~0, G3_scratch);

  __ tst(O1);
     Label neg;
  __ br(Assembler::negative, true, Assembler::pn, neg);
  __ delayed()->wry(G3_scratch);
  __ bind(neg);

     Label ok;
  __ tst(Otos_i);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );
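
  // Special case: min_int / -1 overflows (the true quotient 2^31 has no
  // 32-bit twos-complement encoding); the JVM spec defines the result as
  // min_int itself, so that dividend/divisor pair skips the sdiv below.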
  const int min_int = 0x80000000;
  Label regular;
  __ cmp(Otos_i, -1);
  __ br(Assembler::notEqual, false, Assembler::pt, regular);
#ifdef _LP64
  // Don't put set in delay slot
  // Set will turn into multiple instructions in 64 bit mode
  __ delayed()->nop();
  __ set(min_int, G4_scratch);
#else
  __ delayed()->set(min_int, G4_scratch);
#endif
  Label done;
  __ cmp(O1, G4_scratch);
  __ br(Assembler::equal, true, Assembler::pt, done);
  __ delayed()->mov(O1, Otos_i);   // (mov only executed if branch taken)

  __ bind(regular);
  __ sdiv(O1, Otos_i, Otos_i); // note: irem uses O1 after this instruction!
  __ bind(done);
}

void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(Otos_i, O2); // save divisor
  idiv();                               // %%%% Hack: exploits fact that idiv leaves dividend in O1
  __ smul(Otos_i, O2, Otos_i);
  __ sub(O1, Otos_i, Otos_i);
}


void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(O2);
#ifdef _LP64
  __ mulx(Otos_l, O2, Otos_l);
#else
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
#endif
}


void TemplateTable::ldiv() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
#endif
}


void TemplateTable::lrem() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l2);
  __ mulx (Otos_l2, Otos_l, Otos_l2);
  __ sub  (O2, Otos_l2, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
#endif
}


void TemplateTable::lshl() {
  transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra

  __ pop_l(O2);                          // shift value in O2, O3
#ifdef _LP64
  __ sllx(O2, Otos_i, Otos_l);
#else
  __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::lshr() {
  transition(itos, ltos); // %%%% see lshl comment

  __ pop_l(O2);                          // shift value in O2, O3
#ifdef _LP64
  __ srax(O2, Otos_i, Otos_l);
#else
  __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::lushr() {
  transition(itos, ltos); // %%%% see lshl comment

  __ pop_l(O2);                          // shift value in O2, O3
#ifdef _LP64
  __ srlx(O2, Otos_i, Otos_l);
#else
  __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
   case  add:  __  pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  sub:  __  pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  mul:  __  pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  div:  __  pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  rem:
     assert(Ftos_f == F0, "just checking");
#ifdef _LP64
     // LP64 calling conventions use F1, F3 for passing 2 floats
     __ pop_f(F1);
     __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
#else
     __ pop_i(O0);
     __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
     __ ld( __ d_tmp, O1 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
     assert( Ftos_f == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
   case  add:  __  pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  sub:  __  pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  mul:  __  pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  div:  __  pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  rem:
#ifdef _LP64
     // Pass arguments in D0, D2
     __ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
     __ pop_d( F0 );
#else
     // Pass arguments in O0O1, O2O3
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O2 );
     __ pop_d(Ftos_f);
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O0 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
     assert( Ftos_d == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::ineg() {
  transition(itos, itos);
  __ neg(Otos_i);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
#ifdef _LP64
  __ sub(G0, Otos_l, Otos_l);
#else
  __ lneg(Otos_l1, Otos_l2);
#endif
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fneg(FloatRegisterImpl::S, Ftos_f);
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
  // v8 has fnegd if source and dest are the same
  __ fneg(FloatRegisterImpl::D, Ftos_f);
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ ldsb(Lbcp, 2, O2);  // load constant
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O2, Otos_i);
  __ st(Otos_i, G3_scratch, Interpreter::value_offset_in_bytes());    // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ get_2_byte_integer_at_bcp( 4,  O2, O3, InterpreterMacroAssembler::Signed);
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O3, Otos_i);
  __ st(Otos_i, G3_scratch, Interpreter::value_offset_in_bytes());    // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::convert() {
// %%%%% Factor this first part across platforms
  #ifdef ASSERT
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
      case Bytecodes::_i2l: // fall through
      case Bytecodes::_i2f: // fall through
      case Bytecodes::_i2d: // fall through
      case Bytecodes::_i2b: // fall through
      case Bytecodes::_i2c: // fall through
      case Bytecodes::_i2s: tos_in = itos; break;
      case Bytecodes::_l2i: // fall through
      case Bytecodes::_l2f: // fall through
      case Bytecodes::_l2d: tos_in = ltos; break;
      case Bytecodes::_f2i: // fall through
      case Bytecodes::_f2l: // fall through
      case Bytecodes::_f2d: tos_in = ftos; break;
      case Bytecodes::_d2i: // fall through
      case Bytecodes::_d2l: // fall through
      case Bytecodes::_d2f: tos_in = dtos; break;
      default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
      case Bytecodes::_l2i: // fall through
      case Bytecodes::_f2i: // fall through
      case Bytecodes::_d2i: // fall through
      case Bytecodes::_i2b: // fall through
      case Bytecodes::_i2c: // fall through
      case Bytecodes::_i2s: tos_out = itos; break;
      case Bytecodes::_i2l: // fall through
      case Bytecodes::_f2l: // fall through
      case Bytecodes::_d2l: tos_out = ltos; break;
      case Bytecodes::_i2f: // fall through
      case Bytecodes::_l2f: // fall through
      case Bytecodes::_d2f: tos_out = ftos; break;
      case Bytecodes::_i2d: // fall through
      case Bytecodes::_l2d: // fall through
      case Bytecodes::_f2d: tos_out = dtos; break;
      default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  #endif


  // Conversion
  Label done;
  switch (bytecode()) {
   case Bytecodes::_i2l:
#ifdef _LP64
    // Sign extend the 32 bits
    __ sra ( Otos_i, 0, Otos_l );
#else
    __ addcc(Otos_i, 0, Otos_l2);
    __ br(Assembler::greaterEqual, true, Assembler::pt, done);
    __ delayed()->clr(Otos_l1);
    __ set(~0, Otos_l1);
#endif
    break;

   case Bytecodes::_i2f:
    __ st(Otos_i, __ d_tmp );
    __ ldf(FloatRegisterImpl::S,  __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
    break;

   case Bytecodes::_i2d:
    __ st(Otos_i, __ d_tmp);
    __ ldf(FloatRegisterImpl::S,  __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
    break;

   case Bytecodes::_i2b:
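    // Truncate to a signed byte via shift-left/arithmetic-shift-right,
    // e.g. (sketch) 0x1FF << 24 = 0xFF000000, >> 24 = 0xFFFFFFFF, which
    // equals (byte)0x1FF == -1.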
    __ sll(Otos_i, 24, Otos_i);
    __ sra(Otos_i, 24, Otos_i);
    break;

   case Bytecodes::_i2c:
    __ sll(Otos_i, 16, Otos_i);
    __ srl(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_i2s:
    __ sll(Otos_i, 16, Otos_i);
    __ sra(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_l2i:
#ifndef _LP64
    __ mov(Otos_l2, Otos_i);
#else
    // Sign-extend into the high 32 bits
    __ sra(Otos_l, 0, Otos_i);
#endif
    break;

   case Bytecodes::_l2f:
   case Bytecodes::_l2d:
    __ st_long(Otos_l, __ d_tmp);
    __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);

    if (VM_Version::v9_instructions_work()) {
      if (bytecode() == Bytecodes::_l2f) {
        __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
      } else {
        __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
      }
    } else {
      __ call_VM_leaf(
        Lscratch,
        bytecode() == Bytecodes::_l2f
          ? CAST_FROM_FN_PTR(address, SharedRuntime::l2f)
          : CAST_FROM_FN_PTR(address, SharedRuntime::l2d)
      );
    }
    break;

  case Bytecodes::_f2i:  {
      Label isNaN;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
      // According to the v8 manual, you have to have a non-fp instruction
      // between fcmp and fb.
      if (!VM_Version::v9_instructions_work()) {
        __ nop();
      }
      __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
      __ delayed()->clr(Otos_i);                                     // NaN
      __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
      __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
      __ ld(__ d_tmp, Otos_i);
      __ bind(isNaN);
    }
    break;

   case Bytecodes::_f2l:
    // must uncache tos
    __ push_f();
#ifdef _LP64
    __ pop_f(F1);
#else
    __ pop_i(O0);
#endif
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    break;

   case Bytecodes::_f2d:
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
    break;

   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
    // must uncache tos
    __ push_d();
#ifdef _LP64
    // LP64 calling conventions pass first double arg in D0
    __ pop_d( Ftos_d );
#else
    __ pop_i( O0 );
    __ pop_i( O1 );
#endif
    __ call_VM_leaf(Lscratch,
        bytecode() == Bytecodes::_d2i
          ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
          : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    break;

    case Bytecodes::_d2f:
    if (VM_Version::v9_instructions_work()) {
      __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
    }
    else {
      // must uncache tos
      __ push_d();
      __ pop_i(O0);
      __ pop_i(O1);
      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f));
    }
    break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}


void TemplateTable::lcmp() {
  transition(ltos, itos);

#ifdef _LP64
  __ pop_l(O1); // pop off value 1, value 2 is in O0
  __ lcmp( O1, Otos_l, Otos_i );
#else
  __ pop_l(O2); // cmp O2,3 to O0,1
  __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
#endif
}


void TemplateTable::float_cmp(bool is_float, int unordered_result) {

  if (is_float) __ pop_f(F2);
  else          __ pop_d(F2);

  assert(Ftos_f == F0  &&  Ftos_d == F0,  "alias checking:");

  __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_oop(Lmethod);
  __ verify_thread();

  const Register O2_bumped_count = O2;
  __ profile_taken_branch(G3_scratch, O2_bumped_count);

  // get (wide) offset to O1_disp
  const Register O1_disp = O1;
  if (is_wide)  __ get_4_byte_integer_at_bcp( 1,  G4_scratch, O1_disp,                                    InterpreterMacroAssembler::set_CC);
  else          __ get_2_byte_integer_at_bcp( 1,  G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if( is_jsr ) {
    // compute return address as bci in Otos_i
    __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch);
    __ sub(Lbcp, G3_scratch, G3_scratch);
    __ sub(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()) - (is_wide ? 5 : 3), Otos_i);

    // Bump Lbcp to target of JSR
    __ add(Lbcp, O1_disp, Lbcp);
    // Push returnAddress for "ret" on stack
    __ push_ptr(Otos_i, G0); // push ptr sized thing plus 0 for tag.
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Save the current Lbcp
  const Register O0_cur_bcp = O0;
  __ mov( Lbcp, O0_cur_bcp );

  bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
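  // Backward branches close loops, so only they bump the backedge counter;
  // once the counter crosses the OSR threshold the loop can be transferred
  // to compiled code via on-stack replacement below.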
  1500   if ( increment_invocation_counter_for_backward_branches ) {
  1501     Label Lforward;
  1502     // check branch direction
  1503     __ br( Assembler::positive, false,  Assembler::pn, Lforward );
  1504     // Bump bytecode pointer by displacement (take the branch)
  1505     __ delayed()->add( O1_disp, Lbcp, Lbcp );     // add to bc addr
  1507     // Update Backedge branch separately from invocations
  1508     const Register G4_invoke_ctr = G4;
  1509     __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
  1510     if (ProfileInterpreter) {
  1511       __ test_invocation_counter_for_mdp(G4_invoke_ctr, Lbcp, G3_scratch, Lforward);
  1512       if (UseOnStackReplacement) {
  1513         __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
  1515     } else {
  1516       if (UseOnStackReplacement) {
  1517         __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
  1521     __ bind(Lforward);
  1522   } else
  1523     // Bump bytecode pointer by displacement (take the branch)
  1524     __ add( O1_disp, Lbcp, Lbcp );// add to bc addr
  1526   // continue with bytecode @ target
  1527   // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  1528   // %%%%% and changing dispatch_next to dispatch_only
  1529   __ dispatch_next(vtos);
  1533 // Note Condition in argument is TemplateTable::Condition
  1534 // arg scope is within class scope
  1536 void TemplateTable::if_0cmp(Condition cc) {
  1537   // no pointers, integer only!
  1538   transition(itos, vtos);
  1539   // assume branch is more often taken than not (loops use backward branches)
  1540   __ cmp( Otos_i, 0);
  1541   __ if_cmp(ccNot(cc), false);
  1545 void TemplateTable::if_icmp(Condition cc) {
  1546   transition(itos, vtos);
  1547   __ pop_i(O1);
  1548   __ cmp(O1, Otos_i);
  1549   __ if_cmp(ccNot(cc), false);
  1553 void TemplateTable::if_nullcmp(Condition cc) {
  1554   transition(atos, vtos);
  1555   __ tst(Otos_i);
  1556   __ if_cmp(ccNot(cc), true);
  1560 void TemplateTable::if_acmp(Condition cc) {
  1561   transition(atos, vtos);
  1562   __ pop_ptr(O1);
  1563   __ verify_oop(O1);
  1564   __ verify_oop(Otos_i);
  1565   __ cmp(O1, Otos_i);
  1566   __ if_cmp(ccNot(cc), true);
  1571 void TemplateTable::ret() {
  1572   transition(vtos, vtos);
  1573   locals_index(G3_scratch);
  1574   __ access_local_returnAddress(G3_scratch, Otos_i);
  1575   // Otos_i contains the bci, compute the bcp from that
  1577 #ifdef _LP64
  1578 #ifdef ASSERT
  1579   // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
  1580   // the result.  The return address (really a BCI) was stored with an
  1581   // 'astore' because JVM specs claim it's a pointer-sized thing.  Hence in
  1582   // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
  1583   // loaded value.
  1584   { Label zzz ;
  1585      __ set (65536, G3_scratch) ;
  1586      __ cmp (Otos_i, G3_scratch) ;
  1587      __ bp( Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
  1588      __ delayed()->nop();
  1589      __ stop("BCI is in the wrong register half?");
  1590      __ bind (zzz) ;
  1592 #endif
  1593 #endif
  1595   __ profile_ret(vtos, Otos_i, G4_scratch);
  1597   __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch);
  1598   __ add(G3_scratch, Otos_i, G3_scratch);
  1599   __ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
  1600   __ dispatch_next(vtos);
  1604 void TemplateTable::wide_ret() {
  1605   transition(vtos, vtos);
  1606   locals_index_wide(G3_scratch);
  1607   __ access_local_returnAddress(G3_scratch, Otos_i);
  1608   // Otos_i contains the bci, compute the bcp from that
  1610   __ profile_ret(vtos, Otos_i, G4_scratch);
  1612   __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch);
  1613   __ add(G3_scratch, Otos_i, G3_scratch);
  1614   __ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
  1615   __ dispatch_next(vtos);
  1616 }
  1619 void TemplateTable::tableswitch() {
  1620   transition(itos, vtos);
  1621   Label default_case, continue_execution;
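         // The tableswitch operands, after 0-3 pad bytes realigning the stream
         // to a 4-byte boundary, are: default, lo, hi, then hi-lo+1 jump
         // offsets (all 32-bit ints, per the JVM spec).  A C-style sketch of
         // what the code below computes (names are illustrative only):
         //   jint* tbl = (jint*)round_up(Lbcp + 1, BytesPerInt);
         //   jint  off = (key < tbl[1] || key > tbl[2]) ? tbl[0]
         //                                              : tbl[3 + (key - tbl[1])];
         //   Lbcp += off;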
  1623   // align bcp
  1624   __ add(Lbcp, BytesPerInt, O1);
  1625   __ and3(O1, -BytesPerInt, O1);
  1626   // load lo, hi
  1627   __ ld(O1, 1 * BytesPerInt, O2);       // low bound
  1628   __ ld(O1, 2 * BytesPerInt, O3);       // high bound
  1629 #ifdef _LP64
  1630   // Sign extend the 32 bits
  1631   __ sra ( Otos_i, 0, Otos_i );
  1632 #endif /* _LP64 */
  1634   // check against lo & hi
  1635   __ cmp( Otos_i, O2);
  1636   __ br( Assembler::less, false, Assembler::pn, default_case);
  1637   __ delayed()->cmp( Otos_i, O3 );
  1638   __ br( Assembler::greater, false, Assembler::pn, default_case);
  1639   // lookup dispatch offset
  1640   __ delayed()->sub(Otos_i, O2, O2);
  1641   __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
  1642   __ sll(O2, LogBytesPerInt, O2);
  1643   __ add(O2, 3 * BytesPerInt, O2);
  1644   __ ba(false, continue_execution);
  1645   __ delayed()->ld(O1, O2, O2);
  1646   // handle default
  1647   __ bind(default_case);
  1648   __ profile_switch_default(O3);
  1649   __ ld(O1, 0, O2); // get default offset
  1650   // continue execution
  1651   __ bind(continue_execution);
  1652   __ add(Lbcp, O2, Lbcp);
  1653   __ dispatch_next(vtos);
  1654 }
  1657 void TemplateTable::lookupswitch() {
  1658   transition(itos, itos);
  1659   __ stop("lookupswitch bytecode should have been rewritten");
  1660 }
  1662 void TemplateTable::fast_linearswitch() {
  1663   transition(itos, vtos);
  1664     Label loop_entry, loop, found, continue_execution;
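         // lookupswitch operands after alignment: default, npairs, then npairs
         // (match, offset) pairs of 32-bit ints.  Roughly, the code below does
         // (a sketch; names are illustrative only):
         //   jint off = tbl[0];                               // default
         //   for (int i = 0; i < tbl[1]; i++)
         //     if (pair[i].match == key) { off = pair[i].offset; break; }
         //   Lbcp += off;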
  1665   // align bcp
  1666   __ add(Lbcp, BytesPerInt, O1);
  1667   __ and3(O1, -BytesPerInt, O1);
  1668  // set counter
  1669   __ ld(O1, BytesPerInt, O2);
  1670   __ sll(O2, LogBytesPerInt + 1, O2); // in word-pairs
  1671   __ add(O1, 2 * BytesPerInt, O3); // set first pair addr
  1672   __ ba(false, loop_entry);
  1673   __ delayed()->add(O3, O2, O2); // counter now points past last pair
  1675   // table search
  1676   __ bind(loop);
  1677   __ cmp(O4, Otos_i);
  1678   __ br(Assembler::equal, true, Assembler::pn, found);
  1679   __ delayed()->ld(O3, BytesPerInt, O4); // offset -> O4
  1680   __ inc(O3, 2 * BytesPerInt);
  1682   __ bind(loop_entry);
  1683   __ cmp(O2, O3);
  1684   __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
  1685   __ delayed()->ld(O3, 0, O4);
  1687   // default case
  1688   __ ld(O1, 0, O4); // get default offset
  1689   if (ProfileInterpreter) {
  1690     __ profile_switch_default(O3);
  1691     __ ba(false, continue_execution);
  1692     __ delayed()->nop();
  1693   }
  1695   // entry found -> get offset
  1696   __ bind(found);
  1697   if (ProfileInterpreter) {
  1698     __ sub(O3, O1, O3);
  1699     __ sub(O3, 2*BytesPerInt, O3);
  1700     __ srl(O3, LogBytesPerInt + 1, O3); // in word-pairs
  1701     __ profile_switch_case(O3, O1, O2, G3_scratch);
  1703     __ bind(continue_execution);
  1704   }
  1705   __ add(Lbcp, O4, Lbcp);
  1706   __ dispatch_next(vtos);
  1707 }
  1710 void TemplateTable::fast_binaryswitch() {
  1711   transition(itos, vtos);
  1712   // Implementation using the following core algorithm: (copied from Intel)
  1713   //
  1714   // int binary_search(int key, LookupswitchPair* array, int n) {
  1715   //   // Binary search according to "Methodik des Programmierens" by
  1716   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  1717   //   int i = 0;
  1718   //   int j = n;
  1719   //   while (i+1 < j) {
  1720   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  1721   //     // with      Q: for all i: 0 <= i < n: key < a[i]
  1722   //     // where a stands for the array and assuming that the (non-existing)
  1723   //     // element a[n] is infinitely big.
  1724   //     int h = (i + j) >> 1;
  1725   //     // i < h < j
  1726   //     if (key < array[h].fast_match()) {
  1727   //       j = h;
  1728   //     } else {
  1729   //       i = h;
  1730   //     }
  1731   //   }
  1732   //   // R: a[i] <= key < a[i+1] or Q
  1733   //   // (i.e., if key is within array, i is the correct index)
  1734   //   return i;
  1735   // }
  1737   // register allocation
  1738   assert(Otos_i == O0, "alias checking");
  1739   const Register Rkey     = Otos_i;                    // already set (tosca)
  1740   const Register Rarray   = O1;
  1741   const Register Ri       = O2;
  1742   const Register Rj       = O3;
  1743   const Register Rh       = O4;
  1744   const Register Rscratch = O5;
  1746   const int log_entry_size = 3;
  1747   const int entry_size = 1 << log_entry_size;
  1749   Label found;
  1750   // Find Array start
  1751   __ add(Lbcp, 3 * BytesPerInt, Rarray);
  1752   __ and3(Rarray, -BytesPerInt, Rarray);
  1753   // initialize i & j (in delay slot)
  1754   __ clr( Ri );
  1756   // and start
  1757   Label entry;
  1758   __ ba(false, entry);
  1759   __ delayed()->ld( Rarray, -BytesPerInt, Rj);
  1760   // (Rj is already in the native byte-ordering.)
  1762   // binary search loop
  1763   { Label loop;
  1764     __ bind( loop );
  1765     // int h = (i + j) >> 1;
  1766     __ sra( Rh, 1, Rh );
  1767     // if (key < array[h].fast_match()) {
  1768     //   j = h;
  1769     // } else {
  1770     //   i = h;
  1771     // }
  1772     __ sll( Rh, log_entry_size, Rscratch );
  1773     __ ld( Rarray, Rscratch, Rscratch );
  1774     // (Rscratch is already in the native byte-ordering.)
  1775     __ cmp( Rkey, Rscratch );
  1776     if ( VM_Version::v9_instructions_work() ) {
  1777       __ movcc( Assembler::less,         false, Assembler::icc, Rh, Rj );  // j = h if (key <  array[h].fast_match())
  1778       __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri );  // i = h if (key >= array[h].fast_match())
  1779     }
  1780     else {
  1781       Label end_of_if;
  1782       __ br( Assembler::less, true, Assembler::pt, end_of_if );
  1783       __ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh
  1784       __ mov( Rh, Ri );            // else i = h
  1785       __ bind(end_of_if);          // }
  1786     }
  1788     // while (i+1 < j)
  1789     __ bind( entry );
  1790     __ add( Ri, 1, Rscratch );
  1791     __ cmp(Rscratch, Rj);
  1792     __ br( Assembler::less, true, Assembler::pt, loop );
  1793     __ delayed()->add( Ri, Rj, Rh ); // start h = i + j; the loop head does h >>= 1
  1794   }
  1796   // end of binary search, result index is i (must check again!)
  1797   Label default_case;
  1798   Label continue_execution;
  1799   if (ProfileInterpreter) {
  1800     __ mov( Ri, Rh );              // Save index i in Rh for profiling
  1801   }
  1802   __ sll( Ri, log_entry_size, Ri );
  1803   __ ld( Rarray, Ri, Rscratch );
  1804   // (Rscratch is already in the native byte-ordering.)
  1805   __ cmp( Rkey, Rscratch );
  1806   __ br( Assembler::notEqual, true, Assembler::pn, default_case );
  1807   __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j
  1809   // entry found -> j = offset
  1810   __ inc( Ri, BytesPerInt );
  1811   __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
  1812   __ ld( Rarray, Ri, Rj );
  1813   // (Rj is already in the native byte-ordering.)
  1815   if (ProfileInterpreter) {
  1816     __ ba(false, continue_execution);
  1817     __ delayed()->nop();
  1818   }
  1820   __ bind(default_case); // fall through (if not profiling)
  1821   __ profile_switch_default(Ri);
  1823   __ bind(continue_execution);
  1824   __ add( Lbcp, Rj, Lbcp );
  1825   __ dispatch_next( vtos );
  1826 }
  1829 void TemplateTable::_return(TosState state) {
  1830   transition(state, state);
  1831   assert(_desc->calls_vm(), "inconsistent calls_vm information");
  1833   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
  1834     assert(state == vtos, "only valid state");
  1835     __ mov(G0, G3_scratch);
  1836     __ access_local_ptr(G3_scratch, Otos_i);
  1837     __ load_klass(Otos_i, O2);
  1838     __ set(JVM_ACC_HAS_FINALIZER, G3);
  1839     __ ld(O2, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), O2);
  1840     __ andcc(G3, O2, G0);
  1841     Label skip_register_finalizer;
  1842     __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
  1843     __ delayed()->nop();
  1845     // Call out to do finalizer registration
  1846     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);
  1848     __ bind(skip_register_finalizer);
  1849   }
  1851   __ remove_activation(state, /* throw_monitor_exception */ true);
  1853   // The caller's SP was adjusted upon method entry to accommodate
  1854   // the callee's non-argument locals. Undo that adjustment.
  1855   __ ret();                             // return to caller
  1856   __ delayed()->restore(I5_savedSP, G0, SP);
  1857 }
  1860 // ----------------------------------------------------------------------------
  1861 // Volatile variables demand their effects be made known to all CPUs in
  1862 // order.  Store buffers on most chips allow reads & writes to reorder; the
  1863 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
  1864 // memory barrier (i.e., it's not sufficient that the interpreter does not
  1865 // reorder volatile references, the hardware also must not reorder them).
  1866 //
  1867 // According to the new Java Memory Model (JMM):
  1868 // (1) All volatiles are serialized with respect to each other.
  1869 // Also, reads & writes act as acquire & release, so:
  1870 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
  1871 // the read float up to before the read.  It's OK for non-volatile memory refs
  1872 // that happen before the volatile read to float down below it.
  1873 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
  1874 // that happen BEFORE the write float down to after the write.  It's OK for
  1875 // non-volatile memory refs that happen after the volatile write to float up
  1876 // before it.
  1877 //
  1878 // We only put in barriers around volatile refs (they are expensive), not
  1879 // _between_ memory refs (that would require us to track the flavor of the
  1880 // previous memory refs).  Requirements (2) and (3) require some barriers
  1881 // before volatile stores and after volatile loads.  These nearly cover
  1882 // requirement (1) but miss the volatile-store-volatile-load case.  This final
  1883 // case is placed after volatile-stores although it could just as well go
  1884 // before volatile-loads.
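       // Concretely, the accessors below place barriers roughly as follows
       // (a sketch; on TSO hardware only the StoreLoad membar emits code):
       //   volatile load:   ld  [addr], x;  membar(LoadLoad | LoadStore);
       //   volatile store:  membar(LoadStore | StoreStore);  st  x, [addr];
       //                    membar(StoreLoad);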
  1885 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
  1886   // Helper function to insert an is-volatile test and memory barrier
  1887   // All current sparc implementations run in TSO, needing only StoreLoad
  1888   if ((order_constraint & Assembler::StoreLoad) == 0) return;
  1889   __ membar( order_constraint );
  1890 }
  1892 // ----------------------------------------------------------------------------
  1893 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
  1894   assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
  1895   // Depends on cpCacheOop layout!
  1896   const int shift_count = (1 + byte_no)*BitsPerByte;
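         // e.g. with byte_no == 1 this extracts bits 16..23 of the indices
         // word, which hold the bytecode that last resolved this entry
         // (an informal reading of the cpCacheOop layout noted above).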
  1897   Label resolved;
  1899   __ get_cache_and_index_at_bcp(Rcache, index, 1);
  1900   __ ld_ptr(Address(Rcache, 0, in_bytes(constantPoolCacheOopDesc::base_offset() +
  1901                                         ConstantPoolCacheEntry::indices_offset())), Lbyte_code);
  1903   __ srl(  Lbyte_code, shift_count, Lbyte_code );
  1904   __ and3( Lbyte_code,        0xFF, Lbyte_code );
  1905   __ cmp(  Lbyte_code, (int)bytecode());
  1906   __ br(   Assembler::equal, false, Assembler::pt, resolved);
  1907   __ delayed()->set((int)bytecode(), O1);
  1909   address entry;
  1910   switch (bytecode()) {
  1911     case Bytecodes::_getstatic      : // fall through
  1912     case Bytecodes::_putstatic      : // fall through
  1913     case Bytecodes::_getfield       : // fall through
  1914     case Bytecodes::_putfield       : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
  1915     case Bytecodes::_invokevirtual  : // fall through
  1916     case Bytecodes::_invokespecial  : // fall through
  1917     case Bytecodes::_invokestatic   : // fall through
  1918     case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);  break;
  1919     default                         : ShouldNotReachHere();                                 break;
  1920   }
  1921   // first-time invocation - must resolve first
  1922   __ call_VM(noreg, entry, O1);
  1923   // Update registers with resolved info
  1924   __ get_cache_and_index_at_bcp(Rcache, index, 1);
  1925   __ bind(resolved);
  1926 }
  1928 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
  1929                                                Register Rmethod,
  1930                                                Register Ritable_index,
  1931                                                Register Rflags,
  1932                                                bool is_invokevirtual,
  1933                                                bool is_invokevfinal) {
  1934   // Uses both G3_scratch and G4_scratch
  1935   Register Rcache = G3_scratch;
  1936   Register Rscratch = G4_scratch;
  1937   assert_different_registers(Rcache, Rmethod, Ritable_index);
  1939   ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
  1941   // determine constant pool cache field offsets
  1942   const int method_offset = in_bytes(
  1943     cp_base_offset +
  1944       (is_invokevirtual
  1945        ? ConstantPoolCacheEntry::f2_offset()
  1946        : ConstantPoolCacheEntry::f1_offset()
  1948     );
  1949   const int flags_offset = in_bytes(cp_base_offset +
  1950                                     ConstantPoolCacheEntry::flags_offset());
  1951   // access constant pool cache fields
  1952   const int index_offset = in_bytes(cp_base_offset +
  1953                                     ConstantPoolCacheEntry::f2_offset());
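         // Informal summary of how these fields are used: for invokevirtual,
         // f2 holds the vtable index (or, if vfinal, the methodOop itself);
         // for the other invokes, f1 holds the methodOop (for invokeinterface,
         // the interface klass); flags carries the return type, the parameter
         // count, and the modifier bits the callers test.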
  1955   if (is_invokevfinal) {
  1956     __ get_cache_and_index_at_bcp(Rcache, Rscratch, 1);
  1957   } else {
  1958     resolve_cache_and_index(byte_no, Rcache, Rscratch);
  1959   }
  1961   __ ld_ptr(Address(Rcache, 0, method_offset), Rmethod);
  1962   if (Ritable_index != noreg) {
  1963     __ ld_ptr(Address(Rcache, 0, index_offset), Ritable_index);
  1964   }
  1965   __ ld_ptr(Address(Rcache, 0, flags_offset),  Rflags);
  1966 }
  1968 // The Rcache register must be set before the call
  1969 void TemplateTable::load_field_cp_cache_entry(Register Robj,
  1970                                               Register Rcache,
  1971                                               Register index,
  1972                                               Register Roffset,
  1973                                               Register Rflags,
  1974                                               bool is_static) {
  1975   assert_different_registers(Rcache, Rflags, Roffset);
  1977   ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
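         // Informal summary of the field-entry layout used here: flags carries
         // the tos type and the volatile bit, f2 the field offset, and f1 the
         // holder klassOop (only needed for statics).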
  1979   __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
  1980                              ConstantPoolCacheEntry::flags_offset())), Rflags);
  1981   __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
  1982                              ConstantPoolCacheEntry::f2_offset())), Roffset);
  1983   if (is_static) {
  1984     __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
  1985                              ConstantPoolCacheEntry::f1_offset())), Robj);
  1986   }
  1987 }
  1989 // The registers Rcache and index are expected to be set before the call.
  1990 // Correct values of the Rcache and index registers are preserved.
  1991 void TemplateTable::jvmti_post_field_access(Register Rcache,
  1992                                             Register index,
  1993                                             bool is_static,
  1994                                             bool has_tos) {
  1995   ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
  1997   if (JvmtiExport::can_post_field_access()) {
  1998     // Check to see if a field access watch has been set before we take
  1999     // the time to call into the VM.
  2000     Label Label1;
  2001     assert_different_registers(Rcache, index, G1_scratch);
  2002     Address get_field_access_count_addr(G1_scratch,
  2003                                         (address)JvmtiExport::get_field_access_count_addr(),
  2004                                         relocInfo::none);
  2005     __ load_contents(get_field_access_count_addr, G1_scratch);
  2006     __ tst(G1_scratch);
  2007     __ br(Assembler::zero, false, Assembler::pt, Label1);
  2008     __ delayed()->nop();
  2010     __ add(Rcache, in_bytes(cp_base_offset), Rcache);
  2012     if (is_static) {
  2013       __ clr(Otos_i);
  2014     } else {
  2015       if (has_tos) {
  2016       // save object pointer before call_VM() clobbers it
  2017         __ mov(Otos_i, Lscratch);
  2018       } else {
  2019         // Load top of stack (do not pop the value off the stack);
  2020         __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
  2021       }
  2022       __ verify_oop(Otos_i);
  2023     }
  2024     // Otos_i: object pointer or NULL if static
  2025     // Rcache: cache entry pointer
  2026     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
  2027                Otos_i, Rcache);
  2028     if (!is_static && has_tos) {
  2029       __ mov(Lscratch, Otos_i);  // restore object pointer
  2030       __ verify_oop(Otos_i);
  2031     }
  2032     __ get_cache_and_index_at_bcp(Rcache, index, 1);
  2033     __ bind(Label1);
  2034   }
  2035 }
  2037 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
  2038   transition(vtos, vtos);
  2040   Register Rcache = G3_scratch;
  2041   Register index  = G4_scratch;
  2042   Register Rclass = Rcache;
  2043   Register Roffset= G4_scratch;
  2044   Register Rflags = G1_scratch;
  2045   ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
  2047   resolve_cache_and_index(byte_no, Rcache, index);
  2048   jvmti_post_field_access(Rcache, index, is_static, false);
  2049   load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
  2051   if (!is_static) {
  2052     pop_and_check_object(Rclass);
  2053   } else {
  2054     __ verify_oop(Rclass);
  2055   }
  2057   Label exit;
  2059   Assembler::Membar_mask_bits membar_bits =
  2060     Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
  2062   if (__ membar_has_effect(membar_bits)) {
  2063     // Get volatile flag
  2064     __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
  2065     __ and3(Rflags, Lscratch, Lscratch);
  2066   }
  2068   Label checkVolatile;
  2070   // compute field type
  2071   Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj;
  2072   __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
  2073   // Make sure we don't need to mask Rflags for tosBits after the above shift
  2074   ConstantPoolCacheEntry::verify_tosBits();
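         // The type tests below form a branch chain; each branch's delay slot
         // already issues the compare for the next test, e.g.:
         //   cmp(Rflags, atos); br(notEqual, notObj); delayed()->cmp(Rflags, itos);
         // so the matching case is reached without extra compare instructions.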
  2076   // Check atos before itos for getstatic, more likely (in Queens at least)
  2077   __ cmp(Rflags, atos);
  2078   __ br(Assembler::notEqual, false, Assembler::pt, notObj);
  2079   __ delayed() ->cmp(Rflags, itos);
  2081   // atos
  2082   __ load_heap_oop(Rclass, Roffset, Otos_i);
  2083   __ verify_oop(Otos_i);
  2084   __ push(atos);
  2085   if (!is_static) {
  2086     patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
  2087   }
  2088   __ ba(false, checkVolatile);
  2089   __ delayed()->tst(Lscratch);
  2091   __ bind(notObj);
  2093   // cmp(Rflags, itos);
  2094   __ br(Assembler::notEqual, false, Assembler::pt, notInt);
  2095   __ delayed() ->cmp(Rflags, ltos);
  2097   // itos
  2098   __ ld(Rclass, Roffset, Otos_i);
  2099   __ push(itos);
  2100   if (!is_static) {
  2101     patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
  2102   }
  2103   __ ba(false, checkVolatile);
  2104   __ delayed()->tst(Lscratch);
  2106   __ bind(notInt);
  2108   // cmp(Rflags, ltos);
  2109   __ br(Assembler::notEqual, false, Assembler::pt, notLong);
  2110   __ delayed() ->cmp(Rflags, btos);
  2112   // ltos
  2113   // load must be atomic
  2114   __ ld_long(Rclass, Roffset, Otos_l);
  2115   __ push(ltos);
  2116   if (!is_static) {
  2117     patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
  2118   }
  2119   __ ba(false, checkVolatile);
  2120   __ delayed()->tst(Lscratch);
  2122   __ bind(notLong);
  2124   // cmp(Rflags, btos);
  2125   __ br(Assembler::notEqual, false, Assembler::pt, notByte);
  2126   __ delayed() ->cmp(Rflags, ctos);
  2128   // btos
  2129   __ ldsb(Rclass, Roffset, Otos_i);
  2130   __ push(itos);
  2131   if (!is_static) {
  2132     patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
  2133   }
  2134   __ ba(false, checkVolatile);
  2135   __ delayed()->tst(Lscratch);
  2137   __ bind(notByte);
  2139   // cmp(Rflags, ctos);
  2140   __ br(Assembler::notEqual, false, Assembler::pt, notChar);
  2141   __ delayed() ->cmp(Rflags, stos);
  2143   // ctos
  2144   __ lduh(Rclass, Roffset, Otos_i);
  2145   __ push(itos);
  2146   if (!is_static) {
  2147     patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
  2148   }
  2149   __ ba(false, checkVolatile);
  2150   __ delayed()->tst(Lscratch);
  2152   __ bind(notChar);
  2154   // cmp(Rflags, stos);
  2155   __ br(Assembler::notEqual, false, Assembler::pt, notShort);
  2156   __ delayed() ->cmp(Rflags, ftos);
  2158   // stos
  2159   __ ldsh(Rclass, Roffset, Otos_i);
  2160   __ push(itos);
  2161   if (!is_static) {
  2162     patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
  2163   }
  2164   __ ba(false, checkVolatile);
  2165   __ delayed()->tst(Lscratch);
  2167   __ bind(notShort);
  2170   // cmp(Rflags, ftos);
  2171   __ br(Assembler::notEqual, false, Assembler::pt, notFloat);
  2172   __ delayed() ->tst(Lscratch);
  2174   // ftos
  2175   __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f);
  2176   __ push(ftos);
  2177   if (!is_static) {
  2178     patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
  2179   }
  2180   __ ba(false, checkVolatile);
  2181   __ delayed()->tst(Lscratch);
  2183   __ bind(notFloat);
  2186   // dtos
  2187   __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d);
  2188   __ push(dtos);
  2189   if (!is_static) {
  2190     patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch);
  2191   }
  2193   __ bind(checkVolatile);
  2194   if (__ membar_has_effect(membar_bits)) {
  2195     // __ tst(Lscratch); executed in delay slot
  2196     __ br(Assembler::zero, false, Assembler::pt, exit);
  2197     __ delayed()->nop();
  2198     volatile_barrier(membar_bits);
  2199   }
  2201   __ bind(exit);
  2202 }
  2205 void TemplateTable::getfield(int byte_no) {
  2206   getfield_or_static(byte_no, false);
  2207 }
  2209 void TemplateTable::getstatic(int byte_no) {
  2210   getfield_or_static(byte_no, true);
  2211 }
  2214 void TemplateTable::fast_accessfield(TosState state) {
  2215   transition(atos, state);
  2216   Register Rcache  = G3_scratch;
  2217   Register index   = G4_scratch;
  2218   Register Roffset = G4_scratch;
  2219   Register Rflags  = Rcache;
  2220   ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
  2222   __ get_cache_and_index_at_bcp(Rcache, index, 1);
  2223   jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true);
  2225   __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())), Roffset);
  2227   __ null_check(Otos_i);
  2228   __ verify_oop(Otos_i);
  2230   Label exit;
  2232   Assembler::Membar_mask_bits membar_bits =
  2233     Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
  2234   if (__ membar_has_effect(membar_bits)) {
  2235     // Get volatile flag
  2236     __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())), Rflags);
  2237     __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
  2238   }
  2240   switch (bytecode()) {
  2241     case Bytecodes::_fast_bgetfield:
  2242       __ ldsb(Otos_i, Roffset, Otos_i);
  2243       break;
  2244     case Bytecodes::_fast_cgetfield:
  2245       __ lduh(Otos_i, Roffset, Otos_i);
  2246       break;
  2247     case Bytecodes::_fast_sgetfield:
  2248       __ ldsh(Otos_i, Roffset, Otos_i);
  2249       break;
  2250     case Bytecodes::_fast_igetfield:
  2251       __ ld(Otos_i, Roffset, Otos_i);
  2252       break;
  2253     case Bytecodes::_fast_lgetfield:
  2254       __ ld_long(Otos_i, Roffset, Otos_l);
  2255       break;
  2256     case Bytecodes::_fast_fgetfield:
  2257       __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f);
  2258       break;
  2259     case Bytecodes::_fast_dgetfield:
  2260       __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
  2261       break;
  2262     case Bytecodes::_fast_agetfield:
  2263       __ load_heap_oop(Otos_i, Roffset, Otos_i);
  2264       break;
  2265     default:
  2266       ShouldNotReachHere();
  2267   }
  2269   if (__ membar_has_effect(membar_bits)) {
  2270     __ btst(Lscratch, Rflags);
  2271     __ br(Assembler::zero, false, Assembler::pt, exit);
  2272     __ delayed()->nop();
  2273     volatile_barrier(membar_bits);
  2274     __ bind(exit);
  2275   }
  2277   if (state == atos) {
  2278     __ verify_oop(Otos_i);    // does not blow flags!
  2279   }
  2280 }
  2282 void TemplateTable::jvmti_post_fast_field_mod() {
  2283   if (JvmtiExport::can_post_field_modification()) {
  2284     // Check to see if a field modification watch has been set before we take
  2285     // the time to call into the VM.
  2286     Label done;
  2287     Address get_field_modification_count_addr(G4_scratch, (address)JvmtiExport::get_field_modification_count_addr(), relocInfo::none);
  2288     __ load_contents(get_field_modification_count_addr, G4_scratch);
  2289     __ tst(G4_scratch);
  2290     __ br(Assembler::zero, false, Assembler::pt, done);
  2291     __ delayed()->nop();
  2292     __ pop_ptr(G4_scratch);     // copy the object pointer from tos
  2293     __ verify_oop(G4_scratch);
  2294     __ push_ptr(G4_scratch);    // put the object pointer back on tos
  2295     __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1);
  2296     // Save tos values before call_VM() clobbers them. Since we have
  2297     // to do it for every data type, we use the saved values as the
  2298     // jvalue object.
  2299     switch (bytecode()) {  // save tos values before call_VM() clobbers them
  2300     case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break;
  2301     case Bytecodes::_fast_bputfield: // fall through
  2302     case Bytecodes::_fast_sputfield: // fall through
  2303     case Bytecodes::_fast_cputfield: // fall through
  2304     case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break;
  2305     case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break;
  2306     case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break;
  2307     // get words in right order for use as jvalue object
  2308     case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break;
  2309     }
  2310     // setup pointer to jvalue object
  2311     __ mov(Lesp, G3_scratch);  __ inc(G3_scratch, wordSize);
  2312     // G4_scratch:  object pointer
  2313     // G1_scratch: cache entry pointer
  2314     // G3_scratch: jvalue object on the stack
  2315     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
  2316     switch (bytecode()) {             // restore tos values
  2317     case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break;
  2318     case Bytecodes::_fast_bputfield: // fall through
  2319     case Bytecodes::_fast_sputfield: // fall through
  2320     case Bytecodes::_fast_cputfield: // fall through
  2321     case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break;
  2322     case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break;
  2323     case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break;
  2324     case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break;
  2325     }
  2326     __ bind(done);
  2327   }
  2328 }
  2330 // The registers Rcache and index are expected to be set before the call.
  2331 // The function may destroy various registers, just not the Rcache and index registers.
  2332 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
  2333   ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
  2335   if (JvmtiExport::can_post_field_modification()) {
  2336     // Check to see if a field modification watch has been set before we take
  2337     // the time to call into the VM.
  2338     Label Label1;
  2339     assert_different_registers(Rcache, index, G1_scratch);
  2340     Address get_field_modification_count_addr(G1_scratch,
  2341                                               (address)JvmtiExport::get_field_modification_count_addr(),
  2342                                               relocInfo::none);
  2343     __ load_contents(get_field_modification_count_addr, G1_scratch);
  2344     __ tst(G1_scratch);
  2345     __ br(Assembler::zero, false, Assembler::pt, Label1);
  2346     __ delayed()->nop();
  2348     // The Rcache and index registers have been already set.
  2349     // This would allow us to eliminate this call, but then the Rcache and
  2350     // index registers would have to be used correspondingly after this line.
  2351     __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);
  2353     __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
  2354     if (is_static) {
  2355       // Life is simple.  Null out the object pointer.
  2356       __ clr(G4_scratch);
  2357     } else {
  2358       Register Rflags = G1_scratch;
  2359       // Life is harder. The stack holds the value on top, followed by the
  2360       // object.  We don't know the size of the value, though; it could be
  2361       // one or two words depending on its type. As a result, we must find
  2362       // the type to determine where the object is.
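             // Expression stack at this point, in Lesp-relative slots (a
             // sketch; the value was pushed after the objectref):
             //   one-word value:  expr(0)    = value,  expr(1) = objectref
             //   two-word value:  expr(0..1) = value,  expr(2) = objectref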
  2364       Label two_word, valsizeknown;
  2365       __ ld_ptr(Address(G1_scratch, 0, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())), Rflags);
  2366       __ mov(Lesp, G4_scratch);
  2367       __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
  2368       // Make sure we don't need to mask Rflags for tosBits after the above shift
  2369       ConstantPoolCacheEntry::verify_tosBits();
  2370       __ cmp(Rflags, ltos);
  2371       __ br(Assembler::equal, false, Assembler::pt, two_word);
  2372       __ delayed()->cmp(Rflags, dtos);
  2373       __ br(Assembler::equal, false, Assembler::pt, two_word);
  2374       __ delayed()->nop();
  2375       __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
  2376       __ br(Assembler::always, false, Assembler::pt, valsizeknown);
  2377       __ delayed()->nop();
  2378       __ bind(two_word);
  2380       __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));
  2382       __ bind(valsizeknown);
  2383       // setup object pointer
  2384       __ ld_ptr(G4_scratch, 0, G4_scratch);
  2385       __ verify_oop(G4_scratch);
  2386     }
  2387     // setup pointer to jvalue object
  2388     __ mov(Lesp, G1_scratch);  __ inc(G1_scratch, wordSize);
  2389     // G4_scratch:  object pointer or NULL if static
  2390     // G3_scratch: cache entry pointer
  2391     // G1_scratch: jvalue object on the stack
  2392     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
  2393                G4_scratch, G3_scratch, G1_scratch);
  2394     __ get_cache_and_index_at_bcp(Rcache, index, 1);
  2395     __ bind(Label1);
  2396   }
  2397 }
  2399 void TemplateTable::pop_and_check_object(Register r) {
  2400   __ pop_ptr(r);
  2401   __ null_check(r);  // for field access must check obj.
  2402   __ verify_oop(r);
  2403 }
  2405 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
  2406   transition(vtos, vtos);
  2407   Register Rcache = G3_scratch;
  2408   Register index  = G4_scratch;
  2409   Register Rclass = Rcache;
  2410   Register Roffset= G4_scratch;
  2411   Register Rflags = G1_scratch;
  2412   ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
  2414   resolve_cache_and_index(byte_no, Rcache, index);
  2415   jvmti_post_field_mod(Rcache, index, is_static);
  2416   load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
  2418   Assembler::Membar_mask_bits read_bits =
  2419     Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
  2420   Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
  2422   Label notVolatile, checkVolatile, exit;
  2423   if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
  2424     __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
  2425     __ and3(Rflags, Lscratch, Lscratch);
  2427     if (__ membar_has_effect(read_bits)) {
  2428       __ tst(Lscratch);
  2429       __ br(Assembler::zero, false, Assembler::pt, notVolatile);
  2430       __ delayed()->nop();
  2431       volatile_barrier(read_bits);
  2432       __ bind(notVolatile);
  2433     }
  2434   }
  2436   __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
  2437   // Make sure we don't need to mask Rflags for tosBits after the above shift
  2438   ConstantPoolCacheEntry::verify_tosBits();
  2440   // compute field type
  2441   Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat;
  2443   if (is_static) {
  2444     // putstatic with object type most likely, check that first
  2445     __ cmp(Rflags, atos );
  2446     __ br(Assembler::notEqual, false, Assembler::pt, notObj);
  2447     __ delayed() ->cmp(Rflags, itos );
  2449     // atos
  2450     __ pop_ptr();
  2451     __ verify_oop(Otos_i);
  2452     __ store_heap_oop(Otos_i, Rclass, Roffset);
  2453     __ store_check(G1_scratch, Rclass, Roffset);
  2454     __ ba(false, checkVolatile);
  2455     __ delayed()->tst(Lscratch);
  2457     __ bind(notObj);
  2459     // cmp(Rflags, itos );
  2460     __ br(Assembler::notEqual, false, Assembler::pt, notInt);
  2461     __ delayed() ->cmp(Rflags, btos );
  2463     // itos
  2464     __ pop_i();
  2465     __ st(Otos_i, Rclass, Roffset);
  2466     __ ba(false, checkVolatile);
  2467     __ delayed()->tst(Lscratch);
  2469     __ bind(notInt);
  2471   } else {
  2472     // putfield with int type most likely, check that first
  2473     __ cmp(Rflags, itos );
  2474     __ br(Assembler::notEqual, false, Assembler::pt, notInt);
  2475     __ delayed() ->cmp(Rflags, atos );
  2477     // itos
  2478     __ pop_i();
  2479     pop_and_check_object(Rclass);
  2480     __ st(Otos_i, Rclass, Roffset);
  2481     patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch);
  2482     __ ba(false, checkVolatile);
  2483     __ delayed()->tst(Lscratch);
  2485     __ bind(notInt);
  2486     // cmp(Rflags, atos );
  2487     __ br(Assembler::notEqual, false, Assembler::pt, notObj);
  2488     __ delayed() ->cmp(Rflags, btos );
  2490     // atos
  2491     __ pop_ptr();
  2492     pop_and_check_object(Rclass);
  2493     __ verify_oop(Otos_i);
  2494     __ store_heap_oop(Otos_i, Rclass, Roffset);
  2495     __ store_check(G1_scratch, Rclass, Roffset);
  2496     patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch);
  2497     __ ba(false, checkVolatile);
  2498     __ delayed()->tst(Lscratch);
  2500     __ bind(notObj);
  2501   }
  2503   // cmp(Rflags, btos );
  2504   __ br(Assembler::notEqual, false, Assembler::pt, notByte);
  2505   __ delayed() ->cmp(Rflags, ltos );
  2507   // btos
  2508   __ pop_i();
  2509   if (!is_static) pop_and_check_object(Rclass);
  2510   __ stb(Otos_i, Rclass, Roffset);
  2511   if (!is_static) {
  2512     patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch);
  2513   }
  2514   __ ba(false, checkVolatile);
  2515   __ delayed()->tst(Lscratch);
  2517   __ bind(notByte);
  2519   // cmp(Rflags, ltos );
  2520   __ br(Assembler::notEqual, false, Assembler::pt, notLong);
  2521   __ delayed() ->cmp(Rflags, ctos );
  2523   // ltos
  2524   __ pop_l();
  2525   if (!is_static) pop_and_check_object(Rclass);
  2526   __ st_long(Otos_l, Rclass, Roffset);
  2527   if (!is_static) {
  2528     patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch);
  2529   }
  2530   __ ba(false, checkVolatile);
  2531   __ delayed()->tst(Lscratch);
  2533   __ bind(notLong);
  2535   // cmp(Rflags, ctos );
  2536   __ br(Assembler::notEqual, false, Assembler::pt, notChar);
  2537   __ delayed() ->cmp(Rflags, stos );
  2539   // ctos (char)
  2540   __ pop_i();
  2541   if (!is_static) pop_and_check_object(Rclass);
  2542   __ sth(Otos_i, Rclass, Roffset);
  2543   if (!is_static) {
  2544     patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch);
  2545   }
  2546   __ ba(false, checkVolatile);
  2547   __ delayed()->tst(Lscratch);
  2549   __ bind(notChar);
  2550   // cmp(Rflags, stos );
  2551   __ br(Assembler::notEqual, false, Assembler::pt, notShort);
  2552   __ delayed() ->cmp(Rflags, ftos );
  2554   // stos (short)
  2555   __ pop_i();
  2556   if (!is_static) pop_and_check_object(Rclass);
  2557   __ sth(Otos_i, Rclass, Roffset);
  2558   if (!is_static) {
  2559     patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch);
  2560   }
  2561   __ ba(false, checkVolatile);
  2562   __ delayed()->tst(Lscratch);
  2564   __ bind(notShort);
  2565   // cmp(Rflags, ftos );
  2566   __ br(Assembler::notZero, false, Assembler::pt, notFloat);
  2567   __ delayed()->nop();
  2569   // ftos
  2570   __ pop_f();
  2571   if (!is_static) pop_and_check_object(Rclass);
  2572   __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
  2573   if (!is_static) {
  2574     patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch);
  2575   }
  2576   __ ba(false, checkVolatile);
  2577   __ delayed()->tst(Lscratch);
  2579   __ bind(notFloat);
  2581   // dtos
  2582   __ pop_d();
  2583   if (!is_static) pop_and_check_object(Rclass);
  2584   __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
  2585   if (!is_static) {
  2586     patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch);
  2587   }
  2589   __ bind(checkVolatile);
  2590   __ tst(Lscratch);
  2592   if (__ membar_has_effect(write_bits)) {
  2593     // __ tst(Lscratch); in delay slot
  2594     __ br(Assembler::zero, false, Assembler::pt, exit);
  2595     __ delayed()->nop();
  2596     volatile_barrier(Assembler::StoreLoad);
  2597     __ bind(exit);
  2598   }
  2599 }
  2601 void TemplateTable::fast_storefield(TosState state) {
  2602   transition(state, vtos);
  2603   Register Rcache = G3_scratch;
  2604   Register Rclass = Rcache;
  2605   Register Roffset= G4_scratch;
  2606   Register Rflags = G1_scratch;
  2607   ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
  2609   jvmti_post_fast_field_mod();
  2611   __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1);
  2613   Assembler::Membar_mask_bits read_bits =
  2614     Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
  2615   Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
  2617   Label notVolatile, checkVolatile, exit;
  2618   if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
  2619     __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
  2620                              ConstantPoolCacheEntry::flags_offset())), Rflags);
  2621     __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
  2622     __ and3(Rflags, Lscratch, Lscratch);
  2623     if (__ membar_has_effect(read_bits)) {
  2624       __ tst(Lscratch);
  2625       __ br(Assembler::zero, false, Assembler::pt, notVolatile);
  2626       __ delayed()->nop();
  2627       volatile_barrier(read_bits);
  2628       __ bind(notVolatile);
  2629     }
  2630   }
  2632   __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
  2633                              ConstantPoolCacheEntry::f2_offset())), Roffset);
  2634   pop_and_check_object(Rclass);
  2636   switch (bytecode()) {
  2637     case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break;
  2638     case Bytecodes::_fast_cputfield: /* fall through */
  2639     case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break;
  2640     case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset);  break;
  2641     case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break;
  2642     case Bytecodes::_fast_fputfield:
  2643       __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
  2644       break;
  2645     case Bytecodes::_fast_dputfield:
  2646       __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
  2647       break;
  2648     case Bytecodes::_fast_aputfield:
  2649       __ store_heap_oop(Otos_i, Rclass, Roffset);
  2650       __ store_check(G1_scratch, Rclass, Roffset);
  2651       break;
  2652     default:
  2653       ShouldNotReachHere();
  2654   }
  2656   if (__ membar_has_effect(write_bits)) {
  2657     __ tst(Lscratch);
  2658     __ br(Assembler::zero, false, Assembler::pt, exit);
  2659     __ delayed()->nop();
  2660     volatile_barrier(Assembler::StoreLoad);
  2661     __ bind(exit);
  2662   }
  2663 }
  2666 void TemplateTable::putfield(int byte_no) {
  2667   putfield_or_static(byte_no, false);
  2668 }
  2670 void TemplateTable::putstatic(int byte_no) {
  2671   putfield_or_static(byte_no, true);
  2672 }
  2675 void TemplateTable::fast_xaccess(TosState state) {
  2676   transition(vtos, state);
  2677   Register Rcache = G3_scratch;
  2678   Register Roffset = G4_scratch;
  2679   Register Rflags  = G4_scratch;
  2680   Register Rreceiver = Lscratch;
  2682   __ ld_ptr(Llocals, Interpreter::value_offset_in_bytes(), Rreceiver);
  2684   // access constant pool cache  (is resolved)
  2685   __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
  2686   __ ld_ptr(Address(Rcache, 0, in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())), Roffset);
  2687   __ add(Lbcp, 1, Lbcp);       // needed to report exception at the correct bcp
  2689   __ verify_oop(Rreceiver);
  2690   __ null_check(Rreceiver);
  2691   if (state == atos) {
  2692     __ load_heap_oop(Rreceiver, Roffset, Otos_i);
  2693   } else if (state == itos) {
  2694     __ ld (Rreceiver, Roffset, Otos_i) ;
  2695   } else if (state == ftos) {
  2696     __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f);
  2697   } else {
  2698     ShouldNotReachHere();
  2699   }
  2701   Assembler::Membar_mask_bits membar_bits =
  2702     Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
  2703   if (__ membar_has_effect(membar_bits)) {
  2705     // Get is_volatile value in Rflags and check if membar is needed
  2706     __ ld_ptr(Address(Rcache, 0, in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset())), Rflags);
  2708     // Test volatile
  2709     Label notVolatile;
  2710     __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
  2711     __ btst(Rflags, Lscratch);
  2712     __ br(Assembler::zero, false, Assembler::pt, notVolatile);
  2713     __ delayed()->nop();
  2714     volatile_barrier(membar_bits);
  2715     __ bind(notVolatile);
  2716   }
  2718   __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  2719   __ sub(Lbcp, 1, Lbcp);
  2720 }
  2722 //----------------------------------------------------------------------------------------------------
  2723 // Calls
  2725 void TemplateTable::count_calls(Register method, Register temp) {
  2726   // implemented elsewhere
  2727   ShouldNotReachHere();
  2728 }
  2730 void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
  2731   Register Rtemp = G4_scratch;
  2732   Register Rcall = Rindex;
  2733   assert_different_registers(Rcall, G5_method, Gargs, Rret);
  2735   // get target methodOop & entry point
  2736   const int base = instanceKlass::vtable_start_offset() * wordSize;
  2737   if (vtableEntry::size() % 3 == 0) {
  2738     // scale the vtable index by 12:
  2739     int one_third = vtableEntry::size() / 3;
  2740     __ sll(Rindex, exact_log2(one_third * 1 * wordSize), Rtemp);
  2741     __ sll(Rindex, exact_log2(one_third * 2 * wordSize), Rindex);
  2742     __ add(Rindex, Rtemp, Rindex);
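             // (a shift-and-add multiply: Rindex * 3k == Rindex*k + Rindex*2k,
             //  where k = one_third * wordSize and both factors are powers of two)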
  2743   } else {
  2744     // scale the vtable index by 8:
  2745     __ sll(Rindex, exact_log2(vtableEntry::size() * wordSize), Rindex);
  2746   }
  2748   __ add(Rrecv, Rindex, Rrecv);
  2749   __ ld_ptr(Rrecv, base + vtableEntry::method_offset_in_bytes(), G5_method);
  2751   __ call_from_interpreter(Rcall, Gargs, Rret);
  2752 }
  2754 void TemplateTable::invokevirtual(int byte_no) {
  2755   transition(vtos, vtos);
  2757   Register Rscratch = G3_scratch;
  2758   Register Rtemp = G4_scratch;
  2759   Register Rret = Lscratch;
  2760   Register Rrecv = G5_method;
  2761   Label notFinal;
  2763   load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true);
  2764   __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
  2766   // Check for vfinal
  2767   __ set((1 << ConstantPoolCacheEntry::vfinalMethod), G4_scratch);
  2768   __ btst(Rret, G4_scratch);
  2769   __ br(Assembler::zero, false, Assembler::pt, notFinal);
  2770   __ delayed()->and3(Rret, 0xFF, G4_scratch);      // gets number of parameters
  2772   patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
  2774   invokevfinal_helper(Rscratch, Rret);
  2776   __ bind(notFinal);
  2778   __ mov(G5_method, Rscratch);  // better scratch register
  2779   __ load_receiver(G4_scratch, O0);  // gets receiverOop
  2780   // receiver is in O0
  2781   __ verify_oop(O0);
  2783   // get return address
  2784   Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table());
  2785   __ load_address(table);
  2786   __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);          // get return type
  2787   // Make sure we don't need to mask Rret for tosBits after the above shift
  2788   ConstantPoolCacheEntry::verify_tosBits();
  2789   __ sll(Rret,  LogBytesPerWord, Rret);
  2790   __ ld_ptr(Rtemp, Rret, Rret);         // get return address
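         // (Rret now holds the interpreter's return entry for this result type,
         //  roughly return_3_addrs_by_index_table[result_tos]; the table is
         //  indexed in word-sized slots, hence the LogBytesPerWord shift.)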
  2792   // get receiver klass
  2793   __ null_check(O0, oopDesc::klass_offset_in_bytes());
  2794   __ load_klass(O0, Rrecv);
  2795   __ verify_oop(Rrecv);
  2797   __ profile_virtual_call(Rrecv, O4);
  2799   generate_vtable_call(Rrecv, Rscratch, Rret);
  2800 }
  2802 void TemplateTable::fast_invokevfinal(int byte_no) {
  2803   transition(vtos, vtos);
  2805   load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
  2806                              /*is_invokevfinal*/true);
  2807   __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
  2808   invokevfinal_helper(G3_scratch, Lscratch);
  2809 }
  2811 void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
  2812   Register Rtemp = G4_scratch;
  2814   __ verify_oop(G5_method);
  2816   // Load receiver from stack slot
  2817   __ lduh(Address(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())), G4_scratch);
  2818   __ load_receiver(G4_scratch, O0);
  2820   // receiver NULL check
  2821   __ null_check(O0);
  2823   __ profile_final_call(O4);
  2825   // get return address
  2826   Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table());
  2827   __ load_address(table);
  2828   __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);          // get return type
  2829   // Make sure we don't need to mask Rret for tosBits after the above shift
  2830   ConstantPoolCacheEntry::verify_tosBits();
  2831   __ sll(Rret,  LogBytesPerWord, Rret);
  2832   __ ld_ptr(Rtemp, Rret, Rret);         // get return address
  2835   // do the call
  2836   __ call_from_interpreter(Rscratch, Gargs, Rret);
  2837 }
  2839 void TemplateTable::invokespecial(int byte_no) {
  2840   transition(vtos, vtos);
  2842   Register Rscratch = G3_scratch;
  2843   Register Rtemp = G4_scratch;
  2844   Register Rret = Lscratch;
  2846   load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, false);
  2847   __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
  2849   __ verify_oop(G5_method);
  2851   __ lduh(Address(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())), G4_scratch);
  2852   __ load_receiver(G4_scratch, O0);
  2854   // receiver NULL check
  2855   __ null_check(O0);
  2857   __ profile_call(O4);
  2859   // get return address
  2860   Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table());
  2861   __ load_address(table);
  2862   __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);          // get return type
  2863   // Make sure we don't need to mask Rret for tosBits after the above shift
  2864   ConstantPoolCacheEntry::verify_tosBits();
  2865   __ sll(Rret,  LogBytesPerWord, Rret);
  2866   __ ld_ptr(Rtemp, Rret, Rret);         // get return address
  2868   // do the call
  2869   __ call_from_interpreter(Rscratch, Gargs, Rret);
  2870 }
  2872 void TemplateTable::invokestatic(int byte_no) {
  2873   transition(vtos, vtos);
  2875   Register Rscratch = G3_scratch;
  2876   Register Rtemp = G4_scratch;
  2877   Register Rret = Lscratch;
  2879   load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, false);
  2880   __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
  2882   __ verify_oop(G5_method);
  2884   __ profile_call(O4);
  2886   // get return address
  2887   Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table());
  2888   __ load_address(table);
  2889   __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);          // get return type
  2890   // Make sure we don't need to mask Rret for tosBits after the above shift
  2891   ConstantPoolCacheEntry::verify_tosBits();
  2892   __ sll(Rret,  LogBytesPerWord, Rret);
  2893   __ ld_ptr(Rtemp, Rret, Rret);         // get return address
  2895   // do the call
  2896   __ call_from_interpreter(Rscratch, Gargs, Rret);
  2897 }
  2900 void TemplateTable::invokeinterface_object_method(Register RklassOop,
  2901                                                   Register Rcall,
  2902                                                   Register Rret,
  2903                                                   Register Rflags) {
  2904   Register Rscratch = G4_scratch;
  2905   Register Rindex = Lscratch;
  2907   assert_different_registers(Rscratch, Rindex, Rret);
  2909   Label notFinal;
  2911   // Check for vfinal
  2912   __ set((1 << ConstantPoolCacheEntry::vfinalMethod), Rscratch);
  2913   __ btst(Rflags, Rscratch);
  2914   __ br(Assembler::zero, false, Assembler::pt, notFinal);
  2915   __ delayed()->nop();
  2917   __ profile_final_call(O4);
  2919   // do the call - the index (f2) contains the methodOop
  2920   assert_different_registers(G5_method, Gargs, Rcall);
  2921   __ mov(Rindex, G5_method);
  2922   __ call_from_interpreter(Rcall, Gargs, Rret);
  2923   __ bind(notFinal);
  2925   __ profile_virtual_call(RklassOop, O4);
  2926   generate_vtable_call(RklassOop, Rindex, Rret);
  2927 }
  2930 void TemplateTable::invokeinterface(int byte_no) {
  2931   transition(vtos, vtos);
  2933   Register Rscratch = G4_scratch;
  2934   Register Rret = G3_scratch;
  2935   Register Rindex = Lscratch;
  2936   Register Rinterface = G1_scratch;
  2937   Register RklassOop = G5_method;
  2938   Register Rflags = O1;
  2939   assert_different_registers(Rscratch, G5_method);
  2941   load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, false);
  2942   __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
  2944   // get receiver
  2945   __ and3(Rflags, 0xFF, Rscratch);       // gets number of parameters
  2946   __ load_receiver(Rscratch, O0);
  2947   __ verify_oop(O0);
  2949   __ mov(Rflags, Rret);
  2951   // get return address
  2952   Address table(Rscratch, (address)Interpreter::return_5_addrs_by_index_table());
  2953   __ load_address(table);
  2954   __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);          // get return type
  2955   // Make sure we don't need to mask Rret for tosBits after the above shift
  2956   ConstantPoolCacheEntry::verify_tosBits();
  2957   __ sll(Rret,  LogBytesPerWord, Rret);
  2958   __ ld_ptr(Rscratch, Rret, Rret);      // get return address
  2960   // get receiver klass
  2961   __ null_check(O0, oopDesc::klass_offset_in_bytes());
  2962   __ load_klass(O0, RklassOop);
  2963   __ verify_oop(RklassOop);
  2965   // Special case of invokeinterface called for virtual method of
  2966   // java.lang.Object.  See cpCacheOop.cpp for details.
  2967   // This code isn't produced by javac, but could be produced by
  2968   // another compliant java compiler.
  2969   Label notMethod;
  2970   __ set((1 << ConstantPoolCacheEntry::methodInterface), Rscratch);
  2971   __ btst(Rflags, Rscratch);
  2972   __ br(Assembler::zero, false, Assembler::pt, notMethod);
  2973   __ delayed()->nop();
  2975   invokeinterface_object_method(RklassOop, Rinterface, Rret, Rflags);
  2977   __ bind(notMethod);
  2979   __ profile_virtual_call(RklassOop, O4);
  2981   //
  2982   // find entry point to call
  2983   //
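         // In rough C-style pseudocode, the search below does (a sketch;
         // names are illustrative only):
         //   itableOffsetEntry* ioe = first offset entry, at the end of the vtable;
         //   while (true) {
         //     if (ioe->interface() == NULL)       throw ICCE;  // not implemented
         //     if (ioe->interface() == Rinterface) break;
         //     ioe++;
         //   }
         //   G5_method = *(methodOop*)(RklassOop + ioe->offset() + Rindex * entry_size);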
  2985   // compute start of first itableOffsetEntry (which is at end of vtable)
  2986   const int base = instanceKlass::vtable_start_offset() * wordSize;
  2987   Label search;
  2988   Register Rtemp = Rflags;
  2990   __ ld(Address(RklassOop, 0, instanceKlass::vtable_length_offset() * wordSize), Rtemp);
  2991   if (align_object_offset(1) > 1) {
  2992     __ round_to(Rtemp, align_object_offset(1));
  2993   }
  2994   __ sll(Rtemp, LogBytesPerWord, Rtemp);   // Rtemp *= wordSize
  2995   if (Assembler::is_simm13(base)) {
  2996     __ add(Rtemp, base, Rtemp);
  2997   } else {
  2998     __ set(base, Rscratch);
  2999     __ add(Rscratch, Rtemp, Rtemp);
  3000   }
  3001   __ add(RklassOop, Rtemp, Rscratch);
  3003   __ bind(search);
  3005   __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp);
  3006   {
  3007     Label ok;
  3009     // Check that entry is non-null.  Null entries are probably a bytecode
  3010     // problem.  If the interface isn't implemented by the receiver class,
  3011     // the VM should throw IncompatibleClassChangeError.  linkResolver checks
  3012     // this too but that's only if the entry isn't already resolved, so we
  3013     // need to check again.
  3014     __ br_notnull( Rtemp, false, Assembler::pt, ok);
  3015     __ delayed()->nop();
  3016     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
  3017     __ should_not_reach_here();
  3018     __ bind(ok);
  3019     __ verify_oop(Rtemp);
  3020   }
  3022   __ verify_oop(Rinterface);
  3024   __ cmp(Rinterface, Rtemp);
  3025   __ brx(Assembler::notEqual, true, Assembler::pn, search);
  3026   __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch);
  3028   // entry found and Rscratch points to it
  3029   __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch);
  3031   assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
  3032   __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex);       // Rindex *= 8;
  3033   __ add(Rscratch, Rindex, Rscratch);
  3034   __ ld_ptr(RklassOop, Rscratch, G5_method);
  3036   // Check for abstract method error.
  3037   {
  3038     Label ok;
  3039     __ tst(G5_method);
  3040     __ brx(Assembler::notZero, false, Assembler::pt, ok);
  3041     __ delayed()->nop();
  3042     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
  3043     __ should_not_reach_here();
  3044     __ bind(ok);
  3045   }
  3047   Register Rcall = Rinterface;
  3048   assert_different_registers(Rcall, G5_method, Gargs, Rret);
  3050   __ verify_oop(G5_method);
  3051   __ call_from_interpreter(Rcall, Gargs, Rret);
  3052 }
  3056 //----------------------------------------------------------------------------------------------------
  3057 // Allocation
  3059 void TemplateTable::_new() {
  3060   transition(vtos, atos);
  3062   Label slow_case;
  3063   Label done;
  3064   Label initialize_header;
  3065   Label initialize_object;  // including clearing the fields
  3067   Register RallocatedObject = Otos_i;
  3068   Register RinstanceKlass = O1;
  3069   Register Roffset = O3;
  3070   Register Rscratch = O4;
  3072   __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
  3073   __ get_cpool_and_tags(Rscratch, G3_scratch);
  3074   // make sure the class we're about to instantiate has been resolved
  3075   __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
  3076   __ ldub(G3_scratch, Roffset, G3_scratch);
  3077   __ cmp(G3_scratch, JVM_CONSTANT_Class);
  3078   __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
  3079   __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
  3081   //__ sll(Roffset, LogBytesPerWord, Roffset);        // executed in delay slot
  3082   __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
  3083   __ ld_ptr(Rscratch, Roffset, RinstanceKlass);
  3085   // make sure klass is fully initialized:
  3086   __ ld(RinstanceKlass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_scratch);
  3087   __ cmp(G3_scratch, instanceKlass::fully_initialized);
  3088   __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
  3089   __ delayed()->ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset);
  3091   // get instance_size in instanceKlass (already aligned)
  3092   //__ ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset);
  3094   // make sure klass does not have a finalizer, is not abstract, is not an interface, and is not java/lang/Class
  3095   __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
  3096   __ br(Assembler::notZero, false, Assembler::pn, slow_case);
  3097   __ delayed()->nop();
  3099   // allocate the instance
  3100   // 1) Try to allocate in the TLAB
  3101   // 2) if fail, and the TLAB is not full enough to discard, allocate in the shared Eden
  3102   // 3) if the above fails (or is not applicable), go to a slow case
  3103   // (creates a new TLAB, etc.)
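         //
         // As a reading aid, the fast paths below in pseudocode (obj_size
         // is the value in Roffset):
         //   new_top = tlab_top + obj_size;
         //   if (new_top <= tlab_end)           { tlab_top = new_top; obj = old tlab_top; }
         //   else if (waste_limit >= tlab_free) goto slow_case;   // refill the TLAB
         //   else { waste_limit += increment; CAS-bump the shared eden top; }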
  3105   const bool allow_shared_alloc =
  3106     Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
  3108   if(UseTLAB) {
  3109     Register RoldTopValue = RallocatedObject;
  3110     Register RtopAddr = G3_scratch, RtlabWasteLimitValue = G3_scratch;
  3111     Register RnewTopValue = G1_scratch;
  3112     Register RendValue = Rscratch;
  3113     Register RfreeValue = RnewTopValue;
  3115     // check if we can allocate in the TLAB
  3116     __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RallocatedObject
  3117     __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
  3118     __ add(RoldTopValue, Roffset, RnewTopValue);
  3120     // if there is enough space, we do not CAS and do not clear
  3121     __ cmp(RnewTopValue, RendValue);
  3122     if(ZeroTLAB) {
  3123       // the fields have already been cleared
  3124       __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
  3125     } else {
  3126       // initialize both the header and fields
  3127       __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
  3128     }
  3129     __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
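           // Note: both branches above set the annul bit, so the st_ptr in
           // the shared delay slot commits the new TLAB top only when a
           // branch is taken, i.e. only when the object fits in the TLAB.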
  3131     if (allow_shared_alloc) {
  3132       // Check if tlab should be discarded (refill_waste_limit >= free)
  3133       __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
  3134       __ sub(RendValue, RoldTopValue, RfreeValue);
  3135 #ifdef _LP64
  3136       __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
  3137 #else
  3138       __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
  3139 #endif
  3140       __ cmp(RtlabWasteLimitValue, RfreeValue);
  3141       __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, slow_case); // tlab waste is small
  3142       __ delayed()->nop();
  3144       // increment waste limit to prevent getting stuck on this slow path
  3145       __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
  3146       __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
  3147     } else {
  3148       // No allocation in the shared eden.
  3149       __ br(Assembler::always, false, Assembler::pt, slow_case);
  3150       __ delayed()->nop();
  3151     }
  3152   }
  3154   // Allocation in the shared Eden
  3155   if (allow_shared_alloc) {
  3156     Register RoldTopValue = G1_scratch;
  3157     Register RtopAddr = G3_scratch;
  3158     Register RnewTopValue = RallocatedObject;
  3159     Register RendValue = Rscratch;
  3161     __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);
  3163     Label retry;
  3164     __ bind(retry);
  3165     __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
  3166     __ ld_ptr(RendValue, 0, RendValue);
  3167     __ ld_ptr(RtopAddr, 0, RoldTopValue);
  3168     __ add(RoldTopValue, Roffset, RnewTopValue);
  3170     // RnewTopValue contains the top address after the new object
  3171     // has been allocated.
  3172     __ cmp(RnewTopValue, RendValue);
  3173     __ brx(Assembler::greaterUnsigned, false, Assembler::pn, slow_case);
  3174     __ delayed()->nop();
  3176     __ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue,
  3177       VM_Version::v9_instructions_work() ? NULL :
  3178       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
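           // casx compares the word at [RtopAddr] with RoldTopValue and, on
           // a match, stores RnewTopValue there; RnewTopValue receives the
           // old memory value either way, which the cmp below uses to detect
           // whether another thread won the race.  On pre-V9 hardware the
           // stub's global atomic-memory-operation lock serializes this.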
  3180     // if someone beat us on the allocation, try again, otherwise continue
  3181     __ cmp(RoldTopValue, RnewTopValue);
  3182     __ brx(Assembler::notEqual, false, Assembler::pn, retry);
  3183     __ delayed()->nop();
  3184   }
  3186   if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
  3187     // clear object fields
  3188     __ bind(initialize_object);
  3189     __ deccc(Roffset, sizeof(oopDesc));
  3190     __ br(Assembler::zero, false, Assembler::pt, initialize_header);
  3191     __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);
  3193     // initialize remaining object fields
  3194     { Label loop;
  3195       __ subcc(Roffset, wordSize, Roffset);
  3196       __ bind(loop);
  3197       //__ subcc(Roffset, wordSize, Roffset);      // executed above loop or in delay slot
  3198       __ st_ptr(G0, G3_scratch, Roffset);
  3199       __ br(Assembler::notEqual, false, Assembler::pt, loop);
  3200       __ delayed()->subcc(Roffset, wordSize, Roffset);
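             // This loop stores G0 (the hardwired zero register) one word per
             // iteration at descending offsets; the subcc in the delay slot
             // both steps Roffset and sets the condition codes the branch tests.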
  3201     }
  3202     __ br(Assembler::always, false, Assembler::pt, initialize_header);
  3203     __ delayed()->nop();
  3204   }
  3206   // slow case
  3207   __ bind(slow_case);
  3208   __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
  3209   __ get_constant_pool(O1);
  3211   call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);
  3213   __ ba(false, done);
  3214   __ delayed()->nop();
  3216   // Initialize the header: mark, klass
  3217   __ bind(initialize_header);
  3219   if (UseBiasedLocking) {
  3220     __ ld_ptr(RinstanceKlass, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), G4_scratch);
  3221   } else {
  3222     __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
  3223   }
  3224   __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes());       // mark
  3225   __ store_klass_gap(G0, RallocatedObject);         // klass gap if compressed
  3226   __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms)
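         // The klass is stored last, presumably so that a concurrent
         // collector never sees an object whose klass is already set while
         // its header and fields are still uninitialized (the reason for
         // the "last for cms" note above).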
  3228   {
  3229     SkipIfEqual skip_if(
  3230       _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
  3231     // Trigger dtrace event
  3232     __ push(atos);
  3233     __ call_VM_leaf(noreg,
  3234        CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
  3235     __ pop(atos);
  3236   }
  3238   // continue
  3239   __ bind(done);
  3240 }
  3244 void TemplateTable::newarray() {
  3245   transition(itos, atos);
  3246   __ ldub(Lbcp, 1, O1);
  3247      call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
  3248 }
  3251 void TemplateTable::anewarray() {
  3252   transition(itos, atos);
  3253   __ get_constant_pool(O1);
  3254   __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
  3255      call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
  3256 }
  3259 void TemplateTable::arraylength() {
  3260   transition(atos, itos);
  3261   Label ok;
  3262   __ verify_oop(Otos_i);
  3263   __ tst(Otos_i);
  3264   __ throw_if_not_1_x( Assembler::notZero, ok );
  3265   __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
  3266   __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
  3267 }
  3270 void TemplateTable::checkcast() {
  3271   transition(atos, atos);
  3272   Label done, is_null, quicked, cast_ok, resolved;
  3273   Register Roffset = G1_scratch;
  3274   Register RobjKlass = O5;
  3275   Register RspecifiedKlass = O4;
  3277   // Check for casting a NULL
  3278   __ br_null(Otos_i, false, Assembler::pn, is_null);
  3279   __ delayed()->nop();
  3281   // Get value klass in RobjKlass
  3282   __ load_klass(Otos_i, RobjKlass); // get value klass
  3284   // Get constant pool tag
  3285   __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
  3287   // See if the checkcast has been quickened
  3288   __ get_cpool_and_tags(Lscratch, G3_scratch);
  3289   __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
  3290   __ ldub(G3_scratch, Roffset, G3_scratch);
  3291   __ cmp(G3_scratch, JVM_CONSTANT_Class);
  3292   __ br(Assembler::equal, true, Assembler::pt, quicked);
  3293   __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
  3295   __ push_ptr(); // save receiver for result, and for GC
  3296   call_VM(RspecifiedKlass, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
  3297   __ pop_ptr(Otos_i, G3_scratch); // restore receiver
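         // quicken_io_cc resolves the class named by the bytecode and
         // returns the resolved klass in RspecifiedKlass (the call_VM
         // result register); the receiver is saved across the call because
         // the VM call may trigger a GC.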
  3299   __ br(Assembler::always, false, Assembler::pt, resolved);
  3300   __ delayed()->nop();
  3302   // Extract target class from constant pool
  3303   __ bind(quicked);
  3304   __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
  3305   __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
  3306   __ bind(resolved);
  3307   __ load_klass(Otos_i, RobjKlass); // get value klass
  3309   // Generate a fast subtype check.  Branch to cast_ok if no
  3310   // failure.  Throw exception if failure.
  3311   __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );
  3313   // Not a subtype; so must throw exception
  3314   __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );
  3316   __ bind(cast_ok);
  3318   if (ProfileInterpreter) {
  3319     __ ba(false, done);
  3320     __ delayed()->nop();
  3321   }
  3322   __ bind(is_null);
  3323   __ profile_null_seen(G3_scratch);
  3324   __ bind(done);
  3325 }
  3328 void TemplateTable::instanceof() {
  3329   Label done, is_null, quicked, resolved;
  3330   transition(atos, itos);
  3331   Register Roffset = G1_scratch;
  3332   Register RobjKlass = O5;
  3333   Register RspecifiedKlass = O4;
  3335   // Check for a NULL object (instanceof of null is false)
  3336   __ br_null(Otos_i, false, Assembler::pt, is_null);
  3337   __ delayed()->nop();
  3339   // Get value klass in RobjKlass
  3340   __ load_klass(Otos_i, RobjKlass); // get value klass
  3342   // Get constant pool tag
  3343   __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
  3345   // See if the instanceof has been quickened
  3346   __ get_cpool_and_tags(Lscratch, G3_scratch);
  3347   __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
  3348   __ ldub(G3_scratch, Roffset, G3_scratch);
  3349   __ cmp(G3_scratch, JVM_CONSTANT_Class);
  3350   __ br(Assembler::equal, true, Assembler::pt, quicked);
  3351   __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
  3353   __ push_ptr(); // save receiver for result, and for GC
  3354   call_VM(RspecifiedKlass, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
  3355   __ pop_ptr(Otos_i, G3_scratch); // restore receiver
  3357   __ br(Assembler::always, false, Assembler::pt, resolved);
  3358   __ delayed()->nop();
  3361   // Extract target class from constant pool
  3362   __ bind(quicked);
  3363   __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
  3364   __ get_constant_pool(Lscratch);
  3365   __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
  3366   __ bind(resolved);
  3367   __ load_klass(Otos_i, RobjKlass); // get value klass
  3369   // Generate a fast subtype check.  Branch to done if no
  3370   // failure.  Return 0 if failure.
  3371   __ or3(G0, 1, Otos_i);      // set result assuming quick tests succeed
  3372   __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
  3373   // Not a subtype; return 0;
  3374   __ clr( Otos_i );
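         // The result was preset to 1; gen_subtype_check branches straight
         // to done on success, so the clr above runs only when the subtype
         // check fails and execution falls through.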
  3376   if (ProfileInterpreter) {
  3377     __ ba(false, done);
  3378     __ delayed()->nop();
  3379   }
  3380   __ bind(is_null);
  3381   __ profile_null_seen(G3_scratch);
  3382   __ bind(done);
  3383 }
  3385 void TemplateTable::_breakpoint() {
  3387    // Note: We get here even if we are single stepping.
  3388    // jbug insists on setting breakpoints at every bytecode,
  3389    // even if we are in single step mode.
  3391    transition(vtos, vtos);
  3392    // get the unpatched byte code
  3393    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
  3394    __ mov(O0, Lbyte_code);
  3396    // post the breakpoint event
  3397    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);
  3399    // complete the execution of original bytecode
  3400    __ dispatch_normal(vtos);
  3401 }
  3404 //----------------------------------------------------------------------------------------------------
  3405 // Exceptions
  3407 void TemplateTable::athrow() {
  3408   transition(atos, vtos);
  3410   // This works because the exception is cached in Otos_i, which is the same
  3411   // as O0, the register that throw_exception_entry expects.
  3412   assert(Otos_i == Oexception, "see explanation above");
  3414   __ verify_oop(Otos_i);
  3415   __ null_check(Otos_i);
  3416   __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
  3417 }
  3420 //----------------------------------------------------------------------------------------------------
  3421 // Synchronization
  3424 // See frame_sparc.hpp for monitor block layout.
  3425 // Monitor elements are dynamically allocated by growing stack as needed.
  3427 void TemplateTable::monitorenter() {
  3428   transition(atos, vtos);
  3429   __ verify_oop(Otos_i);
  3430   // Try to acquire a lock on the object
  3431   // Repeat until succeeded (i.e., until
  3432   // monitorenter returns true).
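         //
         // The monitor block between Lmonitors and the top-most monitor is
         // scanned below for a free slot (obj == NULL); if none is found, a
         // new slot is grown on the stack by add_monitor_to_stack.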
  3434   {   Label ok;
  3435     __ tst(Otos_i);
  3436     __ throw_if_not_1_x( Assembler::notZero,  ok);
  3437     __ delayed()->mov(Otos_i, Lscratch); // save obj
  3438     __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
  3439   }
  3441   assert(O0 == Otos_i, "Be sure where the object to lock is");
  3443   // find a free slot in the monitor block
  3446   // initialize entry pointer
  3447   __ clr(O1); // points to free slot or NULL
  3449   {
  3450     Label entry, loop, exit;
  3451     __ add( __ top_most_monitor(), O2 ); // last one to check
  3452     __ ba( false, entry );
  3453     __ delayed()->mov( Lmonitors, O3 ); // first one to check
  3456     __ bind( loop );
  3458     __ verify_oop(O4);          // verify each monitor's oop
  3459     __ tst(O4); // is this entry unused?
  3460     if (VM_Version::v9_instructions_work())
  3461       __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
  3462     else {
  3463       Label L;
  3464       __ br( Assembler::zero, true, Assembler::pn, L );
  3465       __ delayed()->mov(O3, O1); // remember this one if it matches
  3466       __ bind(L);
  3467     }
  3469     __ cmp(O4, O0); // check if current entry is for same object
  3470     __ brx( Assembler::equal, false, Assembler::pn, exit );
  3471     __ delayed()->inc( O3, frame::interpreter_frame_monitor_size() * wordSize ); // check next one
  3473     __ bind( entry );
  3475     __ cmp( O3, O2 );
  3476     __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
  3477     __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);
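           // The annulled delay slot preloads O4 with the next entry's obj
           // field, so each iteration of the loop above tests a monitor
           // loaded on the way in.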
  3479     __ bind( exit );
  3480   }
  3482   { Label allocated;
  3484     // found free slot?
  3485     __ br_notnull(O1, false, Assembler::pn, allocated);
  3486     __ delayed()->nop();
  3488     __ add_monitor_to_stack( false, O2, O3 );
  3489     __ mov(Lmonitors, O1);
  3491     __ bind(allocated);
  3492   }
  3494   // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
  3495   // The object has already been popped from the stack, so the expression stack looks correct.
  3496   __ inc(Lbcp);
  3498   __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
  3499   __ lock_object(O1, O0);
  3501   // check if there's enough space on the stack for the monitors after locking
  3502   __ generate_stack_overflow_check(0);
  3504   // The bcp has already been incremented. Just need to dispatch to next instruction.
  3505   __ dispatch_next(vtos);
  3506 }
  3509 void TemplateTable::monitorexit() {
  3510   transition(atos, vtos);
  3511   __ verify_oop(Otos_i);
  3512   __ tst(Otos_i);
  3513   __ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );
  3515   assert(O0 == Otos_i, "just checking");
  3517   { Label entry, loop, found;
  3518     __ add( __ top_most_monitor(), O2 ); // last one to check
  3519     __ ba(false, entry );
  3520     // Use Lscratch to hold the monitor element to check: start with the
  3521     // most recent monitor. By using a local it survives the call to the C routine.
  3522     __ delayed()->mov( Lmonitors, Lscratch );
  3524     __ bind( loop );
  3526     __ verify_oop(O4);          // verify each monitor's oop
  3527     __ cmp(O4, O0); // check if current entry is for desired object
  3528     __ brx( Assembler::equal, true, Assembler::pt, found );
  3529     __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit
  3531     __ inc( Lscratch, frame::interpreter_frame_monitor_size() * wordSize ); // advance to next
  3533     __ bind( entry );
  3535     __ cmp( Lscratch, O2 );
  3536     __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
  3537     __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);
  3539     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
  3540     __ should_not_reach_here();
  3542     __ bind(found);
  3543   }
  3544   __ unlock_object(O1);
  3545 }
  3548 //----------------------------------------------------------------------------------------------------
  3549 // Wide instructions
  3551 void TemplateTable::wide() {
  3552   transition(vtos, vtos);
  3553   __ ldub(Lbcp, 1, G3_scratch);// get next bc
  3554   __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
  3555   Address ep(G4_scratch, (address)Interpreter::_wentry_point);
  3556   __ load_address(ep);
  3557   __ ld_ptr(ep.base(), G3_scratch, G3_scratch);
  3558   __ jmp(G3_scratch, G0);
  3559   __ delayed()->nop();
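         // Dispatch goes through Interpreter::_wentry_point, the table of
         // wide-variant entry points, indexed by the bytecode that follows
         // the wide prefix.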
  3560   // Note: the Lbcp increment step is part of the individual wide bytecode implementations
  3561 }
  3564 //----------------------------------------------------------------------------------------------------
  3565 // Multi arrays
  3567 void TemplateTable::multianewarray() {
  3568   transition(vtos, atos);
  3569      // put ndims * wordSize into Lscratch
  3570   __ ldub( Lbcp,     3,               Lscratch);
  3571   __ sll(  Lscratch, Interpreter::logStackElementSize(), Lscratch);
  3572      // Lesp points past last_dim, so set O1 to the first_dim address
  3573   __ add(  Lesp,     Lscratch,        O1);
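            // Illustrative example: with ndims == 2, Lscratch holds the size
            // of two stack slots, so O1 = Lesp + 2 slots, the address of
            // first_dim; the same add after the call pops both dimensions.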
  3574      call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
  3575   __ add(  Lesp,     Lscratch,        Lesp); // pop all dimensions off the stack
  3576 }
  3577 #endif /* !CC_INTERP */
