// src/cpu/ppc/vm/templateTable_ppc_64.cpp

/*
 * Copyright (c) 2014, 2017 Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013, 2017 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreter.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP

#undef __
#define __ _masm->

// ============================================================================
// Misc helpers

// Do an oop store like *(base + index) = val OR *(base + offset) = val
// (only one of the two variants is possible at a time).
// Index can be noreg.
// Kills:
//   Rbase, Rtmp1, Rtmp2, Rtmp3
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register           Rbase,
                         RegisterOrConstant offset,
                         Register           Rval,         // Noreg means always null.
                         Register           Rtmp1,
                         Register           Rtmp2,
                         Register           Rtmp3,
                         BarrierSet::Name   barrier,
                         bool               precise,
                         bool               check_null) {
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);
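
  // The switch below emits one of two store schemas:
  //  - G1: SATB pre-barrier (record the previous value), then the oop store,
  //    then the post-barrier that marks/enqueues the card.
  //  - Card table: the oop store followed by a card mark.
  // 'precise' asks for the card mark on the exact element address, so Rbase
  // is advanced by 'offset' before the post-barrier.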

  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(Rbase, offset,
                                Rtmp3, /* holder of pre_val ? */
                                Rtmp1, Rtmp2, false /* frame */);

        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ g1_write_barrier_post(Rbase, Rval, Rtmp1, Rtmp2, Rtmp3, /*filtered (fast path)*/ &Ldone);
          if (check_null) { __ b(Ldone); }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval should stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ card_write_barrier_post(Rbase, Rval, Rtmp1);
          if (check_null) {
            __ b(Ldone);
          }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}

// ============================================================================
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No ppc64 specific initialization.
}

Address TemplateTable::at_bcp(int offset) {
  // Not used on ppc.
  ShouldNotReachHere();
  return Address();
}

// Patches the current bytecode (ptr to it located in bcp)
// in the bytecode stream with a new one.
void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) {
  // With sharing on, may need to test method flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (new_bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_zputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
      // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
#if defined(VM_LITTLE_ENDIAN)
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
#else
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
#endif
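      // The indices field packs the constant-pool index and the resolved
      // get/put bytecodes into one 8-byte word; byte (1 + byte_no) holds the
      // code we test. Little endian: that byte is at offset 1 + byte_no from
      // the field start. Big endian: it sits at offset 7 - (1 + byte_no).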
      __ cmpwi(CCR0, Rnew_bc, 0);
      __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      __ beq(CCR0, L_patch_done);
      // __ isync(); // acquire not needed
      break;
    }

    default:
      assert(byte_no == -1, "sanity");
      if (load_bc_into_bc_reg) {
        __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ lbz(Rtemp, 0, R14_bcp);
    __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
    __ bne(CCR0, L_fast_patch);
    // Perform the quickening, slowly, in the bowels of the breakpoint table.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

  // Patch bytecode.
  __ stb(Rnew_bc, 0, R14_bcp);

  __ bind(L_patch_done);
}

// ============================================================================
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // Nothing to do.
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ li(R17_tos, 0);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0;
  static float one  = 1.0;
  static float two  = 2.0;
  switch (value) {
    default: ShouldNotReachHere();
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 2: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0;
  static double one  = 1.0;
  switch (value) {
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ lbz(R17_tos, 1, R14_bcp);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  Register Rscratch1 = R11_scratch1,
           Rscratch2 = R12_scratch2,
           Rcpool    = R3_ARG1;

  transition(vtos, vtos);
  Label notInt, notClass, exit;

  __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
  if (wide) { // Read index.
    __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ lbz(Rscratch1, 1, R14_bcp);
  }

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // Get type from tags.
  __ addi(Rscratch2, Rscratch2, tags_offset);
  __ lbzx(Rscratch2, Rscratch2, Rscratch1);

  __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
  __ cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
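  // CR bit numbering: bit = 4 * crfield + (lt = 0, gt = 1, eq = 2, so = 3),
  // so CR0.eq is bit 2 and CR1.eq is bit 4+2. The cror above folds both
  // "unresolved" checks into CR0.eq.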

  // Resolved class - need to call vm to get java mirror of the class.
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
  __ crnor(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); // Neither resolved class nor unresolved case from above?
  __ beq(CCR0, notClass);

  __ li(R4, wide ? 1 : 0);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
  __ push(atos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notClass);
  __ addi(Rcpool, Rcpool, base_offset);
  __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
  __ bne(CCR0, notInt);
  __ lwax(R17_tos, Rcpool, Rscratch1);
  __ push(itos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notInt);
#ifdef ASSERT
  // String and Object are rewritten to fast_aldc
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
  __ asm_assert_eq("unexpected type", 0x8765);
#endif
  __ lfsx(F15_ftos, Rcpool, Rscratch1);
  __ push(ftos);

  __ align(32, 12);
  __ bind(exit);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  const Register Rscratch = R11_scratch1;
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  __ get_cache_index_at_bcp(Rscratch, 1, index_size);  // Load index.
  __ load_resolved_reference_at_index(R17_tos, Rscratch);
  __ cmpdi(CCR0, R17_tos, 0);
  __ bne(CCR0, resolved);
  __ load_const_optimized(R3_ARG1, (int)bytecode());

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // First time invocation - must resolve first.
  __ call_VM(R17_tos, entry, R3_ARG1);

  __ align(32, 12);
  __ bind(resolved);
  __ verify_oop(R17_tos);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Llong, Lexit;

  Register Rindex = R11_scratch1,
           Rcpool = R12_scratch2,
           Rtag   = R3_ARG1;
  __ get_cpool_and_tags(Rcpool, Rtag);
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // Get type from tags.
  __ addi(Rcpool, Rcpool, base_offset);
  __ addi(Rtag, Rtag, tags_offset);

  __ lbzx(Rtag, Rtag, Rindex);

  __ sldi(Rindex, Rindex, LogBytesPerWord);
  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
  __ bne(CCR0, Llong);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool. SG, 11/7/97
  __ lfdx(F15_ftos, Rcpool, Rindex);
  __ push(dtos);
  __ b(Lexit);

  __ bind(Llong);
  __ ldx(R17_tos, Rcpool, Rindex);
  __ push(ltos);

  __ bind(Lexit);
}

// Get the locals index located in the bytecode stream at bcp + offset.
void TemplateTable::locals_index(Register Rdst, int offset) {
  __ lbz(Rdst, offset, R14_bcp);
}

void TemplateTable::iload() {
  transition(vtos, itos);

  // Get the local value into tos
  const Register Rindex = R22_tmp2;
  locals_index(Rindex);

  // Rewrite iload,iload  pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs) {
    Label Lrewrite, Ldone;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // get next byte
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);

    // If _iload, wait to rewrite to fast_iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
    __ beq(CCR0, Ldone);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
    __ beq(CCR0, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false);
    __ bind(Ldone);
  }

  __ load_local_int(R17_tos, Rindex, Rindex);
}
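
// After the rewrite above, a frequent "iload a; iload b" pair executes as
// _fast_iload2: both local indices remain in the bytecode stream, at offsets
// 1 and Bytecodes::length_for(_iload) + 1 from the fast_iload2 opcode, which
// is exactly where fast_iload2() below picks them up.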

// Load 2 integers in a row without dispatching
void TemplateTable::fast_iload2() {
  transition(vtos, itos);

  __ lbz(R3_ARG1, 1, R14_bcp);
  __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp);

  __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1);
  __ load_local_int(R17_tos, R12_scratch2, R17_tos);
  __ push_i(R3_ARG1);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  // Get the local value into tos

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load a local variable type long from locals area to TOS cache register.
// Local index resides in the bytecode stream.
void TemplateTable::lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::locals_index_wide(Register Rdst) {
  // Offset is 2, not 1, because R14_bcp points to the wide prefix code.
  __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::wide_iload() {
  // Get the local value into tos.

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::iaload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr);
}

void TemplateTable::laload() {
  transition(itos, ltos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr);
}

void TemplateTable::faload() {
  transition(itos, ftos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr);
}

void TemplateTable::daload() {
  transition(itos, dtos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr);
}

void TemplateTable::aaload() {
  transition(itos, atos);

  // tos: index
  // result tos: array element
  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
  __ load_heap_oop(R17_tos, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rload_addr);
  __ verify_oop(R17_tos);
  //__ dcbt(R17_tos); // prefetch
}

void TemplateTable::baload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr);
  __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::caload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

// Iload followed by caload frequent pair.
void TemplateTable::fast_icaload() {
  transition(vtos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R11_scratch1;

  locals_index(R17_tos);
  __ load_local_int(R17_tos, Rtemp, R17_tos);
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

void TemplateTable::saload() {
  transition(itos, itos);

  const Register Rload_addr = R11_scratch1,
                 Rarray     = R12_scratch2,
                 Rtemp      = R3_ARG1;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);

  __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);

  __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);

  __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  //       delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite.

  if (RewriteFrequentPairs) {

    Label Lrewrite, Ldont_rewrite;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // Get next byte.
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);

    // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
    __ beq(CCR0, Ldont_rewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
    __ beq(CCR0, Lrewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
    __ beq(CCR1, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false);
    __ bind(Ldont_rewrite);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_i();
  locals_index_wide(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_l();
  locals_index_wide(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_f();
  locals_index_wide(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_d();
  locals_index_wide(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index_wide(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::iastore() {
  transition(itos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr);
}

// Pop 3 values from the stack and...
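// Flow: a null value skips the type check and stores null (profiling the
// null seen); a non-null value must pass a subtype check against the
// array's element klass, otherwise we dispatch to the
// ArrayStoreException entry.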
void TemplateTable::aastore() {
  transition(vtos, vtos);

  Label Lstore_ok, Lis_null, Ldone;
  const Register Rindex    = R3_ARG1,
                 Rarray    = R4_ARG2,
                 Rscratch  = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rarray_klass = R5_ARG3,
                 Rarray_element_klass = Rarray_klass,
                 Rvalue_klass = R6_ARG4,
                 Rstore_addr = R31;    // Use register which survives VM call.

  __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store.
  __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index.
  __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp);  // Get array.

  __ verify_oop(R17_tos);
  __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr);
  // Rindex is dead!
  Register Rscratch3 = Rindex;

  // Do array store check - check for NULL value first.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  __ load_klass(Rarray_klass, Rarray);
  __ load_klass(Rvalue_klass, R17_tos);

  // Do fast instanceof cache test.
  __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass);

  // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure.
  __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok);

  // Fell through: subtype check failed => throw an exception.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ bind(Lis_null);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);
  __ profile_null_seen(Rscratch, Rscratch2);
  __ b(Ldone);

  // Store is OK.
  __ bind(Lstore_ok);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);

  __ bind(Ldone);
  // Adjust sp (pops array, index and value).
  __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  __ pop_ptr(Rarray);
  // tos: val

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
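  // T_BOOLEAN and T_BYTE array klasses differ in exactly one bit of the
  // Klass layout helper; testing that bit tells us whether to mask the
  // value to 0/1 before the store.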
  __ load_klass(Rscratch, Rarray);
  __ lwz(Rscratch, in_bytes(Klass::layout_helper_offset()), Rscratch);
  int diffbit = exact_log2(Klass::layout_helper_boolean_diffbit());
  __ testbitdi(CCR0, R0, Rscratch, diffbit);
  Label L_skip;
  __ bfalse(CCR0, L_skip);
  __ andi(R17_tos, R17_tos, 1);  // If it is a T_BOOLEAN array, mask the stored value to 0/1.
  __ bind(L_skip);

  __ index_check_without_pop(Rarray, Rindex, 0, Rscratch, Rarray);
  __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray);
}

void TemplateTable::castore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray);
  __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);

  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, R11_scratch1);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::pop() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2);
}

void TemplateTable::dup() {
  transition(vtos, vtos);

  __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(R11_scratch1);
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  __ push_ptr(Rb);
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;

  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize,     R15_esp);  // load c
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);  // load a
  __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);  // load b
  // stack: ..., c, b, c
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b
  // stack: ..., c, a, c
  __ std(Rb, Interpreter::stackElementSize,     R15_esp); // store b in c
  __ push_ptr(Rc);                                        // push c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ push_2ptrs(Ra, Rb);
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;
  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize,     R15_esp);
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 3, R15_esp);
  // stack: ..., b, c, a
  __ push_2ptrs(Rb, Rc);
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1,
           Rd = R4_ARG2;
  // stack: ..., a, b, c, d
  __ ld(Rb, Interpreter::stackElementSize * 3, R15_esp);
  __ ld(Rd, Interpreter::stackElementSize,     R15_esp);
  __ std(Rb, Interpreter::stackElementSize,     R15_esp);  // store b in d
  __ std(Rd, Interpreter::stackElementSize * 3, R15_esp);  // store d in b
  __ ld(Ra, Interpreter::stackElementSize * 4, R15_esp);
  __ ld(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp);  // store a in c
  __ std(Rc, Interpreter::stackElementSize * 4, R15_esp);  // store c in a
  // stack: ..., c, d, a, b
  __ push_2ptrs(Rc, Rd);
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  // stack: ..., b, a
}
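
// Integer binary operations. Rscratch holds the first (deeper) operand,
// tos the second. For the shifts, only the low 5 bits of the count are
// significant for ints (JVM spec), hence the rldicl(..., 0, 64 - 5)
// masks below.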
void TemplateTable::iop2(Operation op) {
  transition(itos, itos);

  Register Rscratch = R11_scratch1;

  __ pop_i(Rscratch);
  // tos  = number of bits to shift
  // Rscratch = value to shift
  switch (op) {
    case  add:   __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:   __ sub(R17_tos, Rscratch, R17_tos); break;
    case  mul:   __ mullw(R17_tos, Rscratch, R17_tos); break;
    case  _and:  __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:   __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor:  __ xorr(R17_tos, Rscratch, R17_tos); break;
    case  shl:   __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break;
    case  shr:   __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break;
    case  ushr:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break;
    default:     ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);

  Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch);
  switch (op) {
    case  add:   __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:   __ sub(R17_tos, Rscratch, R17_tos); break;
    case  _and:  __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:   __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor:  __ xorr(R17_tos, Rscratch, R17_tos); break;
    default:     ShouldNotReachHere();
  }
}
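
// idiv special-cases divisors -1, 0 and +1: addi(R0, tos, 1) maps them to
// {0, 1, 2}, so a single unsigned compare against 2 routes every other
// divisor to the plain divw path (which must not see minint / -1).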
void TemplateTable::idiv() {
  transition(itos, itos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by irem.

  __ addi(R0, R17_tos, 1);
  __ cmplwi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpwi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_i(Rdividend);
  __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_i(Rdividend);
  __ divw(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}
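
// irem derives the remainder from idiv's quotient:
//   rem = dividend - quotient * divisor.
// R12_scratch2 preserves the divisor across idiv; R11_scratch1 holds the
// dividend that idiv popped.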
void TemplateTable::irem() {
  transition(itos, itos);

  __ mr(R12_scratch2, R17_tos);
  idiv();
  __ mullw(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv.
}

void TemplateTable::lmul() {
  transition(ltos, ltos);

  __ pop_l(R11_scratch1);
  __ mulld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by lrem.

  __ addi(R0, R17_tos, 1);
  __ cmpldi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpdi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_l(Rdividend);
  __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_l(Rdividend);
  __ divd(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}

void TemplateTable::lrem() {
  transition(ltos, ltos);

  __ mr(R12_scratch2, R17_tos);
  ldiv();
  __ mulld(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv.
}
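
// Long shifts use the low 6 bits of the count (JVM spec), hence the
// rldicl(..., 0, 64 - 6) masks below.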
void TemplateTable::lshl() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ sld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lshr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srad(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lushr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srd(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  switch (op) {
    case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_f(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_d(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

// Negate the value in the TOS cache.
void TemplateTable::ineg() {
  transition(itos, itos);

  __ neg(R17_tos, R17_tos);
}

// Negate the value in the TOS cache.
void TemplateTable::lneg() {
  transition(ltos, ltos);

  __ neg(R17_tos, R17_tos);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);

  __ fneg(F15_ftos, F15_ftos);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);

  __ fneg(F15_ftos, F15_ftos);
}

// Increments a local variable in place.
void TemplateTable::iinc() {
  transition(vtos, vtos);

  const Register Rindex     = R11_scratch1,
                 Rincrement = R0,
                 Rvalue     = R12_scratch2;

  locals_index(Rindex);              // Load locals index from bytecode stream.
  __ lbz(Rincrement, 2, R14_bcp);    // Load increment from the bytecode stream.
  __ extsb(Rincrement, Rincrement);

  __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex.

  __ add(Rvalue, Rincrement, Rvalue);
  __ stw(Rvalue, 0, Rindex);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);

  Register Rindex       = R11_scratch1,
           Rlocals_addr = Rindex,
           Rincr        = R12_scratch2;
  locals_index_wide(Rindex);
  __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed);
  __ load_local_int(R17_tos, Rlocals_addr, Rindex);
  __ add(R17_tos, Rincr, R17_tos);
  __ stw(R17_tos, 0, Rlocals_addr);
}

void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extsw(R17_tos, R17_tos);
      break;

    case Bytecodes::_l2i:
      // Nothing to do, we'll continue to work with the lower bits.
      break;

    case Bytecodes::_i2b:
      __ extsb(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2c:
      __ rldicl(R17_tos, R17_tos, 0, 64-2*8);
      break;

    case Bytecodes::_i2s:
      __ extsh(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2d:
      __ extsw(R17_tos, R17_tos); // fall through to _l2d
    case Bytecodes::_l2d:
      __ push_l_pop_d();
      __ fcfid(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_i2f:
      __ extsw(R17_tos, R17_tos);
      __ push_l_pop_d();
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        // Comment: alternatively, load with sign extend could be done by lfiwax.
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        __ fcfid(F15_ftos, F15_ftos);
        __ frsp(F15_ftos, F15_ftos);
      }
      break;

    case Bytecodes::_l2f:
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        __ push_l_pop_d();
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
        __ mr(R3_ARG1, R17_tos);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f));
        __ fmr(F15_ftos, F1_RET);
      }
      break;

    case Bytecodes::_f2d:
      // empty
      break;

    case Bytecodes::_d2f:
      __ frsp(F15_ftos, F15_ftos);
      break;
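
    // Java requires NaN -> 0 for the float/double-to-integer conversions.
    // fcmpu of a NaN against itself sets the "unordered" bit in CCR0, so
    // bso exits with the preloaded 0 and skips fctiwz/fctidz.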
    case Bytecodes::_d2i:
    case Bytecodes::_f2i:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctiwz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    case Bytecodes::_d2l:
    case Bytecodes::_f2l:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctidz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}

// Long compare
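// lcmp computes -1/0/1 without branches: mfcr copies CCR0 into the low
// word (lt lands in the sign bit, gt next to it). srawi by 31 smears lt
// into -1 or 0, srwi by 30 brings gt down to bit 0, and or-ing the two
// yields <: -1, =: 0, >: 1.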
void TemplateTable::lcmp() {
  transition(ltos, itos);

  const Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch); // first operand, deeper in stack

  __ cmpd(CCR0, Rscratch, R17_tos); // compare
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
}

// fcmpl/fcmpg and dcmpl/dcmpg bytecodes
// unordered_result == -1 => fcmpl or dcmpl
// unordered_result ==  1 => fcmpg or dcmpg
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  const FloatRegister Rfirst  = F0_SCRATCH,
                      Rsecond = F15_ftos;
  const Register Rscratch = R11_scratch1;

  if (is_float) {
    __ pop_f(Rfirst);
  } else {
    __ pop_d(Rfirst);
  }

  Label Lunordered, Ldone;
  __ fcmpu(CCR0, Rfirst, Rsecond); // compare
  if (unordered_result) {
    __ bso(CCR0, Lunordered);
  }
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
  if (unordered_result) {
    __ b(Ldone);
    __ bind(Lunordered);
    __ load_const_optimized(R17_tos, unordered_result);
  }
  __ bind(Ldone);
}

// Branch_conditional which takes TemplateTable::Condition.
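// PPC condition registers have no le/ge bits, so less_equal and
// greater_equal are encoded as the negation of gt/lt ("positive = false");
// 'invert' flips the sense once more when the caller wants the complement.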
  1568 void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) {
  1569   bool positive = false;
  1570   Assembler::Condition cond = Assembler::equal;
  1571   switch (cc) {
  1572     case TemplateTable::equal:         positive = true ; cond = Assembler::equal  ; break;
  1573     case TemplateTable::not_equal:     positive = false; cond = Assembler::equal  ; break;
  1574     case TemplateTable::less:          positive = true ; cond = Assembler::less   ; break;
  1575     case TemplateTable::less_equal:    positive = false; cond = Assembler::greater; break;
  1576     case TemplateTable::greater:       positive = true ; cond = Assembler::greater; break;
  1577     case TemplateTable::greater_equal: positive = false; cond = Assembler::less   ; break;
  1578     default: ShouldNotReachHere();
  1579   }
  1580   int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  1581   int bi = Assembler::bi0(crx, cond);
  1582   __ bc(bo, bi, L);
  1583 }
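// Worked example: cc == less_equal, invert == false. less_equal has no CR bit
// of its own, so it is encoded as "greater bit is 0": positive == false and
// cond == greater, hence bo == bcondCRbiIs0 and bi selects the greater bit of crx.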
  1585 void TemplateTable::branch(bool is_jsr, bool is_wide) {
  1587   // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  1588   __ verify_thread();
  1590   const Register Rscratch1    = R11_scratch1,
  1591                  Rscratch2    = R12_scratch2,
  1592                  Rscratch3    = R3_ARG1,
  1593                  R4_counters  = R4_ARG2,
  1594                  bumped_count = R31,
  1595                  Rdisp        = R22_tmp2;
  1597   __ profile_taken_branch(Rscratch1, bumped_count);
  1599   // Get (wide) offset.
  1600   if (is_wide) {
  1601     __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  1602   } else {
  1603     __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  1604   }
  1606   // --------------------------------------------------------------------------
  1607   // Handle all the JSR stuff here, then exit.
  1608   // It's much shorter and cleaner than intermingling with the
  1609   // non-JSR normal-branch stuff occurring below.
  1610   if (is_jsr) {
  1611     // Compute return address as bci in Otos_i.
  1612     __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
  1613     __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3));
  1614     __ subf(R17_tos, Rscratch1, Rscratch2);
  1616     // Bump bcp to target of JSR.
  1617     __ add(R14_bcp, Rdisp, R14_bcp);
  1618     // Push returnAddress for "ret" on stack.
  1619     __ push_ptr(R17_tos);
  1620     // And away we go!
  1621     __ dispatch_next(vtos);
  1622     return;
  1623   }
  1625   // --------------------------------------------------------------------------
  1626   // Normal (non-jsr) branch handling
  1628   const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  1629   if (increment_invocation_counter_for_backward_branches) {
  1630     //__ unimplemented("branch invocation counter");
  1632     Label Lforward;
  1633     __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
  1635     // Check branch direction.
  1636     __ cmpdi(CCR0, Rdisp, 0);
  1637     __ bgt(CCR0, Lforward);
  1639     __ get_method_counters(R19_method, R4_counters, Lforward);
  1641     if (TieredCompilation) {
  1642       Label Lno_mdo, Loverflow;
  1643       const int increment = InvocationCounter::count_increment;
  1644       const int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
  1645       if (ProfileInterpreter) {
  1646         Register Rmdo = Rscratch1;
  1648         // If no method data exists, go to profile_continue.
  1649         __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
  1650         __ cmpdi(CCR0, Rmdo, 0);
  1651         __ beq(CCR0, Lno_mdo);
  1653         // Increment backedge counter in the MDO.
  1654         const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
  1655         __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
  1656         __ load_const_optimized(Rscratch3, mask, R0);
  1657         __ addi(Rscratch2, Rscratch2, increment);
  1658         __ stw(Rscratch2, mdo_bc_offs, Rmdo);
  1659         __ and_(Rscratch3, Rscratch2, Rscratch3);
  1660         __ bne(CCR0, Lforward);
  1661         __ b(Loverflow);
  1662       }
  1664       // If there's no MDO, increment counter in method.
  1665       const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
  1666       __ bind(Lno_mdo);
  1667       __ lwz(Rscratch2, mo_bc_offs, R4_counters);
  1668       __ load_const_optimized(Rscratch3, mask, R0);
  1669       __ addi(Rscratch2, Rscratch2, increment);
  1670       __ stw(Rscratch2, mo_bc_offs, R4_counters); // Store back where it was loaded from.
  1671       __ and_(Rscratch3, Rscratch2, Rscratch3);
  1672       __ bne(CCR0, Lforward);
  1674       __ bind(Loverflow);
  1676       // Notify point for loop, pass branch bytecode.
  1677       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R14_bcp, true);
  1679       // Was an OSR adapter generated?
  1680       // R3_RET = osr nmethod
  1681       __ cmpdi(CCR0, R3_RET, 0);
  1682       __ beq(CCR0, Lforward);
  1684       // Has the nmethod been invalidated already?
  1685       __ lwz(R0, nmethod::entry_bci_offset(), R3_RET);
  1686       __ cmpwi(CCR0, R0, InvalidOSREntryBci);
  1687       __ beq(CCR0, Lforward);
  1689       // Migrate the interpreter frame off of the stack.
  1690       // We can use all registers because we will not return to interpreter from this point.
  1692       // Save nmethod.
  1693       const Register osr_nmethod = R31;
  1694       __ mr(osr_nmethod, R3_RET);
  1695       __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
  1696       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
  1697       __ reset_last_Java_frame();
  1698       // OSR buffer is in ARG1.
  1700       // Remove the interpreter frame.
  1701       __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
  1703       // Jump to the osr code.
  1704       __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
  1705       __ mtlr(R0);
  1706       __ mtctr(R11_scratch1);
  1707       __ bctr();
  1709     } else {
  1711       const Register invoke_ctr = Rscratch1;
  1712       // Update Backedge branch separately from invocations.
  1713       __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);
  1715       if (ProfileInterpreter) {
  1716         __ test_invocation_counter_for_mdp(invoke_ctr, Rscratch2, Lforward);
  1717         if (UseOnStackReplacement) {
  1718           __ test_backedge_count_for_osr(bumped_count, R14_bcp, Rscratch2);
  1719         }
  1720       } else {
  1721         if (UseOnStackReplacement) {
  1722           __ test_backedge_count_for_osr(invoke_ctr, R14_bcp, Rscratch2);
  1723         }
  1724       }
  1725     }
  1727     __ bind(Lforward);
  1729   } else {
  1730     // Bump bytecode pointer by displacement (take the branch).
  1731     __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
  1732   }
  1733   // Continue with bytecode @ target.
  1734   // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  1735   // %%%%% and changing dispatch_next to dispatch_only.
  1736   __ dispatch_next(vtos);
  1737 }
  1739 // Helper function for if_cmp* methods below.
  1740 // Factored out common compare and branch code.
  1741 void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) {
  1742   Label Lnot_taken;
  1743   // Note: The condition code we get is the condition under which we
  1744   // *fall through*! So we have to inverse the CC here.
  1746   if (is_jint) {
  1747     if (cmp0) {
  1748       __ cmpwi(CCR0, Rfirst, 0);
  1749     } else {
  1750       __ cmpw(CCR0, Rfirst, Rsecond);
  1751     }
  1752   } else {
  1753     if (cmp0) {
  1754       __ cmpdi(CCR0, Rfirst, 0);
  1755     } else {
  1756       __ cmpd(CCR0, Rfirst, Rsecond);
  1757     }
  1758   }
  1759   branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);
  1761   // Condition is false => Jump!
  1762   branch(false, false);
  1764   // Condition is not true => Continue.
  1765   __ align(32, 12);
  1766   __ bind(Lnot_taken);
  1767   __ profile_not_taken_branch(Rscratch1, Rscratch2);
  1768 }
  1770 // Compare integer values with zero and fall through if CC holds, branch away otherwise.
  1771 void TemplateTable::if_0cmp(Condition cc) {
  1772   transition(itos, vtos);
  1774   if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true);
  1775 }
  1777 // Compare integer values and fall through if CC holds, branch away otherwise.
  1778 //
  1779 // Interface:
  1780 //  - Rfirst: First operand  (older stack value)
  1781 //  - tos:    Second operand (younger stack value)
  1782 void TemplateTable::if_icmp(Condition cc) {
  1783   transition(itos, vtos);
  1785   const Register Rfirst  = R0,
  1786                  Rsecond = R17_tos;
  1788   __ pop_i(Rfirst);
  1789   if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false);
  1790 }
  1792 void TemplateTable::if_nullcmp(Condition cc) {
  1793   transition(atos, vtos);
  1795   if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true);
  1796 }
  1798 void TemplateTable::if_acmp(Condition cc) {
  1799   transition(atos, vtos);
  1801   const Register Rfirst  = R0,
  1802                  Rsecond = R17_tos;
  1804   __ pop_ptr(Rfirst);
  1805   if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false);
  1806 }
  1808 void TemplateTable::ret() {
  1809   locals_index(R11_scratch1);
  1810   __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1);
  1812   __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2);
  1814   __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
  1815   __ add(R11_scratch1, R17_tos, R11_scratch1);
  1816   __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
  1817   __ dispatch_next(vtos);
  1818 }
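// I.e. the new R14_bcp is method->constMethod() + ConstMethod::codes_offset()
// + bci, where bci is the returnAddress value loaded from the local.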
  1820 void TemplateTable::wide_ret() {
  1821   transition(vtos, vtos);
  1823   const Register Rindex = R3_ARG1,
  1824                  Rscratch1 = R11_scratch1,
  1825                  Rscratch2 = R12_scratch2;
  1827   locals_index_wide(Rindex);
  1828   __ load_local_ptr(R17_tos, R17_tos, Rindex);
  1829   __ profile_ret(vtos, R17_tos, Rscratch1, R12_scratch2);
  1830   // Tos now contains the bci, compute the bcp from that.
  1831   __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
  1832   __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset()));
  1833   __ add(R14_bcp, Rscratch1, Rscratch2);
  1834   __ dispatch_next(vtos);
  1835 }
  1837 void TemplateTable::tableswitch() {
  1838   transition(itos, vtos);
  1840   Label Ldispatch, Ldefault_case;
  1841   Register Rlow_byte         = R3_ARG1,
  1842            Rindex            = Rlow_byte,
  1843            Rhigh_byte        = R4_ARG2,
  1844            Rdef_offset_addr  = R5_ARG3, // is going to contain address of default offset
  1845            Rscratch1         = R11_scratch1,
  1846            Rscratch2         = R12_scratch2,
  1847            Roffset           = R6_ARG4;
  1849   // Align bcp.
  1850   __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
  1851   __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
  1853   // Load lo & hi.
  1854   __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
  1855   __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
  1857   // Check for default case (=index outside [low,high]).
  1858   __ cmpw(CCR0, R17_tos, Rlow_byte);
  1859   __ cmpw(CCR1, R17_tos, Rhigh_byte);
  1860   __ blt(CCR0, Ldefault_case);
  1861   __ bgt(CCR1, Ldefault_case);
  1863   // Lookup dispatch offset.
  1864   __ sub(Rindex, R17_tos, Rlow_byte);
  1865   __ extsw(Rindex, Rindex);
  1866   __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2);
  1867   __ sldi(Rindex, Rindex, LogBytesPerInt);
  1868   __ addi(Rindex, Rindex, 3 * BytesPerInt);
  1869 #if defined(VM_LITTLE_ENDIAN)
  1870   __ lwbrx(Roffset, Rdef_offset_addr, Rindex);
  1871   __ extsw(Roffset, Roffset);
  1872 #else
  1873   __ lwax(Roffset, Rdef_offset_addr, Rindex);
  1874 #endif
  1875   __ b(Ldispatch);
  1877   __ bind(Ldefault_case);
  1878   __ profile_switch_default(Rhigh_byte, Rscratch1);
  1879   __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
  1881   __ bind(Ldispatch);
  1883   __ add(R14_bcp, Roffset, R14_bcp);
  1884   __ dispatch_next(vtos);
  1885 }
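// Equivalent C sketch of the dispatch above (illustrative only):
//
//   if (key < low || key > high) offset = default_offset;
//   else                         offset = table[key - low];
//   bcp += offset;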
  1887 void TemplateTable::lookupswitch() {
  1888   transition(itos, itos);
  1889   __ stop("lookupswitch bytecode should have been rewritten");
  1890 }
  1892 // Table switch using linear search through cases.
  1893 // Bytecode stream format:
  1894 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
  1895 // Note: Everything is big-endian format here.
  1896 void TemplateTable::fast_linearswitch() {
  1897   transition(itos, vtos);
  1899   Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case;
  1900   Register Rcount           = R3_ARG1,
  1901            Rcurrent_pair    = R4_ARG2,
  1902            Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.
  1903            Roffset          = R31,     // Might need to survive C call.
  1904            Rvalue           = R12_scratch2,
  1905            Rscratch         = R11_scratch1,
  1906            Rcmp_value       = R17_tos;
  1908   // Align bcp.
  1909   __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
  1910   __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
  1912   // Setup loop counter and limit.
  1913   __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
  1914   __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.
  1916   __ mtctr(Rcount);
  1917   __ cmpwi(CCR0, Rcount, 0);
  1918   __ bne(CCR0, Lloop_entry);
  1920   // Default case
  1921   __ bind(Ldefault_case);
  1922   __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
  1923   if (ProfileInterpreter) {
  1924     __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */);
  1925   }
  1926   __ b(Lcontinue_execution);
  1928   // Next iteration
  1929   __ bind(Lsearch_loop);
  1930   __ bdz(Ldefault_case);
  1931   __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
  1932   __ bind(Lloop_entry);
  1933   __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned);
  1934   __ cmpw(CCR0, Rvalue, Rcmp_value);
  1935   __ bne(CCR0, Lsearch_loop);
  1937   // Found, load offset.
  1938   __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed);
  1939   // Calculate case index and profile
  1940   __ mfctr(Rcurrent_pair);
  1941   if (ProfileInterpreter) {
  1942     __ sub(Rcurrent_pair, Rcount, Rcurrent_pair);
  1943     __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch);
  1944   }
  1946   __ bind(Lcontinue_execution);
  1947   __ add(R14_bcp, Roffset, R14_bcp);
  1948   __ dispatch_next(vtos);
  1949 }
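// Equivalent C sketch of the search above (illustrative only; pairs[] are the
// big-endian value/offset pairs from the bytecode stream):
//
//   int offset = default_offset;
//   for (int i = 0; i < count; i++) {
//     if (pairs[i].match == key) { offset = pairs[i].offset; break; }
//   }
//   bcp += offset;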
  1951 // Table switch using binary search (value/offset pairs are ordered).
  1952 // Bytecode stream format:
  1953 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
  1954 // Note: Everything is big-endian format here. So on little endian machines, we have to reverse offset, count and cmp value.
  1955 void TemplateTable::fast_binaryswitch() {
  1957   transition(itos, vtos);
  1958   // Implementation using the following core algorithm: (copied from Intel)
  1959   //
  1960   // int binary_search(int key, LookupswitchPair* array, int n) {
  1961   //   // Binary search according to "Methodik des Programmierens" by
  1962   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  1963   //   int i = 0;
  1964   //   int j = n;
  1965   //   while (i+1 < j) {
  1966   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  1967   //     // with      Q: for all i: 0 <= i < n: key < a[i]
  1968   //     // where a stands for the array and assuming that the (nonexistent)
  1969   //     // element a[n] is infinitely big.
  1970   //     int h = (i + j) >> 1;
  1971   //     // i < h < j
  1972   //     if (key < array[h].fast_match()) {
  1973   //       j = h;
  1974   //     } else {
  1975   //       i = h;
  1976   //     }
  1977   //   }
  1978   //   // R: a[i] <= key < a[i+1] or Q
  1979   //   // (i.e., if key is within array, i is the correct index)
  1980   //   return i;
  1981   // }
  1983   // register allocation
  1984   const Register Rkey     = R17_tos;          // already set (tosca)
  1985   const Register Rarray   = R3_ARG1;
  1986   const Register Ri       = R4_ARG2;
  1987   const Register Rj       = R5_ARG3;
  1988   const Register Rh       = R6_ARG4;
  1989   const Register Rscratch = R11_scratch1;
  1991   const int log_entry_size = 3;
  1992   const int entry_size = 1 << log_entry_size;
  1994   Label found;
  1996   // Find array start.
  1997   __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
  1998   __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));
  2000   // initialize i & j
  2001   __ li(Ri, 0);
  2002   __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);
  2004   // and start.
  2005   Label entry;
  2006   __ b(entry);
  2008   // binary search loop
  2009   { Label loop;
  2010     __ bind(loop);
  2011     // int h = (i + j) >> 1;
  2012     __ srdi(Rh, Rh, 1);
  2013     // if (key < array[h].fast_match()) {
  2014     //   j = h;
  2015     // } else {
  2016     //   i = h;
  2017     // }
  2018     __ sldi(Rscratch, Rh, log_entry_size);
  2019 #if defined(VM_LITTLE_ENDIAN)
  2020     __ lwbrx(Rscratch, Rscratch, Rarray);
  2021 #else
  2022     __ lwzx(Rscratch, Rscratch, Rarray);
  2023 #endif
  2025     // if (key < current value)
  2026     //   Rj = Rh
  2027     // else
  2028     //   Ri = Rh
  2029     Label Lgreater;
  2030     __ cmpw(CCR0, Rkey, Rscratch);
  2031     __ bge(CCR0, Lgreater);
  2032     __ mr(Rj, Rh);
  2033     __ b(entry);
  2034     __ bind(Lgreater);
  2035     __ mr(Ri, Rh);
  2037     // while (i+1 < j)
  2038     __ bind(entry);
  2039     __ addi(Rscratch, Ri, 1);
  2040     __ cmpw(CCR0, Rscratch, Rj);
  2041     __ add(Rh, Ri, Rj); // start h = (i + j) >> 1;
  2043     __ blt(CCR0, loop);
  2044   }
  2046   // End of binary search, result index is i (must check again!).
  2047   Label default_case;
  2048   Label continue_execution;
  2049   if (ProfileInterpreter) {
  2050     __ mr(Rh, Ri);              // Save index in i for profiling.
  2052   // Ri = value offset
  2053   __ sldi(Ri, Ri, log_entry_size);
  2054   __ add(Ri, Ri, Rarray);
  2055   __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned);
  2057   Label not_found;
  2058   // Ri = offset offset
  2059   __ cmpw(CCR0, Rkey, Rscratch);
  2060   __ beq(CCR0, not_found);
  2061   // entry not found -> j = default offset
  2062   __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
  2063   __ b(default_case);
  2065   __ bind(not_found);
  2066   // entry found -> j = offset
  2067   __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
  2068   __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned);
  2070   if (ProfileInterpreter) {
  2071     __ b(continue_execution);
  2072   }
  2074   __ bind(default_case); // fall through (if not profiling)
  2075   __ profile_switch_default(Ri, Rscratch);
  2077   __ bind(continue_execution);
  2079   __ extsw(Rj, Rj);
  2080   __ add(R14_bcp, Rj, R14_bcp);
  2081   __ dispatch_next(vtos);
  2082 }
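// Note on endianness: the value/offset pairs are big-endian in the bytecode
// stream, so the loop above uses lwbrx (byte-reversed load) on little-endian
// machines and plain lwzx on big-endian ones; get_u4 makes the same distinction.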
  2084 void TemplateTable::_return(TosState state) {
  2085   transition(state, state);
  2086   assert(_desc->calls_vm(),
  2087          "inconsistent calls_vm information"); // call in remove_activation
  2089   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
  2091     Register Rscratch     = R11_scratch1,
  2092              Rklass       = R12_scratch2,
  2093              Rklass_flags = Rklass;
  2094     Label Lskip_register_finalizer;
  2096     // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case.
  2097     assert(state == vtos, "only valid state");
  2098     __ ld(R17_tos, 0, R18_locals);
  2100     // Load klass of this obj.
  2101     __ load_klass(Rklass, R17_tos);
  2102     __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass);
  2103     __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER));
  2104     __ bfalse(CCR0, Lskip_register_finalizer);
  2106     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */);
  2108     __ align(32, 12);
  2109     __ bind(Lskip_register_finalizer);
  2110   }
  2112   // Move the result value into the correct register and remove memory stack frame.
  2113   __ remove_activation(state, /* throw_monitor_exception */ true);
  2114   // Restoration of lr done by remove_activation.
  2115   switch (state) {
  2116     // Narrow result if state is itos but result type is smaller.
  2117     // Need to narrow in the return bytecode rather than in generate_return_entry
  2118     // since compiled code callers expect the result to already be narrowed.
  2119     case itos: __ narrow(R17_tos); /* fall through */
  2120     case ltos:
  2121     case btos:
  2122     case ztos:
  2123     case ctos:
  2124     case stos:
  2125     case atos: __ mr(R3_RET, R17_tos); break;
  2126     case ftos:
  2127     case dtos: __ fmr(F1_RET, F15_ftos); break;
  2128     case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
  2129                // to get visible before the reference to the object gets stored anywhere.
  2130                __ membar(Assembler::StoreStore); break;
  2131     default  : ShouldNotReachHere();
  2132   }
  2133   __ blr();
  2134 }
  2136 // ============================================================================
  2137 // Constant pool cache access
  2138 //
  2139 // Memory ordering:
  2140 //
  2141 // Like done in C++ interpreter, we load the fields
  2142 //   - _indices
  2143 //   - _f12_oop
  2144 // with acquire semantics, because they are queried to check whether the cache is
  2145 // already resolved. We don't want loads to float above this check.
  2146 // See also comments in ConstantPoolCacheEntry::bytecode_1(),
  2147 // ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1();
  2149 // Call into the VM if call site is not yet resolved
  2150 //
  2151 // Input regs:
  2152 //   - None, all passed regs are outputs.
  2153 //
  2154 // Returns:
  2155 //   - Rcache:  The const pool cache entry that contains the resolved result.
  2156 //   - Rresult: Either noreg or output for f1/f2.
  2157 //
  2158 // Kills:
  2159 //   - Rscratch
  2160 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {
  2162   __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
  2163   Label Lresolved, Ldone;
  2165   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  2166   // We are resolved if the indices offset contains the current bytecode.
  2167 #if defined(VM_LITTLE_ENDIAN)
  2168   __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
  2169 #else
  2170   __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
  2171 #endif
  2172   // Acquire by cmp-br-isync (see below).
  2173   __ cmpdi(CCR0, Rscratch, (int)bytecode());
  2174   __ beq(CCR0, Lresolved);
  2176   address entry = NULL;
  2177   switch (bytecode()) {
  2178     case Bytecodes::_getstatic      : // fall through
  2179     case Bytecodes::_putstatic      : // fall through
  2180     case Bytecodes::_getfield       : // fall through
  2181     case Bytecodes::_putfield       : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
  2182     case Bytecodes::_invokevirtual  : // fall through
  2183     case Bytecodes::_invokespecial  : // fall through
  2184     case Bytecodes::_invokestatic   : // fall through
  2185     case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
  2186     case Bytecodes::_invokehandle   : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
  2187     case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
  2188     default                         : ShouldNotReachHere(); break;
  2190   __ li(R4_ARG2, (int)bytecode());
  2191   __ call_VM(noreg, entry, R4_ARG2, true);
  2193   // Update registers with resolved info.
  2194   __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
  2195   __ b(Ldone);
  2197   __ bind(Lresolved);
  2198   __ isync(); // Order load wrt. succeeding loads.
  2199   __ bind(Ldone);
  2200 }
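// The cmp-br-isync sequence above is the usual PPC64 acquire idiom: once the
// branch on the loaded bytecode resolves, isync prevents younger loads from
// completing ahead of it, so the fields of the resolved entry are seen.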
  2202 // Load the constant pool cache entry at field accesses into registers.
  2203 // The Rcache and Rindex registers must be set before call.
  2204 // Input:
  2205 //   - Rcache, Rindex
  2206 // Output:
  2207 //   - Robj, Roffset, Rflags
  2208 void TemplateTable::load_field_cp_cache_entry(Register Robj,
  2209                                               Register Rcache,
  2210                                               Register Rindex /* unused on PPC64 */,
  2211                                               Register Roffset,
  2212                                               Register Rflags,
  2213                                               bool is_static = false) {
  2214   assert_different_registers(Rcache, Rflags, Roffset);
  2215   // assert(Rindex == noreg, "parameter not used on PPC64");
  2217   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  2218   __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache);
  2219   __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache);
  2220   if (is_static) {
  2221     __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
  2222     __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
  2223     // Acquire not needed here. Following access has an address dependency on this value.
  2224   }
  2225 }
  2227 // Load the constant pool cache entry at invokes into registers.
  2228 // Resolve if necessary.
  2230 // Input Registers:
  2231 //   - None, bcp is used, though
  2232 //
  2233 // Return registers:
  2234 //   - Rmethod       (f1 field or f2 if invokevirtual)
  2235 //   - Ritable_index (f2 field)
  2236 //   - Rflags        (flags field)
  2237 //
  2238 // Kills:
  2239 //   - R21
  2240 //
  2241 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
  2242                                                Register Rmethod,
  2243                                                Register Ritable_index,
  2244                                                Register Rflags,
  2245                                                bool is_invokevirtual,
  2246                                                bool is_invokevfinal,
  2247                                                bool is_invokedynamic) {
  2249   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  2250   // Determine constant pool cache field offsets.
  2251   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  2252   const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset()));
  2253   const int flags_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset());
  2254   // Access constant pool cache fields.
  2255   const int index_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset());
  2257   Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP.
  2259   if (is_invokevfinal) {
  2260     assert(Ritable_index == noreg, "register not used");
  2261     // Already resolved.
  2262     __ get_cache_and_index_at_bcp(Rcache, 1);
  2263   } else {
  2264     resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2));
  2265   }
  2267   __ ld(Rmethod, method_offset, Rcache);
  2268   __ ld(Rflags, flags_offset, Rcache);
  2270   if (Ritable_index != noreg) {
  2271     __ ld(Ritable_index, index_offset, Rcache);
  2272   }
  2273 }
  2275 // ============================================================================
  2276 // Field access
  2278 // Volatile variables demand their effects be made known to all CPUs
  2279 // in order. Store buffers on most chips allow reads & writes to
  2280 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
  2281 // without some kind of memory barrier (i.e., it's not sufficient that
  2282 // the interpreter does not reorder volatile references, the hardware
  2283 // also must not reorder them).
  2284 //
  2285 // According to the new Java Memory Model (JMM):
  2286 // (1) All volatiles are serialized wrt each other. ALSO reads &
  2287 //     writes act as acquire & release, so:
  2288 // (2) A read cannot let unrelated NON-volatile memory refs that
  2289 //     happen after the read float up to before the read. It's OK for
  2290 //     non-volatile memory refs that happen before the volatile read to
  2291 //     float down below it.
  2292 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
  2293 //     memory refs that happen BEFORE the write float down to after the
  2294 //     write. It's OK for non-volatile memory refs that happen after the
  2295 //     volatile write to float up before it.
  2296 //
  2297 // We only put in barriers around volatile refs (they are expensive),
  2298 // not _between_ memory refs (that would require us to track the
  2299 // flavor of the previous memory refs). Requirements (2) and (3)
  2300 // require some barriers before volatile stores and after volatile
  2301 // loads. These nearly cover requirement (1) but miss the
  2302 // volatile-store-volatile-load case.  This final case is placed after
  2303 // volatile-stores although it could just as well go before
  2304 // volatile-loads.
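// Realization in this file (sketch): volatile reads use load-acquire
// (load; twi; isync), volatile writes use release-store (lwsync; store).
// The volatile-store-volatile-load gap is closed either by a sync after the
// volatile store or, if support_IRIW_for_not_multiple_copy_atomic_cpu is set,
// by a sync in front of the volatile load.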
  2306 // The registers cache and index are expected to be set before the call.
  2307 // Correct values of the cache and index registers are preserved.
  2308 // Kills:
  2309 //   Rcache (if has_tos)
  2310 //   Rscratch
  2311 void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {
  2313   assert_different_registers(Rcache, Rscratch);
  2315   if (JvmtiExport::can_post_field_access()) {
  2316     ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  2317     Label Lno_field_access_post;
  2319     // Check if post field access is enabled.
  2320     int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
  2321     __ lwz(Rscratch, offs, Rscratch);
  2323     __ cmpwi(CCR0, Rscratch, 0);
  2324     __ beq(CCR0, Lno_field_access_post);
  2326     // Post access enabled - do it!
  2327     __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
  2328     if (is_static) {
  2329       __ li(R17_tos, 0);
  2330     } else {
  2331       if (has_tos) {
  2332         // The fast bytecode versions have the obj ptr in a register.
  2333         // Thus, save the object pointer before call_VM() clobbers it:
  2334         // put the object on tos where GC wants it.
  2335         __ push_ptr(R17_tos);
  2336       } else {
  2337         // Load top of stack (do not pop the value off the stack).
  2338         __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
  2339       }
  2340       __ verify_oop(R17_tos);
  2341     }
  2342     // tos:   object pointer or NULL if static
  2343     // cache: cache entry pointer
  2344     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
  2345     if (!is_static && has_tos) {
  2346       // Restore object pointer.
  2347       __ pop_ptr(R17_tos);
  2348       __ verify_oop(R17_tos);
  2349     } else {
  2350       // Cache is still needed to get class or obj.
  2351       __ get_cache_and_index_at_bcp(Rcache, 1);
  2352     }
  2354     __ align(32, 12);
  2355     __ bind(Lno_field_access_post);
  2356   }
  2357 }
  2359 // kills R11_scratch1
  2360 void TemplateTable::pop_and_check_object(Register Roop) {
  2361   Register Rtmp = R11_scratch1;
  2363   assert_different_registers(Rtmp, Roop);
  2364   __ pop_ptr(Roop);
  2365   // For field access must check obj.
  2366   __ null_check_throw(Roop, -1, Rtmp);
  2367   __ verify_oop(Roop);
  2368 }
  2370 // PPC64: implement volatile loads as fence-load-acquire.
  2371 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
  2372   transition(vtos, vtos);
  2374   Label Lacquire, Lisync;
  2376   const Register Rcache        = R3_ARG1,
  2377                  Rclass_or_obj = R22_tmp2,
  2378                  Roffset       = R23_tmp3,
  2379                  Rflags        = R31,
  2380                  Rbtable       = R5_ARG3,
  2381                  Rbc           = R6_ARG4,
  2382                  Rscratch      = R12_scratch2;
  2384   static address field_branch_table[number_of_states],
  2385                  static_branch_table[number_of_states];
  2387   address* branch_table = is_static ? static_branch_table : field_branch_table;
  2389   // Get field offset.
  2390   resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
  2392   // JVMTI support
  2393   jvmti_post_field_access(Rcache, Rscratch, is_static, false);
  2395   // Load after possible GC.
  2396   load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
  2398   // Load pointer to branch table.
  2399   __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
  2401   // Get volatile flag.
  2402   __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  2403   // Note: sync is needed before volatile load on PPC64.
  2405   // Check field type.
  2406   __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  2408 #ifdef ASSERT
  2409   Label LFlagInvalid;
  2410   __ cmpldi(CCR0, Rflags, number_of_states);
  2411   __ bge(CCR0, LFlagInvalid);
  2412 #endif
  2414   // Load from branch table and dispatch (volatile case: one instruction ahead).
  2415   __ sldi(Rflags, Rflags, LogBytesPerWord);
  2416   __ cmpwi(CCR6, Rscratch, 1); // Volatile?
  2417   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
  2418     __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
  2419   }
  2420   __ ldx(Rbtable, Rbtable, Rflags);
  2422   // Get the obj from stack.
  2423   if (!is_static) {
  2424     pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
  2425   } else {
  2426     __ verify_oop(Rclass_or_obj);
  2427   }
  2429   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
  2430     __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
  2431   }
  2432   __ mtctr(Rbtable);
  2433   __ bctr();
  2435 #ifdef ASSERT
  2436   __ bind(LFlagInvalid);
  2437   __ stop("got invalid flag", 0x654);
  2439   // __ bind(Lvtos);
  2440   address pc_before_fence = __ pc();
  2441   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  2442   assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
  2443   assert(branch_table[vtos] == 0, "can't compute twice");
  2444   branch_table[vtos] = __ pc(); // non-volatile_entry point
  2445   __ stop("vtos unexpected", 0x655);
  2446 #endif
  2448   __ align(32, 28, 28); // Align load.
  2449   // __ bind(Ldtos);
  2450   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  2451   assert(branch_table[dtos] == 0, "can't compute twice");
  2452   branch_table[dtos] = __ pc(); // non-volatile_entry point
  2453   __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
  2454   __ push(dtos);
  2455   if (!is_static) patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);
  2456   {
  2457     Label acquire_double;
  2458     __ beq(CCR6, acquire_double); // Volatile?
  2459     __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
  2461     __ bind(acquire_double);
  2462     __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
  2463     __ beq_predict_taken(CCR0, Lisync);
  2464     __ b(Lisync); // In case of NAN.
  2465   }
  2467   __ align(32, 28, 28); // Align load.
  2468   // __ bind(Lftos);
  2469   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  2470   assert(branch_table[ftos] == 0, "can't compute twice");
  2471   branch_table[ftos] = __ pc(); // non-volatile_entry point
  2472   __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
  2473   __ push(ftos);
  2474   if (!is_static) { patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); }
  2475   {
  2476     Label acquire_float;
  2477     __ beq(CCR6, acquire_float); // Volatile?
  2478     __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
  2480     __ bind(acquire_float);
  2481     __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
  2482     __ beq_predict_taken(CCR0, Lisync);
  2483     __ b(Lisync); // In case of NAN.
  2484   }
  2486   __ align(32, 28, 28); // Align load.
  2487   // __ bind(Litos);
  2488   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  2489   assert(branch_table[itos] == 0, "can't compute twice");
  2490   branch_table[itos] = __ pc(); // non-volatile_entry point
  2491   __ lwax(R17_tos, Rclass_or_obj, Roffset);
  2492   __ push(itos);
  2493   if (!is_static) patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
  2494   __ beq(CCR6, Lacquire); // Volatile?
  2495   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
  2497   __ align(32, 28, 28); // Align load.
  2498   // __ bind(Lltos);
  2499   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  2500   assert(branch_table[ltos] == 0, "can't compute twice");
  2501   branch_table[ltos] = __ pc(); // non-volatile_entry point
  2502   __ ldx(R17_tos, Rclass_or_obj, Roffset);
  2503   __ push(ltos);
  2504   if (!is_static) patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
  2505   __ beq(CCR6, Lacquire); // Volatile?
  2506   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
  2508   __ align(32, 28, 28); // Align load.
  2509   // __ bind(Lbtos);
  2510   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  2511   assert(branch_table[btos] == 0, "can't compute twice");
  2512   branch_table[btos] = __ pc(); // non-volatile_entry point
  2513   __ lbzx(R17_tos, Rclass_or_obj, Roffset);
  2514   __ extsb(R17_tos, R17_tos);
  2515   __ push(btos);
  2516   if (!is_static) patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
  2517   __ beq(CCR6, Lacquire); // Volatile?
  2518   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
  2520   __ align(32, 28, 28); // Align load.
  2521   // __ bind(Lztos); (same code as btos)
  2522   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  2523   assert(branch_table[ztos] == 0, "can't compute twice");
  2524   branch_table[ztos] = __ pc(); // non-volatile_entry point
  2525   __ lbzx(R17_tos, Rclass_or_obj, Roffset);
  2526   __ extsb(R17_tos, R17_tos);
  2527   __ push(ztos);
  2528   if (!is_static) {
  2529     // Use btos rewriting; no truncation to the t/f bit is needed for getfield.
  2530     patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
  2531   }
  2532   __ beq(CCR6, Lacquire); // Volatile?
  2533   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
  2535   __ align(32, 28, 28); // Align load.
  2536   // __ bind(Lctos);
  2537   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  2538   assert(branch_table[ctos] == 0, "can't compute twice");
  2539   branch_table[ctos] = __ pc(); // non-volatile_entry point
  2540   __ lhzx(R17_tos, Rclass_or_obj, Roffset);
  2541   __ push(ctos);
  2542   if (!is_static) patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
  2543   __ beq(CCR6, Lacquire); // Volatile?
  2544   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
  2546   __ align(32, 28, 28); // Align load.
  2547   // __ bind(Lstos);
  2548   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  2549   assert(branch_table[stos] == 0, "can't compute twice");
  2550   branch_table[stos] = __ pc(); // non-volatile_entry point
  2551   __ lhax(R17_tos, Rclass_or_obj, Roffset);
  2552   __ push(stos);
  2553   if (!is_static) patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
  2554   __ beq(CCR6, Lacquire); // Volatile?
  2555   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
  2557   __ align(32, 28, 28); // Align load.
  2558   // __ bind(Latos);
  2559   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  2560   assert(branch_table[atos] == 0, "can't compute twice");
  2561   branch_table[atos] = __ pc(); // non-volatile_entry point
  2562   __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
  2563   __ verify_oop(R17_tos);
  2564   __ push(atos);
  2565   //__ dcbt(R17_tos); // prefetch
  2566   if (!is_static) patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
  2567   __ beq(CCR6, Lacquire); // Volatile?
  2568   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
  2570   __ align(32, 12);
  2571   __ bind(Lacquire);
  2572   __ twi_0(R17_tos);
  2573   __ bind(Lisync);
  2574   __ isync(); // acquire
  2576 #ifdef ASSERT
  2577   for (int i = 0; i < number_of_states; ++i) {
  2578     assert(branch_table[i], "get initialization");
  2579     //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
  2580     //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
  2581   }
  2582 #endif
  2583 }
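// Dispatch summary (sketch): each tos state entry above is one barrier
// instruction (fence here, release in putfield_or_static) immediately
// followed by the non-volatile entry point recorded in branch_table[state].
// When the volatile variant is wanted, the computed target is moved back by
// BytesPerInstWord so execution falls through the barrier first.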
  2585 void TemplateTable::getfield(int byte_no) {
  2586   getfield_or_static(byte_no, false);
  2587 }
  2589 void TemplateTable::getstatic(int byte_no) {
  2590   getfield_or_static(byte_no, true);
  2591 }
  2593 // The registers cache and index are expected to be set before the call.
  2594 // The function may destroy various registers, just not the cache and index registers.
  2595 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {
  2597   assert_different_registers(Rcache, Rscratch, R6_ARG4);
  2599   if (JvmtiExport::can_post_field_modification()) {
  2600     Label Lno_field_mod_post;
  2602     // Check if post field modification is enabled.
  2603     int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
  2604     __ lwz(Rscratch, offs, Rscratch);
  2606     __ cmpwi(CCR0, Rscratch, 0);
  2607     __ beq(CCR0, Lno_field_mod_post);
  2609     // Do the post
  2610     ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  2611     const Register Robj = Rscratch;
  2613     __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
  2614     if (is_static) {
  2615       // Life is simple. Null out the object pointer.
  2616       __ li(Robj, 0);
  2617     } else {
  2618       // In case of the fast versions, value lives in registers => put it back on tos.
  2619       int offs = Interpreter::expr_offset_in_bytes(0);
  2620       Register base = R15_esp;
  2621       switch(bytecode()) {
  2622         case Bytecodes::_fast_aputfield: __ push_ptr(); offs+= Interpreter::stackElementSize; break;
  2623         case Bytecodes::_fast_iputfield: // Fall through
  2624         case Bytecodes::_fast_bputfield: // Fall through
  2625         case Bytecodes::_fast_zputfield: // Fall through
  2626         case Bytecodes::_fast_cputfield: // Fall through
  2627         case Bytecodes::_fast_sputfield: __ push_i(); offs+=  Interpreter::stackElementSize; break;
  2628         case Bytecodes::_fast_lputfield: __ push_l(); offs+=2*Interpreter::stackElementSize; break;
  2629         case Bytecodes::_fast_fputfield: __ push_f(); offs+=  Interpreter::stackElementSize; break;
  2630         case Bytecodes::_fast_dputfield: __ push_d(); offs+=2*Interpreter::stackElementSize; break;
  2631         default: {
  2632           offs = 0;
  2633           base = Robj;
  2634           const Register Rflags = Robj;
  2635           Label is_one_slot;
  2636           // Life is harder. The stack holds the value on top, followed by the
  2637           // object. We don't know the size of the value, though; it could be
  2638           // one or two words depending on its type. As a result, we must find
  2639           // the type to determine where the object is.
  2640           __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian
  2641           __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  2643           __ cmpwi(CCR0, Rflags, ltos);
  2644           __ cmpwi(CCR1, Rflags, dtos);
  2645           __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1));
  2646           __ crnor(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
  2647           __ beq(CCR0, is_one_slot);
  2648           __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2));
  2649           __ bind(is_one_slot);
  2650           break;
  2651         }
  2652       }
  2653       __ ld(Robj, offs, base);
  2654       __ verify_oop(Robj);
  2655     }
  2657     __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0));
  2658     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4);
  2659     __ get_cache_and_index_at_bcp(Rcache, 1);
  2661     // In case of the fast versions, value lives in registers => put it back on tos.
  2662     switch(bytecode()) {
  2663       case Bytecodes::_fast_aputfield: __ pop_ptr(); break;
  2664       case Bytecodes::_fast_iputfield: // Fall through
  2665       case Bytecodes::_fast_bputfield: // Fall through
  2666       case Bytecodes::_fast_zputfield: // Fall through
  2667       case Bytecodes::_fast_cputfield: // Fall through
  2668       case Bytecodes::_fast_sputfield: __ pop_i(); break;
  2669       case Bytecodes::_fast_lputfield: __ pop_l(); break;
  2670       case Bytecodes::_fast_fputfield: __ pop_f(); break;
  2671       case Bytecodes::_fast_dputfield: __ pop_d(); break;
  2672       default: break; // Nothin' to do.
  2673     }
  2675     __ align(32, 12);
  2676     __ bind(Lno_field_mod_post);
  2677   }
  2678 }
  2680 // PPC64: implement volatile stores as release-store (return bytecode contains an additional release).
  2681 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
  2682   Label Lvolatile;
  2684   const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
  2685                  Rclass_or_obj = R31,      // Needs to survive C call.
  2686                  Roffset       = R22_tmp2, // Needs to survive C call.
  2687                  Rflags        = R3_ARG1,
  2688                  Rbtable       = R4_ARG2,
  2689                  Rscratch      = R11_scratch1,
  2690                  Rscratch2     = R12_scratch2,
  2691                  Rscratch3     = R6_ARG4,
  2692                  Rbc           = Rscratch3;
  2693   const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
  2695   static address field_branch_table[number_of_states],
  2696                  static_branch_table[number_of_states];
  2698   address* branch_table = is_static ? static_branch_table : field_branch_table;
  2700   // Stack (grows up):
  2701   //  value
  2702   //  obj
  2704   // Load the field offset.
  2705   resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
  2706   jvmti_post_field_mod(Rcache, Rscratch, is_static);
  2707   load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
  2709   // Load pointer to branch table.
  2710   __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
  2712   // Get volatile flag.
  2713   __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  2715   // Check the field type.
  2716   __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  2718 #ifdef ASSERT
  2719   Label LFlagInvalid;
  2720   __ cmpldi(CCR0, Rflags, number_of_states);
  2721   __ bge(CCR0, LFlagInvalid);
  2722 #endif
  2724   // Load from branch table and dispatch (volatile case: one instruction ahead).
  2725   __ sldi(Rflags, Rflags, LogBytesPerWord);
  2726   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpwi(CR_is_vol, Rscratch, 1); } // Volatile?
  2727   __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile? size of instruction 1 : 0.
  2728   __ ldx(Rbtable, Rbtable, Rflags);
  2730   __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
  2731   __ mtctr(Rbtable);
  2732   __ bctr();
  2734 #ifdef ASSERT
  2735   __ bind(LFlagInvalid);
  2736   __ stop("got invalid flag", 0x656);
  2738   // __ bind(Lvtos);
  2739   address pc_before_release = __ pc();
  2740   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  2741   assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
  2742   assert(branch_table[vtos] == 0, "can't compute twice");
  2743   branch_table[vtos] = __ pc(); // non-volatile_entry point
  2744   __ stop("vtos unexpected", 0x657);
  2745 #endif
  2747   __ align(32, 28, 28); // Align pop.
  2748   // __ bind(Ldtos);
  2749   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  2750   assert(branch_table[dtos] == 0, "can't compute twice");
  2751   branch_table[dtos] = __ pc(); // non-volatile_entry point
  2752   __ pop(dtos);
  2753   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  2754   __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
  2755   if (!is_static) { patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); }
  2756   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
  2757     __ beq(CR_is_vol, Lvolatile); // Volatile?
  2758   }
  2759   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
  2761   __ align(32, 28, 28); // Align pop.
  2762   // __ bind(Lftos);
  2763   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  2764   assert(branch_table[ftos] == 0, "can't compute twice");
  2765   branch_table[ftos] = __ pc(); // non-volatile_entry point
  2766   __ pop(ftos);
  2767   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  2768   __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
  2769   if (!is_static) { patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); }
  2770   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
  2771     __ beq(CR_is_vol, Lvolatile); // Volatile?
  2772   }
  2773   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
  2775   __ align(32, 28, 28); // Align pop.
  2776   // __ bind(Litos);
  2777   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  2778   assert(branch_table[itos] == 0, "can't compute twice");
  2779   branch_table[itos] = __ pc(); // non-volatile_entry point
  2780   __ pop(itos);
  2781   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  2782   __ stwx(R17_tos, Rclass_or_obj, Roffset);
  2783   if (!is_static) { patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); }
  2784   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
  2785     __ beq(CR_is_vol, Lvolatile); // Volatile?
  2786   }
  2787   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
  2789   __ align(32, 28, 28); // Align pop.
  2790   // __ bind(Lltos);
  2791   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  2792   assert(branch_table[ltos] == 0, "can't compute twice");
  2793   branch_table[ltos] = __ pc(); // non-volatile_entry point
  2794   __ pop(ltos);
  2795   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  2796   __ stdx(R17_tos, Rclass_or_obj, Roffset);
  2797   if (!is_static) { patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); }
  2798   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
  2799     __ beq(CR_is_vol, Lvolatile); // Volatile?
  2800   }
  2801   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
  2803   __ align(32, 28, 28); // Align pop.
  2804   // __ bind(Lbtos);
  2805   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  2806   assert(branch_table[btos] == 0, "can't compute twice");
  2807   branch_table[btos] = __ pc(); // non-volatile_entry point
  2808   __ pop(btos);
  2809   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  2810   __ stbx(R17_tos, Rclass_or_obj, Roffset);
  2811   if (!is_static) { patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); }
  2812   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
  2813     __ beq(CR_is_vol, Lvolatile); // Volatile?
  2814   }
  2815   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
  2817   __ align(32, 28, 28); // Align pop.
  2818   // __ bind(Lztos);
  2819   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  2820   assert(branch_table[ztos] == 0, "can't compute twice");
  2821   branch_table[ztos] = __ pc(); // non-volatile_entry point
  2822   __ pop(ztos);
  2823   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  2824   __ andi(R17_tos, R17_tos, 0x1);
  2825   __ stbx(R17_tos, Rclass_or_obj, Roffset);
  2826   if (!is_static) { patch_bytecode(Bytecodes::_fast_zputfield, Rbc, Rscratch, true, byte_no); }
  2827   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
  2828     __ beq(CR_is_vol, Lvolatile); // Volatile?
  2829   }
  2830   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
  2832   __ align(32, 28, 28); // Align pop.
  2833   // __ bind(Lctos);
  2834   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  2835   assert(branch_table[ctos] == 0, "can't compute twice");
  2836   branch_table[ctos] = __ pc(); // non-volatile_entry point
  2837   __ pop(ctos);
  2838   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  2839   __ sthx(R17_tos, Rclass_or_obj, Roffset);
  2840   if (!is_static) { patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); }
  2841   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
  2842     __ beq(CR_is_vol, Lvolatile); // Volatile?
  2843   }
  2844   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
  2846   __ align(32, 28, 28); // Align pop.
  2847   // __ bind(Lstos);
  2848   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  2849   assert(branch_table[stos] == 0, "can't compute twice");
  2850   branch_table[stos] = __ pc(); // non-volatile_entry point
  2851   __ pop(stos);
  2852   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  2853   __ sthx(R17_tos, Rclass_or_obj, Roffset);
  2854   if (!is_static) { patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); }
  2855   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
  2856     __ beq(CR_is_vol, Lvolatile); // Volatile?
  2857   }
  2858   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
  2860   __ align(32, 28, 28); // Align pop.
  2861   // __ bind(Latos);
  2862   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  2863   assert(branch_table[atos] == 0, "can't compute twice");
  2864   branch_table[atos] = __ pc(); // non-volatile_entry point
  2865   __ pop(atos);
  2866   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1
  2867   do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
  2868   if (!is_static) { patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); }
  2869   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
  2870     __ beq(CR_is_vol, Lvolatile); // Volatile?
  2871     __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
  2873     __ align(32, 12);
  2874     __ bind(Lvolatile);
  2875     __ fence();
  2876   }
  2877   // fallthru: __ b(Lexit);
  2879 #ifdef ASSERT
  2880   for (int i = 0; i < number_of_states; ++i) {
  2881     assert(branch_table[i], "put initialization");
  2882     //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
  2883     //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
  2884   }
  2885 #endif
  2886 }
  2888 void TemplateTable::putfield(int byte_no) {
  2889   putfield_or_static(byte_no, false);
  2890 }
  2892 void TemplateTable::putstatic(int byte_no) {
  2893   putfield_or_static(byte_no, true);
  2894 }
  2896 // See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job.
  2897 void TemplateTable::jvmti_post_fast_field_mod() {
  2898   __ should_not_reach_here();
  2899 }
  2901 void TemplateTable::fast_storefield(TosState state) {
  2902   transition(state, vtos);
  2904   const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
  2905                  Rclass_or_obj = R31,      // Needs to survive C call.
  2906                  Roffset       = R22_tmp2, // Needs to survive C call.
  2907                  Rflags        = R3_ARG1,
  2908                  Rscratch      = R11_scratch1,
  2909                  Rscratch2     = R12_scratch2,
  2910                  Rscratch3     = R4_ARG2;
  2911   const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
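         // The volatile handling below follows the usual PPC64 pattern: a
         // release barrier is issued before a volatile store, and on CPUs that
         // are not multiple-copy atomic the CR_is_vol flag is tested after the
         // store to add a trailing fence (full sync) for volatile fields.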
  2913   // Constant pool already resolved => Load flags and offset of field.
  2914   __ get_cache_and_index_at_bcp(Rcache, 1);
  2915   jvmti_post_field_mod(Rcache, Rscratch, false /* not static */);
  2916   load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
  2918   // Get the obj and the final store addr.
  2919   pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
  2921   // Get volatile flag.
  2922   __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  2923   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); }
  2924   {
  2925     Label LnotVolatile;
  2926     __ beq(CCR0, LnotVolatile);
  2927     __ release();
  2928     __ align(32, 12);
  2929     __ bind(LnotVolatile);
  2930   }
  2932   // Do the store and fencing.
  2933   switch(bytecode()) {
  2934     case Bytecodes::_fast_aputfield:
  2935       // Store into the field.
  2936       do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
  2937       break;
  2939     case Bytecodes::_fast_iputfield:
  2940       __ stwx(R17_tos, Rclass_or_obj, Roffset);
  2941       break;
  2943     case Bytecodes::_fast_lputfield:
  2944       __ stdx(R17_tos, Rclass_or_obj, Roffset);
  2945       break;
  2947     case Bytecodes::_fast_zputfield:
  2948       __ andi(R17_tos, R17_tos, 0x1);  // boolean is true if LSB is 1
  2949       // fall through to bputfield
  2950     case Bytecodes::_fast_bputfield:
  2951       __ stbx(R17_tos, Rclass_or_obj, Roffset);
  2952       break;
  2954     case Bytecodes::_fast_cputfield:
  2955     case Bytecodes::_fast_sputfield:
  2956       __ sthx(R17_tos, Rclass_or_obj, Roffset);
  2957       break;
  2959     case Bytecodes::_fast_fputfield:
  2960       __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
  2961       break;
  2963     case Bytecodes::_fast_dputfield:
  2964       __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
  2965       break;
  2967     default: ShouldNotReachHere();
  2968   }
  2970   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
  2971     Label LVolatile;
  2972     __ beq(CR_is_vol, LVolatile);
  2973     __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
  2975     __ align(32, 12);
  2976     __ bind(LVolatile);
  2977     __ fence();
  2978   }
  2979 }
  2981 void TemplateTable::fast_accessfield(TosState state) {
  2982   transition(atos, state);
  2984   Label LisVolatile;
  2985   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  2987   const Register Rcache        = R3_ARG1,
  2988                  Rclass_or_obj = R17_tos,
  2989                  Roffset       = R22_tmp2,
  2990                  Rflags        = R23_tmp3,
  2991                  Rscratch      = R12_scratch2;
  2993   // Constant pool already resolved. Get the field offset.
  2994   __ get_cache_and_index_at_bcp(Rcache, 1);
  2995   load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
  2997   // JVMTI support
  2998   jvmti_post_field_access(Rcache, Rscratch, false, true);
  3000   // Get the load address.
  3001   __ null_check_throw(Rclass_or_obj, -1, Rscratch);
  3003   // Get volatile flag.
  3004   __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  3005   __ bne(CCR0, LisVolatile);
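         // In the volatile paths below, the twi_0/isync pair after a load
         // creates an artificial dependency on the loaded value and acts as an
         // acquire barrier; the FP cases use the equivalent cmp-br-isync idiom.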
  3007   switch(bytecode()) {
  3008     case Bytecodes::_fast_agetfield:
  3009     {
  3010       __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
  3011       __ verify_oop(R17_tos);
  3012       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
  3014       __ bind(LisVolatile);
  3015       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
  3016       __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
  3017       __ verify_oop(R17_tos);
  3018       __ twi_0(R17_tos);
  3019       __ isync();
  3020       break;
  3021     }
  3022     case Bytecodes::_fast_igetfield:
  3023     {
  3024       __ lwax(R17_tos, Rclass_or_obj, Roffset);
  3025       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
  3027       __ bind(LisVolatile);
  3028       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
  3029       __ lwax(R17_tos, Rclass_or_obj, Roffset);
  3030       __ twi_0(R17_tos);
  3031       __ isync();
  3032       break;
  3033     }
  3034     case Bytecodes::_fast_lgetfield:
  3035     {
  3036       __ ldx(R17_tos, Rclass_or_obj, Roffset);
  3037       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
  3039       __ bind(LisVolatile);
  3040       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
  3041       __ ldx(R17_tos, Rclass_or_obj, Roffset);
  3042       __ twi_0(R17_tos);
  3043       __ isync();
  3044       break;
  3045     }
  3046     case Bytecodes::_fast_bgetfield:
  3047     {
  3048       __ lbzx(R17_tos, Rclass_or_obj, Roffset);
  3049       __ extsb(R17_tos, R17_tos);
  3050       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
  3052       __ bind(LisVolatile);
  3053       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
  3054       __ lbzx(R17_tos, Rclass_or_obj, Roffset);
  3055       __ twi_0(R17_tos);
  3056       __ extsb(R17_tos, R17_tos);
  3057       __ isync();
  3058       break;
  3059     }
  3060     case Bytecodes::_fast_cgetfield:
  3061     {
  3062       __ lhzx(R17_tos, Rclass_or_obj, Roffset);
  3063       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
  3065       __ bind(LisVolatile);
  3066       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
  3067       __ lhzx(R17_tos, Rclass_or_obj, Roffset);
  3068       __ twi_0(R17_tos);
  3069       __ isync();
  3070       break;
  3071     }
  3072     case Bytecodes::_fast_sgetfield:
  3073     {
  3074       __ lhax(R17_tos, Rclass_or_obj, Roffset);
  3075       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
  3077       __ bind(LisVolatile);
  3078       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
  3079       __ lhax(R17_tos, Rclass_or_obj, Roffset);
  3080       __ twi_0(R17_tos);
  3081       __ isync();
  3082       break;
  3083     }
  3084     case Bytecodes::_fast_fgetfield:
  3085     {
  3086       __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
  3087       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
  3089       __ bind(LisVolatile);
  3090       Label Ldummy;
  3091       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
  3092       __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
  3093       __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
  3094       __ bne_predict_not_taken(CCR0, Ldummy);
  3095       __ bind(Ldummy);
  3096       __ isync();
  3097       break;
  3098     }
  3099     case Bytecodes::_fast_dgetfield:
  3100     {
  3101       __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
  3102       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
  3104       __ bind(LisVolatile);
  3105       Label Ldummy;
  3106       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
  3107       __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
  3108       __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
  3109       __ bne_predict_not_taken(CCR0, Ldummy);
  3110       __ bind(Ldummy);
  3111       __ isync();
  3112       break;
  3113     }
  3114     default: ShouldNotReachHere();
  3115   }
  3116 }
  3118 void TemplateTable::fast_xaccess(TosState state) {
  3119   transition(vtos, state);
  3121   Label LisVolatile;
  3122   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  3123   const Register Rcache        = R3_ARG1,
  3124                  Rclass_or_obj = R17_tos,
  3125                  Roffset       = R22_tmp2,
  3126                  Rflags        = R23_tmp3,
  3127                  Rscratch      = R12_scratch2;
  3129   __ ld(Rclass_or_obj, 0, R18_locals);
  3131   // Constant pool already resolved. Get the field offset.
  3132   __ get_cache_and_index_at_bcp(Rcache, 2);
  3133   load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
  3135   // JVMTI support not needed, since we switch back to the single bytecodes as soon as a debugger attaches.
  3137   // Needed to report exception at the correct bcp.
  3138   __ addi(R14_bcp, R14_bcp, 1);
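         // This template handles the fused "aload_0 + fast getfield" bytecodes,
         // so the bcp is temporarily advanced to the getfield part for exception
         // reporting and restored at the end (see the addi(-1) below).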
  3140   // Get the load address.
  3141   __ null_check_throw(Rclass_or_obj, -1, Rscratch);
  3143   // Get volatile flag.
  3144   __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  3145   __ bne(CCR0, LisVolatile);
  3147   switch(state) {
  3148   case atos:
  3149     {
  3150       __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
  3151       __ verify_oop(R17_tos);
  3152       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
  3154       __ bind(LisVolatile);
  3155       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
  3156       __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
  3157       __ verify_oop(R17_tos);
  3158       __ twi_0(R17_tos);
  3159       __ isync();
  3160       break;
  3161     }
  3162   case itos:
  3163     {
  3164       __ lwax(R17_tos, Rclass_or_obj, Roffset);
  3165       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
  3167       __ bind(LisVolatile);
  3168       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
  3169       __ lwax(R17_tos, Rclass_or_obj, Roffset);
  3170       __ twi_0(R17_tos);
  3171       __ isync();
  3172       break;
  3173     }
  3174   case ftos:
  3175     {
  3176       __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
  3177       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
  3179       __ bind(LisVolatile);
  3180       Label Ldummy;
  3181       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
  3182       __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
  3183       __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
  3184       __ bne_predict_not_taken(CCR0, Ldummy);
  3185       __ bind(Ldummy);
  3186       __ isync();
  3187       break;
  3188     }
  3189   default: ShouldNotReachHere();
  3190   }
  3191   __ addi(R14_bcp, R14_bcp, -1);
  3192 }
  3194 // ============================================================================
  3195 // Calls
  3197 // Common code for invoke
  3198 //
  3199 // Input:
  3200 //   - byte_no
  3201 //
  3202 // Output:
  3203 //   - Rmethod:        The method to invoke next.
  3204 //   - Rret_addr:      The return address to return to.
  3205 //   - Rindex:         MethodType (invokehandle) or CallSite obj (invokedynamic)
  3206 //   - Rrecv:          Cache for "this" pointer, might be noreg if static call.
  3207 //   - Rflags:         Method flags from const pool cache.
  3208 //
  3209 //  Kills:
  3210 //   - Rscratch1
  3211 //
  3212 void TemplateTable::prepare_invoke(int byte_no,
  3213                                    Register Rmethod,  // linked method (or i-klass)
  3214                                    Register Rret_addr,// return address
  3215                                    Register Rindex,   // itable index, MethodType, etc.
  3216                                    Register Rrecv,    // If caller wants to see it.
  3217                                    Register Rflags,   // If caller wants to test it.
  3218                                    Register Rscratch
  3219                                    ) {
  3220   // Determine flags.
  3221   const Bytecodes::Code code = bytecode();
  3222   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
  3223   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
  3224   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
  3225   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
  3226   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
  3227   const bool load_receiver       = (Rrecv != noreg);
  3228   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
  3230   assert_different_registers(Rmethod, Rindex, Rflags, Rscratch);
  3231   assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch);
  3232   assert_different_registers(Rret_addr, Rscratch);
  3234   load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic);
  3236   // Saving of SP done in call_from_interpreter.
  3238   // Maybe push "appendix" to arguments.
  3239   if (is_invokedynamic || is_invokehandle) {
  3240     Label Ldone;
  3241     __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63);
  3242     __ beq(CCR0, Ldone);
  3243     // Push "appendix" (MethodType, CallSite, etc.).
  3244     // This must be done before we get the receiver,
  3245     // since the parameter_size includes it.
  3246     __ load_resolved_reference_at_index(Rscratch, Rindex);
  3247     __ verify_oop(Rscratch);
  3248     __ push_ptr(Rscratch);
  3249     __ bind(Ldone);
  3250   }
  3252   // Load receiver if needed (after appendix is pushed so parameter size is correct).
  3253   if (load_receiver) {
  3254     const Register Rparam_count = Rscratch;
  3255     __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask);
  3256     __ load_receiver(Rparam_count, Rrecv);
  3257     __ verify_oop(Rrecv);
  3258   }
  3260   // Get return address.
  3261   {
  3262     Register Rtable_addr = Rscratch;
  3263     Register Rret_type = Rret_addr;
  3264     address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
  3266     // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
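           // invoke_return_entry_table_for(code) is a per-TosState table of
           // return entries; the shifted tos state selects the entry the
           // callee will return to.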
  3267     __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  3268     __ load_dispatch_table(Rtable_addr, (address*)table_addr);
  3269     __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  3270     // Get return address.
  3271     __ ldx(Rret_addr, Rtable_addr, Rret_type);
  3272   }
  3273 }
  3275 // Helper for virtual calls. Load target out of vtable and jump off!
  3276 // Kills all passed registers.
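       // The vtable is embedded in the InstanceKlass, starting at
       // vtable_start_offset; each vtableEntry is one word holding a Method*,
       // hence the index is scaled by wordSize below.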
  3277 void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) {
  3279   assert_different_registers(Rrecv_klass, Rtemp, Rret);
  3280   const Register Rtarget_method = Rindex;
  3282   // Get target method & entry point.
  3283   const int base = InstanceKlass::vtable_start_offset() * wordSize;
  3284   // Calc vtable addr: scale the vtable index by the vtable entry size (8 bytes).
  3285   __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size() * wordSize));
  3286   // Load target.
  3287   __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
  3288   __ ldx(Rtarget_method, Rindex, Rrecv_klass);
  3289   // Argument and return type profiling.
  3290   __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true);
  3291   __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
  3292 }
  3294 // Virtual or final call. Final calls are rewritten on the fly to run through "fast_invokevfinal" next time.
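       // The is_vfinal flag tested below marks methods that need no vtable
       // dispatch (e.g. final methods); for those, patch_bytecode installs
       // _fast_invokevfinal so later executions skip the vtable lookup.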
  3295 void TemplateTable::invokevirtual(int byte_no) {
  3296   transition(vtos, vtos);
  3298   Register Rtable_addr = R11_scratch1,
  3299            Rret_type = R12_scratch2,
  3300            Rret_addr = R5_ARG3,
  3301            Rflags = R22_tmp2, // Should survive C call.
  3302            Rrecv = R3_ARG1,
  3303            Rrecv_klass = Rrecv,
  3304            Rvtableindex_or_method = R31, // Should survive C call.
  3305            Rnum_params = R4_ARG2,
  3306            Rnew_bc = R6_ARG4;
  3308   Label LnotFinal;
  3310   load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);
  3312   __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  3313   __ bfalse(CCR0, LnotFinal);
  3315   patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
  3316   invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);
  3318   __ align(32, 12);
  3319   __ bind(LnotFinal);
  3320   // Load "this" pointer (receiver).
  3321   __ rldicl(Rnum_params, Rflags, 64, 48);
  3322   __ load_receiver(Rnum_params, Rrecv);
  3323   __ verify_oop(Rrecv);
  3325   // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
  3326   __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  3327   __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
  3328   __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  3329   __ ldx(Rret_addr, Rret_type, Rtable_addr);
  3330   __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1);
  3331   __ load_klass(Rrecv_klass, Rrecv);
  3332   __ verify_klass_ptr(Rrecv_klass);
  3333   __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);
  3335   generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
  3336 }
  3338 void TemplateTable::fast_invokevfinal(int byte_no) {
  3339   transition(vtos, vtos);
  3341   assert(byte_no == f2_byte, "use this argument");
  3342   Register Rflags  = R22_tmp2,
  3343            Rmethod = R31;
  3344   load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false);
  3345   invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2);
  3346 }
  3348 void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) {
  3350   assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2);
  3352   // Load receiver from stack slot.
  3353   Register Rrecv = Rscratch2;
  3354   Register Rnum_params = Rrecv;
  3356   __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
  3357   __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);
  3359   // Get return address.
  3360   Register Rtable_addr = Rscratch1,
  3361            Rret_addr   = Rflags,
  3362            Rret_type   = Rret_addr;
  3363   // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
  3364   __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
  3365   __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
  3366   __ sldi(Rret_type, Rret_type, LogBytesPerWord);
  3367   __ ldx(Rret_addr, Rret_type, Rtable_addr);
  3369   // Load receiver and receiver NULL check.
  3370   __ load_receiver(Rnum_params, Rrecv);
  3371   __ null_check_throw(Rrecv, -1, Rscratch1);
  3373   __ profile_final_call(Rrecv, Rscratch1);
  3374   // Argument and return type profiling.
  3375   __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
  3377   // Do the call.
  3378   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
  3379 }
  3381 void TemplateTable::invokespecial(int byte_no) {
  3382   assert(byte_no == f1_byte, "use this argument");
  3383   transition(vtos, vtos);
  3385   Register Rtable_addr = R3_ARG1,
  3386            Rret_addr   = R4_ARG2,
  3387            Rflags      = R5_ARG3,
  3388            Rreceiver   = R6_ARG4,
  3389            Rmethod     = R31;
  3391   prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1);
  3393   // Receiver NULL check.
  3394   __ null_check_throw(Rreceiver, -1, R11_scratch1);
  3396   __ profile_call(R11_scratch1, R12_scratch2);
  3397   // Argument and return type profiling.
  3398   __ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false);
  3399   __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
  3400 }
  3402 void TemplateTable::invokestatic(int byte_no) {
  3403   assert(byte_no == f1_byte, "use this argument");
  3404   transition(vtos, vtos);
  3406   Register Rtable_addr = R3_ARG1,
  3407            Rret_addr   = R4_ARG2,
  3408            Rflags      = R5_ARG3;
  3410   prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);
  3412   __ profile_call(R11_scratch1, R12_scratch2);
  3413   // Argument and return type profiling.
  3414   __ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false);
  3415   __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
  3416 }
  3418 void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
  3419                                                   Register Rret,
  3420                                                   Register Rflags,
  3421                                                   Register Rmethod,
  3422                                                   Register Rtemp1,
  3423                                                   Register Rtemp2) {
  3425   assert_different_registers(Rmethod, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
  3426   Label LnotFinal;
  3428   // Check for vfinal.
  3429   __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  3430   __ bfalse(CCR0, LnotFinal);
  3432   Register Rscratch = Rflags; // Rflags is dead now.
  3434   // Final call case.
  3435   __ profile_final_call(Rtemp1, Rscratch);
  3436   // Argument and return type profiling.
  3437   __ profile_arguments_type(Rmethod, Rscratch, Rrecv_klass /* scratch */, true);
  3438   // Do the final call - the index (f2) contains the method.
  3439   __ call_from_interpreter(Rmethod, Rret, Rscratch, Rrecv_klass /* scratch */);
  3441   // Non-final call case.
  3442   __ bind(LnotFinal);
  3443   __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
  3444   generate_vtable_call(Rrecv_klass, Rmethod, Rret, Rscratch);
  3445 }
  3447 void TemplateTable::invokeinterface(int byte_no) {
  3448   assert(byte_no == f1_byte, "use this argument");
  3449   transition(vtos, vtos);
  3451   const Register Rscratch1        = R11_scratch1,
  3452                  Rscratch2        = R12_scratch2,
  3453                  Rmethod          = R6_ARG4,
  3454                  Rmethod2         = R9_ARG7,
  3455                  Rinterface_klass = R5_ARG3,
  3456                  Rret_addr        = R8_ARG6,
  3457                  Rindex           = R10_ARG8,
  3458                  Rreceiver        = R3_ARG1,
  3459                  Rrecv_klass      = R4_ARG2,
  3460                  Rflags           = R7_ARG5;
  3462   prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rmethod, Rreceiver, Rflags, Rscratch1);
  3464   // Get receiver klass.
  3465   __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch2);
  3466   __ load_klass(Rrecv_klass, Rreceiver);
  3468   // Check corner case object method.
  3469   Label LobjectMethod, L_no_such_interface, Lthrow_ame;
  3470   __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
  3471   __ btrue(CCR0, LobjectMethod);
  3473   __ lookup_interface_method(Rrecv_klass, Rinterface_klass, noreg, noreg, Rscratch1, Rscratch2,
  3474                              L_no_such_interface, /*return_method=*/false);
  3476   __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);
  3478   // Find entry point to call.
  3480   // Get declaring interface class from method
  3481   __ ld(Rinterface_klass, in_bytes(Method::const_offset()), Rmethod);
  3482   __ ld(Rinterface_klass, in_bytes(ConstMethod::constants_offset()), Rinterface_klass);
  3483   __ ld(Rinterface_klass, ConstantPool::pool_holder_offset_in_bytes(), Rinterface_klass);
  3485   // Get itable index from method
  3486   __ lwa(Rindex, in_bytes(Method::itable_index_offset()), Rmethod);
  3487   __ subfic(Rindex, Rindex, Method::itable_index_max);
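         // The stored value appears to be biased: subfic recovers the real
         // itable index as (Method::itable_index_max - stored value).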
  3489   __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rmethod2, Rscratch1, Rscratch2,
  3490                              L_no_such_interface);
  3492   __ cmpdi(CCR0, Rmethod2, 0);
  3493   __ beq(CCR0, Lthrow_ame);
  3494   // Found entry. Jump off!
  3495   // Argument and return type profiling.
  3496   __ profile_arguments_type(Rmethod2, Rscratch1, Rscratch2, true);
  3497   //__ profile_called_method(Rindex, Rscratch1);
  3498   __ call_from_interpreter(Rmethod2, Rret_addr, Rscratch1, Rscratch2);
  3500   // Itable entry was NULL => Throw AbstractMethodError.
  3501   __ bind(Lthrow_ame);
  3502   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
  3504   // Interface was not found => Throw incompatible class change error.
  3505   __ bind(L_no_such_interface);
  3506   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
  3507   DEBUG_ONLY( __ should_not_reach_here(); )
  3509   // Special case of invokeinterface called for virtual method of
  3510   // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
  3511 // The invokeinterface was rewritten to an invokevirtual, hence we have
  3512   // to handle this corner case. This code isn't produced by javac, but could
  3513   // be produced by another compliant java compiler.
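         // Illustration (hypothetical bytecode, not produced by javac): an
         // invokeinterface whose resolved target is a java.lang.Object method
         // such as hashCode(); set_method() flags such entries is_forced_virtual.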
  3514   __ bind(LobjectMethod);
  3515   invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rmethod, Rscratch1, Rscratch2);
  3516 }
  3518 void TemplateTable::invokedynamic(int byte_no) {
  3519   transition(vtos, vtos);
  3521   const Register Rret_addr = R3_ARG1,
  3522                  Rflags    = R4_ARG2,
  3523                  Rmethod   = R22_tmp2,
  3524                  Rscratch1 = R11_scratch1,
  3525                  Rscratch2 = R12_scratch2;
  3527   if (!EnableInvokeDynamic) {
  3528     // We should not encounter this bytecode if !EnableInvokeDynamic.
  3529     // The verifier will stop it. However, if we get past the verifier,
  3530     // this will stop the thread in a reasonable way, without crashing the JVM.
  3531     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
  3532     // The call_VM checks for exception, so we should never return here.
  3533     __ should_not_reach_here();
  3534     return;
  3535   }
  3537   prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2);
  3539   // Profile this call.
  3540   __ profile_call(Rscratch1, Rscratch2);
  3542   // Off we go. With the new method handles, we don't jump to a method handle
  3543   // entry any more. Instead, we pushed an "appendix" in prepare_invoke, which happens
  3544   // to be the CallSite object the bootstrap method returned. This is passed to a
  3545   // "link" method which does the dispatch (most likely it just grabs the MH stored
  3546   // inside the CallSite and does an invokehandle).
  3547   // Argument and return type profiling.
  3548   __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false);
  3549   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
  3550 }
  3552 void TemplateTable::invokehandle(int byte_no) {
  3553   transition(vtos, vtos);
  3555   const Register Rret_addr = R3_ARG1,
  3556                  Rflags    = R4_ARG2,
  3557                  Rrecv     = R5_ARG3,
  3558                  Rmethod   = R22_tmp2,
  3559                  Rscratch1 = R11_scratch1,
  3560                  Rscratch2 = R12_scratch2;
  3562   if (!EnableInvokeDynamic) {
  3563     // Rewriter does not generate this bytecode.
  3564     __ should_not_reach_here();
  3565     return;
  3566   }
  3568   prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2);
  3569   __ verify_method_ptr(Rmethod);
  3570   __ null_check_throw(Rrecv, -1, Rscratch2);
  3572   __ profile_final_call(Rrecv, Rscratch1);
  3574   // Still no call from handle => We call the method handle interpreter here.
  3575   // Argument and return type profiling.
  3576   __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
  3577   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
  3578 }
  3580 // =============================================================================
  3581 // Allocation
  3583 // Puts allocated obj ref onto the expression stack.
  3584 void TemplateTable::_new() {
  3585   transition(vtos, atos);
  3587   Label Lslow_case,
  3588         Ldone,
  3589         Linitialize_header,
  3590         Lallocate_shared,
  3591         Linitialize_object;  // Including clearing the fields.
  3593   const Register RallocatedObject = R17_tos,
  3594                  RinstanceKlass   = R9_ARG7,
  3595                  Rscratch         = R11_scratch1,
  3596                  Roffset          = R8_ARG6,
  3597                  Rinstance_size   = Roffset,
  3598                  Rcpool           = R4_ARG2,
  3599                  Rtags            = R3_ARG1,
  3600                  Rindex           = R5_ARG3;
  3602   const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
  3604   // --------------------------------------------------------------------------
  3605   // Check if fast case is possible.
  3607   // Load pointers to const pool and const pool's tags array.
  3608   __ get_cpool_and_tags(Rcpool, Rtags);
  3609   // Load index of constant pool entry.
  3610   __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);
  3612   if (UseTLAB) {
  3613     // Make sure the class we're about to instantiate has been resolved.
  3614     // This is done before loading the instanceKlass, to be consistent with the
  3615     // order in which the Constant Pool is updated (see ConstantPoolCache::klass_at_put).
  3616     __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  3617     __ lbzx(Rtags, Rindex, Rtags);
  3619     __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  3620     __ bne(CCR0, Lslow_case);
  3622     // Get instanceKlass (load from Rcpool + sizeof(ConstantPool) + Rindex*BytesPerWord).
  3623     __ sldi(Roffset, Rindex, LogBytesPerWord);
  3624     __ addi(Rscratch, Rcpool, sizeof(ConstantPool));
  3625     __ isync(); // Order load of instance Klass wrt. tags.
  3626     __ ldx(RinstanceKlass, Roffset, Rscratch);
  3628     // Make sure klass is fully initialized and get instance_size.
  3629     __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
  3630     __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass);
  3632     __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized);
  3633     // Make sure klass does not have a finalizer and is not abstract, an interface, or java/lang/Class.
  3634     __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?
  3636     __ crnand(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); // slow path bit set or not fully initialized?
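           // crnand computes CR0.eq = !(CR1.eq && CR0.eq), so the branch below
           // is taken if the klass is not fully initialized or the slow path
           // bit is set in the layout helper.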
  3637     __ beq(CCR0, Lslow_case);
  3639     // --------------------------------------------------------------------------
  3640     // Fast case:
  3641     // Allocate the instance.
  3642     // 1) Try to allocate in the TLAB.
  3643     // 2) If fail, and the TLAB is not full enough to discard, allocate in the shared Eden.
  3644     // 3) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).
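           // Sketch of the TLAB fast path: new_top = tlab_top + size; if
           // new_top <= tlab_end, store it back. A simple bump-pointer
           // allocation, no CAS needed since the TLAB is thread-local.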
  3646     Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
  3647     Register RnewTopValue = R6_ARG4;
  3648     Register RendValue    = R7_ARG5;
  3650     // Check if we can allocate in the TLAB.
  3651     __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
  3652     __ ld(RendValue,    in_bytes(JavaThread::tlab_end_offset()), R16_thread);
  3654     __ add(RnewTopValue, Rinstance_size, RoldTopValue);
  3656     // If there is enough space, we do not CAS and do not clear.
  3657     __ cmpld(CCR0, RnewTopValue, RendValue);
  3658     __ bgt(CCR0, allow_shared_alloc ? Lallocate_shared : Lslow_case);
  3660     __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
  3662     if (ZeroTLAB) {
  3663       // The fields have already been cleared.
  3664       __ b(Linitialize_header);
  3665     } else {
  3666       // Initialize both the header and fields.
  3667       __ b(Linitialize_object);
  3668     }
  3670     // Fall through: TLAB was too small.
  3671     if (allow_shared_alloc) {
  3672       Register RtlabWasteLimitValue = R10_ARG8;
  3673       Register RfreeValue = RnewTopValue;
  3675       __ bind(Lallocate_shared);
  3676       // Check if tlab should be discarded (refill_waste_limit >= free).
  3677       __ ld(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
  3678       __ subf(RfreeValue, RoldTopValue, RendValue);
  3679       __ srdi(RfreeValue, RfreeValue, LogHeapWordSize); // in dwords
  3680       __ cmpld(CCR0, RtlabWasteLimitValue, RfreeValue);
  3681       __ bge(CCR0, Lslow_case);
  3683       // Increment waste limit to prevent getting stuck on this slow path.
  3684       __ addi(RtlabWasteLimitValue, RtlabWasteLimitValue, (int)ThreadLocalAllocBuffer::refill_waste_limit_increment());
  3685       __ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
  3686     }
  3687     // else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
  3688   }
  3689   // else: Always go the slow path.
  3691   // --------------------------------------------------------------------------
  3692   // slow case
  3693   __ bind(Lslow_case);
  3694   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
  3696   if (UseTLAB) {
  3697     __ b(Ldone);
  3698     // --------------------------------------------------------------------------
  3699     // Init1: Zero out newly allocated memory.
  3701     if (!ZeroTLAB || allow_shared_alloc) {
  3702       // Clear object fields.
  3703       __ bind(Linitialize_object);
  3705       // Initialize remaining object fields.
  3706       Register Rbase = Rtags;
  3707       __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
  3708       __ addi(Rbase, RallocatedObject, sizeof(oopDesc));
  3709       __ srdi(Rinstance_size, Rinstance_size, 3);
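             // Rinstance_size now holds ceil((instance size in bytes - header
             // size) / 8), i.e. the number of doublewords to clear; for
             // instance klasses the layout helper is the size in bytes.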
  3711       // Clear out the object, skipping the header. Also takes care of the zero-length case.
  3712       __ clear_memory_doubleword(Rbase, Rinstance_size);
  3713       // fallthru: __ b(Linitialize_header);
  3714     }
  3716     // --------------------------------------------------------------------------
  3717     // Init2: Initialize the header: mark, klass
  3718     __ bind(Linitialize_header);
  3720     // Init mark.
  3721     if (UseBiasedLocking) {
  3722       __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
  3723     } else {
  3724       __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
  3726     __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);
  3728     // Init klass.
  3729     __ store_klass_gap(RallocatedObject);
  3730     __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)
  3732     // Check and trigger dtrace event.
  3733     {
  3734       SkipIfEqualZero skip_if(_masm, Rscratch, &DTraceAllocProbes);
  3735       __ push(atos);
  3736       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
  3737       __ pop(atos);
  3738     }
  3739   }
  3741   // continue
  3742   __ bind(Ldone);
  3744   // Must prevent reordering of stores for object initialization with stores that publish the new object.
  3745   __ membar(Assembler::StoreStore);
  3746 }
  3748 void TemplateTable::newarray() {
  3749   transition(itos, atos);
  3751   __ lbz(R4, 1, R14_bcp);
  3752   __ extsw(R5, R17_tos);
  3753   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4, R5 /* size */);
  3755   // Must prevent reordering of stores for object initialization with stores that publish the new object.
  3756   __ membar(Assembler::StoreStore);
  3757 }
  3759 void TemplateTable::anewarray() {
  3760   transition(itos, atos);
  3762   __ get_constant_pool(R4);
  3763   __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned);
  3764   __ extsw(R6, R17_tos); // size
  3765   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */);
  3767   // Must prevent reordering of stores for object initialization with stores that publish the new object.
  3768   __ membar(Assembler::StoreStore);
  3769 }
  3771 // Allocate a multi dimensional array
  3772 void TemplateTable::multianewarray() {
  3773   transition(vtos, atos);
  3775   Register Rptr = R31; // Needs to survive C call.
  3777   // Compute ndims * wordSize into Rptr.
  3778   __ lbz(Rptr, 3, R14_bcp);
  3779   __ sldi(Rptr, Rptr, Interpreter::logStackElementSize);
  3780   // Esp points past last_dim, so set R4 to first_dim address.
  3781   __ add(R4, Rptr, R15_esp);
  3782   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */);
  3783   // Pop all dimensions off the stack.
  3784   __ add(R15_esp, Rptr, R15_esp);
  3786   // Must prevent reordering of stores for object initialization with stores that publish the new object.
  3787   __ membar(Assembler::StoreStore);
  3788 }
  3790 void TemplateTable::arraylength() {
  3791   transition(atos, itos);
  3793   Label LnoException;
  3794   __ verify_oop(R17_tos);
  3795   __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1);
  3796   __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos);
  3797 }
  3799 // ============================================================================
  3800 // Typechecks
  3802 void TemplateTable::checkcast() {
  3803   transition(atos, atos);
  3805   Label Ldone, Lis_null, Lquicked, Lresolved;
  3806   Register Roffset         = R6_ARG4,
  3807            RobjKlass       = R4_ARG2,
  3808            RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
  3809            Rcpool          = R11_scratch1,
  3810            Rtags           = R12_scratch2;
  3812   // Null does not pass.
  3813   __ cmpdi(CCR0, R17_tos, 0);
  3814   __ beq(CCR0, Lis_null);
  3816   // Get constant pool tag to find out if the bytecode has already been "quickened".
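         // "Quickened" means the CONSTANT_Class entry has been resolved; only
         // then can the Klass* be loaded directly from the constant pool (the
         // isync below orders that load after the tag load).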
  3817   __ get_cpool_and_tags(Rcpool, Rtags);
  3819   __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);
  3821   __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  3822   __ lbzx(Rtags, Rtags, Roffset);
  3824   __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  3825   __ beq(CCR0, Lquicked);
  3827   // Call into the VM to "quicken" instanceof.
  3828   __ push_ptr();  // for GC
  3829   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  3830   __ get_vm_result_2(RspecifiedKlass);
  3831   __ pop_ptr();   // Restore receiver.
  3832   __ b(Lresolved);
  3834   // Extract target class from constant pool.
  3835   __ bind(Lquicked);
  3836   __ sldi(Roffset, Roffset, LogBytesPerWord);
  3837   __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
  3838   __ isync(); // Order load of specified Klass wrt. tags.
  3839   __ ldx(RspecifiedKlass, Rcpool, Roffset);
  3841   // Do the checkcast.
  3842   __ bind(Lresolved);
  3843   // Get value klass in RobjKlass.
  3844   __ load_klass(RobjKlass, R17_tos);
  3845   // Generate a fast subtype check; branches to Ldone on success, falls through to the exception path on failure.
  3846   __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
  3848   // Not a subtype; so must throw exception
  3849   // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention.
  3850   __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry);
  3851   __ mtctr(R11_scratch1);
  3852   __ bctr();
  3854   // Profile the null case.
  3855   __ align(32, 12);
  3856   __ bind(Lis_null);
  3857   __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch.
  3859   __ align(32, 12);
  3860   __ bind(Ldone);
  3861 }
  3863 // Output:
  3864 //   - tos == 0: Obj was null or not an instance of class.
  3865 //   - tos == 1: Obj was an instance of class.
  3866 void TemplateTable::instanceof() {
  3867   transition(atos, itos);
  3869   Label Ldone, Lis_null, Lquicked, Lresolved;
  3870   Register Roffset         = R5_ARG3,
  3871            RobjKlass       = R4_ARG2,
  3872            RspecifiedKlass = R6_ARG4, // Generate_ClassCastException_verbose_handler will expect the value in this register.
  3873            Rcpool          = R11_scratch1,
  3874            Rtags           = R12_scratch2;
  3876   // Null does not pass.
  3877   __ cmpdi(CCR0, R17_tos, 0);
  3878   __ beq(CCR0, Lis_null);
  3880   // Get constant pool tag to find out if the bytecode has already been "quickened".
  3881   __ get_cpool_and_tags(Rcpool, Rtags);
  3883   __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);
  3885   __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  3886   __ lbzx(Rtags, Rtags, Roffset);
  3888   __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  3889   __ beq(CCR0, Lquicked);
  3891   // Call into the VM to "quicken" instanceof.
  3892   __ push_ptr();  // for GC
  3893   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  3894   __ get_vm_result_2(RspecifiedKlass);
  3895   __ pop_ptr();   // Restore receiver.
  3896   __ b(Lresolved);
  3898   // Extract target class from constant pool.
  3899   __ bind(Lquicked);
  3900   __ sldi(Roffset, Roffset, LogBytesPerWord);
  3901   __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
  3902   __ isync(); // Order load of specified Klass wrt. tags.
  3903   __ ldx(RspecifiedKlass, Rcpool, Roffset);
  3905   // Do the checkcast.
  3906   __ bind(Lresolved);
  3907   // Get value klass in RobjKlass.
  3908   __ load_klass(RobjKlass, R17_tos);
  3909   // Generate a fast subtype check: tos is preset to 1, the check branches to Ldone on success, else falls through and tos is reset to 0.
  3910   __ li(R17_tos, 1);
  3911   __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
  3912   __ li(R17_tos, 0);
  3914   if (ProfileInterpreter) {
  3915     __ b(Ldone);
  3916   }
  3918   // Profile the null case.
  3919   __ align(32, 12);
  3920   __ bind(Lis_null);
  3921   __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch.
  3923   __ align(32, 12);
  3924   __ bind(Ldone);
  3925 }
  3927 // =============================================================================
  3928 // Breakpoints
  3930 void TemplateTable::_breakpoint() {
  3931   transition(vtos, vtos);
  3933   // Get the unpatched byte code.
  3934   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp);
  3935   __ mr(R31, R3_RET);
  3937   // Post the breakpoint event.
  3938   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp);
  3940   // Complete the execution of original bytecode.
  3941   __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos));
  3942 }
  3944 // =============================================================================
  3945 // Exceptions
  3947 void TemplateTable::athrow() {
  3948   transition(atos, vtos);
  3950   // Exception oop is in tos
  3951   __ verify_oop(R17_tos);
  3953   __ null_check_throw(R17_tos, -1, R11_scratch1);
  3955   // Throw exception interpreter entry expects exception oop to be in R3.
  3956   __ mr(R3_RET, R17_tos);
  3957   __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry());
  3958   __ mtctr(R11_scratch1);
  3959   __ bctr();
  3960 }
  3962 // =============================================================================
  3963 // Synchronization
  3964 // Searches the basic object lock list on the stack for a free slot
  3965 // and uses it to lock the object in tos.
  3966 //
  3967 // Recursive locking is enabled by exiting the search if the same
  3968 // object is already found in the list. Thus, a new basic object lock
  3969 // is allocated "higher up" in the stack and is found first
  3970 // at the next monitor exit.
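       //
       // The search loop below checks three exit conditions per iteration,
       // each in its own condition register: free slot found (CCR0), same
       // object found (CCR1), and monitor area limit reached (CCR6).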
  3971 void TemplateTable::monitorenter() {
  3972   transition(atos, vtos);
  3974   __ verify_oop(R17_tos);
  3976   Register Rcurrent_monitor  = R11_scratch1,
  3977            Rcurrent_obj      = R12_scratch2,
  3978            Robj_to_lock      = R17_tos,
  3979            Rscratch1         = R3_ARG1,
  3980            Rscratch2         = R4_ARG2,
  3981            Rscratch3         = R5_ARG3,
  3982            Rcurrent_obj_addr = R6_ARG4;
  3984   // ------------------------------------------------------------------------------
  3985   // Null pointer exception.
  3986   __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
  3988   // Try to acquire a lock on the object.
  3989   // Repeat until succeeded (i.e., until monitorenter returns true).
  3991   // ------------------------------------------------------------------------------
  3992   // Find a free slot in the monitor block.
  3993   Label Lfound, Lexit, Lallocate_new;
  3994   ConditionRegister found_free_slot = CCR0,
  3995                     found_same_obj  = CCR1,
  3996                     reached_limit   = CCR6;
  3997   {
  3998     Label Lloop, Lentry;
  3999     Register Rlimit = Rcurrent_monitor;
  4001     // Set up search loop - start with topmost monitor.
  4002     __ add(Rcurrent_obj_addr, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
  4004     __ ld(Rlimit, 0, R1_SP);
  4005     __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base
  4007     // Check if any slot is present => shortcut to allocation if not.
  4008     __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
  4009     __ bgt(reached_limit, Lallocate_new);
  4011     // Pre-load topmost slot.
  4012     __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
  4013     __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
  4014     // The search loop.
  4015     __ bind(Lloop);
  4016     // Found free slot?
  4017     __ cmpdi(found_free_slot, Rcurrent_obj, 0);
  4018     // Is this entry for same obj? If so, stop the search and take the found
  4019     // free slot or allocate a new one to enable recursive locking.
  4020     __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
  4021     __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
  4022     __ beq(found_free_slot, Lexit);
  4023     __ beq(found_same_obj, Lallocate_new);
  4024     __ bgt(reached_limit, Lallocate_new);
  4025     // Check if last allocated BasicLockObj reached.
  4026     __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
  4027     __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
  4028     // Next iteration if unchecked BasicObjectLocks exist on the stack.
  4029     __ b(Lloop);
  4030   }
  4032   // ------------------------------------------------------------------------------
  4033   // Check if we found a free slot.
  4034   __ bind(Lexit);
  4036   __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  4037   __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
  4038   __ b(Lfound);
  4040   // We didn't find a free BasicObjLock => allocate one.
  4041   __ align(32, 12);
  4042   __ bind(Lallocate_new);
  4043   __ add_monitor_to_stack(false, Rscratch1, Rscratch2);
  4044   __ mr(Rcurrent_monitor, R26_monitor);
  4045   __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
  4047   // ------------------------------------------------------------------------------
  4048   // We now have a slot to lock.
  4049   __ bind(Lfound);
  4051   // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
  4052   // The object has already been popped from the stack, so the expression stack looks correct.
  4053   __ addi(R14_bcp, R14_bcp, 1);
  4055   __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
  4056   __ lock_object(Rcurrent_monitor, Robj_to_lock);
  4058   // Check if there's enough space on the stack for the monitors after locking.
  4059   Label Lskip_stack_check;
  4060   // Optimization: If the monitor stack section is less than a standard page size (4K), don't run
  4061   // the stack check; there should be enough shadow pages to cover it.
  4062   __ ld(Rscratch3, 0, R1_SP);
  4063   __ sub(Rscratch3, Rscratch3, R26_monitor);
  4064   __ cmpdi(CCR0, Rscratch3, 4*K);
  4065   __ blt(CCR0, Lskip_stack_check);
  4067   DEBUG_ONLY(__ untested("stack overflow check during monitor enter");)
  4068   __ li(Rscratch1, 0);
  4069   __ generate_stack_overflow_check_with_compare_and_throw(Rscratch1, Rscratch2);
  4071   __ align(32, 12);
  4072   __ bind(Lskip_stack_check);
  4074   // The bcp has already been incremented. Just need to dispatch to next instruction.
  4075   __ dispatch_next(vtos);
  4076 }
  4078 void TemplateTable::monitorexit() {
  4079   transition(atos, vtos);
  4080   __ verify_oop(R17_tos);
  4082   Register Rcurrent_monitor  = R11_scratch1,
  4083            Rcurrent_obj      = R12_scratch2,
  4084            Robj_to_lock      = R17_tos,
  4085            Rcurrent_obj_addr = R3_ARG1,
  4086            Rlimit            = R4_ARG2;
  4087   Label Lfound, Lillegal_monitor_state;
  4089   // Check corner case: unbalanced monitorEnter / Exit.
  4090   __ ld(Rlimit, 0, R1_SP);
  4091   __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base
  4093   // Null pointer check.
  4094   __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
  4096   __ cmpld(CCR0, R26_monitor, Rlimit);
  4097   __ bgt(CCR0, Lillegal_monitor_state);
  4099   // Find the corresponding slot in the monitors stack section.
  4100   {
  4101     Label Lloop;
  4103     // Start with topmost monitor.
  4104     __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
  4105     __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes());
  4106     __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
  4107     __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
  4109     __ bind(Lloop);
  4110     // Is this entry for same obj?
  4111     __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
  4112     __ beq(CCR0, Lfound);
  4114     // Check if last allocated BasicLockObj reached.
  4116     __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
  4117     __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit);
  4118     __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
  4120     // Next iteration if unchecked BasicObjectLocks exist on the stack.
  4121     __ ble(CCR0, Lloop);
  4122   }
  4124   // Fell through without finding the basic obj lock => throw up!
  4125   __ bind(Lillegal_monitor_state);
  4126   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
  4127   __ should_not_reach_here();
  4129   __ align(32, 12);
  4130   __ bind(Lfound);
  4131   __ addi(Rcurrent_monitor, Rcurrent_obj_addr,
  4132           -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  4133   __ unlock_object(Rcurrent_monitor);
  4134 }
  4136 // ============================================================================
  4137 // Wide bytecodes
  4139 // Wide instructions. Simply redirects to the wide entry point for that instruction.
  4140 void TemplateTable::wide() {
  4141   transition(vtos, vtos);
  4143   const Register Rtable = R11_scratch1,
  4144                  Rindex = R12_scratch2,
  4145                  Rtmp   = R0;
  4147   __ lbz(Rindex, 1, R14_bcp);
  4149   __ load_dispatch_table(Rtable, Interpreter::_wentry_point);
  4151   __ slwi(Rindex, Rindex, LogBytesPerWord);
  4152   __ ldx(Rtmp, Rtable, Rindex);
  4153   __ mtctr(Rtmp);
  4154   __ bctr();
  4155   // Note: the bcp increment step is part of the individual wide bytecode implementations.
  4156 }
  4157 #endif // !CC_INTERP
