src/cpu/ppc/vm/templateInterpreter_ppc.cpp

changeset: 6723:0bf37f737702
author:    roland
date:      Tue, 01 Apr 2014 09:36:49 +0200

8032410: compiler/uncommontrap/TestStackBangRbp.java times out on Solaris-Sparc V9
Summary: make compiled code bang the stack by the worst case size of the interpreter frame at deoptimization points.
Reviewed-by: twisti, kvn

/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#ifndef CC_INTERP
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#undef __
#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
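// BIND is used below wherever a label also marks a point of interest in the
// disassembly: it binds the label and, in non-product builds, emits its name
// as a block comment.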

//-----------------------------------------------------------------------------

// Actually we should never reach here since we do stack overflow checks before pushing any frame.
address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  __ unimplemented("generate_StackOverflowError_handler");
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  __ empty_expression_stack();
  __ load_const_optimized(R4_ARG2, (address) name);
  // Index is in R17_tos.
  __ mr(R5_ARG3, R17_tos);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException));
  return entry;
}

#if 0
// Call special ClassCastException constructor taking object to cast
// and target class as arguments.
address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler() {
  address entry = __ pc();

  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Thread will be loaded to R3_ARG1.
  // Target class oop is in register R5_ARG3 by convention!
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose, R17_tos, R5_ARG3));
  // Above call must not return here since exception pending.
  DEBUG_ONLY(__ should_not_reach_here();)
  return entry;
}
#endif

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Load exception object.
  // Thread will be loaded to R3_ARG1.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException), R17_tos);
#ifdef ASSERT
  // Above call must not return here since exception pending.
  __ should_not_reach_here();
#endif
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  address entry = __ pc();
  //__ untested("generate_exception_handler_common");
  Register Rexception = R17_tos;

  // Expression stack must be empty before entering the VM if an exception happened.
  __ empty_expression_stack();

  __ load_const_optimized(R4_ARG2, (address) name, R11_scratch1);
  if (pass_oop) {
    __ mr(R5_ARG3, Rexception);
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), false);
  } else {
    __ load_const_optimized(R5_ARG3, (address) message, R11_scratch1);
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), false);
  }

  // Throw exception.
  __ mr(R3_ARG1, Rexception);
  __ load_const_optimized(R11_scratch1, Interpreter::throw_exception_entry(), R12_scratch2);
  __ mtctr(R11_scratch1);
  __ bctr();

  return entry;
}

address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  __ unimplemented("generate_continuation_for");
  return entry;
}

// This entry is returned to when a call returns to the interpreter.
// When we arrive here, we expect that the callee stack frame is already popped.
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Move the value out of the return register back to the TOS cache of current frame.
  switch (state) {
    case ltos:
    case btos:
    case ctos:
    case stos:
    case atos:
    case itos: __ mr(R17_tos, R3_RET); break;   // RET -> TOS cache
    case ftos:
    case dtos: __ fmr(F15_ftos, F1_RET); break; // FRET -> TOS cache
    case vtos: break;                           // Nothing to do, this was a void return.
    default  : ShouldNotReachHere();
  }

  __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
  __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
  __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);

  // Compiled code destroys templateTableBase, reload.
  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);

  const Register cache = R11_scratch1;
  const Register size  = R12_scratch2;
  __ get_cache_and_index_at_bcp(cache, 1, index_size);

  // Big Endian (get least significant byte of 64 bit value):
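  // The flags word is 8 bytes wide and the parameter size sits in its least
  // significant byte, which on big-endian PPC64 lies at byte offset 7.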
  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()) + 7, cache);
  __ sldi(size, size, Interpreter::logStackElementSize);
  __ add(R15_esp, R15_esp, size);
  __ dispatch_next(state, step);
  return entry;
}

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  // If state != vtos, we're returning from a native method, which put its result
  // into the result register. So move the value out of the return register back
  // to the TOS cache of current frame.

  switch (state) {
    case ltos:
    case btos:
    case ctos:
    case stos:
    case atos:
    case itos: __ mr(R17_tos, R3_RET); break;   // GR_RET -> TOS cache
    case ftos:
    case dtos: __ fmr(F15_ftos, F1_RET); break; // FRET -> TOS cache
    case vtos: break;                           // Nothing to do, this was a void return.
    default  : ShouldNotReachHere();
  }

  // Load LcpoolCache @@@ should be already set!
  __ get_constant_pool_cache(R27_constPoolCache);

  // Handle a pending exception, fall through if none.
  __ check_and_forward_exception(R11_scratch1, R12_scratch2);

  // Start executing bytecodes.
  __ dispatch_next(state, step);

  return entry;
}

// A result handler converts the native result into java format.
// Use the shared code between c++ and template interpreter.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  return AbstractInterpreterGenerator::generate_result_handler_for(type);
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();

  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));

  return entry;
}

// Helpers for commoning out cases in the various types of method entries.

// Increment invocation count & check for overflow.
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test.
//
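// For example (hypothetical values): with Tier0InvokeNotifyFreqLog == 7, the
// mask below covers the seven count bits above count_shift, so the masked
// counter is zero, and the overflow path is taken, once every 128 increments.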
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in method or in MDO depending on whether we're profiling or not.
  Register Rscratch1   = R11_scratch1;
  Register Rscratch2   = R12_scratch2;
  Register R3_counters = R3_ARG1;
  Label done;

  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo;
    if (ProfileInterpreter) {
      const Register Rmdo = Rscratch1;
      // If no method data exists, go to profile_continue.
      __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
      __ cmpdi(CCR0, Rmdo, 0);
      __ beq(CCR0, no_mdo);

      // Increment backedge counter in the MDO.
      const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
      __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mdo_bc_offs, Rmdo);
      __ load_const_optimized(Rscratch1, mask, R0);
      __ and_(Rscratch1, Rscratch2, Rscratch1);
      __ bne(CCR0, done);
      __ b(*overflow);
    }

    // Increment counter in MethodCounters*.
    const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
    __ bind(no_mdo);
    __ get_method_counters(R19_method, R3_counters, done);
    __ lwz(Rscratch2, mo_bc_offs, R3_counters);
    __ addi(Rscratch2, Rscratch2, increment);
    __ stw(Rscratch2, mo_bc_offs, R3_counters);
    __ load_const_optimized(Rscratch1, mask, R0);
    __ and_(Rscratch1, Rscratch2, Rscratch1);
    __ beq(CCR0, *overflow);

    __ bind(done);

  } else {

    // Update standard invocation counters.
    Register Rsum_ivc_bec = R4_ARG2;
    __ get_method_counters(R19_method, R3_counters, done);
    __ increment_invocation_counter(R3_counters, Rsum_ivc_bec, R12_scratch2);
    // Increment interpreter invocation counter.
    if (ProfileInterpreter) {  // %%% Merge this into methodDataOop.
      __ lwz(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
      __ addi(R12_scratch2, R12_scratch2, 1);
      __ stw(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
    }
    // Check if we must create a method data obj.
    if (ProfileInterpreter && profile_method != NULL) {
      const Register profile_limit = Rscratch1;
      int pl_offs = __ load_const_optimized(profile_limit, &InvocationCounter::InterpreterProfileLimit, R0, true);
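      // load_const_optimized with return_simm16_rest == true materializes only
      // the high part of the address and hands back the low 16-bit displacement
      // for the lwz that follows.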
      __ lwz(profile_limit, pl_offs, profile_limit);
      // Test to see if we should create a method data oop.
      __ cmpw(CCR0, Rsum_ivc_bec, profile_limit);
      __ blt(CCR0, *profile_method_continue);
      // If no method data exists, go to profile_method.
      __ test_method_data_pointer(*profile_method);
    }
    // Finally check for counter overflow.
    if (overflow) {
      const Register invocation_limit = Rscratch1;
      int il_offs = __ load_const_optimized(invocation_limit, &InvocationCounter::InterpreterInvocationLimit, R0, true);
      __ lwz(invocation_limit, il_offs, invocation_limit);
      assert(4 == sizeof(InvocationCounter::InterpreterInvocationLimit), "unexpected field size");
      __ cmpw(CCR0, Rsum_ivc_bec, invocation_limit);
      __ bge(CCR0, *overflow);
    }

    __ bind(done);
  }
}

// Generate code to initiate compilation on invocation counter overflow.
void TemplateInterpreterGenerator::generate_counter_overflow(Label& continue_entry) {
  // Generate code to initiate compilation on the counter overflow.

  // InterpreterRuntime::frequency_counter_overflow takes one argument,
  // which indicates if the counter overflow occurs at a backwards branch (NULL bcp).
  // We pass zero in.
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  //
  // Unlike the C++ interpreter above: Check exceptions!
  // Assumption: Caller must set the flag "_do_not_unlock_if_synchronized" if the monitor of a sync'ed
  // method has not yet been created. Thus, no unlocking of a non-existing monitor can occur.

  __ li(R4_ARG2, 0);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);

  // Returns verified_entry_point or NULL.
  // We ignore it in any case.
  __ b(continue_entry);
}

void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_frame_size, Register Rscratch1) {
  assert_different_registers(Rmem_frame_size, Rscratch1);
  __ generate_stack_overflow_check_with_compare_and_throw(Rmem_frame_size, Rscratch1);
}

void TemplateInterpreterGenerator::unlock_method(bool check_exceptions) {
  __ unlock_object(R26_monitor, check_exceptions);
}

// Lock the current method, interpreter register window must be set up!
void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded) {
  const Register Robj_to_lock = Rscratch2;

  {
    if (!flags_preloaded) {
      __ lwz(Rflags, method_(access_flags));
    }

#ifdef ASSERT
    // Check if the method needs synchronization.
    {
      Label Lok;
      __ testbitdi(CCR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
      __ btrue(CCR0, Lok);
      __ stop("method doesn't need synchronization");
      __ bind(Lok);
    }
#endif // ASSERT
  }

  // Get synchronization object to Rscratch2.
  {
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    Label Lstatic;
    Label Ldone;

    __ testbitdi(CCR0, R0, Rflags, JVM_ACC_STATIC_BIT);
    __ btrue(CCR0, Lstatic);

    // Non-static case: load receiver obj from stack and we're done.
    __ ld(Robj_to_lock, R18_locals);
    __ b(Ldone);

    __ bind(Lstatic); // Static case: Lock the java mirror.
    __ ld(Robj_to_lock, in_bytes(Method::const_offset()), R19_method);
    __ ld(Robj_to_lock, in_bytes(ConstMethod::constants_offset()), Robj_to_lock);
    __ ld(Robj_to_lock, ConstantPool::pool_holder_offset_in_bytes(), Robj_to_lock);
    __ ld(Robj_to_lock, mirror_offset, Robj_to_lock);

    __ bind(Ldone);
    __ verify_oop(Robj_to_lock);
  }

  // Got the oop to lock => execute!
  __ add_monitor_to_stack(true, Rscratch1, R0);

  __ std(Robj_to_lock, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
  __ lock_object(R26_monitor, Robj_to_lock);
}

// Generate a fixed interpreter frame for pure interpreter
// and I2N native transition frames.
//
// Before (stack grows downwards):
//
//         |  ...         |
//         |------------- |
//         |  java arg0   |
//         |  ...         |
//         |  java argn   |
//         |              |   <-   R15_esp
//         |              |
//         |--------------|
//         | abi_112      |
//         |              |   <-   R1_SP
//         |==============|
//
//
// After:
//
//         |  ...         |
//         |  java arg0   |<-   R18_locals
//         |  ...         |
//         |  java argn   |
//         |--------------|
//         |              |
//         |  java locals |
//         |              |
//         |--------------|
//         |  abi_48      |
//         |==============|
//         |              |
//         |   istate     |
//         |              |
//         |--------------|
//         |   monitor    |<-   R26_monitor
//         |--------------|
//         |              |<-   R15_esp
//         | expression   |
//         | stack        |
//         |              |
//         |--------------|
//         |              |
//         | abi_112      |<-   R1_SP
//         |==============|
//
// The topmost frame needs an ABI space of 112 bytes. This space is needed,
// since we call to C. The C function may spill its arguments to the caller's
// frame. When we call to Java, we don't need these spill slots. In order to save
// space on the stack, we resize the caller frame. However, the Java locals reside
// in the caller frame, so that frame has to be enlarged. The frame_size for the
// current frame was calculated based on max_stack as size for the expression
// stack. At the call, just a part of the expression stack might be used.
// We don't want to waste this space and cut the frame back accordingly.
// The resulting amount for resizing is calculated as follows:
// resize =   (number_of_locals - number_of_arguments) * slot_size
//          + (R1_SP - R15_esp) + 48
//
// The size for the callee frame is calculated:
// framesize = 112 + max_stack + monitor + state_size
//
// max_stack:  Max number of slots on the expression stack, loaded from the method.
// monitor:    We statically reserve room for one monitor object.
// state_size: We save the current state of the interpreter to this area.
//
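// A worked example with hypothetical numbers: for a method with 10 locals, 3
// of them parameters, the first term contributes (10 - 3) * 8 = 56 bytes of
// growth; the negative (R1_SP - R15_esp) term then cuts away the unused part
// of the old expression stack, and the trailing + 48 re-reserves the minimal
// ABI area.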
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Register Rsize_of_parameters, Register Rsize_of_locals) {
  Register parent_frame_resize = R6_ARG4, // Frame will grow by this number of bytes.
           top_frame_size      = R7_ARG5,
           Rconst_method       = R8_ARG6;

  assert_different_registers(Rsize_of_parameters, Rsize_of_locals, parent_frame_resize, top_frame_size);

  __ ld(Rconst_method, method_(const));
  __ lhz(Rsize_of_parameters /* number of params */,
         in_bytes(ConstMethod::size_of_parameters_offset()), Rconst_method);
  if (native_call) {
    // If we're calling a native method, we reserve space for the worst-case signature
    // handler varargs vector, which is max(Argument::n_register_parameters, parameter_count+2).
    // We add two slots to the parameter_count, one for the jni
    // environment and one for a possible native mirror.
    Label skip_native_calculate_max_stack;
    __ addi(top_frame_size, Rsize_of_parameters, 2);
    __ cmpwi(CCR0, top_frame_size, Argument::n_register_parameters);
    __ bge(CCR0, skip_native_calculate_max_stack);
    __ li(top_frame_size, Argument::n_register_parameters);
    __ bind(skip_native_calculate_max_stack);
    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
    assert(Rsize_of_locals == noreg, "Rsize_of_locals not initialized"); // Only relevant value is Rsize_of_parameters.
  } else {
    __ lhz(Rsize_of_locals /* number of locals */, in_bytes(ConstMethod::size_of_locals_offset()), Rconst_method);
    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
    __ sldi(Rsize_of_locals, Rsize_of_locals, Interpreter::logStackElementSize);
    __ lhz(top_frame_size, in_bytes(ConstMethod::max_stack_offset()), Rconst_method);
    __ sub(R11_scratch1, Rsize_of_locals, Rsize_of_parameters); // >=0
    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
    __ add(parent_frame_resize, parent_frame_resize, R11_scratch1);
  }

  // Compute top frame size.
  __ addi(top_frame_size, top_frame_size, frame::abi_reg_args_size + frame::ijava_state_size);
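  // abi_reg_args_size is the 112-byte "abi_112" area shown in the picture
  // above; ijava_state_size is the interpreter state ("istate") area.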

  // Cut back area between esp and max_stack.
  __ addi(parent_frame_resize, parent_frame_resize, frame::abi_minframe_size - Interpreter::stackElementSize);

  __ round_to(top_frame_size, frame::alignment_in_bytes);
  __ round_to(parent_frame_resize, frame::alignment_in_bytes);
  // parent_frame_resize = (locals-parameters) - (ESP-SP-ABI48) Rounded to frame alignment size.
  // Enlarge by locals-parameters (not in case of native_call), shrink by ESP-SP-ABI48.

  {
    // --------------------------------------------------------------------------
    // Stack overflow check

    Label cont;
    __ add(R11_scratch1, parent_frame_resize, top_frame_size);
    generate_stack_overflow_check(R11_scratch1, R12_scratch2);
  }

  // Set up interpreter state registers.

  __ add(R18_locals, R15_esp, Rsize_of_parameters);
  __ ld(R27_constPoolCache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
  __ ld(R27_constPoolCache, ConstantPool::cache_offset_in_bytes(), R27_constPoolCache);

  // Set method data pointer.
  if (ProfileInterpreter) {
    Label zero_continue;
    __ ld(R28_mdx, method_(method_data));
    __ cmpdi(CCR0, R28_mdx, 0);
    __ beq(CCR0, zero_continue);
    __ addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
    __ bind(zero_continue);
  }

  if (native_call) {
    __ li(R14_bcp, 0); // Must initialize.
  } else {
    __ add(R14_bcp, in_bytes(ConstMethod::codes_offset()), Rconst_method);
  }

  // Resize parent frame.
  __ mflr(R12_scratch2);
  __ neg(parent_frame_resize, parent_frame_resize);
  __ resize_frame(parent_frame_resize, R11_scratch1);
  __ std(R12_scratch2, _abi(lr), R1_SP);

  __ addi(R26_monitor, R1_SP, - frame::ijava_state_size);
  __ addi(R15_esp, R26_monitor, - Interpreter::stackElementSize);
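  // R26_monitor and R15_esp now match the "After" picture above: the monitor
  // area starts directly below the istate, and the initial tos is one slot
  // below the monitor area.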

  // Store values.
  // R15_esp, R14_bcp, R26_monitor, R28_mdx are saved at java calls
  // in InterpreterMacroAssembler::call_from_interpreter.
  __ std(R19_method, _ijava_state_neg(method), R1_SP);
  __ std(R21_sender_SP, _ijava_state_neg(sender_sp), R1_SP);
  __ std(R27_constPoolCache, _ijava_state_neg(cpoolCache), R1_SP);
  __ std(R18_locals, _ijava_state_neg(locals), R1_SP);

  // Note: esp, bcp, monitor, mdx live in registers. Hence, the correct version can only
  // be found in the frame after save_interpreter_state is done. This is always true
  // for non-top frames. But when a signal occurs, dumping the top frame can go wrong,
  // because e.g. frame::interpreter_frame_bcp() will not access the correct value
  // (Enhanced Stack Trace).
  // The signal handler does not save the interpreter state into the frame.
  __ li(R0, 0);
#ifdef ASSERT
  // Fill remaining slots with constants.
  __ load_const_optimized(R11_scratch1, 0x5afe);
  __ load_const_optimized(R12_scratch2, 0xdead);
#endif
  // We have to initialize some frame slots for native calls (accessed by GC).
  if (native_call) {
    __ std(R26_monitor, _ijava_state_neg(monitors), R1_SP);
    __ std(R14_bcp, _ijava_state_neg(bcp), R1_SP);
    if (ProfileInterpreter) { __ std(R28_mdx, _ijava_state_neg(mdx), R1_SP); }
  }
#ifdef ASSERT
  else {
    __ std(R12_scratch2, _ijava_state_neg(monitors), R1_SP);
    __ std(R12_scratch2, _ijava_state_neg(bcp), R1_SP);
    __ std(R12_scratch2, _ijava_state_neg(mdx), R1_SP);
  }
  __ std(R11_scratch1, _ijava_state_neg(ijava_reserved), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(esp), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(lresult), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(fresult), R1_SP);
#endif
  __ subf(R12_scratch2, top_frame_size, R1_SP);
  __ std(R0, _ijava_state_neg(oop_tmp), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(top_frame_sp), R1_SP);

  // Push top frame.
  __ push_frame(top_frame_size, R11_scratch1);
}

// End of helpers

// ============================================================================
// Various method entries
//

// Empty method, generate a very fast return. We must skip this entry if
// someone's debugging, indicated by the flag
// "interp_mode" in the Thread obj.
// Note: empty methods are mostly generated for methods that only contain
// assertions, which are disabled in the "java opt build".
address TemplateInterpreterGenerator::generate_empty_entry(void) {
  if (!UseFastEmptyMethods) {
    NOT_PRODUCT(__ should_not_reach_here();)
    return Interpreter::entry_for_kind(Interpreter::zerolocals);
  }

  Label Lslow_path;
  const Register Rjvmti_mode = R11_scratch1;
  address entry = __ pc();

  __ lwz(Rjvmti_mode, thread_(interp_only_mode));
  __ cmpwi(CCR0, Rjvmti_mode, 0);
  __ bne(CCR0, Lslow_path); // jvmti_mode!=0

  // No one is debugging: simply return.
  // Pop c2i arguments (if any) off when we return.
#ifdef ASSERT
    __ ld(R9_ARG7, 0, R1_SP);
    __ ld(R10_ARG8, 0, R21_sender_SP);
    __ cmpd(CCR0, R9_ARG7, R10_ARG8);
    __ asm_assert_eq("backlink", 0x545);
#endif // ASSERT
  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.

  // And we're done.
  __ blr();

  __ bind(Lslow_path);
  __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
  __ flush();

  return entry;
}

// Support abs and sqrt like in compiler.
// For others we can use a normal (native) entry.

inline bool math_entry_available(AbstractInterpreter::MethodKind kind) {
  // Provide math entry with debugging on demand.
  // Note: Debugging changes which code will get executed:
  // Debugging or disabled InlineIntrinsics: java method will get interpreted and performs a native call.
  // Not debugging and enabled InlineIntrinsics: processor instruction will get used.
  // Result might differ slightly due to rounding etc.
  if (!InlineIntrinsics && (!FLAG_IS_ERGO(InlineIntrinsics))) return false; // Generate a vanilla entry.

  return ((kind==Interpreter::java_lang_math_sqrt && VM_Version::has_fsqrt()) ||
          (kind==Interpreter::java_lang_math_abs));
}
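// For example, with VM_Version::has_fsqrt() true, java.lang.Math.sqrt is
// handled by the single fsqrt instruction emitted in generate_math_entry
// below instead of a native call.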

address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  if (!math_entry_available(kind)) {
    NOT_PRODUCT(__ should_not_reach_here();)
    return Interpreter::entry_for_kind(Interpreter::zerolocals);
  }

  Label Lslow_path;
  const Register Rjvmti_mode = R11_scratch1;
  address entry = __ pc();

  // Provide math entry with debugging on demand.
  __ lwz(Rjvmti_mode, thread_(interp_only_mode));
  __ cmpwi(CCR0, Rjvmti_mode, 0);
  __ bne(CCR0, Lslow_path); // jvmti_mode!=0

  __ lfd(F1_RET, Interpreter::stackElementSize, R15_esp);

  // Pop c2i arguments (if any) off when we return.
#ifdef ASSERT
  __ ld(R9_ARG7, 0, R1_SP);
  __ ld(R10_ARG8, 0, R21_sender_SP);
  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
  __ asm_assert_eq("backlink", 0x545);
#endif // ASSERT
  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.

  if (kind == Interpreter::java_lang_math_sqrt) {
    __ fsqrt(F1_RET, F1_RET);
  } else if (kind == Interpreter::java_lang_math_abs) {
    __ fabs(F1_RET, F1_RET);
  } else {
    ShouldNotReachHere();
  }

  // And we're done.
  __ blr();

  // Provide slow path for JVMTI case.
  __ bind(Lslow_path);
  __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R12_scratch2);
  __ flush();

  return entry;
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
//
// On entry:
//   R19_method    - method
//   R16_thread    - JavaThread*
//   R15_esp       - intptr_t* sender tos
//
//   abstract stack (grows up)
//     [  IJava (caller of JNI callee)  ]  <-- ASP
//        ...
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {

  address entry = __ pc();

  const bool inc_counter = UseCompiler || CountCompiledCalls;

  // -----------------------------------------------------------------------------
  // Allocate a new frame that represents the native callee (i2n frame).
  // This is not a full-blown interpreter frame, but in particular, the
  // following registers are valid after this:
  // - R19_method
  // - R18_locals (points to start of arguments to native function)
  //
  //   abstract stack (grows up)
  //     [  IJava (caller of JNI callee)  ]  <-- ASP
  //        ...

  const Register signature_handler_fd = R11_scratch1;
  const Register pending_exception    = R0;
  const Register result_handler_addr  = R31;
  const Register native_method_fd     = R11_scratch1;
  const Register access_flags         = R22_tmp2;
  const Register active_handles       = R11_scratch1; // R26_monitor saved to state.
  const Register sync_state           = R12_scratch2;
  const Register sync_state_addr      = sync_state;   // Address is dead after use.
  const Register suspend_flags        = R11_scratch1;

  //=============================================================================
  // Allocate new frame and initialize interpreter state.

  Label exception_return;
  Label exception_return_sync_check;
  Label stack_overflow_return;

  // Generate new interpreter state and jump to stack_overflow_return in case of
  // a stack overflow.
  //generate_compute_interpreter_state(stack_overflow_return);

  Register size_of_parameters = R22_tmp2;

  generate_fixed_frame(true, size_of_parameters, noreg /* unused */);

  //=============================================================================
  // Increment invocation counter. On overflow, entry to JNI method
  // will be compiled.
  Label invocation_counter_overflow, continue_after_compile;
  if (inc_counter) {
    if (synchronized) {
      // Since at this point in the method invocation the exception handler
      // would try to exit the monitor of a synchronized method which hasn't
      // been entered yet, we set the thread local variable
      // _do_not_unlock_if_synchronized to true. If any exception was thrown by
      // the runtime, exception handling, i.e. unlock_if_synchronized_method, will
      // check this thread local flag.
      // This flag has two effects: it forces an unwind in the topmost
      // interpreter frame, and it prevents an unlock while doing so.
      __ li(R0, 1);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);

    __ BIND(continue_after_compile);
    // Reset the _do_not_unlock_if_synchronized flag.
    if (synchronized) {
      __ li(R0, 0);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
  }

  // access_flags = method->access_flags();
  // Load access flags.
  assert(access_flags->is_nonvolatile(),
         "access_flags must be in a non-volatile register");
  // Type check.
  assert(4 == sizeof(AccessFlags), "unexpected field size");
  __ lwz(access_flags, method_(access_flags));

  // We don't want to reload R19_method and access_flags after calls
  // to some helper functions.
  assert(R19_method->is_nonvolatile(),
         "R19_method must be a non-volatile register");

  // Check for synchronized methods. Must happen AFTER invocation counter
  // check, so method is not locked if counter overflows.

  if (synchronized) {
    lock_method(access_flags, R11_scratch1, R12_scratch2, true);

    // Update monitor in state.
    __ ld(R11_scratch1, 0, R1_SP);
    __ std(R26_monitor, _ijava_state_neg(monitors), R11_scratch1);
  }

  // jvmti/jvmpi support
  __ notify_method_entry();

  //=============================================================================
  // Get and call the signature handler.

  __ ld(signature_handler_fd, method_(signature_handler));
  Label call_signature_handler;

  __ cmpdi(CCR0, signature_handler_fd, 0);
  __ bne(CCR0, call_signature_handler);

  // Method has never been called. Either generate a specialized
  // handler or point to the slow one.
  //
  // Pass parameter 'false' to avoid exception check in call_VM.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R19_method, false);

  // Check for an exception while looking up the target method. If we
  // incurred one, bail.
  __ ld(pending_exception, thread_(pending_exception));
  __ cmpdi(CCR0, pending_exception, 0);
  __ bne(CCR0, exception_return_sync_check); // Has pending exception.

  // Reload signature handler, it may have been created/assigned in the meanwhile.
  __ ld(signature_handler_fd, method_(signature_handler));
  __ twi_0(signature_handler_fd); // Order wrt. load of klass mirror and entry point (isync is below).

  __ BIND(call_signature_handler);

  // Before we call the signature handler we push a new frame to
  // protect the interpreter frame volatile registers when we return
  // from jni but before we can get back to Java.

  // First set the frame anchor while the SP/FP registers are
  // convenient and the slow signature handler can use this same frame
  // anchor.

  // We have a TOP_IJAVA_FRAME here, which belongs to us.
  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);

  // Now the interpreter frame (and its call chain) have been
  // invalidated and flushed. We are now protected against eager
  // being enabled in native code. Even if it goes eager the
  // registers will be reloaded as clean and we will invalidate after
  // the call so no spurious flush should be possible.

  // Call signature handler and pass locals address.
  //
  // Our signature handlers copy required arguments to the C stack
  // (outgoing C args), R3_ARG1 to R10_ARG8, and FARG1 to FARG13.
  __ mr(R3_ARG1, R18_locals);
  __ ld(signature_handler_fd, 0, signature_handler_fd);

  __ call_stub(signature_handler_fd);

  // Remove the register parameter varargs slots we allocated in
  // compute_interpreter_state. SP+16 ends up pointing to the ABI
  // outgoing argument area.
  //
  // Not needed on PPC64.
  //__ add(SP, SP, Argument::n_register_parameters*BytesPerWord);

  assert(result_handler_addr->is_nonvolatile(), "result_handler_addr must be in a non-volatile register");
  // Save across call to native method.
  __ mr(result_handler_addr, R3_RET);

  __ isync(); // Acquire signature handler before trying to fetch the native entry point and klass mirror.

  // Set up fixed parameters and call the native method.
  // If the method is static, get mirror into R4_ARG2.
  {
    Label method_is_not_static;
    // Access_flags is non-volatile and still, no need to restore it.

    // Restore access flags.
    __ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
    __ bfalse(CCR0, method_is_not_static);

    // constants = method->constants();
    __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
    __ ld(R11_scratch1, in_bytes(ConstMethod::constants_offset()), R11_scratch1);
    // pool_holder = method->constants()->pool_holder();
    __ ld(R11_scratch1/*pool_holder*/, ConstantPool::pool_holder_offset_in_bytes(),
          R11_scratch1/*constants*/);

    const int mirror_offset = in_bytes(Klass::java_mirror_offset());

    // mirror = pool_holder->klass_part()->java_mirror();
    __ ld(R0/*mirror*/, mirror_offset, R11_scratch1/*pool_holder*/);
    // state->_native_mirror = mirror;

    __ ld(R11_scratch1, 0, R1_SP);
    __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
    // R4_ARG2 = &state->_oop_temp;
    __ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp));
    __ BIND(method_is_not_static);
  }

  // At this point, arguments have been copied off the stack into
  // their JNI positions. Oops are boxed in-place on the stack, with
  // handles copied to arguments. The result handler address is in a
  // register.

  // Pass JNIEnv address as first parameter.
  __ addir(R3_ARG1, thread_(jni_environment));

  // Load the native_method entry before we change the thread state.
  __ ld(native_method_fd, method_(native_function));

  //=============================================================================
  // Transition from _thread_in_Java to _thread_in_native. As soon as
  // we make this change the safepoint code needs to be certain that
  // the last Java frame we established is good. The pc in that frame
  // just needs to be near here not an actual return address.

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0, _thread_in_native);
  __ release();

  // TODO PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
  __ stw(R0, thread_(thread_state));

  if (UseMembar) {
    __ fence();
  }

  //=============================================================================
  // Call the native method. Argument registers must not have been
  // overwritten since "__ call_stub(signature_handler);" (except for
  // ARG1 and ARG2 for static methods).
  __ call_c(native_method_fd);

  __ li(R0, 0);
  __ ld(R11_scratch1, 0, R1_SP);
  __ std(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
  __ stfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
  __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1); // reset

  // Note: C++ interpreter needs the following here:
  // The frame_manager_lr field, which we use for setting the last
  // java frame, gets overwritten by the signature handler. Restore
  // it now.
  //__ get_PC_trash_LR(R11_scratch1);
  //__ std(R11_scratch1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);

  // Because of GC R19_method may no longer be valid.

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after
  // blocking.

  //=============================================================================
  // Switch thread to "native transition" state before reading the
  // synchronization state. This additional state is necessary
  // because reading and testing the synchronization state is not
  // atomic w.r.t. GC, as this scenario demonstrates: Java thread A,
  // in _thread_in_native state, loads _not_synchronized and is
  // preempted. VM thread changes sync state to synchronizing and
  // suspends threads for GC. Thread A is resumed to finish this
  // native method, but doesn't block here since it didn't see any
  // synchronization in progress, and escapes.

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0/*thread_state*/, _thread_in_native_trans);
  __ release();
  __ stw(R0/*thread_state*/, thread_(thread_state));
  if (UseMembar) {
    __ fence();
  }
  // Write serialization page so that the VM thread can do a pseudo remote
  // membar. We use the current thread pointer to calculate a thread
  // specific offset to write to within the page. This minimizes bus
  // traffic due to cache line collision.
  else {
    __ serialize_memory(R16_thread, R11_scratch1, R12_scratch2);
  }

  // Now before we return to java we must look for a current safepoint
  // (a new safepoint can not start since we entered native_trans).
  // We must check here because a current safepoint could be modifying
  // the callers registers right this moment.

  // Acquire isn't strictly necessary here because of the fence, but
  // sync_state is declared to be volatile, so we do it anyway
  // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path).
  int sync_state_offs = __ load_const_optimized(sync_state_addr, SafepointSynchronize::address_of_state(), /*temp*/R0, true);

  // TODO PPC port assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
  __ lwz(sync_state, sync_state_offs, sync_state_addr);

  // TODO PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
  __ lwz(suspend_flags, thread_(suspend_flags));

  Label sync_check_done;
  Label do_safepoint;
  // No synchronization in progress nor yet synchronized.
  __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
  // Not suspended.
  __ cmpwi(CCR1, suspend_flags, 0);

  __ bne(CCR0, do_safepoint);
  __ beq(CCR1, sync_check_done);
  __ bind(do_safepoint);
  __ isync();
  // Block. We do the call directly and leave the current
  // last_Java_frame setup undisturbed. We must save any possible
  // native result across the call. No oop is present.

  __ mr(R3_ARG1, R16_thread);
  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
            relocInfo::none);

  __ bind(sync_check_done);

  //=============================================================================
  // <<<<<< Back in Interpreter Frame >>>>>

  // We are in thread_in_native_trans here and back in the normal
  // interpreter frame. We don't have to do anything special about
  // safepoints and we can switch to Java mode anytime we are ready.

  // Note: frame::interpreter_frame_result has a dependency on how the
  // method result is saved across the call to post_method_exit. For
  // native methods it assumes that the non-FPU/non-void result is
  // saved in _native_lresult and a FPU result in _native_fresult. If
  // this changes then the interpreter_frame_result implementation
  // will need to be updated too.

  // On PPC64, we have stored the result directly after the native call.

  //=============================================================================
  // Back in Java

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0/*thread_state*/, _thread_in_Java);
  __ release();
  __ stw(R0/*thread_state*/, thread_(thread_state));
  if (UseMembar) {
    __ fence();
  }

  __ reset_last_Java_frame();

  // Jvmdi/jvmpi support. Whether we've got an exception pending or
  // not, and whether unlocking throws an exception or not, we notify
  // on native method exit. If we do have an exception, we'll end up
  // in the caller's context to handle it, so if we don't do the
  // notify here, we'll drop it on the floor.
  __ notify_method_exit(true/*native method*/,
                        ilgl /*illegal state (not used for native methods)*/,
                        InterpreterMacroAssembler::NotifyJVMTI,
                        false /*check_exceptions*/);

  //=============================================================================
  // Handle exceptions

  if (synchronized) {
    // Don't check for exceptions since we're still in the i2n frame. Do that
    // manually afterwards.
    unlock_method(false);
  }

  // Reset active handles after returning from native.
  // thread->active_handles()->clear();
  __ ld(active_handles, thread_(active_handles));
  // TODO PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
  __ li(R0, 0);
  __ stw(R0, JNIHandleBlock::top_offset_in_bytes(), active_handles);

  Label exception_return_sync_check_already_unlocked;
  __ ld(R0/*pending_exception*/, thread_(pending_exception));
  __ cmpdi(CCR0, R0/*pending_exception*/, 0);
  __ bne(CCR0, exception_return_sync_check_already_unlocked);

  //-----------------------------------------------------------------------------
  // No exception pending.

  // Move native method result back into proper registers and return.
  // Invoke result handler (may unbox/promote).
  __ ld(R11_scratch1, 0, R1_SP);
  __ ld(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
  __ lfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
  __ call_stub(result_handler_addr);

  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);

  // Must use the return pc which was loaded from the caller's frame
  // as the VM uses return-pc-patching for deoptimization.
  __ mtlr(R0);
  __ blr();

  //-----------------------------------------------------------------------------
  // An exception is pending. We call into the runtime only if the
  // caller was not interpreted. If it was interpreted the
  // interpreter will do the correct thing. If it isn't interpreted
  // (call stub/compiled code) we will change our return and continue.

  __ BIND(exception_return_sync_check);

  if (synchronized) {
    // Don't check for exceptions since we're still in the i2n frame. Do that
    // manually afterwards.
    unlock_method(false);
  }
  __ BIND(exception_return_sync_check_already_unlocked);

  const Register return_pc = R31;

  __ ld(return_pc, 0, R1_SP);
  __ ld(return_pc, _abi(lr), return_pc);

  // Get the address of the exception handler.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                  R16_thread,
                  return_pc /* return pc */);
  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, noreg, R11_scratch1, R12_scratch2);

  // Load the PC of the exception handler into LR.
  __ mtlr(R3_RET);

  // Load exception into R3_ARG1 and clear pending exception in thread.
  __ ld(R3_ARG1/*exception*/, thread_(pending_exception));
  __ li(R4_ARG2, 0);
  __ std(R4_ARG2, thread_(pending_exception));

  // Load the original return pc into R4_ARG2.
  __ mr(R4_ARG2/*issuing_pc*/, return_pc);

  // Return to exception handler.
  __ blr();

  //=============================================================================
  // Counter overflow.

  if (inc_counter) {
    // Handle invocation counter overflow.
    __ bind(invocation_counter_overflow);

    generate_counter_overflow(continue_after_compile);
  }

  return entry;
}
  1159 // Generic interpreted method entry to (asm) interpreter.
  1160 //
  1161 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  1162   bool inc_counter = UseCompiler || CountCompiledCalls;
  1163   address entry = __ pc();
  1164   // Generate the code to allocate the interpreter stack frame.
  1165   Register Rsize_of_parameters = R4_ARG2, // Written by generate_fixed_frame.
  1166            Rsize_of_locals     = R5_ARG3; // Written by generate_fixed_frame.
  1168   generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals);
  1170 #ifdef FAST_DISPATCH
  1171   __ unimplemented("Fast dispatch in generate_normal_entry");
  1172 #if 0
  1173   __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
  1174   // Set bytecode dispatch table base.
  1175 #endif
  1176 #endif
  1178   // --------------------------------------------------------------------------
  1179   // Zero out non-parameter locals.
  1180   // Note: *Always* zero out non-parameter locals as Sparc does. It's not
  1181   // worth to ask the flag, just do it.
  1182   Register Rslot_addr = R6_ARG4,
  1183            Rnum       = R7_ARG5;
  1184   Label Lno_locals, Lzero_loop;
  1186   // Set up the zeroing loop.
  1187   __ subf(Rnum, Rsize_of_parameters, Rsize_of_locals);
  1188   __ subf(Rslot_addr, Rsize_of_parameters, R18_locals);
  1189   __ srdi_(Rnum, Rnum, Interpreter::logStackElementSize);
  1190   __ beq(CCR0, Lno_locals);
  1191   __ li(R0, 0);
  1192   __ mtctr(Rnum);
  1194   // The zero locals loop.
  1195   __ bind(Lzero_loop);
  1196   __ std(R0, 0, Rslot_addr);
  1197   __ addi(Rslot_addr, Rslot_addr, -Interpreter::stackElementSize);
  1198   __ bdnz(Lzero_loop);
  1200   __ bind(Lno_locals);
  1202   // --------------------------------------------------------------------------
  1203   // Counter increment and overflow check.
  1204   Label invocation_counter_overflow,
  1205         profile_method,
  1206         profile_method_continue;
  1207   if (inc_counter || ProfileInterpreter) {
  1209     Register Rdo_not_unlock_if_synchronized_addr = R11_scratch1;
  1210     if (synchronized) {
  1211       // Since at this point in the method invocation the exception handler
  1212       // would try to exit the monitor of synchronized methods which hasn't
  1213       // been entered yet, we set the thread local variable
  1214       // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  1215       // runtime, exception handling i.e. unlock_if_synchronized_method will
  1216       // check this thread local flag.
  1217       // This flag has two effects, one is to force an unwind in the topmost
  1218       // interpreter frame and not perform an unlock while doing so.
  1219       __ li(R0, 1);
  1220       __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
  1222     // Increment invocation counter and check for overflow.
  1223     if (inc_counter) {
  1224       generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
  1227     __ bind(profile_method_continue);
  1229     // Reset the _do_not_unlock_if_synchronized flag.
  1230     if (synchronized) {
  1231       __ li(R0, 0);
  1232       __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
  1236   // --------------------------------------------------------------------------
  1237   // Locking of synchronized methods. Must happen AFTER invocation_counter
  1238   // check and stack overflow check, so method is not locked if overflows.
  1239   if (synchronized) {
  1240     lock_method(R3_ARG1, R4_ARG2, R5_ARG3);
  1242 #ifdef ASSERT
  1243   else {
  1244     Label Lok;
  1245     __ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method);
  1246     __ andi_(R0, R0, JVM_ACC_SYNCHRONIZED);
  1247     __ asm_assert_eq("method needs synchronization", 0x8521);
  1248     __ bind(Lok);
  1250 #endif // ASSERT
  1252   __ verify_thread();
  1254   // --------------------------------------------------------------------------
  1255   // JVMTI support
  1256   __ notify_method_entry();
  1258   // --------------------------------------------------------------------------
  1259   // Start executing instructions.
  1260   __ dispatch_next(vtos);
  1262   // --------------------------------------------------------------------------
  1263   // Out of line counter overflow and MDO creation code.
  1264   if (ProfileInterpreter) {
  1265     // We have decided to profile this method in the interpreter.
  1266     __ bind(profile_method);
  1267     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
  1268     __ set_method_data_pointer_for_bcp();
   1269     __ b(profile_method_continue);
   1270   }
  1272   if (inc_counter) {
  1273     // Handle invocation counter overflow.
  1274     __ bind(invocation_counter_overflow);
   1275     generate_counter_overflow(profile_method_continue);
   1276   }
   1277   return entry;
   1278 }
  1280 // =============================================================================
  1281 // Entry points
  1283 address AbstractInterpreterGenerator::generate_method_entry(
  1284                                         AbstractInterpreter::MethodKind kind) {
  1285   // Determine code generation flags.
  1286   bool synchronized = false;
  1287   address entry_point = NULL;
  1289   switch (kind) {
  1290   case Interpreter::zerolocals             :                                                                             break;
  1291   case Interpreter::zerolocals_synchronized: synchronized = true;                                                        break;
  1292   case Interpreter::native                 : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false); break;
  1293   case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(true);  break;
  1294   case Interpreter::empty                  : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry();       break;
  1295   case Interpreter::accessor               : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry();    break;
  1296   case Interpreter::abstract               : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();    break;
  1298   case Interpreter::java_lang_math_sin     : // fall thru
  1299   case Interpreter::java_lang_math_cos     : // fall thru
  1300   case Interpreter::java_lang_math_tan     : // fall thru
  1301   case Interpreter::java_lang_math_abs     : // fall thru
  1302   case Interpreter::java_lang_math_log     : // fall thru
  1303   case Interpreter::java_lang_math_log10   : // fall thru
  1304   case Interpreter::java_lang_math_sqrt    : // fall thru
  1305   case Interpreter::java_lang_math_pow     : // fall thru
  1306   case Interpreter::java_lang_math_exp     : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);    break;
  1307   case Interpreter::java_lang_ref_reference_get
  1308                                            : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
   1309   default                                  : ShouldNotReachHere();                                                       break;
   1310   }
  1312   if (entry_point) {
   1313     return entry_point;
   1314   }
   1316   return ((InterpreterGenerator*) this)->generate_normal_entry(synchronized);
   1317 }
  1319 // These should never be compiled since the interpreter will prefer
  1320 // the compiled version to the intrinsic version.
  1321 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
   1322   return !math_entry_available(method_kind(m));
   1323 }
  1325 // How much stack a method activation needs in stack slots.
  1326 // We must calc this exactly like in generate_fixed_frame.
  1327 // Note: This returns the conservative size assuming maximum alignment.
  1328 int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  1329   const int max_alignment_size = 2;
  1330   const int abi_scratch = frame::abi_reg_args_size;
  1331   return method->max_locals() + method->max_stack() +
   1332          frame::interpreter_frame_monitor_size() + max_alignment_size + abi_scratch;
   1333 }
  1335 // Returns number of stackElementWords needed for the interpreter frame with the
  1336 // given sections.
  1337 // This overestimates the stack by one slot in case of alignments.
  1338 int AbstractInterpreter::size_activation(int max_stack,
  1339                                          int temps,
  1340                                          int extra_args,
  1341                                          int monitors,
  1342                                          int callee_params,
  1343                                          int callee_locals,
  1344                                          bool is_top_frame) {
  1345   // Note: This calculation must exactly parallel the frame setup
  1346   // in AbstractInterpreterGenerator::generate_method_entry.
  1347   assert(Interpreter::stackElementWords == 1, "sanity");
  1348   const int max_alignment_space = StackAlignmentInBytes / Interpreter::stackElementSize;
  1349   const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
  1350                                          (frame::abi_minframe_size / Interpreter::stackElementSize);
  1351   const int size =
  1352     max_stack                                                +
  1353     (callee_locals - callee_params)                          +
  1354     monitors * frame::interpreter_frame_monitor_size()       +
  1355     max_alignment_space                                      +
  1356     abi_scratch                                              +
  1357     frame::ijava_state_size / Interpreter::stackElementSize;
   1359   // Fixed size of an interpreter frame, aligned to 16 bytes (an even slot count).
   1360   return (size & -2);
   1361 }
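// Worked example (illustrative values, not taken from the source): with
// stackElementSize == 8 and StackAlignmentInBytes == 16, max_alignment_space
// is 16/8 == 2 slots; 'size & -2' then rounds the slot count down to an even
// number, so the frame size remains a multiple of 16 bytes.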
   1363 // Fills a skeletal interpreter frame generated during deoptimizations.
  1364 //
  1365 // Parameters:
  1366 //
  1367 // interpreter_frame != NULL:
  1368 //   set up the method, locals, and monitors.
  1369 //   The frame interpreter_frame, if not NULL, is guaranteed to be the
  1370 //   right size, as determined by a previous call to this method.
   1371 //   It is also guaranteed to be walkable, even though it is in a skeletal state.
  1372 //
  1373 // is_top_frame == true:
  1374 //   We're processing the *oldest* interpreter frame!
  1375 //
   1376 // popframe_extra_args:
   1377 //   If this is != 0 we are returning to a deoptimized frame by popping
   1378 //   off the callee frame. We want to re-execute the call that called the
   1379 //   callee (interpreted), but since returning to the interpreter would pop
   1380 //   the arguments off, we advance the esp by popframe_extra_args dummy slots.
   1381 //   Popping those off re-establishes the stack layout as it was before the call.
  1382 //
  1383 void AbstractInterpreter::layout_activation(Method* method,
  1384                                             int tempcount,
  1385                                             int popframe_extra_args,
  1386                                             int moncount,
  1387                                             int caller_actual_parameters,
  1388                                             int callee_param_count,
  1389                                             int callee_locals_count,
  1390                                             frame* caller,
  1391                                             frame* interpreter_frame,
  1392                                             bool is_top_frame,
  1393                                             bool is_bottom_frame) {
  1395   const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
  1396                                          (frame::abi_minframe_size / Interpreter::stackElementSize);
  1398   intptr_t* locals_base  = (caller->is_interpreted_frame()) ?
  1399     caller->interpreter_frame_esp() + caller_actual_parameters :
  1400     caller->sp() + method->max_locals() - 1 + (frame::abi_minframe_size / Interpreter::stackElementSize) ;
  1402   intptr_t* monitor_base = caller->sp() - frame::ijava_state_size / Interpreter::stackElementSize ;
  1403   intptr_t* monitor      = monitor_base - (moncount * frame::interpreter_frame_monitor_size());
  1404   intptr_t* esp_base     = monitor - 1;
  1405   intptr_t* esp          = esp_base - tempcount - popframe_extra_args;
  1406   intptr_t* sp           = (intptr_t *) (((intptr_t) (esp_base - callee_locals_count + callee_param_count - method->max_stack()- abi_scratch)) & -StackAlignmentInBytes);
  1407   intptr_t* sender_sp    = caller->sp() + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
  1408   intptr_t* top_frame_sp = is_top_frame ? sp : sp + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
  1410   interpreter_frame->interpreter_frame_set_method(method);
  1411   interpreter_frame->interpreter_frame_set_locals(locals_base);
  1412   interpreter_frame->interpreter_frame_set_cpcache(method->constants()->cache());
  1413   interpreter_frame->interpreter_frame_set_esp(esp);
  1414   interpreter_frame->interpreter_frame_set_monitor_end((BasicObjectLock *)monitor);
  1415   interpreter_frame->interpreter_frame_set_top_frame_sp(top_frame_sp);
  1416   if (!is_bottom_frame) {
   1417     interpreter_frame->interpreter_frame_set_sender_sp(sender_sp);
   1418   }
   1419 }
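// Illustrative picture of the skeletal frame filled in above (stack grows
// towards smaller addresses; distances are in stack slots):
//
//   caller->sp()
//      ijava_state                  (frame::ijava_state_size)
//   monitor_base
//      moncount monitors            (frame::interpreter_frame_monitor_size() each)
//   monitor
//   esp_base = monitor - 1
//      tempcount + popframe_extra_args expression stack slots
//   esp
//      room for max_stack, the callee locals overlap and abi_scratch
//   sp (rounded down to a StackAlignmentInBytes boundary)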
  1421 // =============================================================================
  1422 // Exceptions
  1424 void TemplateInterpreterGenerator::generate_throw_exception() {
  1425   Register Rexception    = R17_tos,
  1426            Rcontinuation = R3_RET;
  1428   // --------------------------------------------------------------------------
   1429   // Entry point if a method returns with a pending exception (rethrow).
   1430   Interpreter::_rethrow_exception_entry = __ pc();
   1431   {
  1432     __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
  1433     __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
  1434     __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
  1436     // Compiled code destroys templateTableBase, reload.
   1437     __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
   1438   }
   1440   // Entry point if an interpreted method throws an exception (throw).
   1441   Interpreter::_throw_exception_entry = __ pc();
   1442   {
  1443     __ mr(Rexception, R3_RET);
  1445     __ verify_thread();
  1446     __ verify_oop(Rexception);
  1448     // Expression stack must be empty before entering the VM in case of an exception.
  1449     __ empty_expression_stack();
  1450     // Find exception handler address and preserve exception oop.
  1451     // Call C routine to find handler and jump to it.
  1452     __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Rexception);
  1453     __ mtctr(Rcontinuation);
  1454     // Push exception for exception handler bytecodes.
  1455     __ push_ptr(Rexception);
   1457     // Jump to exception handler (may be the remove-activation entry!).
   1458     __ bctr();
   1459   }
  1461   // If the exception is not handled in the current frame the frame is
  1462   // removed and the exception is rethrown (i.e. exception
  1463   // continuation is _rethrow_exception).
  1464   //
   1465   // Note: At this point the bci is still the bci of the instruction
  1466   // which caused the exception and the expression stack is
  1467   // empty. Thus, for any VM calls at this point, GC will find a legal
  1468   // oop map (with empty expression stack).
  1470   // In current activation
  1471   // tos: exception
  1472   // bcp: exception bcp
  1474   // --------------------------------------------------------------------------
  1475   // JVMTI PopFrame support
   1477   Interpreter::_remove_activation_preserving_args_entry = __ pc();
   1478   {
  1479     // Set the popframe_processing bit in popframe_condition indicating that we are
  1480     // currently handling popframe, so that call_VMs that may happen later do not
  1481     // trigger new popframe handling cycles.
  1482     __ lwz(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
  1483     __ ori(R11_scratch1, R11_scratch1, JavaThread::popframe_processing_bit);
  1484     __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
  1486     // Empty the expression stack, as in normal exception handling.
  1487     __ empty_expression_stack();
  1488     __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
  1490     // Check to see whether we are returning to a deoptimized frame.
  1491     // (The PopFrame call ensures that the caller of the popped frame is
  1492     // either interpreted or compiled and deoptimizes it if compiled.)
  1493     // Note that we don't compare the return PC against the
  1494     // deoptimization blob's unpack entry because of the presence of
  1495     // adapter frames in C2.
  1496     Label Lcaller_not_deoptimized;
  1497     Register return_pc = R3_ARG1;
  1498     __ ld(return_pc, 0, R1_SP);
  1499     __ ld(return_pc, _abi(lr), return_pc);
  1500     __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), return_pc);
  1501     __ cmpdi(CCR0, R3_RET, 0);
  1502     __ bne(CCR0, Lcaller_not_deoptimized);
  1504     // The deoptimized case.
  1505     // In this case, we can't call dispatch_next() after the frame is
  1506     // popped, but instead must save the incoming arguments and restore
  1507     // them after deoptimization has occurred.
  1508     __ ld(R4_ARG2, in_bytes(Method::const_offset()), R19_method);
  1509     __ lhz(R4_ARG2 /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), R4_ARG2);
  1510     __ slwi(R4_ARG2, R4_ARG2, Interpreter::logStackElementSize);
  1511     __ addi(R5_ARG3, R18_locals, Interpreter::stackElementSize);
  1512     __ subf(R5_ARG3, R4_ARG2, R5_ARG3);
  1513     // Save these arguments.
  1514     __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R16_thread, R4_ARG2, R5_ARG3);
  1516     // Inform deoptimization that it is responsible for restoring these arguments.
  1517     __ load_const_optimized(R11_scratch1, JavaThread::popframe_force_deopt_reexecution_bit);
  1518     __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
  1520     // Return from the current method into the deoptimization blob. Will eventually
   1521     // end up in the deoptimization interpreter entry; deoptimization has prepared
   1522     // everything so that we will re-execute the call that called us.
  1523     __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*reload return_pc*/ return_pc, R11_scratch1, R12_scratch2);
  1524     __ mtlr(return_pc);
  1525     __ blr();
  1527     // The non-deoptimized case.
  1528     __ bind(Lcaller_not_deoptimized);
  1530     // Clear the popframe condition flag.
  1531     __ li(R0, 0);
  1532     __ stw(R0, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
  1534     // Get out of the current method and re-execute the call that called us.
  1535     __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
  1536     __ restore_interpreter_state(R11_scratch1);
  1537     __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
  1538     __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
  1539     if (ProfileInterpreter) {
   1540       __ set_method_data_pointer_for_bcp();
   1541     }
  1542 #if INCLUDE_JVMTI
  1543     Label L_done;
  1545     __ lbz(R11_scratch1, 0, R14_bcp);
  1546     __ cmpwi(CCR0, R11_scratch1, Bytecodes::_invokestatic);
  1547     __ bne(CCR0, L_done);
  1549     // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
  1550     // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
  1551     __ ld(R4_ARG2, 0, R18_locals);
  1552     __ call_VM(R11_scratch1, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null),
  1553                R4_ARG2, R19_method, R14_bcp);
  1555     __ cmpdi(CCR0, R11_scratch1, 0);
  1556     __ beq(CCR0, L_done);
  1558     __ std(R11_scratch1, wordSize, R15_esp);
  1559     __ bind(L_done);
  1560 #endif // INCLUDE_JVMTI
   1561     __ dispatch_next(vtos);
   1562   }
  1563   // end of JVMTI PopFrame support
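  // Recap of the PopFrame protocol above (a sketch, not normative): the
  // popframe_processing bit keeps later call_VMs from starting a nested
  // PopFrame cycle; for a deoptimized caller the outgoing arguments are saved
  // via Deoptimization::popframe_preserve_args and restored under
  // popframe_force_deopt_reexecution_bit, while a non-deoptimized caller just
  // clears the condition flag and re-enters the interpreter via dispatch_next.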
  1565   // --------------------------------------------------------------------------
  1566   // Remove activation exception entry.
  1567   // This is jumped to if an interpreted method can't handle an exception itself
  1568   // (we come from the throw/rethrow exception entry above). We're going to call
  1569   // into the VM to find the exception handler in the caller, pop the current
  1570   // frame and return the handler we calculated.
   1571   Interpreter::_remove_activation_entry = __ pc();
   1572   {
  1573     __ pop_ptr(Rexception);
  1574     __ verify_thread();
  1575     __ verify_oop(Rexception);
  1576     __ std(Rexception, in_bytes(JavaThread::vm_result_offset()), R16_thread);
  1578     __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, true);
  1579     __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI, false);
  1581     __ get_vm_result(Rexception);
  1583     // We are done with this activation frame; find out where to go next.
  1584     // The continuation point will be an exception handler, which expects
  1585     // the following registers set up:
  1586     //
  1587     // RET:  exception oop
  1588     // ARG2: Issuing PC (see generate_exception_blob()), only used if the caller is compiled.
  1590     Register return_pc = R31; // Needs to survive the runtime call.
  1591     __ ld(return_pc, 0, R1_SP);
  1592     __ ld(return_pc, _abi(lr), return_pc);
  1593     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, return_pc);
  1595     // Remove the current activation.
  1596     __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
  1598     __ mr(R4_ARG2, return_pc);
  1599     __ mtlr(R3_RET);
  1600     __ mr(R3_RET, Rexception);
   1601     __ blr();
   1602   }
   1603 }
  1605 // JVMTI ForceEarlyReturn support.
  1606 // Returns "in the middle" of a method with a "fake" return value.
  1607 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  1609   Register Rscratch1 = R11_scratch1,
  1610            Rscratch2 = R12_scratch2;
  1612   address entry = __ pc();
  1613   __ empty_expression_stack();
  1615   __ load_earlyret_value(state, Rscratch1);
  1617   __ ld(Rscratch1, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
  1618   // Clear the earlyret state.
  1619   __ li(R0, 0);
  1620   __ stw(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rscratch1);
  1622   __ remove_activation(state, false, false);
  1623   // Copied from TemplateTable::_return.
  1624   // Restoration of lr done by remove_activation.
  1625   switch (state) {
  1626     case ltos:
  1627     case btos:
  1628     case ctos:
  1629     case stos:
  1630     case atos:
  1631     case itos: __ mr(R3_RET, R17_tos); break;
  1632     case ftos:
  1633     case dtos: __ fmr(F1_RET, F15_ftos); break;
  1634     case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
  1635                // to get visible before the reference to the object gets stored anywhere.
  1636                __ membar(Assembler::StoreStore); break;
   1637     default  : ShouldNotReachHere();
   1638   }
  1639   __ blr();
  1641   return entry;
  1642 } // end of ForceEarlyReturn support
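// Note on the StoreStore barrier in the vtos case above (a sketch of the
// reasoning, not additional semantics): if the method is a constructor,
// stores to final fields (and volatile fields on PPC64) must drain before a
// later store publishes the new object's reference; otherwise another thread
// could read the reference and still observe default field values.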
  1644 //-----------------------------------------------------------------------------
  1645 // Helper for vtos entry point generation
  1647 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
  1648                                                          address& bep,
  1649                                                          address& cep,
  1650                                                          address& sep,
  1651                                                          address& aep,
  1652                                                          address& iep,
  1653                                                          address& lep,
  1654                                                          address& fep,
  1655                                                          address& dep,
  1656                                                          address& vep) {
  1657   assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  1658   Label L;
  1660   aep = __ pc();  __ push_ptr();  __ b(L);
  1661   fep = __ pc();  __ push_f();    __ b(L);
  1662   dep = __ pc();  __ push_d();    __ b(L);
  1663   lep = __ pc();  __ push_l();    __ b(L);
  1664   __ align(32, 12, 24); // align L
  1665   bep = cep = sep =
  1666   iep = __ pc();  __ push_i();
  1667   vep = __ pc();
  1668   __ bind(L);
   1669   generate_and_dispatch(t);
   1670 }
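// How the entries above are used (sketch): a template with tos_in() == vtos
// may be reached with the previous template's result still cached in a tos
// register. Each type-specific entry (aep, fep, dep, lep, iep) first pushes
// that cached value onto the expression stack and then reaches the shared
// label L; bep, cep and sep alias iep because byte/char/short values occupy
// a full int slot on the expression stack.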
  1672 //-----------------------------------------------------------------------------
  1673 // Generation of individual instructions
  1675 // helpers for generate_and_dispatch
  1677 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  1678   : TemplateInterpreterGenerator(code) {
   1679   generate_all(); // Down here so it can be "virtual".
   1680 }
  1682 //-----------------------------------------------------------------------------
  1684 // Non-product code
  1685 #ifndef PRODUCT
  1686 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  1687   //__ flush_bundle();
  1688   address entry = __ pc();
  1690   const char *bname = NULL;
  1691   uint tsize = 0;
  1692   switch(state) {
  1693   case ftos:
  1694     bname = "trace_code_ftos {";
  1695     tsize = 2;
  1696     break;
  1697   case btos:
  1698     bname = "trace_code_btos {";
  1699     tsize = 2;
  1700     break;
  1701   case ctos:
  1702     bname = "trace_code_ctos {";
  1703     tsize = 2;
  1704     break;
  1705   case stos:
  1706     bname = "trace_code_stos {";
  1707     tsize = 2;
  1708     break;
  1709   case itos:
  1710     bname = "trace_code_itos {";
  1711     tsize = 2;
  1712     break;
  1713   case ltos:
  1714     bname = "trace_code_ltos {";
  1715     tsize = 3;
  1716     break;
  1717   case atos:
  1718     bname = "trace_code_atos {";
  1719     tsize = 2;
  1720     break;
  1721   case vtos:
   1722     // Note: In case of vtos, the topmost stack value could be an int or a double.
   1723     // In case of a double (2 slots) we won't see the 2nd stack value.
   1724     // Maybe we should simply print the topmost 3 stack slots to cope with the problem.
  1725     bname = "trace_code_vtos {";
  1726     tsize = 2;
  1728     break;
  1729   case dtos:
  1730     bname = "trace_code_dtos {";
  1731     tsize = 3;
  1732     break;
  1733   default:
   1734     ShouldNotReachHere();
   1735   }
  1736   BLOCK_COMMENT(bname);
  1738   // Support short-cut for TraceBytecodesAt.
   1739   // Don't call into the VM if we don't want to trace, to speed things up.
  1740   Label Lskip_vm_call;
  1741   if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
  1742     int offs1 = __ load_const_optimized(R11_scratch1, (address) &TraceBytecodesAt, R0, true);
  1743     int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
  1744     __ ld(R11_scratch1, offs1, R11_scratch1);
  1745     __ lwa(R12_scratch2, offs2, R12_scratch2);
  1746     __ cmpd(CCR0, R12_scratch2, R11_scratch1);
   1747     __ blt(CCR0, Lskip_vm_call);
   1748   }
  1750   __ push(state);
  1751   // Load 2 topmost expression stack values.
  1752   __ ld(R6_ARG4, tsize*Interpreter::stackElementSize, R15_esp);
  1753   __ ld(R5_ARG3, Interpreter::stackElementSize, R15_esp);
  1754   __ mflr(R31);
  1755   __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false);
  1756   __ mtlr(R31);
  1757   __ pop(state);
  1759   if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
   1760     __ bind(Lskip_vm_call);
   1761   }
  1762   __ blr();
  1763   BLOCK_COMMENT("} trace_code");
   1764   return entry;
   1765 }
  1767 void TemplateInterpreterGenerator::count_bytecode() {
  1768   int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeCounter::_counter_value, R12_scratch2, true);
  1769   __ lwz(R12_scratch2, offs, R11_scratch1);
  1770   __ addi(R12_scratch2, R12_scratch2, 1);
   1771   __ stw(R12_scratch2, offs, R11_scratch1);
   1772 }
  1774 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  1775   int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeHistogram::_counters[t->bytecode()], R12_scratch2, true);
  1776   __ lwz(R12_scratch2, offs, R11_scratch1);
  1777   __ addi(R12_scratch2, R12_scratch2, 1);
   1778   __ stw(R12_scratch2, offs, R11_scratch1);
   1779 }
  1781 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  1782   const Register addr = R11_scratch1,
  1783                  tmp  = R12_scratch2;
  1784   // Get index, shift out old bytecode, bring in new bytecode, and store it.
  1785   // _index = (_index >> log2_number_of_codes) |
  1786   //          (bytecode << log2_number_of_codes);
  1787   int offs1 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_index, tmp, true);
  1788   __ lwz(tmp, offs1, addr);
  1789   __ srwi(tmp, tmp, BytecodePairHistogram::log2_number_of_codes);
  1790   __ ori(tmp, tmp, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  1791   __ stw(tmp, offs1, addr);
  1793   // Bump bucket contents.
  1794   // _counters[_index] ++;
  1795   int offs2 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_counters, R0, true);
  1796   __ sldi(tmp, tmp, LogBytesPerInt);
  1797   __ add(addr, tmp, addr);
  1798   __ lwz(tmp, offs2, addr);
  1799   __ addi(tmp, tmp, 1);
   1800   __ stw(tmp, offs2, addr);
   1801 }
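// Worked example (assumed values): with log2_number_of_codes == 8, an old
// _index of 0x1060 and a new bytecode 0xb1, the update above yields
// _index = (0x1060 >> 8) | (0xb1 << 8) == 0xb110, i.e. the low bits name the
// previous bytecode and the high bits the current one; the matching 32-bit
// counter in _counters is then incremented.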
  1803 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  1804   // Call a little run-time stub to avoid blow-up for each bytecode.
   1805   // The run-time stub saves the right registers, depending on
  1806   // the tosca in-state for the given template.
  1808   assert(Interpreter::trace_code(t->tos_in()) != NULL,
  1809          "entry must have been generated");
  1811   // Note: we destroy LR here.
   1812   __ bl(Interpreter::trace_code(t->tos_in()));
   1813 }
  1815 void TemplateInterpreterGenerator::stop_interpreter_at() {
  1816   Label L;
  1817   int offs1 = __ load_const_optimized(R11_scratch1, (address) &StopInterpreterAt, R0, true);
  1818   int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
  1819   __ ld(R11_scratch1, offs1, R11_scratch1);
  1820   __ lwa(R12_scratch2, offs2, R12_scratch2);
  1821   __ cmpd(CCR0, R12_scratch2, R11_scratch1);
  1822   __ bne(CCR0, L);
  1823   __ illtrap();
   1824   __ bind(L);
   1825 }
  1827 #endif // !PRODUCT
  1828 #endif // !CC_INTERP
