src/cpu/ppc/vm/cppInterpreter_ppc.cpp

author:      goetz
date:        Thu, 06 Mar 2014 10:55:28 -0800
changeset:   6511  31e80afe3fed
parent:      6501  c668f307a4c0
child:       6512  fd1b9f02cc91
permissions: -rw-r--r--

8035647: PPC64: Support for elf v2 abi.
Summary: ELFv2 ABI used by the little endian PowerPC64 on Linux.
Reviewed-by: kvn
Contributed-by: asmundak@google.com

     1 /*
     2  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     3  * Copyright 2012, 2013 SAP AG. All rights reserved.
     4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     5  *
     6  * This code is free software; you can redistribute it and/or modify it
     7  * under the terms of the GNU General Public License version 2 only, as
     8  * published by the Free Software Foundation.
     9  *
    10  * This code is distributed in the hope that it will be useful, but WITHOUT
    11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    13  * version 2 for more details (a copy is included in the LICENSE file that
    14  * accompanied this code).
    15  *
    16  * You should have received a copy of the GNU General Public License version
    17  * 2 along with this work; if not, write to the Free Software Foundation,
    18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    19  *
    20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    21  * or visit www.oracle.com if you need additional information or have any
    22  * questions.
    23  *
    24  */
    26 #include "precompiled.hpp"
    27 #include "asm/assembler.hpp"
    28 #include "asm/macroAssembler.inline.hpp"
    29 #include "interpreter/bytecodeHistogram.hpp"
    30 #include "interpreter/cppInterpreter.hpp"
    31 #include "interpreter/interpreter.hpp"
    32 #include "interpreter/interpreterGenerator.hpp"
    33 #include "interpreter/interpreterRuntime.hpp"
    34 #include "oops/arrayOop.hpp"
    35 #include "oops/methodData.hpp"
    36 #include "oops/method.hpp"
    37 #include "oops/oop.inline.hpp"
    38 #include "prims/jvmtiExport.hpp"
    39 #include "prims/jvmtiThreadState.hpp"
    40 #include "runtime/arguments.hpp"
    41 #include "runtime/deoptimization.hpp"
    42 #include "runtime/frame.inline.hpp"
    43 #include "runtime/interfaceSupport.hpp"
    44 #include "runtime/sharedRuntime.hpp"
    45 #include "runtime/stubRoutines.hpp"
    46 #include "runtime/synchronizer.hpp"
    47 #include "runtime/timer.hpp"
    48 #include "runtime/vframeArray.hpp"
    49 #include "utilities/debug.hpp"
    50 #ifdef SHARK
    51 #include "shark/shark_globals.hpp"
    52 #endif
    54 #ifdef CC_INTERP
    56 #define __ _masm->
    58 // Contains is used for identifying interpreter frames during a stack-walk.
    59 // A frame with a PC in InterpretMethod must be identified as a normal C frame.
    60 bool CppInterpreter::contains(address pc) {
    61   return _code->contains(pc);
    62 }
    64 #ifdef PRODUCT
    65 #define BLOCK_COMMENT(str) // nothing
    66 #else
    67 #define BLOCK_COMMENT(str) __ block_comment(str)
    68 #endif
    70 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
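         // BIND() binds a label and, in non-PRODUCT builds, additionally records
         // the label name as a block comment, which makes the generated
         // interpreter code easier to follow in a disassembly.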
    72 static address interpreter_frame_manager        = NULL;
    73 static address frame_manager_specialized_return = NULL;
    74 static address native_entry                     = NULL;
    76 static address interpreter_return_address       = NULL;
    78 static address unctrap_frame_manager_entry      = NULL;
    80 static address deopt_frame_manager_return_atos  = NULL;
    81 static address deopt_frame_manager_return_btos  = NULL;
    82 static address deopt_frame_manager_return_itos  = NULL;
    83 static address deopt_frame_manager_return_ltos  = NULL;
    84 static address deopt_frame_manager_return_ftos  = NULL;
    85 static address deopt_frame_manager_return_dtos  = NULL;
    86 static address deopt_frame_manager_return_vtos  = NULL;
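         // These addresses are filled in while the interpreter stubs are
         // generated: one deoptimization re-entry point per result type, plus
         // the frame manager and native entry themselves.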
    88 // A result handler converts/unboxes a native call result into
    89 // a java interpreter/compiler result. The current frame is an
    90 // interpreter frame.
    91 address CppInterpreterGenerator::generate_result_handler_for(BasicType type) {
    92   return AbstractInterpreterGenerator::generate_result_handler_for(type);
    93 }
    95 // tosca based result to c++ interpreter stack based result.
    96 address CppInterpreterGenerator::generate_tosca_to_stack_converter(BasicType type) {
    97   //
    98   // A result is in the native abi result register from a native
    99   // method call. We need to return this result to the interpreter by
   100   // pushing the result on the interpreter's stack.
   101   //
   102   // Registers alive:
   103   //   R3_ARG1(R3_RET)/F1_ARG1(F1_RET) - result to move
   104   //   R4_ARG2                         - address of tos
   105   //   LR
   106   //
   107   // Registers updated:
   108   //   R3_RET(R3_ARG1)   - address of new tos (== R17_tos for T_VOID)
   109   //
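         // For example, on PPC64 (BytesPerWord == 8): a T_INT result is stored
         // at [tos] and the new tos returned is tos - 8; a T_LONG or T_DOUBLE
         // result occupies two slots, with the value in the topmost slot at
         // [tos - 8], a zero debug marker at [tos], and a new tos of tos - 16.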
   111   int number_of_used_slots = 1;
   113   const Register tos = R4_ARG2;
   114   Label done;
   115   Label is_false;
   117   address entry = __ pc();
   119   switch (type) {
   120   case T_BOOLEAN:
   121     __ cmpwi(CCR0, R3_RET, 0);
   122     __ beq(CCR0, is_false);
   123     __ li(R3_RET, 1);
   124     __ stw(R3_RET, 0, tos);
   125     __ b(done);
   126     __ bind(is_false);
   127     __ li(R3_RET, 0);
   128     __ stw(R3_RET, 0, tos);
   129     break;
   130   case T_BYTE:
   131   case T_CHAR:
   132   case T_SHORT:
   133   case T_INT:
   134     __ stw(R3_RET, 0, tos);
   135     break;
   136   case T_LONG:
   137     number_of_used_slots = 2;
   138     // mark unused slot for debugging
   139     // long goes to topmost slot
   140     __ std(R3_RET, -BytesPerWord, tos);
   141     __ li(R3_RET, 0);
   142     __ std(R3_RET, 0, tos);
   143     break;
   144   case T_OBJECT:
   145     __ verify_oop(R3_RET);
   146     __ std(R3_RET, 0, tos);
   147     break;
   148   case T_FLOAT:
   149     __ stfs(F1_RET, 0, tos);
   150     break;
   151   case T_DOUBLE:
   152     number_of_used_slots = 2;
   153     // mark unused slot for debugging
   154     __ li(R3_RET, 0);
   155     __ std(R3_RET, 0, tos);
   156     // double goes to topmost slot
   157     __ stfd(F1_RET, -BytesPerWord, tos);
   158     break;
   159   case T_VOID:
   160     number_of_used_slots = 0;
   161     break;
   162   default:
   163     ShouldNotReachHere();
   164   }
   166   __ BIND(done);
   168   // new expression stack top
   169   __ addi(R3_RET, tos, -BytesPerWord * number_of_used_slots);
   171   __ blr();
   173   return entry;
   174 }
   176 address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType type) {
   177   //
   178   // Copy the result from the callee's stack to the caller's stack,
   179   // caller and callee both being interpreted.
   180   //
   181   // Registers alive
   182   //   R3_ARG1        - address of callee's tos + BytesPerWord
   183   //   R4_ARG2        - address of caller's tos [i.e. free location]
   184   //   LR
   185   //
   186   //   stack grows upwards, memory grows downwards.
   187   //
   188   //   [      free         ]  <-- callee's tos
   189   //   [  optional result  ]  <-- R3_ARG1
   190   //   [  optional dummy   ]
   191   //          ...
   192   //   [      free         ]  <-- caller's tos, R4_ARG2
   193   //          ...
   194   // Registers updated
   195   //   R3_RET(R3_ARG1) - address of caller's new tos
   196   //
   197   //   stack grows upwards, memory grows downwards.
   198   //
   199   //   [      free         ]  <-- current tos, R3_RET
   200   //   [  optional result  ]
   201   //   [  optional dummy   ]
   202   //          ...
   203   //
   205   const Register from = R3_ARG1;
   206   const Register ret  = R3_ARG1;
   207   const Register tos  = R4_ARG2;
   208   const Register tmp1 = R21_tmp1;
   209   const Register tmp2 = R22_tmp2;
   211   address entry = __ pc();
   213   switch (type) {
   214   case T_BOOLEAN:
   215   case T_BYTE:
   216   case T_CHAR:
   217   case T_SHORT:
   218   case T_INT:
   219   case T_FLOAT:
   220     __ lwz(tmp1, 0, from);
   221     __ stw(tmp1, 0, tos);
   222     // New expression stack top.
   223     __ addi(ret, tos, - BytesPerWord);
   224     break;
   225   case T_LONG:
   226   case T_DOUBLE:
   227     // Move both entries for debug purposes even though only one is live.
   228     __ ld(tmp1, BytesPerWord, from);
   229     __ ld(tmp2, 0, from);
   230     __ std(tmp1, 0, tos);
   231     __ std(tmp2, -BytesPerWord, tos);
   232     // New expression stack top.
   233     __ addi(ret, tos, - 2 * BytesPerWord); // two slots
   234     break;
   235   case T_OBJECT:
   236     __ ld(tmp1, 0, from);
   237     __ verify_oop(tmp1);
   238     __ std(tmp1, 0, tos);
   239     // New expression stack top.
   240     __ addi(ret, tos, - BytesPerWord);
   241     break;
   242   case T_VOID:
   243     // New expression stack top.
   244     __ mr(ret, tos);
   245     break;
   246   default:
   247     ShouldNotReachHere();
   248   }
   250   __ blr();
   252   return entry;
   253 }
   255 address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicType type) {
   256   //
   257   // Load a result from the callee's stack into the caller's expecting
   258   // return register, callee being interpreted, caller being call stub
   259   // or jit code.
   260   //
   261   // Registers alive
   262   //   R3_ARG1   - callee expression tos + BytesPerWord
   263   //   LR
   264   //
   265   //   stack grows upwards, memory grows downwards.
   266   //
   267   //   [      free         ]  <-- callee's tos
   268   //   [  optional result  ]  <-- R3_ARG1
   269   //   [  optional dummy   ]
   270   //          ...
   271   //
   272   // Registers updated
   273   //   R3_RET(R3_ARG1)/F1_RET - result
   274   //
   276   const Register from = R3_ARG1;
   277   const Register ret = R3_ARG1;
   278   const FloatRegister fret = F1_ARG1;
   280   address entry = __ pc();
   282   // Implemented uniformly for both kinds of endianness. The interpreter
   283   // implements boolean, byte, char, and short as jint (4 bytes).
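         // boolean and char hold non-negative values (char is unsigned 16-bit),
         // so they are zero-extended with lwz; byte, short and int are signed
         // and are sign-extended with lwa.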
   284   switch (type) {
   285   case T_BOOLEAN:
   286   case T_CHAR:
   287     // zero extension
   288     __ lwz(ret, 0, from);
   289     break;
   290   case T_BYTE:
   291   case T_SHORT:
   292   case T_INT:
   293     // sign extension
   294     __ lwa(ret, 0, from);
   295     break;
   296   case T_LONG:
   297     __ ld(ret, 0, from);
   298     break;
   299   case T_OBJECT:
   300     __ ld(ret, 0, from);
   301     __ verify_oop(ret);
   302     break;
   303   case T_FLOAT:
   304     __ lfs(fret, 0, from);
   305     break;
   306   case T_DOUBLE:
   307     __ lfd(fret, 0, from);
   308     break;
   309   case T_VOID:
   310     break;
   311   default:
   312     ShouldNotReachHere();
   313   }
   315   __ blr();
   317   return entry;
   318 }
   320 address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
   321   assert(interpreter_return_address != NULL, "Not initialized");
   322   return interpreter_return_address;
   323 }
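         // Returns the address at which execution continues after a
         // deoptimization for the given tos state. The sub-int states btos,
         // ctos and stos share the itos entry, since the interpreter keeps
         // such values in int-sized stack slots; a length of 0 selects the
         // uncommon-trap entry, which re-executes the bytecode.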
   325 address CppInterpreter::deopt_entry(TosState state, int length) {
   326   address ret = NULL;
   327   if (length != 0) {
   328     switch (state) {
   329       case atos: ret = deopt_frame_manager_return_atos; break;
   330       case btos: ret = deopt_frame_manager_return_itos; break;
   331       case ctos:
   332       case stos:
   333       case itos: ret = deopt_frame_manager_return_itos; break;
   334       case ltos: ret = deopt_frame_manager_return_ltos; break;
   335       case ftos: ret = deopt_frame_manager_return_ftos; break;
   336       case dtos: ret = deopt_frame_manager_return_dtos; break;
   337       case vtos: ret = deopt_frame_manager_return_vtos; break;
   338       default: ShouldNotReachHere();
   339     }
   340   } else {
   341     ret = unctrap_frame_manager_entry;  // re-execute the bytecode (e.g. uncommon trap, popframe)
   342   }
   343   assert(ret != NULL, "Not initialized");
   344   return ret;
   345 }
   347 //
    348 // Helpers for commoning out cases in the various types of method entries.
   349 //
   351 //
   352 // Registers alive
   353 //   R16_thread      - JavaThread*
   354 //   R1_SP           - old stack pointer
   355 //   R19_method      - callee's Method
   356 //   R17_tos         - address of caller's tos (prepushed)
   357 //   R15_prev_state  - address of caller's BytecodeInterpreter or 0
   358 //   return_pc in R21_tmp15 (only when called within generate_native_entry)
   359 //
   360 // Registers updated
   361 //   R14_state       - address of callee's interpreter state
   362 //   R1_SP           - new stack pointer
   363 //   CCR4_is_synced  - current method is synchronized
   364 //
   365 void CppInterpreterGenerator::generate_compute_interpreter_state(Label& stack_overflow_return) {
   366   //
   367   // Stack layout at this point:
   368   //
   369   //   F1      [TOP_IJAVA_FRAME_ABI]              <-- R1_SP
   370   //           alignment (optional)
   371   //           [F1's outgoing Java arguments]     <-- R17_tos
   372   //           ...
   373   //   F2      [PARENT_IJAVA_FRAME_ABI]
   374   //            ...
   376   //=============================================================================
   377   // Allocate space for locals other than the parameters, the
   378   // interpreter state, monitors, and the expression stack.
   380   const Register local_count        = R21_tmp1;
   381   const Register parameter_count    = R22_tmp2;
   382   const Register max_stack          = R23_tmp3;
   383   // Must not be overwritten within this method!
   384   // const Register return_pc         = R29_tmp9;
   386   const ConditionRegister is_synced = CCR4_is_synced;
   387   const ConditionRegister is_native = CCR6;
   388   const ConditionRegister is_static = CCR7;
   390   assert(is_synced != is_native, "condition code registers must be distinct");
   391   assert(is_synced != is_static, "condition code registers must be distinct");
   392   assert(is_native != is_static, "condition code registers must be distinct");
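         // Each flag needs its own condition register because all three stay
         // live across the rest of the entry path; is_synced (CCR4) must in
         // addition be non-volatile, since it is still tested after helper
         // calls (see the assert below and generate_native_entry).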
   394   {
   396   // Local registers
   397   const Register top_frame_size     = R24_tmp4;
   398   const Register access_flags       = R25_tmp5;
   399   const Register state_offset       = R26_tmp6;
   400   Register mem_stack_limit          = R27_tmp7;
   401   const Register page_size          = R28_tmp8;
   403   BLOCK_COMMENT("compute_interpreter_state {");
   405   // access_flags = method->access_flags();
   406   // TODO: PPC port: assert(4 == methodOopDesc::sz_access_flags(), "unexpected field size");
   407   __ lwa(access_flags, method_(access_flags));
   409   // parameter_count = method->constMethod->size_of_parameters();
   410   // TODO: PPC port: assert(2 == ConstMethod::sz_size_of_parameters(), "unexpected field size");
   411   __ ld(max_stack, in_bytes(Method::const_offset()), R19_method);   // Max_stack holds constMethod for a while.
   412   __ lhz(parameter_count, in_bytes(ConstMethod::size_of_parameters_offset()), max_stack);
   414   // local_count = method->constMethod()->max_locals();
   415   // TODO: PPC port: assert(2 == ConstMethod::sz_max_locals(), "unexpected field size");
   416   __ lhz(local_count, in_bytes(ConstMethod::size_of_locals_offset()), max_stack);
   418   // max_stack = method->constMethod()->max_stack();
   419   // TODO: PPC port: assert(2 == ConstMethod::sz_max_stack(), "unexpected field size");
   420   __ lhz(max_stack, in_bytes(ConstMethod::max_stack_offset()), max_stack);
   422   if (EnableInvokeDynamic) {
   423     // Take into account 'extra_stack_entries' needed by method handles (see method.hpp).
   424     __ addi(max_stack, max_stack, Method::extra_stack_entries());
   425   }
   427   // mem_stack_limit = thread->stack_limit();
   428   __ ld(mem_stack_limit, thread_(stack_overflow_limit));
   430   // Point locals at the first argument. Method's locals are the
   431   // parameters on top of caller's expression stack.
   433   // tos points past last Java argument
   434   __ sldi(R18_locals, parameter_count, Interpreter::logStackElementSize);
   435   __ add(R18_locals, R17_tos, R18_locals);
   437   // R18_locals - i*BytesPerWord points to i-th Java local (i starts at 0)
   439   // Set is_native, is_synced, is_static - will be used later.
   440   __ testbitdi(is_native, R0, access_flags, JVM_ACC_NATIVE_BIT);
   441   __ testbitdi(is_synced, R0, access_flags, JVM_ACC_SYNCHRONIZED_BIT);
   442   assert(is_synced->is_nonvolatile(), "is_synced must be non-volatile");
   443   __ testbitdi(is_static, R0, access_flags, JVM_ACC_STATIC_BIT);
   445   // PARENT_IJAVA_FRAME_ABI
   446   //
   447   // frame_size =
   448   //   round_to((local_count - parameter_count)*BytesPerWord +
   449   //              2*BytesPerWord +
   450   //              alignment +
   451   //              frame::interpreter_frame_cinterpreterstate_size_in_bytes()
   452   //              sizeof(PARENT_IJAVA_FRAME_ABI)
   453   //              method->is_synchronized() ? sizeof(BasicObjectLock) : 0 +
   454   //              max_stack*BytesPerWord,
   455   //            16)
   456   //
   457   // Note that this calculation is exactly mirrored by
   458   // AbstractInterpreter::layout_activation_impl() [ and
    459   // AbstractInterpreter::size_activation() ], which is used by
    460   // deoptimization so that it can allocate a properly sized
    461   // frame. This only happens for interpreted frames, so the extra
    462   // notes about max_stack below are not important. The other
   463   // thing to note is that for interpreter frames other than the
   464   // current activation the size of the stack is the size of the live
   465   // portion of the stack at the particular bcp and NOT the maximum
   466   // stack that the method might use.
   467   //
   468   // If we're calling a native method, we replace max_stack (which is
   469   // zero) with space for the worst-case signature handler varargs
   470   // vector, which is:
   471   //
   472   //   max_stack = max(Argument::n_register_parameters, parameter_count+2);
   473   //
   474   // We add two slots to the parameter_count, one for the jni
   475   // environment and one for a possible native mirror.  We allocate
   476   // space for at least the number of ABI registers, even though
   477   // InterpreterRuntime::slow_signature_handler won't write more than
   478   // parameter_count+2 words when it creates the varargs vector at the
   479   // top of the stack.  The generated slow signature handler will just
   480   // load trash into registers beyond the necessary number.  We're
   481   // still going to cut the stack back by the ABI register parameter
   482   // count so as to get SP+16 pointing at the ABI outgoing parameter
   483   // area, so we need to allocate at least that much even though we're
   484   // going to throw it away.
   485   //
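         // Rough example with illustrative numbers (the ABI and interpreter
         // state sizes are platform constants): a non-native method with
         // 2 parameters, 5 locals and max_stack == 4 contributes
         // (5 - 2) * 8 bytes of non-parameter locals to the parent frame
         // resize and 4 * 8 bytes of expression stack to the top frame size,
         // each added to the fixed ABI/state areas and rounded up to the
         // 16-byte frame alignment.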
   487   // Adjust max_stack for native methods:
   488   Label skip_native_calculate_max_stack;
   489   __ bfalse(is_native, skip_native_calculate_max_stack);
   490   // if (is_native) {
   491   //  max_stack = max(Argument::n_register_parameters, parameter_count+2);
   492   __ addi(max_stack, parameter_count, 2*Interpreter::stackElementWords);
   493   __ cmpwi(CCR0, max_stack, Argument::n_register_parameters);
   494   __ bge(CCR0, skip_native_calculate_max_stack);
   495   __ li(max_stack,  Argument::n_register_parameters);
   496   // }
   497   __ bind(skip_native_calculate_max_stack);
    498   // Convert max_stack from slots to bytes.
   499   __ slwi(max_stack, max_stack, Interpreter::logStackElementSize);
   501   // Calculate number of non-parameter locals (in slots):
   502   Label not_java;
   503   __ btrue(is_native, not_java);
   504   // if (!is_native) {
   505   //   local_count = non-parameter local count
   506   __ sub(local_count, local_count, parameter_count);
   507   // } else {
   508   //   // nothing to do: method->max_locals() == 0 for native methods
   509   // }
   510   __ bind(not_java);
   513   // Calculate top_frame_size and parent_frame_resize.
   514   {
   515   const Register parent_frame_resize = R12_scratch2;
   517   BLOCK_COMMENT("Compute top_frame_size.");
   518   // top_frame_size = TOP_IJAVA_FRAME_ABI
   519   //                  + size of interpreter state
   520   __ li(top_frame_size, frame::top_ijava_frame_abi_size
   521                         + frame::interpreter_frame_cinterpreterstate_size_in_bytes());
   522   //                  + max_stack
   523   __ add(top_frame_size, top_frame_size, max_stack);
   524   //                  + stack slots for a BasicObjectLock for synchronized methods
   525   {
   526     Label not_synced;
   527     __ bfalse(is_synced, not_synced);
   528     __ addi(top_frame_size, top_frame_size, frame::interpreter_frame_monitor_size_in_bytes());
   529     __ bind(not_synced);
   530   }
   531   // align
   532   __ round_to(top_frame_size, frame::alignment_in_bytes);
   535   BLOCK_COMMENT("Compute parent_frame_resize.");
   536   // parent_frame_resize = R1_SP - R17_tos
   537   __ sub(parent_frame_resize, R1_SP, R17_tos);
   538   //__ li(parent_frame_resize, 0);
   539   //                       + PARENT_IJAVA_FRAME_ABI
   540   //                       + extra two slots for the no-parameter/no-locals
   541   //                         method result
   542   __ addi(parent_frame_resize, parent_frame_resize,
   543                                       frame::parent_ijava_frame_abi_size
   544                                     + 2*Interpreter::stackElementSize);
   545   //                       + (locals_count - params_count)
   546   __ sldi(R0, local_count, Interpreter::logStackElementSize);
   547   __ add(parent_frame_resize, parent_frame_resize, R0);
   548   // align
   549   __ round_to(parent_frame_resize, frame::alignment_in_bytes);
   551   //
   552   // Stack layout at this point:
   553   //
   554   // The new frame F0 hasn't yet been pushed, F1 is still the top frame.
   555   //
   556   //   F0      [TOP_IJAVA_FRAME_ABI]
   557   //           alignment (optional)
   558   //           [F0's full operand stack]
   559   //           [F0's monitors] (optional)
   560   //           [F0's BytecodeInterpreter object]
   561   //   F1      [PARENT_IJAVA_FRAME_ABI]
   562   //           alignment (optional)
   563   //           [F0's Java result]
   564   //           [F0's non-arg Java locals]
   565   //           [F1's outgoing Java arguments]     <-- R17_tos
   566   //           ...
   567   //   F2      [PARENT_IJAVA_FRAME_ABI]
   568   //            ...
   571   // Calculate new R14_state
   572   // and
   573   // test that the new memory stack pointer is above the limit,
   574   // throw a StackOverflowError otherwise.
   575   __ sub(R11_scratch1/*F1's SP*/,  R1_SP, parent_frame_resize);
   576   __ addi(R14_state, R11_scratch1/*F1's SP*/,
   577               -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
   578   __ sub(R11_scratch1/*F0's SP*/,
   579              R11_scratch1/*F1's SP*/, top_frame_size);
   581   BLOCK_COMMENT("Test for stack overflow:");
   582   __ cmpld(CCR0/*is_stack_overflow*/, R11_scratch1, mem_stack_limit);
   583   __ blt(CCR0/*is_stack_overflow*/, stack_overflow_return);
   586   //=============================================================================
   587   // Frame_size doesn't overflow the stack. Allocate new frame and
   588   // initialize interpreter state.
   590   // Register state
   591   //
   592   //   R15            - local_count
   593   //   R16            - parameter_count
   594   //   R17            - max_stack
   595   //
   596   //   R18            - frame_size
   597   //   R19            - access_flags
   598   //   CCR4_is_synced - is_synced
   599   //
   600   //   GR_Lstate      - pointer to the uninitialized new BytecodeInterpreter.
   602   // _last_Java_pc just needs to be close enough that we can identify
   603   // the frame as an interpreted frame. It does not need to be the
   604   // exact return address from either calling
   605   // BytecodeInterpreter::InterpretMethod or the call to a jni native method.
   606   // So we can initialize it here with a value of a bundle in this
   607   // code fragment. We only do this initialization for java frames
    608   // where InterpretMethod needs a way to get a good pc value to
   609   // store in the thread state. For interpreter frames used to call
   610   // jni native code we just zero the value in the state and move an
   611   // ip as needed in the native entry code.
   612   //
   613   // const Register last_Java_pc_addr     = GR24_SCRATCH;  // QQQ 27
   614   // const Register last_Java_pc          = GR26_SCRATCH;
   616   // Must reference stack before setting new SP since Windows
   617   // will not be able to deliver the exception on a bad SP.
   618   // Windows also insists that we bang each page one at a time in order
   619   // for the OS to map in the reserved pages. If we bang only
   620   // the final page, Windows stops delivering exceptions to our
   621   // VectoredExceptionHandler and terminates our program.
   622   // Linux only requires a single bang but it's rare to have
   623   // to bang more than 1 page so the code is enabled for both OS's.
   625   // BANG THE STACK
   626   //
   627   // Nothing to do for PPC, because updating the SP will automatically
   628   // bang the page.
   630   // Up to here we have calculated the delta for the new C-frame and
    631   // checked for a stack-overflow. Now we can safely update SP and
   632   // resize the C-frame.
   634   // R14_state has already been calculated.
   635   __ push_interpreter_frame(top_frame_size, parent_frame_resize,
   636                             R25_tmp5, R26_tmp6, R27_tmp7, R28_tmp8);
   638   }
   640   //
   641   // Stack layout at this point:
   642   //
    643   //   F0 has been pushed!
   644   //
   645   //   F0      [TOP_IJAVA_FRAME_ABI]              <-- R1_SP
   646   //           alignment (optional)               (now it's here, if required)
   647   //           [F0's full operand stack]
   648   //           [F0's monitors] (optional)
   649   //           [F0's BytecodeInterpreter object]
   650   //   F1      [PARENT_IJAVA_FRAME_ABI]
   651   //           alignment (optional)               (now it's here, if required)
   652   //           [F0's Java result]
   653   //           [F0's non-arg Java locals]
   654   //           [F1's outgoing Java arguments]
   655   //           ...
   656   //   F2      [PARENT_IJAVA_FRAME_ABI]
   657   //           ...
   658   //
   659   // R14_state points to F0's BytecodeInterpreter object.
   660   //
   662   }
   664   //=============================================================================
    665   // The new BytecodeInterpreter object is allocated, let's initialize it:
    666   BLOCK_COMMENT("New BytecodeInterpreter object is allocated.");
   668   {
   669   // Locals
   670   const Register bytecode_addr = R24_tmp4;
   671   const Register constants     = R25_tmp5;
   672   const Register tos           = R26_tmp6;
   673   const Register stack_base    = R27_tmp7;
   674   const Register local_addr    = R28_tmp8;
   675   {
   676     Label L;
   677     __ btrue(is_native, L);
   678     // if (!is_native) {
   679       // bytecode_addr = constMethod->codes();
   680       __ ld(bytecode_addr, method_(const));
   681       __ addi(bytecode_addr, bytecode_addr, in_bytes(ConstMethod::codes_offset()));
   682     // }
   683     __ bind(L);
   684   }
   686   __ ld(constants, in_bytes(Method::const_offset()), R19_method);
   687   __ ld(constants, in_bytes(ConstMethod::constants_offset()), constants);
   689   // state->_prev_link = prev_state;
   690   __ std(R15_prev_state, state_(_prev_link));
   692   // For assertions only.
   693   // TODO: not needed anyway because it coincides with `_monitor_base'. remove!
   694   // state->_self_link = state;
   695   DEBUG_ONLY(__ std(R14_state, state_(_self_link));)
   697   // state->_thread = thread;
   698   __ std(R16_thread, state_(_thread));
   700   // state->_method = method;
   701   __ std(R19_method, state_(_method));
   703   // state->_locals = locals;
   704   __ std(R18_locals, state_(_locals));
   706   // state->_oop_temp = NULL;
   707   __ li(R0, 0);
   708   __ std(R0, state_(_oop_temp));
   710   // state->_last_Java_fp = *R1_SP // Use *R1_SP as fp
   711   __ ld(R0, _abi(callers_sp), R1_SP);
   712   __ std(R0, state_(_last_Java_fp));
   714   BLOCK_COMMENT("load Stack base:");
   715   {
   716     // Stack_base.
   717     // if (!method->synchronized()) {
   718     //   stack_base = state;
   719     // } else {
   720     //   stack_base = (uintptr_t)state - sizeof(BasicObjectLock);
   721     // }
   722     Label L;
   723     __ mr(stack_base, R14_state);
   724     __ bfalse(is_synced, L);
   725     __ addi(stack_base, stack_base, -frame::interpreter_frame_monitor_size_in_bytes());
   726     __ bind(L);
   727   }
   729   // state->_mdx = NULL;
   730   __ li(R0, 0);
   731   __ std(R0, state_(_mdx));
   733   {
   734     // if (method->is_native()) state->_bcp = NULL;
   735     // else state->_bcp = bytecode_addr;
   736     Label label1, label2;
   737     __ bfalse(is_native, label1);
   738     __ std(R0, state_(_bcp));
   739     __ b(label2);
   740     __ bind(label1);
   741     __ std(bytecode_addr, state_(_bcp));
   742     __ bind(label2);
   743   }
   746   // state->_result._to_call._callee = NULL;
   747   __ std(R0, state_(_result._to_call._callee));
   749   // state->_monitor_base = state;
   750   __ std(R14_state, state_(_monitor_base));
   752   // state->_msg = BytecodeInterpreter::method_entry;
   753   __ li(R0, BytecodeInterpreter::method_entry);
   754   __ stw(R0, state_(_msg));
   756   // state->_last_Java_sp = R1_SP;
   757   __ std(R1_SP, state_(_last_Java_sp));
   759   // state->_stack_base = stack_base;
   760   __ std(stack_base, state_(_stack_base));
   762   // tos = stack_base - 1 slot (prepushed);
   763   // state->_stack.Tos(tos);
   764   __ addi(tos, stack_base, - Interpreter::stackElementSize);
   765   __ std(tos,  state_(_stack));
   768   {
   769     BLOCK_COMMENT("get last_Java_pc:");
   770     // if (!is_native) state->_last_Java_pc = <some_ip_in_this_code_buffer>;
   771     // else state->_last_Java_pc = NULL; (just for neatness)
   772     Label label1, label2;
   773     __ btrue(is_native, label1);
   774     __ get_PC_trash_LR(R0);
   775     __ std(R0, state_(_last_Java_pc));
   776     __ b(label2);
   777     __ bind(label1);
   778     __ li(R0, 0);
   779     __ std(R0, state_(_last_Java_pc));
   780     __ bind(label2);
   781   }
   784   // stack_limit = tos - max_stack;
   785   __ sub(R0, tos, max_stack);
   786   // state->_stack_limit = stack_limit;
   787   __ std(R0, state_(_stack_limit));
   790   // cache = method->constants()->cache();
   791    __ ld(R0, ConstantPool::cache_offset_in_bytes(), constants);
   792   // state->_constants = method->constants()->cache();
   793   __ std(R0, state_(_constants));
   797   //=============================================================================
   798   // synchronized method, allocate and initialize method object lock.
   799   // if (!method->is_synchronized()) goto fill_locals_with_0x0s;
   800   Label fill_locals_with_0x0s;
   801   __ bfalse(is_synced, fill_locals_with_0x0s);
   803   //   pool_holder = method->constants()->pool_holder();
   804   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
   805   {
   806     Label label1, label2;
   807     // lockee = NULL; for java methods, correct value will be inserted in BytecodeInterpretMethod.hpp
   808     __ li(R0,0);
   809     __ bfalse(is_native, label2);
   811     __ bfalse(is_static, label1);
   812     // if (method->is_static()) lockee =
   813     // pool_holder->klass_part()->java_mirror();
   814     __ ld(R11_scratch1/*pool_holder*/, ConstantPool::pool_holder_offset_in_bytes(), constants);
   815     __ ld(R0/*lockee*/, mirror_offset, R11_scratch1/*pool_holder*/);
   816     __ b(label2);
   818     __ bind(label1);
   819     // else lockee = *(oop*)locals;
   820     __ ld(R0/*lockee*/, 0, R18_locals);
   821     __ bind(label2);
   823     // monitor->set_obj(lockee);
   824     __ std(R0/*lockee*/, BasicObjectLock::obj_offset_in_bytes(), stack_base);
   825   }
   827   // See if we need to zero the locals
   828   __ BIND(fill_locals_with_0x0s);
   831   //=============================================================================
   832   // fill locals with 0x0s
   833   Label locals_zeroed;
   834   __ btrue(is_native, locals_zeroed);
   836   if (true /* zerolocals */ || ClearInterpreterLocals) {
   837     // local_count is already num_locals_slots - num_param_slots
   838     __ sldi(R0, parameter_count, Interpreter::logStackElementSize);
   839     __ sub(local_addr, R18_locals, R0);
   840     __ cmpdi(CCR0, local_count, 0);
   841     __ ble(CCR0, locals_zeroed);
   843     __ mtctr(local_count);
   844     //__ ld_const_addr(R0, (address) 0xcafe0000babe);
   845     __ li(R0, 0);
   847     Label zero_slot;
   848     __ bind(zero_slot);
   850     // first local is at local_addr
   851     __ std(R0, 0, local_addr);
   852     __ addi(local_addr, local_addr, -BytesPerWord);
   853     __ bdnz(zero_slot);
   854   }
   856    __ BIND(locals_zeroed);
   858   }
   859   BLOCK_COMMENT("} compute_interpreter_state");
   860 }
   862 // Generate code to initiate compilation on invocation counter overflow.
   863 void CppInterpreterGenerator::generate_counter_overflow(Label& continue_entry) {
   864   // Registers alive
   865   //   R14_state
   866   //   R16_thread
   867   //
   868   // Registers updated
   869   //   R14_state
   870   //   R3_ARG1 (=R3_RET)
   871   //   R4_ARG2
   873   // After entering the vm we remove the activation and retry the
   874   // entry point in case the compilation is complete.
   876   // InterpreterRuntime::frequency_counter_overflow takes one argument
   877   // that indicates if the counter overflow occurs at a backwards
   878   // branch (NULL bcp). We pass zero. The call returns the address
   879   // of the verified entry point for the method or NULL if the
   880   // compilation did not complete (either went background or bailed
   881   // out).
   882   __ li(R4_ARG2, 0);
   884   // Pass false to call_VM so it doesn't check for pending exceptions,
   885   // since at this point in the method invocation the exception
   886   // handler would try to exit the monitor of synchronized methods
   887   // which haven't been entered yet.
   888   //
   889   // Returns verified_entry_point or NULL, we don't care which.
   890   //
   891   // Do not use the variant `frequency_counter_overflow' that returns
   892   // a structure, because this will change the argument list by a
   893   // hidden parameter (gcc 4.1).
   895   __ call_VM(noreg,
   896              CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow),
   897              R4_ARG2,
   898              false);
   899   // Returns verified_entry_point or NULL, we don't care which as we ignore it
   900   // and run interpreted.
   902   // Reload method, it may have moved.
   903   __ ld(R19_method, state_(_method));
   905   // We jump now to the label "continue_after_compile".
   906   __ b(continue_entry);
   907 }
   909 // Increment invocation count and check for overflow.
   910 //
   911 // R19_method must contain Method* of method to profile.
   912 void CppInterpreterGenerator::generate_counter_incr(Label& overflow) {
   913   Label done;
   914   const Register Rcounters             = R12_scratch2;
   915   const Register iv_be_count           = R11_scratch1;
   916   const Register invocation_limit      = R12_scratch2;
   917   const Register invocation_limit_addr = invocation_limit;
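         // Rcounters and invocation_limit_addr deliberately share R12_scratch2:
         // the counters pointer is dead once the invocation counter has been
         // incremented into iv_be_count.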
    919   // Load, and if necessary allocate, the MethodCounters object.
   920   __ get_method_counters(R19_method, Rcounters, done);
   922   // Update standard invocation counters.
   923   __ increment_invocation_counter(Rcounters, iv_be_count, R0);
   925   // Compare against limit.
   926   BLOCK_COMMENT("Compare counter against limit:");
   927   assert(4 == sizeof(InvocationCounter::InterpreterInvocationLimit),
   928          "must be 4 bytes");
   929   __ load_const(invocation_limit_addr, (address)&InvocationCounter::InterpreterInvocationLimit);
   930   __ lwa(invocation_limit, 0, invocation_limit_addr);
   931   __ cmpw(CCR0, iv_be_count, invocation_limit);
   932   __ bge(CCR0, overflow);
   933   __ bind(done);
   934 }
   936 //
   937 // Call a JNI method.
   938 //
   939 // Interpreter stub for calling a native method. (C++ interpreter)
   940 // This sets up a somewhat different looking stack for calling the native method
   941 // than the typical interpreter frame setup.
   942 //
   943 address CppInterpreterGenerator::generate_native_entry(void) {
   944   if (native_entry != NULL) return native_entry;
   945   address entry = __ pc();
   947   // Read
   948   //   R16_thread
   949   //   R15_prev_state  - address of caller's BytecodeInterpreter, if this snippet
   950   //                     gets called by the frame manager.
   951   //   R19_method      - callee's Method
   952   //   R17_tos         - address of caller's tos
   953   //   R1_SP           - caller's stack pointer
   954   //   R21_sender_SP   - initial caller sp
   955   //
   956   // Update
   957   //   R14_state       - address of caller's BytecodeInterpreter
   958   //   R3_RET          - integer result, if any.
   959   //   F1_RET          - float result, if any.
   960   //
   961   //
   962   // Stack layout at this point:
   963   //
   964   //    0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
   965   //            alignment (optional)
   966   //            [outgoing Java arguments]     <-- R17_tos
   967   //            ...
   968   //    PARENT  [PARENT_IJAVA_FRAME_ABI]
   969   //            ...
   970   //
   972   const bool inc_counter = UseCompiler || CountCompiledCalls;
   974   const Register signature_handler_fd   = R21_tmp1;
   975   const Register pending_exception      = R22_tmp2;
   976   const Register result_handler_addr    = R23_tmp3;
   977   const Register native_method_fd       = R24_tmp4;
   978   const Register access_flags           = R25_tmp5;
   979   const Register active_handles         = R26_tmp6;
   980   const Register sync_state             = R27_tmp7;
   981   const Register sync_state_addr        = sync_state;     // Address is dead after use.
   982   const Register suspend_flags          = R24_tmp4;
   984   const Register return_pc              = R28_tmp8;       // Register will be locked for some time.
   986   const ConditionRegister is_synced     = CCR4_is_synced; // Live-on-exit from compute_interpreter_state.
   989   // R1_SP still points to caller's SP at this point.
   991   // Save initial_caller_sp to caller's abi. The caller frame must be
   992   // resized before returning to get rid of the c2i arguments (if
   993   // any).
   994   // Override the saved SP with the senderSP so we can pop c2i
   995   // arguments (if any) off when we return
   996   __ std(R21_sender_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
   998   // Save LR to caller's frame. We don't use _abi(lr) here, because it is not safe.
   999   __ mflr(return_pc);
  1000   __ std(return_pc, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
  1002   assert(return_pc->is_nonvolatile(), "return_pc must be a non-volatile register");
  1004   __ verify_method_ptr(R19_method);
  1006   //=============================================================================
  1008   // If this snippet gets called by the frame manager (at label
  1009   // `call_special'), then R15_prev_state is valid. If this snippet
  1010   // is not called by the frame manager, but e.g. by the call stub or
  1011   // by compiled code, then R15_prev_state is invalid.
  1012   {
  1013     // Set R15_prev_state to 0 if we don't return to the frame
  1014     // manager; we will return to the call_stub or to compiled code
  1015     // instead. If R15_prev_state is 0 there will be only one
  1016     // interpreter frame (we will set this up later) in this C frame!
  1017     // So we must take care about retrieving prev_state_(_prev_link)
  1018     // and restoring R1_SP when popping that interpreter.
  1019     Label prev_state_is_valid;
  1021     __ load_const(R11_scratch1/*frame_manager_returnpc_addr*/, (address)&frame_manager_specialized_return);
  1022     __ ld(R12_scratch2/*frame_manager_returnpc*/, 0, R11_scratch1/*frame_manager_returnpc_addr*/);
  1023     __ cmpd(CCR0, return_pc, R12_scratch2/*frame_manager_returnpc*/);
  1024     __ beq(CCR0, prev_state_is_valid);
  1026     __ li(R15_prev_state, 0);
  1028     __ BIND(prev_state_is_valid);
  1029   }
  1031   //=============================================================================
  1032   // Allocate new frame and initialize interpreter state.
  1034   Label exception_return;
  1035   Label exception_return_sync_check;
  1036   Label stack_overflow_return;
  1038   // Generate new interpreter state and jump to stack_overflow_return in case of
  1039   // a stack overflow.
  1040   generate_compute_interpreter_state(stack_overflow_return);
  1042   //=============================================================================
  1043   // Increment invocation counter. On overflow, entry to JNI method
  1044   // will be compiled.
  1045   Label invocation_counter_overflow;
  1046   if (inc_counter) {
  1047     generate_counter_incr(invocation_counter_overflow);
  1048   }
  1050   Label continue_after_compile;
  1051   __ BIND(continue_after_compile);
  1053   // access_flags = method->access_flags();
  1054   // Load access flags.
  1055   assert(access_flags->is_nonvolatile(),
  1056          "access_flags must be in a non-volatile register");
  1057   // Type check.
  1058   // TODO: PPC port: assert(4 == methodOopDesc::sz_access_flags(), "unexpected field size");
  1059   __ lwz(access_flags, method_(access_flags));
  1061   // We don't want to reload R19_method and access_flags after calls
  1062   // to some helper functions.
  1063   assert(R19_method->is_nonvolatile(), "R19_method must be a non-volatile register");
  1065   // Check for synchronized methods. Must happen AFTER invocation counter
  1066   // check, so method is not locked if counter overflows.
  1068   {
  1069     Label method_is_not_synced;
  1070     // Is_synced is still alive.
  1071     assert(is_synced->is_nonvolatile(), "is_synced must be non-volatile");
  1072     __ bfalse(is_synced, method_is_not_synced);
  1074     lock_method();
  1075     // Reload method, it may have moved.
  1076     __ ld(R19_method, state_(_method));
  1078     __ BIND(method_is_not_synced);
  1079   }
  1081   // jvmti/jvmpi support
  1082   __ notify_method_entry();
  1084   // Reload method, it may have moved.
  1085   __ ld(R19_method, state_(_method));
  1087   //=============================================================================
  1088   // Get and call the signature handler
  1090   __ ld(signature_handler_fd, method_(signature_handler));
  1091   Label call_signature_handler;
  1093   __ cmpdi(CCR0, signature_handler_fd, 0);
  1094   __ bne(CCR0, call_signature_handler);
  1096   // Method has never been called. Either generate a specialized
  1097   // handler or point to the slow one.
  1098   //
  1099   // Pass parameter 'false' to avoid exception check in call_VM.
  1100   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R19_method, false);
  1102   // Check for an exception while looking up the target method. If we
  1103   // incurred one, bail.
  1104   __ ld(pending_exception, thread_(pending_exception));
  1105   __ cmpdi(CCR0, pending_exception, 0);
  1106   __ bne(CCR0, exception_return_sync_check); // has pending exception
  1108   // reload method
  1109   __ ld(R19_method, state_(_method));
  1111   // Reload the signature handler, it may have been created/assigned in the meantime.
  1112   __ ld(signature_handler_fd, method_(signature_handler));
  1114   __ BIND(call_signature_handler);
  1116   // Before we call the signature handler we push a new frame to
  1117   // protect the interpreter frame volatile registers when we return
  1118   // from jni but before we can get back to Java.
  1120   // First set the frame anchor while the SP/FP registers are
  1121   // convenient and the slow signature handler can use this same frame
  1122   // anchor.
  1124   // We have a TOP_IJAVA_FRAME here, which belongs to us.
  1125   __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);
  1127   // Now the interpreter frame (and its call chain) have been
  1128   // invalidated and flushed. We are now protected against eager
  1129   // being enabled in native code. Even if it goes eager the
  1130   // registers will be reloaded as clean and we will invalidate after
  1131   // the call so no spurious flush should be possible.
  1133   // Call signature handler and pass locals address.
  1134   //
  1135   // Our signature handlers copy required arguments to the C stack
  1136   // (outgoing C args), R3_ARG1 to R10_ARG8, and F1_ARG1 to
  1137   // F13_ARG13.
  1138   __ mr(R3_ARG1, R18_locals);
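         // Under the ELFv1 ABI, method->signature_handler() refers to a
         // function descriptor whose first word is the actual entry point, so
         // it is dereferenced before the call; under ELFv2 (little-endian
         // PPC64 Linux) the stored address is already the entry point.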
  1139 #if !defined(ABI_ELFv2)
  1140   __ ld(signature_handler_fd, 0, signature_handler_fd);
  1141 #endif
  1142   __ call_stub(signature_handler_fd);
  1143   // reload method
  1144   __ ld(R19_method, state_(_method));
  1146   // Remove the register parameter varargs slots we allocated in
  1147   // compute_interpreter_state. SP+16 ends up pointing to the ABI
  1148   // outgoing argument area.
  1149   //
  1150   // Not needed on PPC64.
  1151   //__ add(SP, SP, Argument::n_register_parameters*BytesPerWord);
  1153   assert(result_handler_addr->is_nonvolatile(), "result_handler_addr must be in a non-volatile register");
  1154   // Save across call to native method.
  1155   __ mr(result_handler_addr, R3_RET);
  1157   // Set up fixed parameters and call the native method.
  1158   // If the method is static, get mirror into R4_ARG2.
  1160   {
  1161     Label method_is_not_static;
  1162     // access_flags is non-volatile and still valid, no need to reload it.
  1164     // Test the static bit of the access flags.
  1165     __ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
  1166     __ bfalse(CCR0, method_is_not_static);
  1168     // constants = method->constants();
  1169     __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
  1170     __ ld(R11_scratch1/*constants*/, in_bytes(ConstMethod::constants_offset()), R11_scratch1);
  1171     // pool_holder = method->constants()->pool_holder();
  1172     __ ld(R11_scratch1/*pool_holder*/, ConstantPool::pool_holder_offset_in_bytes(),
  1173           R11_scratch1/*constants*/);
  1175     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  1177     // mirror = pool_holder->klass_part()->java_mirror();
  1178     __ ld(R0/*mirror*/, mirror_offset, R11_scratch1/*pool_holder*/);
  1179     // state->_native_mirror = mirror;
  1180     __ std(R0/*mirror*/, state_(_oop_temp));
  1181     // R4_ARG2 = &state->_oop_temp;
  1182     __ addir(R4_ARG2, state_(_oop_temp));
  1184     __ BIND(method_is_not_static);
  1185   }
  1187   // At this point, arguments have been copied off the stack into
  1188   // their JNI positions. Oops are boxed in-place on the stack, with
  1189   // handles copied to arguments. The result handler address is in a
  1190   // register.
  1192   // pass JNIEnv address as first parameter
  1193   __ addir(R3_ARG1, thread_(jni_environment));
  1195   // Load the native_method entry before we change the thread state.
  1196   __ ld(native_method_fd, method_(native_function));
  1198   //=============================================================================
  1199   // Transition from _thread_in_Java to _thread_in_native. As soon as
  1200   // we make this change the safepoint code needs to be certain that
  1201   // the last Java frame we established is good. The pc in that frame
  1202   // just needs to be near here not an actual return address.
  1204   // We use release_store_fence to update values like the thread state, where
  1205   // we don't want the current thread to continue until all our prior memory
  1206   // accesses (including the new thread state) are visible to other threads.
  1207   __ li(R0, _thread_in_native);
  1208   __ release();
  1210   // TODO: PPC port: assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
  1211   __ stw(R0, thread_(thread_state));
  1213   if (UseMembar) {
  1214     __ fence();
  1215   }
  1217   //=============================================================================
  1218   // Call the native method. Argument registers must not have been
  1219   // overwritten since "__ call_stub(signature_handler);" (except for
  1220   // ARG1 and ARG2 for static methods)
  1221   __ call_c(native_method_fd);
  1223   __ std(R3_RET, state_(_native_lresult));
  1224   __ stfd(F1_RET, state_(_native_fresult));
  1226   // The frame_manager_lr field, which we use for setting the last
  1227   // java frame, gets overwritten by the signature handler. Restore
  1228   // it now.
  1229   __ get_PC_trash_LR(R11_scratch1);
  1230   __ std(R11_scratch1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
  1232   // Because of GC R19_method may no longer be valid.
  1234   // Block, if necessary, before resuming in _thread_in_Java state.
  1235   // In order for GC to work, don't clear the last_Java_sp until after
  1236   // blocking.
  1240   //=============================================================================
  1241   // Switch thread to "native transition" state before reading the
  1242   // synchronization state.  This additional state is necessary
  1243   // because reading and testing the synchronization state is not
  1244   // atomic w.r.t. GC, as this scenario demonstrates: Java thread A,
  1245   // in _thread_in_native state, loads _not_synchronized and is
  1246   // preempted.  VM thread changes sync state to synchronizing and
  1247   // suspends threads for GC. Thread A is resumed to finish this
  1248   // native method, but doesn't block here since it didn't see any
  1249   // synchronization in progress, and escapes.
  1251   // We use release_store_fence to update values like the thread state, where
  1252   // we don't want the current thread to continue until all our prior memory
  1253   // accesses (including the new thread state) are visible to other threads.
  1254   __ li(R0/*thread_state*/, _thread_in_native_trans);
  1255   __ release();
  1256   __ stw(R0/*thread_state*/, thread_(thread_state));
  1257   if (UseMembar) {
  1258     __ fence();
  1259   }
  1260   // Write serialization page so that the VM thread can do a pseudo remote
  1261   // membar. We use the current thread pointer to calculate a thread
  1262   // specific offset to write to within the page. This minimizes bus
  1263   // traffic due to cache line collision.
  1264   else {
  1265     __ serialize_memory(R16_thread, R11_scratch1, R12_scratch2);
  1266   }
  1268   // Now before we return to java we must look for a current safepoint
  1269   // (a new safepoint can not start since we entered native_trans).
  1270   // We must check here because a current safepoint could be modifying
  1271   // the caller's registers right this moment.
  1273   // Acquire isn't strictly necessary here because of the fence, but
  1274   // sync_state is declared to be volatile, so we do it anyway.
  1275   __ load_const(sync_state_addr, SafepointSynchronize::address_of_state());
  1277   // TODO: PPC port: assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
  1278   __ lwz(sync_state, 0, sync_state_addr);
  1280   // TODO: PPC port: assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
  1281   __ lwz(suspend_flags, thread_(suspend_flags));
  1283   __ acquire();
  1285   Label sync_check_done;
  1286   Label do_safepoint;
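         // The two compares below branch to do_safepoint if a safepoint is in
         // progress (sync_state != _not_synchronized) or if this thread has
         // suspend flags set; only when neither holds do we skip the call to
         // check_special_condition_for_native_trans.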
  1287   // No synchronization in progress nor yet synchronized
  1288   __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
  1289   // not suspended
  1290   __ cmpwi(CCR1, suspend_flags, 0);
  1292   __ bne(CCR0, do_safepoint);
  1293   __ beq(CCR1, sync_check_done);
  1294   __ bind(do_safepoint);
  1295   // Block.  We do the call directly and leave the current
  1296   // last_Java_frame setup undisturbed.  We must save any possible
  1297   // native result across the call. No oop is present.
  1299   __ mr(R3_ARG1, R16_thread);
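         // Same ELFv1/ELFv2 distinction as at the signature handler call:
         // ELFv1 expects a FunctionDescriptor*, ELFv2 a plain code address.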
  1300 #if defined(ABI_ELFv2)
  1301   __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
  1302             relocInfo::none);
  1303 #else
  1304   __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
  1305             relocInfo::none);
  1306 #endif
  1307   __ bind(sync_check_done);
  1309   //=============================================================================
  1310   // <<<<<< Back in Interpreter Frame >>>>>
  1312   // We are in thread_in_native_trans here and back in the normal
  1313   // interpreter frame. We don't have to do anything special about
  1314   // safepoints and we can switch to Java mode anytime we are ready.
  1316   // Note: frame::interpreter_frame_result has a dependency on how the
  1317   // method result is saved across the call to post_method_exit. For
  1318   // native methods it assumes that the non-FPU/non-void result is
  1319   // saved in _native_lresult and a FPU result in _native_fresult. If
  1320   // this changes then the interpreter_frame_result implementation
  1321   // will need to be updated too.
  1323   // On PPC64, we have stored the result directly after the native call.
  1325   //=============================================================================
  1326   // back in Java
  1328   // We use release_store_fence to update values like the thread state, where
  1329   // we don't want the current thread to continue until all our prior memory
  1330   // accesses (including the new thread state) are visible to other threads.
  1331   __ li(R0/*thread_state*/, _thread_in_Java);
  1332   __ release();
  1333   __ stw(R0/*thread_state*/, thread_(thread_state));
  1334   if (UseMembar) {
  1335     __ fence();
  1338   __ reset_last_Java_frame();
  1340   // Reload R19_method, the call killed it. We can't look at
  1341   // state->_method until we're back in java state because in java
  1342   // state gc can't happen until we get to a safepoint.
  1343   //
  1344   // We've set thread_state to _thread_in_Java already, so restoring
  1345   // R19_method from R14_state works; R19_method is invalid, because
  1346   // GC may have happened.
  1347   __ ld(R19_method, state_(_method)); // reload method, may have moved
  1349   // jvmdi/jvmpi support. Whether we've got an exception pending or
  1350   // not, and whether unlocking throws an exception or not, we notify
  1351   // on native method exit. If we do have an exception, we'll end up
  1352   // in the caller's context to handle it, so if we don't do the
  1353   // notify here, we'll drop it on the floor.
  1355   __ notify_method_exit(true/*native method*/,
  1356                         ilgl /*illegal state (not used for native methods)*/);
  1360   //=============================================================================
  1361   // Handle exceptions
  1363   // See if we must unlock.
  1364   //
  1365   {
  1366     Label method_is_not_synced;
  1367     // is_synced is still alive
  1368     assert(is_synced->is_nonvolatile(), "is_synced must be non-volatile");
  1369     __ bfalse(is_synced, method_is_not_synced);
  1371     unlock_method();
  1373     __ bind(method_is_not_synced);
  1374   }
  1376   // Reset active handles after returning from native.
  1377   // thread->active_handles()->clear();
  1378   __ ld(active_handles, thread_(active_handles));
  1379   // JNIHandleBlock::_top is an int.
  1380   // TODO:  PPC port: assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
  1381   __ li(R0, 0);
  1382   __ stw(R0, JNIHandleBlock::top_offset_in_bytes(), active_handles);
  1384   Label no_pending_exception_from_native_method;
  1385   __ ld(R0/*pending_exception*/, thread_(pending_exception));
  1386   __ cmpdi(CCR0, R0/*pending_exception*/, 0);
  1387   __ beq(CCR0, no_pending_exception_from_native_method);
  1390   //-----------------------------------------------------------------------------
  1391   // An exception is pending. We call into the runtime only if the
  1392   // caller was not interpreted. If it was interpreted the
  1393   // interpreter will do the correct thing. If it isn't interpreted
  1394   // (call stub/compiled code) we will change our return and continue.
  1395   __ BIND(exception_return);
  1397   Label return_to_initial_caller_with_pending_exception;
  1398   __ cmpdi(CCR0, R15_prev_state, 0);
  1399   __ beq(CCR0, return_to_initial_caller_with_pending_exception);
  1401   // We are returning to an interpreter activation, just pop the state,
  1402   // pop our frame, leave the exception pending, and return.
  1403   __ pop_interpreter_state(/*prev_state_may_be_0=*/false);
  1404   __ pop_interpreter_frame(R11_scratch1, R12_scratch2, R21_tmp1 /* set to return pc */, R22_tmp2);
  1405   __ mtlr(R21_tmp1);
  1406   __ blr();
  1408   __ BIND(exception_return_sync_check);
  1410   assert(is_synced->is_nonvolatile(), "is_synced must be non-volatile");
  1411   __ bfalse(is_synced, exception_return);
  1412   unlock_method();
  1413   __ b(exception_return);
  1416   __ BIND(return_to_initial_caller_with_pending_exception);
  1417   // We are returning to a c2i-adapter / call-stub, get the address of the
  1418   // exception handler, pop the frame and return to the handler.
  1420   // First, pop to caller's frame.
  1421   __ pop_interpreter_frame(R11_scratch1, R12_scratch2, R21_tmp1  /* set to return pc */, R22_tmp2);
  1423   __ push_frame_reg_args(0, R11_scratch1);
  1424   // Get the address of the exception handler.
  1425   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
  1426                   R16_thread,
  1427                   R21_tmp1 /* return pc */);
  1428   __ pop_frame();
  1430   // Load the PC of the exception handler into LR.
  1431   __ mtlr(R3_RET);
  1433   // Load exception into R3_ARG1 and clear pending exception in thread.
  1434   __ ld(R3_ARG1/*exception*/, thread_(pending_exception));
  1435   __ li(R4_ARG2, 0);
  1436   __ std(R4_ARG2, thread_(pending_exception));
  1438   // Load the original return pc into R4_ARG2.
  1439   __ mr(R4_ARG2/*issuing_pc*/, R21_tmp1);
  1441   // Resize frame to get rid of a potential extension.
  1442   __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
  1444   // Return to exception handler.
  1445   __ blr();
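  // Illustration only: the exception path above in C++-like form (hedged
  // sketch; names map to the registers noted in the code):
  //
  //   address handler = SharedRuntime::exception_handler_for_return_address(thread, return_pc);
  //   R3_ARG1 = thread->pending_exception();   // exception oop
  //   thread->clear_pending_exception();
  //   R4_ARG2 = return_pc;                     // issuing pc
  //   goto handler;                            // branch via LR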
  1448   //-----------------------------------------------------------------------------
  1449   // No exception pending.
  1450   __ BIND(no_pending_exception_from_native_method);
  1452   // Move native method result back into proper registers and return.
  1453   // Invoke result handler (may unbox/promote).
  1454   __ ld(R3_RET, state_(_native_lresult));
  1455   __ lfd(F1_RET, state_(_native_fresult));
  1456   __ call_stub(result_handler_addr);
  1458   // We have created a new BytecodeInterpreter object, now we must destroy it.
  1459   //
  1460   // Restore previous R14_state and caller's SP.  R15_prev_state may
  1461   // be 0 here, because our caller may be the call_stub or compiled
  1462   // code.
  1463   __ pop_interpreter_state(/*prev_state_may_be_0=*/true);
  1464   __ pop_interpreter_frame(R11_scratch1, R12_scratch2, R21_tmp1 /* set to return pc */, R22_tmp2);
  1465   // Resize frame to get rid of a potential extension.
  1466   __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
  1468   // Must use the return pc which was loaded from the caller's frame
  1469   // as the VM uses return-pc-patching for deoptimization.
  1470   __ mtlr(R21_tmp1);
  1471   __ blr();
  1475   //=============================================================================
  1476   // We encountered an exception while computing the interpreter
  1477   // state, so R14_state isn't valid. Act as if we just returned from
  1478   // the callee method with a pending exception.
  1479   __ BIND(stack_overflow_return);
  1481   //
  1482   // Register state:
  1483   //   R14_state         invalid; trashed by compute_interpreter_state
  1484   //   R15_prev_state    valid, but may be 0
  1485   //
  1486   //   R1_SP             valid, points to caller's SP; wasn't yet updated by
  1487   //                     compute_interpreter_state
  1488   //
  1490   // Create exception oop and make it pending.
  1492   // Throw the exception via RuntimeStub "throw_StackOverflowError_entry".
  1493   //
  1494   // Previously, we called C-Code directly. As a consequence, a
  1495   // possible GC tried to process the argument oops of the top frame
  1496   // (see RegisterMap::clear, which sets the corresponding flag to
  1497   // true). This led to crashes because:
  1498   //   1. The top register map did not contain locations for the argument registers.
  1499   //   2. The arguments are dead anyway and may already have been overwritten in the worst case.
  1500   // Solution: Call via a special runtime stub that pushes its own
  1501   // frame. This runtime stub has the flag "CodeBlob::caller_must_gc_arguments()"
  1502   // set to "false", which prevents the dead arguments from being GC'd.
  1503   //
  1504   // 2 cases exist:
  1505   // 1. We were called by the c2i adapter / call stub
  1506   // 2. We were called by the frame manager
  1507   //
  1508   // Both cases are handled by this code:
  1509   // 1. - initial_caller_sp was saved in both cases on entry, so it's safe to load it back even if it was not changed.
  1510   //    - control flow will be:
  1511   //      throw_stackoverflow_stub->VM->throw_stackoverflow_stub->forward_excep->excp_blob of caller method
  1512   // 2. - control flow will be:
  1513   //      throw_stackoverflow_stub->VM->throw_stackoverflow_stub->forward_excep->rethrow_excp_entry of frame manager->resume_method
  1514   //      Since we restored the caller SP above, the rethrow_excp_entry can restore the original interpreter state
  1515   //      registers using the stack and resume the calling method with a pending excp.
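  // Illustration only: the code below is, in effect (hedged sketch;
  // `restore_lr_from_top_abi' and `resize_to_initial_caller' are just
  // shorthand for the instructions that follow, not real helpers):
  //
  //   restore_lr_from_top_abi();
  //   resize_to_initial_caller();
  //   goto StubRoutines::throw_StackOverflowError_entry();  // stub pushes its own frame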
  1517   // Pop any c2i extension from the stack, restore LR just to be sure
  1518   __ ld(R0, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
  1519   __ mtlr(R0);
  1520   // Resize frame to get rid of a potential extension.
  1521   __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
  1523   assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order");
  1524   // Load target address of the runtime stub.
  1525   __ load_const(R12_scratch2, (StubRoutines::throw_StackOverflowError_entry()));
  1526   __ mtctr(R12_scratch2);
  1527   __ bctr();
  1530   //=============================================================================
  1531   // Counter overflow.
  1533   if (inc_counter) {
  1534     // Handle invocation counter overflow
  1535     __ bind(invocation_counter_overflow);
  1537     generate_counter_overflow(continue_after_compile);
  1538   }
  1540   native_entry = entry;
  1541   return entry;
  1542 }
  1544 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  1545   // No special entry points that preclude compilation.
  1546   return true;
  1547 }
  1549 // Unlock the current method.
  1550 //
  1551 void CppInterpreterGenerator::unlock_method(void) {
  1552   // Find preallocated monitor and unlock method. Method monitor is
  1553   // the first one.
  1555   // Registers alive
  1556   //   R14_state
  1557   //
  1558   // Registers updated
  1559   //   volatiles
  1560   //
  1561   const Register monitor = R4_ARG2;
  1563   // Pass address of initial monitor we allocated.
  1564   //
  1565   // First monitor.
  1566   __ addi(monitor, R14_state, -frame::interpreter_frame_monitor_size_in_bytes());
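  // Illustration only: the address computation above in C++-like form (hedged
  // sketch; `istate' stands for the BytecodeInterpreter in R14_state):
  //
  //   BasicObjectLock* monitor =
  //     (BasicObjectLock*)((address)istate - frame::interpreter_frame_monitor_size_in_bytes());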
  1568   // Unlock method
  1569   __ unlock_object(monitor);
  1570 }
  1572 // Lock the current method.
  1573 //
  1574 void CppInterpreterGenerator::lock_method(void) {
  1575   // Find preallocated monitor and lock method. Method monitor is the
  1576   // first one.
  1578   //
  1579   // Registers alive
  1580   //   R14_state
  1581   //
  1582   // Registers updated
  1583   //   volatiles
  1584   //
  1586   const Register monitor = R4_ARG2;
  1587   const Register object  = R5_ARG3;
  1589   // Pass address of initial monitor we allocated.
  1590   __ addi(monitor, R14_state, -frame::interpreter_frame_monitor_size_in_bytes());
  1592   // Pass object address.
  1593   __ ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor);
  1595   // Lock method.
  1596   __ lock_object(monitor, object);
  1597 }
  1599 // Generate code for handling resuming a deopted method.
  1600 void CppInterpreterGenerator::generate_deopt_handling(Register result_index) {
  1602   //=============================================================================
  1603   // Returning from a compiled method into a deopted method. The
  1604   // bytecode at the bcp has completed. The result of the bytecode is
  1605   // in the native abi (the tosca for the template based
  1606   // interpreter). Any stack space that was used by the bytecode that
  1607   // has completed has been removed (e.g. parameters for an invoke) so
  1608   // all that we have to do is place any pending result on the
  1609   // expression stack and resume execution on the next bytecode.
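  // Illustration only: each of the typed entry points generated below behaves
  // roughly like (hedged sketch):
  //
  //   result_index = AbstractInterpreter::BasicType_as_index(T_<type>);
  //   goto return_from_deopt_common;   // T_VOID falls through as the last entry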
  1611   Label return_from_deopt_common;
  1613   // R3_RET and F1_RET are live here! Load the array index of the
  1614   // required result stub address and continue at return_from_deopt_common.
  1616   // Deopt needs to jump to here to enter the interpreter (return a result).
  1617   deopt_frame_manager_return_atos = __ pc();
  1618   __ li(result_index, AbstractInterpreter::BasicType_as_index(T_OBJECT));
  1619   __ b(return_from_deopt_common);
  1621   deopt_frame_manager_return_btos = __ pc();
  1622   __ li(result_index, AbstractInterpreter::BasicType_as_index(T_BOOLEAN));
  1623   __ b(return_from_deopt_common);
  1625   deopt_frame_manager_return_itos = __ pc();
  1626   __ li(result_index, AbstractInterpreter::BasicType_as_index(T_INT));
  1627   __ b(return_from_deopt_common);
  1629   deopt_frame_manager_return_ltos = __ pc();
  1630   __ li(result_index, AbstractInterpreter::BasicType_as_index(T_LONG));
  1631   __ b(return_from_deopt_common);
  1633   deopt_frame_manager_return_ftos = __ pc();
  1634   __ li(result_index, AbstractInterpreter::BasicType_as_index(T_FLOAT));
  1635   __ b(return_from_deopt_common);
  1637   deopt_frame_manager_return_dtos = __ pc();
  1638   __ li(result_index, AbstractInterpreter::BasicType_as_index(T_DOUBLE));
  1639   __ b(return_from_deopt_common);
  1641   deopt_frame_manager_return_vtos = __ pc();
  1642   __ li(result_index, AbstractInterpreter::BasicType_as_index(T_VOID));
  1643   // Last one, fall-through to return_from_deopt_common.
  1645   // Deopt return common. An index is present that lets us move any
  1646   // possible result being returned to the interpreter's stack.
  1647   //
  1648   __ BIND(return_from_deopt_common);
  1649 }
  1652 // Generate the code to handle a more_monitors message from the c++ interpreter.
  1653 void CppInterpreterGenerator::generate_more_monitors() {
  1655   //
  1656   // Registers alive
  1657   //   R16_thread      - JavaThread*
  1658   //   R15_prev_state  - previous BytecodeInterpreter or 0
  1659   //   R14_state       - BytecodeInterpreter* address of receiver's interpreter state
  1660   //   R1_SP           - old stack pointer
  1661   //
  1662   // Registers updated
  1663   //   R1_SP          - new stack pointer
  1664   //
  1666   // Very-local scratch registers.
  1667   const Register old_tos         = R21_tmp1;
  1668   const Register new_tos         = R22_tmp2;
  1669   const Register stack_base      = R23_tmp3;
  1670   const Register stack_limit     = R24_tmp4;
  1671   const Register slot            = R25_tmp5;
  1672   const Register n_slots         = R25_tmp5;
  1674   // Interpreter state fields.
  1675   const Register msg             = R24_tmp4;
  1677   // Load up relevant interpreter state.
  1679   __ ld(stack_base, state_(_stack_base));                // Old stack_base
  1680   __ ld(old_tos, state_(_stack));                        // Old tos
  1681   __ ld(stack_limit, state_(_stack_limit));              // Old stack_limit
  1683   // extracted monitor_size
  1684   int monitor_size = frame::interpreter_frame_monitor_size_in_bytes();
  1685   assert(Assembler::is_aligned((unsigned int)monitor_size,
  1686                                (unsigned int)frame::alignment_in_bytes),
  1687          "size of a monitor must respect alignment of SP");
  1689   // Save and restore top LR
  1690   __ ld(R12_scratch2, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
  1691   __ resize_frame(-monitor_size, R11_scratch1);// Allocate space for new monitor
  1692   __ std(R12_scratch2, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
  1693   // Initial_caller_sp is used as unextended_sp for non-initial callers.
  1694   __ std(R1_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
  1695   __ addi(stack_base, stack_base, -monitor_size);        // New stack_base
  1696   __ addi(new_tos, old_tos, -monitor_size);              // New tos
  1697   __ addi(stack_limit, stack_limit, -monitor_size);      // New stack_limit
  1699   __ std(R1_SP, state_(_last_Java_sp));                  // Update frame_bottom
  1701   __ std(stack_base, state_(_stack_base));               // Update stack_base
  1702   __ std(new_tos, state_(_stack));                       // Update tos
  1703   __ std(stack_limit, state_(_stack_limit));             // Update stack_limit
  1705   __ li(msg, BytecodeInterpreter::got_monitors);         // Tell interpreter we allocated the lock
  1706   __ stw(msg, state_(_msg));
  1708   // Shuffle expression stack down. Recall that stack_base points
  1709   // just above the new expression stack bottom. Old_tos and new_tos
  1710   // are used to scan through the old and new expression stacks.
  1712   Label copy_slot, copy_slot_finished;
  1713   __ sub(n_slots, stack_base, new_tos);
  1714   __ srdi_(n_slots, n_slots, LogBytesPerWord);           // compute number of slots to copy
  1715   assert(LogBytesPerWord == 3, "conflicts assembler instructions");
  1716   __ beq(CCR0, copy_slot_finished);                       // nothing to copy
  1718   __ mtctr(n_slots);
  1720   // loop
  1721   __ bind(copy_slot);
  1722   __ ldu(slot, BytesPerWord, old_tos);                   // slot = *++old_tos;
  1723   __ stdu(slot, BytesPerWord, new_tos);                  // *++new_tos = slot;
  1724   __ bdnz(copy_slot);
  1726   __ bind(copy_slot_finished);
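  // Illustration only: the copy loop above in C-like form (hedged sketch, with
  // old_tos/new_tos viewed as intptr_t*):
  //
  //   for (int i = 0; i < n_slots; i++) {
  //     *++new_tos = *++old_tos;   // ldu/stdu: pre-increment, then access
  //   }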
  1728   // Restart interpreter
  1729   __ li(R0, 0);
  1730   __ std(R0, BasicObjectLock::obj_offset_in_bytes(), stack_base);  // Mark lock as unused
  1731 }
  1733 address CppInterpreterGenerator::generate_normal_entry(void) {
  1734   if (interpreter_frame_manager != NULL) return interpreter_frame_manager;
  1736   address entry = __ pc();
  1738   address return_from_native_pc = (address) NULL;
  1740   // Initial entry to frame manager (from call_stub or c2i_adapter)
  1742   //
  1743   // Registers alive
  1744   //   R16_thread               - JavaThread*
  1745   //   R19_method               - callee's Method (method to be invoked)
  1746   //   R17_tos                  - address of sender tos (prepushed)
  1747   //   R1_SP                    - SP prepared by call stub such that caller's outgoing args are near top
  1748   //   LR                       - return address to caller (call_stub or c2i_adapter)
  1749   //   R21_sender_SP            - initial caller sp
  1750   //
  1751   // Registers updated
  1752   //   R15_prev_state           - 0
  1753   //
  1754   // Stack layout at this point:
  1755   //
  1756   //   0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
  1757   //           alignment (optional)
  1758   //           [outgoing Java arguments]     <-- R17_tos
  1759   //           ...
  1760   //   PARENT  [PARENT_IJAVA_FRAME_ABI]
  1761   //           ...
  1762   //
  1764   // Save initial_caller_sp to caller's abi.
  1765   // The caller frame must be resized before returning to get rid of
  1766   // the c2i part on top of the calling compiled frame (if any).
  1767   // R21_tmp1 must match sender_sp in gen_c2i_adapter.
  1768   // Now override the saved SP with the senderSP so we can pop c2i
  1769   // arguments (if any) off when we return.
  1770   __ std(R21_sender_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
  1772   // Save LR to caller's frame. We don't use _abi(lr) here,
  1773   // because it is not safe.
  1774   __ mflr(R0);
  1775   __ std(R0, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
  1777   // If we come here, it is the first invocation of the frame manager.
  1778   // So there is no previous interpreter state.
  1779   __ li(R15_prev_state, 0);
  1782   // Fall through to where "recursive" invocations go.
  1784   //=============================================================================
  1785   // Dispatch an instance of the interpreter. Recursive activations
  1786   // come here.
  1788   Label re_dispatch;
  1789   __ BIND(re_dispatch);
  1791   //
  1792   // Registers alive
  1793   //    R16_thread        - JavaThread*
  1794   //    R19_method        - callee's Method
  1795   //    R17_tos           - address of caller's tos (prepushed)
  1796   //    R15_prev_state    - address of caller's BytecodeInterpreter or 0
  1797   //    R1_SP             - caller's SP trimmed such that caller's outgoing args are near top.
  1798   //
  1799   // Stack layout at this point:
  1800   //
  1801   //   0       [TOP_IJAVA_FRAME_ABI]
  1802   //           alignment (optional)
  1803   //           [outgoing Java arguments]
  1804   //           ...
  1805   //   PARENT  [PARENT_IJAVA_FRAME_ABI]
  1806   //           ...
  1808   // fall through to interpreted execution
  1810   //=============================================================================
  1811   // Allocate a new Java frame and initialize the new interpreter state.
  1813   Label stack_overflow_return;
  1815   // Create a suitable new Java frame plus a new BytecodeInterpreter instance
  1816   // in the current (frame manager's) C frame.
  1817   generate_compute_interpreter_state(stack_overflow_return);
  1819   // fall through
  1821   //=============================================================================
  1822   // Interpreter dispatch.
  1824   Label call_interpreter;
  1825   __ BIND(call_interpreter);
  1827   //
  1828   // Registers alive
  1829   //   R16_thread       - JavaThread*
  1830   //   R15_prev_state   - previous BytecodeInterpreter or 0
  1831   //   R14_state        - address of receiver's BytecodeInterpreter
  1832   //   R1_SP            - receiver's stack pointer
  1833   //
  1835   // Thread fields.
  1836   const Register pending_exception = R21_tmp1;
  1838   // Interpreter state fields.
  1839   const Register msg               = R24_tmp4;
  1841   // MethodOop fields.
  1842   const Register parameter_count   = R25_tmp5;
  1843   const Register result_index      = R26_tmp6;
  1845   const Register dummy             = R28_tmp8;
  1847   // Address of various interpreter stubs.
  1848   // R29_tmp9 is reserved.
  1849   const Register stub_addr         = R27_tmp7;
  1851   // Uncommon trap needs to jump to here to enter the interpreter
  1852   // (re-execute current bytecode).
  1853   unctrap_frame_manager_entry  = __ pc();
  1855   // If we are profiling, store our fp (BSP) in the thread so we can
  1856   // find it during a tick.
  1857   if (Arguments::has_profile()) {
  1858     // On PPC64 we store the pointer to the current BytecodeInterpreter,
  1859     // instead of the bsp of ia64. This should suffice to be able to
  1860     // find all interesting information.
  1861     __ std(R14_state, thread_(last_interpreter_fp));
  1862   }
  1864   // R16_thread, R14_state and R15_prev_state are nonvolatile
  1865   // registers. There is no need to save these. If we needed to save
  1866   // some state in the current Java frame, this could be a place to do
  1867   // so.
  1869   // Call Java bytecode dispatcher passing "BytecodeInterpreter* istate".
  1870   __ call_VM_leaf(CAST_FROM_FN_PTR(address,
  1871                                    JvmtiExport::can_post_interpreter_events()
  1872                                    ? BytecodeInterpreter::runWithChecks
  1873                                    : BytecodeInterpreter::run),
  1874                   R14_state);
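  // Illustration only: the call just emitted corresponds to (hedged sketch;
  // `istate' stands for the BytecodeInterpreter passed in R14_state; note the
  // JVMTI capability check happens at code-generation time, so only one of
  // the two callees is actually wired in):
  //
  //   if (JvmtiExport::can_post_interpreter_events())
  //     BytecodeInterpreter::runWithChecks(istate);
  //   else
  //     BytecodeInterpreter::run(istate);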
  1876   interpreter_return_address  = __ last_calls_return_pc();
  1878   // R16_thread, R14_state and R15_prev_state have their values preserved.
  1880   // If we are profiling, clear the fp in the thread to tell
  1881   // the profiler that we are no longer in the interpreter.
  1882   if (Arguments::has_profile()) {
  1883     __ li(R11_scratch1, 0);
  1884     __ std(R11_scratch1, thread_(last_interpreter_fp));
  1885   }
  1887   // Load message from bytecode dispatcher.
  1888   // TODO: PPC port: guarantee(4 == BytecodeInterpreter::sz_msg(), "unexpected field size");
  1889   __ lwz(msg, state_(_msg));
  1892   Label more_monitors;
  1893   Label return_from_native;
  1894   Label return_from_native_common;
  1895   Label return_from_native_no_exception;
  1896   Label return_from_interpreted_method;
  1897   Label return_from_recursive_activation;
  1898   Label unwind_recursive_activation;
  1899   Label resume_interpreter;
  1900   Label return_to_initial_caller;
  1901   Label unwind_initial_activation;
  1902   Label unwind_initial_activation_pending_exception;
  1903   Label call_method;
  1904   Label call_special;
  1905   Label retry_method;
  1906   Label retry_method_osr;
  1907   Label popping_frame;
  1908   Label throwing_exception;
  1910   // Branch according to the received message
  1912   __ cmpwi(CCR1, msg, BytecodeInterpreter::call_method);
  1913   __ cmpwi(CCR2, msg, BytecodeInterpreter::return_from_method);
  1915   __ beq(CCR1, call_method);
  1916   __ beq(CCR2, return_from_interpreted_method);
  1918   __ cmpwi(CCR3, msg, BytecodeInterpreter::more_monitors);
  1919   __ cmpwi(CCR4, msg, BytecodeInterpreter::throwing_exception);
  1921   __ beq(CCR3, more_monitors);
  1922   __ beq(CCR4, throwing_exception);
  1924   __ cmpwi(CCR5, msg, BytecodeInterpreter::popping_frame);
  1925   __ cmpwi(CCR6, msg, BytecodeInterpreter::do_osr);
  1927   __ beq(CCR5, popping_frame);
  1928   __ beq(CCR6, retry_method_osr);
  1930   __ stop("bad message from interpreter");
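  // Illustration only: the branch cascade above as a C++-like switch (hedged
  // sketch; `istate' stands for the BytecodeInterpreter in R14_state):
  //
  //   switch (istate->_msg) {
  //     case BytecodeInterpreter::call_method:        goto call_method;
  //     case BytecodeInterpreter::return_from_method: goto return_from_interpreted_method;
  //     case BytecodeInterpreter::more_monitors:      goto more_monitors;
  //     case BytecodeInterpreter::throwing_exception: goto throwing_exception;
  //     case BytecodeInterpreter::popping_frame:      goto popping_frame;
  //     case BytecodeInterpreter::do_osr:             goto retry_method_osr;
  //     default:                                      ShouldNotReachHere();
  //   }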
  1933   //=============================================================================
  1934   // Add a monitor just below the existing one(s). State->_stack_base
  1935   // points to the lowest existing one, so we insert the new one just
  1936   // below it and shuffle the expression stack down. Ref. the above
  1937   // stack layout picture, we must update _stack_base, _stack, _stack_limit
  1938   // and _last_Java_sp in the interpreter state.
  1940   __ BIND(more_monitors);
  1942   generate_more_monitors();
  1943   __ b(call_interpreter);
  1945   generate_deopt_handling(result_index);
  1947   // Restoring the R14_state is already done by the deopt_blob.
  1949   // Current tos includes no parameter slots.
  1950   __ ld(R17_tos, state_(_stack));
  1951   __ li(msg, BytecodeInterpreter::deopt_resume);
  1952   __ b(return_from_native_common);
  1954   // We are sent here when we are unwinding from a native method or
  1955   // adapter with an exception pending. We need to notify the interpreter
  1956   // that there is an exception to process.
  1957   // We arrive here also if the frame manager called an (interpreted) target
  1958   // which returns with a StackOverflow exception.
  1959   // The control flow in this case is:
  1960   // frame_manager->throw_excp_stub->forward_excp->rethrow_excp_entry
  1962   AbstractInterpreter::_rethrow_exception_entry = __ pc();
  1964   // Restore R14_state.
  1965   __ ld(R14_state, 0, R1_SP);
  1966   __ addi(R14_state, R14_state,
  1967               -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
  1969   // Store exception oop into thread object.
  1970   __ std(R3_RET, thread_(pending_exception));
  1971   __ li(msg, BytecodeInterpreter::method_resume /*rethrow_exception*/);
  1972   //
  1973   // NOTE: the interpreter frame as set up by deopt does NOT include
  1974   // any parameter slots (good thing since we have no callee here
  1975   // and couldn't remove them) so we don't have to do any calculations
  1976   // here to figure it out.
  1977   //
  1978   __ ld(R17_tos, state_(_stack));
  1979   __ b(return_from_native_common);
  1982   //=============================================================================
  1983   // Returning from a native method.  Result is in the native abi
  1984   // location so we must move it to the java expression stack.
  1986   __ BIND(return_from_native);
  1987   guarantee(return_from_native_pc == (address) NULL, "precondition");
  1988   return_from_native_pc = __ pc();
  1990   // Restore R14_state.
  1991   __ ld(R14_state, 0, R1_SP);
  1992   __ addi(R14_state, R14_state, -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
  1994   //
  1995   // Registers alive
  1996   //   R16_thread
  1997   //   R14_state    - address of caller's BytecodeInterpreter.
  1998   //   R3_RET       - integer result, if any.
  1999   //   F1_RET       - float result, if any.
  2000   //
  2001   // Registers updated
  2002   //   R19_method   - callee's Method
  2003   //   R17_tos      - caller's tos, with outgoing args popped
  2004   //   result_index - index of result handler.
  2005   //   msg          - message for resuming interpreter.
  2006   //
  2008   // Very-local scratch registers.
  2010   const ConditionRegister have_pending_exception = CCR0;
  2012   // Load callee Method, gc may have moved it.
  2013   __ ld(R19_method, state_(_result._to_call._callee));
  2015   // Load address of caller's tos; it includes the parameter slots.
  2016   __ ld(R17_tos, state_(_stack));
  2018   // Pop callee's parameters.
  2020   __ ld(parameter_count, in_bytes(Method::const_offset()), R19_method);
  2021   __ lhz(parameter_count, in_bytes(ConstMethod::size_of_parameters_offset()), parameter_count);
  2022   __ sldi(parameter_count, parameter_count, Interpreter::logStackElementSize);
  2023   __ add(R17_tos, R17_tos, parameter_count);
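  // Illustration only: the three instructions above in C++-like form (hedged
  // sketch; `callee' stands for the Method in R19_method):
  //
  //   int params = callee->constMethod()->size_of_parameters();
  //   R17_tos += params * Interpreter::stackElementSize;   // pop outgoing args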
  2025   // Result stub address array index
  2026   // TODO: PPC port: assert(4 == methodOopDesc::sz_result_index(), "unexpected field size");
  2027   __ lwa(result_index, method_(result_index));
  2029   __ li(msg, BytecodeInterpreter::method_resume);
  2031   //
  2032   // Registers alive
  2033   //   R16_thread
  2034   //   R14_state    - address of caller's BytecodeInterpreter.
  2035   //   R17_tos      - address of caller's tos with outgoing args already popped
  2036   //   R3_RET       - integer return value, if any.
  2037   //   F1_RET       - float return value, if any.
  2038   //   result_index - index of result handler.
  2039   //   msg          - message for resuming interpreter.
  2040   //
  2041   // Registers updated
  2042   //   R3_RET       - new address of caller's tos, including result, if any
  2043   //
  2045   __ BIND(return_from_native_common);
  2047   // Check for pending exception
  2048   __ ld(pending_exception, thread_(pending_exception));
  2049   __ cmpdi(CCR0, pending_exception, 0);
  2050   __ beq(CCR0, return_from_native_no_exception);
  2052   // If there's a pending exception, we really have no result, so
  2053   // R3_RET is dead. Resume_interpreter assumes the new tos is in
  2054   // R3_RET.
  2055   __ mr(R3_RET, R17_tos);
  2056   // `resume_interpreter' expects R15_prev_state to be alive.
  2057   __ ld(R15_prev_state, state_(_prev_link));
  2058   __ b(resume_interpreter);
  2060   __ BIND(return_from_native_no_exception);
  2062   // No pending exception, copy method result from native ABI register
  2063   // to tos.
  2065   // Address of stub descriptor address array.
  2066   __ load_const(stub_addr, CppInterpreter::tosca_result_to_stack());
  2068   // Pass address of tos to stub.
  2069   __ mr(R4_ARG2, R17_tos);
  2071   // Address of stub descriptor address.
  2072   __ sldi(result_index, result_index, LogBytesPerWord);
  2073   __ add(stub_addr, stub_addr, result_index);
  2075   // Stub descriptor address.
  2076   __ ld(stub_addr, 0, stub_addr);
  2078   // TODO: don't do this via a call, do it in place!
  2079   //
  2080   // call stub via descriptor
  2081   // in R3_ARG1/F1_ARG1: result value (R3_RET or F1_RET)
  2082   __ call_stub(stub_addr);
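  // Illustration only: what the stub dispatch above amounts to (hedged sketch):
  //
  //   address stub = ((address*)CppInterpreter::tosca_result_to_stack())[result_index];
  //   // The stub copies R3_RET/F1_RET onto the expression stack at R4_ARG2 (tos)
  //   // and returns the new tos in R3_RET.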
  2084   // new tos = result of call in R3_RET
  2086   // `resume_interpreter' expects R15_prev_state to be alive.
  2087   __ ld(R15_prev_state, state_(_prev_link));
  2088   __ b(resume_interpreter);
  2090   //=============================================================================
  2091   // We encountered an exception while computing the interpreter
  2092   // state, so R14_state isn't valid. Act as if we just returned from
  2093   // the callee method with a pending exception.
  2094   __ BIND(stack_overflow_return);
  2096   //
  2097   // Registers alive
  2098   //   R16_thread        - JavaThread*
  2099   //   R1_SP             - old stack pointer
  2100   //   R19_method        - callee's Method
  2101   //   R17_tos           - address of caller's tos (prepushed)
  2102   //   R15_prev_state    - address of caller's BytecodeInterpreter or 0
  2103   //   R18_locals        - address of callee's locals array
  2104   //
  2105   // Registers updated
  2106   //   R3_RET           - address of resuming tos, if recursive unwind
  2108   Label Lskip_unextend_SP;
  2111   const ConditionRegister is_initial_call = CCR0;
  2112   const Register tos_save = R21_tmp1;
  2113   const Register tmp = R22_tmp2;
  2115   assert(tos_save->is_nonvolatile(), "need a nonvolatile");
  2117   // Is the exception thrown in the initial Java frame of this frame
  2118   // manager frame?
  2119   __ cmpdi(is_initial_call, R15_prev_state, 0);
  2120   __ bne(is_initial_call, Lskip_unextend_SP);
  2122   // Pop any c2i extension from the stack. This is necessary in the
  2123   // non-recursive case (that is, we were called by the c2i adapter,
  2124   // meaning we have no prev state). In this case we entered the frame
  2125   // manager through a special entry which pushes the original
  2126   // unextended SP to the stack. Here we load it back.
  2127   __ ld(R0, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
  2128   __ mtlr(R0);
  2129   // Resize frame to get rid of a potential extension.
  2130   __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
  2132   // Fall through
  2134   __ bind(Lskip_unextend_SP);
  2136   // Throw the exception via RuntimeStub "throw_StackOverflowError_entry".
  2137   //
  2138   // Previously, we called C-Code directly. As a consequence, a
  2139   // possible GC tried to process the argument oops of the top frame
  2140   // (see RegisterMap::clear, which sets the corresponding flag to
  2141   // true). This led to crashes because:
  2142   // 1. The top register map did not contain locations for the argument registers.
  2143   // 2. The arguments are dead anyway and may already have been overwritten in the worst case.
  2144   // Solution: Call via a special runtime stub that pushes its own frame. This runtime stub has the flag
  2145   // "CodeBlob::caller_must_gc_arguments()" set to "false", which prevents the dead arguments from being GC'd.
  2146   //
  2147   // 2 cases exist:
  2148   // 1. We were called by the c2i adapter / call stub
  2149   // 2. We were called by the frame manager
  2150   //
  2151   // Both cases are handled by this code:
  2152   // 1. - initial_caller_sp was saved on stack => Load it back and we're ok
  2153   //    - control flow will be:
  2154   //      throw_stackoverflow_stub->VM->throw_stackoverflow_stub->forward_excep->excp_blob of calling method
  2155   // 2. - control flow will be:
  2156   //      throw_stackoverflow_stub->VM->throw_stackoverflow_stub->forward_excep->
  2157   //        ->rethrow_excp_entry of frame manager->resume_method
  2158   //      Since we restored the caller SP above, the rethrow_excp_entry can restore the original interpreter state
  2159   //      registers using the stack and resume the calling method with a pending excp.
  2161   assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order");
  2162   __ load_const(R3_ARG1, (StubRoutines::throw_StackOverflowError_entry()));
  2163   __ mtctr(R3_ARG1);
  2164   __ bctr();
  2166   //=============================================================================
  2167   // We have popped a frame from an interpreted call. We are assured
  2168   // of returning to an interpreted call by the popframe abi. We have
  2169   // no return value; all we have to do is pop the current frame and
  2170   // then make sure that the top of stack (of the caller) gets set to
  2171   // where it was when we entered the callee (i.e. the args are still
  2172   // in place).  Or we are returning to the interpreter. In the first
  2173   // case we must extract result (if any) from the java expression
  2174   // stack and store it in the location the native abi would expect
  2175   // for a call returning this type. In the second case we must simply
  2176   // do a stack to stack move as we unwind.
  2178   __ BIND(popping_frame);
  2180   // Registers alive
  2181   //   R14_state
  2182   //   R15_prev_state
  2183   //   R17_tos
  2184   //
  2185   // Registers updated
  2186   //   R19_method
  2187   //   R3_RET
  2188   //   msg
  2190     Label L;
  2192     // Reload callee method, gc may have moved it.
  2193     __ ld(R19_method, state_(_method));
  2195     // We may be returning to a deoptimized frame in which case the
  2196     // usual assumption of a recursive return is not true.
  2198     // not equal = is recursive call
  2199     __ cmpdi(CCR0, R15_prev_state, 0);
  2201     __ bne(CCR0, L);
  2203     // Pop_frame capability.
  2204     // The pop_frame api says that the underlying frame is a Java frame; in this case
  2205     // (prev_state==null) it must be a compiled frame:
  2206     //
  2207     // Stack at this point: I, C2I + C, ...
  2208     //
  2209     // The outgoing arguments of the call have just been copied (popframe_preserve_args).
  2210     // By the pop_frame api, we must end up in an interpreted frame. So the compiled frame
  2211     // will be deoptimized. Deoptimization will restore the outgoing arguments from
  2212     // popframe_preserve_args, adjust the tos such that it includes the popframe_preserve_args,
  2213     // and adjust the bci such that the call will be executed again.
  2214     // We have no results, just pop the interpreter frame, resize the compiled frame to get rid
  2215     // of the c2i extension and return to the deopt_handler.
  2216     __ b(unwind_initial_activation);
  2218     // is recursive call
  2219     __ bind(L);
  2221     // Resume_interpreter expects the original tos in R3_RET.
  2222     __ ld(R3_RET, prev_state_(_stack));
  2224     // We're done.
  2225     __ li(msg, BytecodeInterpreter::popping_frame);
  2227     __ b(unwind_recursive_activation);
  2231   //=============================================================================
  2233   // We have finished an interpreted call. We are either returning to
  2234   // native (call_stub/c2) or we are returning to the interpreter.
  2235   // When returning to native, we must extract the result (if any)
  2236   // from the java expression stack and store it in the location the
  2237   // native abi expects. When returning to the interpreter we must
  2238   // simply do a stack to stack move as we unwind.
  2240   __ BIND(return_from_interpreted_method);
  2242   //
  2243   // Registers alive
  2244   //   R16_thread     - JavaThread*
  2245   //   R15_prev_state - address of caller's BytecodeInterpreter or 0
  2246   //   R14_state      - address of callee's interpreter state
  2247   //   R1_SP          - callee's stack pointer
  2248   //
  2249   // Registers updated
  2250   //   R19_method     - callee's method
  2251   //   R3_RET         - address of result (new caller's tos),
  2252   //
  2253   // if returning to interpreted
  2254   //   msg  - message for interpreter,
  2255   // if returning to interpreted
  2256   //
  2258   // Check if this is the initial invocation of the frame manager.
  2259   // If so, R15_prev_state will be null.
  2260   __ cmpdi(CCR0, R15_prev_state, 0);
  2262   // Reload callee method, gc may have moved it.
  2263   __ ld(R19_method, state_(_method));
  2265   // Load the method's result type.
  2266   __ lwz(result_index, method_(result_index));
  2268   // Go to return_to_initial_caller if R15_prev_state is null.
  2269   __ beq(CCR0, return_to_initial_caller);
  2271   // Copy callee's result to caller's expression stack via inline stack-to-stack
  2272   // converters.
  2274     Register new_tos   = R3_RET;
  2275     Register from_temp = R4_ARG2;
  2276     Register from      = R5_ARG3;
  2277     Register tos       = R6_ARG4;
  2278     Register tmp1      = R7_ARG5;
  2279     Register tmp2      = R8_ARG6;
  2281     ConditionRegister result_type_is_void   = CCR1;
  2282     ConditionRegister result_type_is_long   = CCR2;
  2283     ConditionRegister result_type_is_double = CCR3;
  2285     Label stack_to_stack_void;
  2286     Label stack_to_stack_double_slot; // T_LONG, T_DOUBLE
  2287     Label stack_to_stack_single_slot; // T_BOOLEAN, T_BYTE, T_CHAR, T_SHORT, T_INT, T_FLOAT, T_OBJECT
  2288     Label stack_to_stack_done;
  2290     // Pass callee's address of tos + BytesPerWord
  2291     __ ld(from_temp, state_(_stack));
  2293     // result type: void
  2294     __ cmpwi(result_type_is_void, result_index, AbstractInterpreter::BasicType_as_index(T_VOID));
  2296     // Pass caller's tos == callee's locals address
  2297     __ ld(tos, state_(_locals));
  2299     // result type: long
  2300     __ cmpwi(result_type_is_long, result_index, AbstractInterpreter::BasicType_as_index(T_LONG));
  2302     __ addi(from, from_temp, Interpreter::stackElementSize);
  2304     // !! don't branch above this line !!
  2306     // handle void
  2307     __ beq(result_type_is_void,   stack_to_stack_void);
  2309     // result type: double
  2310     __ cmpwi(result_type_is_double, result_index, AbstractInterpreter::BasicType_as_index(T_DOUBLE));
  2312     // handle long or double
  2313     __ beq(result_type_is_long, stack_to_stack_double_slot);
  2314     __ beq(result_type_is_double, stack_to_stack_double_slot);
  2316     // fall through to single slot types (incl. object)
  2319       __ BIND(stack_to_stack_single_slot);
  2320       // T_BOOLEAN, T_BYTE, T_CHAR, T_SHORT, T_INT, T_FLOAT, T_OBJECT
  2322       __ ld(tmp1, 0, from);
  2323       __ std(tmp1, 0, tos);
  2324       // New expression stack top
  2325       __ addi(new_tos, tos, - BytesPerWord);
  2327       __ b(stack_to_stack_done);
  2331       __ BIND(stack_to_stack_double_slot);
  2332       // T_LONG, T_DOUBLE
  2334       // Move both entries for debug purposes even though only one is live
  2335       __ ld(tmp1, BytesPerWord, from);
  2336       __ ld(tmp2, 0, from);
  2337       __ std(tmp1, 0, tos);
  2338       __ std(tmp2, -BytesPerWord, tos);
  2340       // new expression stack top
  2341       __ addi(new_tos, tos, - 2 * BytesPerWord); // two slots
  2342       __ b(stack_to_stack_done);
  2346       __ BIND(stack_to_stack_void);
  2347       // T_VOID
  2349       // new expression stack top
  2350       __ mr(new_tos, tos);
  2351       // fall through to stack_to_stack_done
  2354     __ BIND(stack_to_stack_done);
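    // Illustration only: the three cases above in C-like form (hedged sketch;
    // `from' points at the callee's first result slot, `tos' at the caller's
    // first free slot, both viewed as intptr_t*):
    //
    //   if (void)          { new_tos = tos; }
    //   else if (1 slot)   { tos[0] = from[0];                      new_tos = tos - 1; }
    //   else /* 2 slots */ { tos[0] = from[1]; tos[-1] = from[0];   new_tos = tos - 2; }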
  2357   // new tos = R3_RET
  2359   // Get the message for the interpreter
  2360   __ li(msg, BytecodeInterpreter::method_resume);
  2362   // And fall thru
  2365   //=============================================================================
  2366   // Restore caller's interpreter state and pass pointer to caller's
  2367   // new tos to caller.
  2369   __ BIND(unwind_recursive_activation);
  2371   //
  2372   // Registers alive
  2373   //   R15_prev_state   - address of caller's BytecodeInterpreter
  2374   //   R3_RET           - address of caller's tos
  2375   //   msg              - message for caller's BytecodeInterpreter
  2376   //   R1_SP            - callee's stack pointer
  2377   //
  2378   // Registers updated
  2379   //   R14_state        - address of caller's BytecodeInterpreter
  2380   //   R15_prev_state   - address of its parent or 0
  2381   //
  2383   // Pop callee's interpreter and set R14_state to caller's interpreter.
  2384   __ pop_interpreter_state(/*prev_state_may_be_0=*/false);
  2386   // And fall thru
  2389   //=============================================================================
  2390   // Resume the (calling) interpreter after a call.
  2392   __ BIND(resume_interpreter);
  2394   //
  2395   // Registers alive
  2396   //   R14_state        - address of resuming BytecodeInterpreter
  2397   //   R15_prev_state   - address of its parent or 0
  2398   //   R3_RET           - address of resuming tos
  2399   //   msg              - message for resuming interpreter
  2400   //   R1_SP            - callee's stack pointer
  2401   //
  2402   // Registers updated
  2403   //   R1_SP            - caller's stack pointer
  2404   //
  2406   // Restore the C stack pointer of the caller (the resuming interpreter);
  2407   // R14_state already points to the resuming BytecodeInterpreter.
  2408   __ pop_interpreter_frame_to_state(R14_state, R21_tmp1, R11_scratch1, R12_scratch2);
  2410   // Store new address of tos (holding return value) in interpreter state.
  2411   __ std(R3_RET, state_(_stack));
  2413   // Store message for interpreter.
  2414   __ stw(msg, state_(_msg));
  2416   __ b(call_interpreter);
  2418   //=============================================================================
  2419   // Interpreter returning to native code (call_stub/c1/c2) from
  2420   // initial activation. Convert stack result and unwind activation.
  2422   __ BIND(return_to_initial_caller);
  2424   //
  2425   // Registers alive
  2426   //   R19_method       - callee's Method
  2427   //   R14_state        - address of callee's interpreter state
  2428   //   R16_thread       - JavaThread
  2429   //   R1_SP            - callee's stack pointer
  2430   //
  2431   // Registers updated
  2432   //   R3_RET/F1_RET - result in expected output register
  2433   //
  2435   // If we have an exception pending we have no result and we
  2436   // must figure out where to really return to.
  2437   //
  2438   __ ld(pending_exception, thread_(pending_exception));
  2439   __ cmpdi(CCR0, pending_exception, 0);
  2440   __ bne(CCR0, unwind_initial_activation_pending_exception);
  2442   __ lwa(result_index, method_(result_index));
  2444   // Address of stub descriptor address array.
  2445   __ load_const(stub_addr, CppInterpreter::stack_result_to_native());
  2447   // Pass address of callee's tos + BytesPerWord.
  2448   // Will then point directly to result.
  2449   __ ld(R3_ARG1, state_(_stack));
  2450   __ addi(R3_ARG1, R3_ARG1, Interpreter::stackElementSize);
  2452   // Address of stub descriptor address
  2453   __ sldi(result_index, result_index, LogBytesPerWord);
  2454   __ add(stub_addr, stub_addr, result_index);
  2456   // Stub descriptor address
  2457   __ ld(stub_addr, 0, stub_addr);
  2459   // TODO: don't do this via a call, do it in place!
  2460   //
  2461   // call stub via descriptor
  2462   __ call_stub(stub_addr);
  2464   __ BIND(unwind_initial_activation);
  2466   // Unwind from initial activation. No exception is pending.
  2468   //
  2469   // Stack layout at this point:
  2470   //
  2471   //    0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
  2472   //            ...
  2473   //    CALLER  [PARENT_IJAVA_FRAME_ABI]
  2474   //            ...
  2475   //    CALLER  [unextended ABI]
  2476   //            ...
  2477   //
  2478   //  The CALLER frame has a C2I adapter or is an entry-frame.
  2479   //
  2481   // An interpreter frame exists, we may pop the TOP_IJAVA_FRAME and
  2482   // turn the caller's PARENT_IJAVA_FRAME back into a TOP_IJAVA_FRAME.
  2483   // But, we simply restore the return pc from the caller's frame and
  2484   // use the caller's initial_caller_sp as the new SP which pops the
  2485   // interpreter frame and "resizes" the caller's frame to its "unextended"
  2486   // size.
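  // Illustration only: the unwind below, roughly (hedged sketch):
  //
  //   return_pc = parent_frame_abi->lr;        // saved by the caller
  //   R1_SP     = initial_caller_sp;           // drops frame and any c2i extension
  //   goto return_pc;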
  2488   // get rid of top frame
  2489   __ pop_frame();
  2491   // Load return PC from parent frame.
  2492   __ ld(R21_tmp1, _parent_ijava_frame_abi(lr), R1_SP);
  2494   // Resize frame to get rid of a potential extension.
  2495   __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
  2497   // update LR
  2498   __ mtlr(R21_tmp1);
  2500   // return
  2501   __ blr();
  2503   //=============================================================================
  2504   // Unwind from initial activation. An exception is pending
  2506   __ BIND(unwind_initial_activation_pending_exception);
  2508   //
  2509   // Stack layout at this point:
  2510   //
  2511   //   0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
  2512   //           ...
  2513   //   CALLER  [PARENT_IJAVA_FRAME_ABI]
  2514   //           ...
  2515   //   CALLER  [unextended ABI]
  2516   //           ...
  2517   //
  2518   // The CALLER frame has a C2I adapter or is an entry-frame.
  2519   //
  2521   // An interpreter frame exists, we may pop the TOP_IJAVA_FRAME and
  2522   // turn the caller's PARENT_IJAVA_FRAME back into a TOP_IJAVA_FRAME.
  2523   // But, we just pop the current TOP_IJAVA_FRAME and fall through
  2525   __ pop_frame();
  2526   __ ld(R3_ARG1, _top_ijava_frame_abi(lr), R1_SP);
  2528   //
  2529   // Stack layout at this point:
  2530   //
  2531   //   CALLER  [PARENT_IJAVA_FRAME_ABI]      <-- R1_SP
  2532   //           ...
  2533   //   CALLER  [unextended ABI]
  2534   //           ...
  2535   //
  2536   // The CALLER frame has a C2I adapter or is an entry-frame.
  2537   //
  2538   // Registers alive
  2539   //   R16_thread
  2540   //   R3_ARG1 - return address to caller
  2541   //
  2542   // Registers updated
  2543   //   R3_ARG1 - address of pending exception
  2544   //   R4_ARG2 - issuing pc = return address to caller
  2545   //   LR      - address of exception handler stub
  2546   //
  2548   // Resize frame to get rid of a potential extension.
  2549   __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
  2551   __ mr(R14, R3_ARG1);   // R14 := ARG1
  2552   __ mr(R4_ARG2, R3_ARG1);  // ARG2 := ARG1
  2554   // Find the address of the "catch_exception" stub.
  2555   __ push_frame_reg_args(0, R11_scratch1);
  2556   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
  2557                   R16_thread,
  2558                   R4_ARG2);
  2559   __ pop_frame();
  2561   // Load continuation address into LR.
  2562   __ mtlr(R3_RET);
  2564   // Load address of pending exception and clear it in thread object.
  2565   __ ld(R3_ARG1/*R3_RET*/, thread_(pending_exception));
  2566   __ li(R4_ARG2, 0);
  2567   __ std(R4_ARG2, thread_(pending_exception));
  2569   // re-load issuing pc
  2570   __ mr(R4_ARG2, R14);
  2572   // Branch to found exception handler.
  2573   __ blr();
  2575   //=============================================================================
  2576   // Call a new method. Compute new args and trim the expression stack
  2577   // to only what we are currently using and then recurse.
  2579   __ BIND(call_method);
  2581   //
  2582   //  Registers alive
  2583   //    R16_thread
  2584   //    R14_state      - address of caller's BytecodeInterpreter
  2585   //    R1_SP          - caller's stack pointer
  2586   //
  2587   //  Registers updated
  2588   //    R15_prev_state - address of caller's BytecodeInterpreter
  2589   //    R17_tos        - address of caller's tos
  2590   //    R19_method     - callee's Method
  2591   //    R1_SP          - trimmed back
  2592   //
  2594   // Very-local scratch registers.
  2596   const Register offset = R21_tmp1;
  2597   const Register tmp    = R22_tmp2;
  2598   const Register self_entry  = R23_tmp3;
  2599   const Register stub_entry  = R24_tmp4;
  2601   const ConditionRegister cr = CCR0;
  2603   // Load the address of the frame manager.
  2604   __ load_const(self_entry, &interpreter_frame_manager);
  2605   __ ld(self_entry, 0, self_entry);
  2607   // Load BytecodeInterpreter._result._to_call._callee (callee's Method).
  2608   __ ld(R19_method, state_(_result._to_call._callee));
  2609   // Load BytecodeInterpreter._stack (outgoing tos).
  2610   __ ld(R17_tos, state_(_stack));
  2612   // Save address of caller's BytecodeInterpreter.
  2613   __ mr(R15_prev_state, R14_state);
  2615   // Load the callee's entry point.
  2616   // Load BytecodeInterpreter._result._to_call._callee_entry_point.
  2617   __ ld(stub_entry, state_(_result._to_call._callee_entry_point));
  2619   // Check whether stub_entry is equal to self_entry.
  2620   __ cmpd(cr, self_entry, stub_entry);
  2621   // if (self_entry == stub_entry)
  2622   //   do a re-dispatch
  2623   __ beq(cr, re_dispatch);
  2624   // else
  2625   //   call the specialized entry (adapter for jni or compiled code)
  2626   __ BIND(call_special);
  2628   //
  2629   // Call the entry generated by `InterpreterGenerator::generate_native_entry'.
  2630   //
  2631   // Registers alive
  2632   //   R16_thread
  2633   //   R15_prev_state    - address of caller's BytecodeInterpreter
  2634   //   R19_method        - callee's Method
  2635   //   R17_tos           - address of caller's tos
  2636   //   R1_SP             - caller's stack pointer
  2637   //
  2639   // Mark return from specialized entry for generate_native_entry.
  2640   guarantee(return_from_native_pc != (address) NULL, "precondition");
  2641   frame_manager_specialized_return = return_from_native_pc;
  2643   // Set sender_SP in case we call interpreter native wrapper which
  2644   // will expect it. Compiled code should not care.
  2645   __ mr(R21_sender_SP, R1_SP);
  2647   // Do a tail call here, and let the link register point to
  2648   // frame_manager_specialized_return which is return_from_native_pc.
  2649   __ load_const(tmp, frame_manager_specialized_return);
  2650   __ call_stub_and_return_to(stub_entry,  tmp /* return_pc=tmp */);
  2653   //=============================================================================
  2654   //
  2655   // InterpretMethod triggered OSR compilation of some Java method M
  2656   // and now asks to run the compiled code.  We call this code the
  2657   // `callee'.
  2658   //
  2659   // This is our current idea of how OSR should look on PPC64:
  2660   //
  2661   // While interpreting a Java method M the stack is:
  2662   //
  2663   //  (InterpretMethod (M), IJAVA_FRAME (M), ANY_FRAME, ...).
  2664   //
  2665   // After having OSR compiled M, `InterpretMethod' returns to the
  2666   // frame manager, sending the message `retry_method_osr'.  The stack
  2667   // is:
  2668   //
  2669   //  (IJAVA_FRAME (M), ANY_FRAME, ...).
  2670   //
  2671   // The compiler will have generated an `nmethod' suitable for
  2672   // continuing execution of M at the bytecode index at which OSR took
  2673   // place.  So now the frame manager calls the OSR entry.  The OSR
  2674   // entry sets up a JIT_FRAME for M and continues execution of M with
  2675   // initial state determined by the IJAVA_FRAME.
  2676   //
  2677   //  (JIT_FRAME (M), IJAVA_FRAME (M), ANY_FRAME, ...).
  2678   //
  2680   __ BIND(retry_method_osr);
  2682   //
  2683   // Registers alive
  2684   //   R16_thread
  2685   //   R15_prev_state     - address of caller's BytecodeInterpreter
  2686   //   R14_state          - address of callee's BytecodeInterpreter
  2687   //   R1_SP              - callee's SP before call to InterpretMethod
  2688   //
  2689   // Registers updated
  2690   //   R17                - pointer to callee's locals array
  2691   //                       (declared via `interpreter_arg_ptr_reg' in the AD file)
  2692   //   R19_method         - callee's Method
  2693   //   R1_SP              - callee's SP (will become SP of OSR adapter frame)
  2694   //
  2696   // Provide a debugger breakpoint in the frame manager if breakpoints
  2697   // in osr'd methods are requested.
  2698 #ifdef COMPILER2
  2699   NOT_PRODUCT( if (OptoBreakpointOSR) { __ illtrap(); } )
  2700 #endif
  2702   // Load callee's pointer to locals array from callee's state.
  2703   //  __ ld(R17, state_(_locals));
  2705   // Load osr entry.
  2706   __ ld(R12_scratch2, state_(_result._osr._osr_entry));
  2708   // Load address of temporary osr buffer to arg1.
  2709   __ ld(R3_ARG1, state_(_result._osr._osr_buf));
  2710   __ mtctr(R12_scratch2);
  2712   // Load method oop, gc may move it during execution of osr'd method.
  2713   __ ld(R22_tmp2, state_(_method));
  2714   // Load message 'call_method'.
  2715   __ li(R23_tmp3, BytecodeInterpreter::call_method);
  2718     // Pop the IJAVA frame of the method which we are going to call osr'd.
  2719     Label no_state, skip_no_state;
  2720     __ pop_interpreter_state(/*prev_state_may_be_0=*/true);
  2721     __ cmpdi(CCR0, R14_state,0);
  2722     __ beq(CCR0, no_state);
  2723     // return to interpreter
  2724     __ pop_interpreter_frame_to_state(R14_state, R11_scratch1, R12_scratch2, R21_tmp1);
  2726     // Init _result._to_call._callee and tell gc that it contains a valid oop
  2727     // by setting _msg to 'call_method'.
  2728     __ std(R22_tmp2, state_(_result._to_call._callee));
  2729     // TODO: PPC port: assert(4 == BytecodeInterpreter::sz_msg(), "unexpected field size");
  2730     __ stw(R23_tmp3, state_(_msg));
  2732     __ load_const(R21_tmp1, frame_manager_specialized_return);
  2733     __ b(skip_no_state);
  2734     __ bind(no_state);
  2736     // Return to initial caller.
  2738     // Get rid of top frame.
  2739     __ pop_frame();
  2741     // Load return PC from parent frame.
  2742     __ ld(R21_tmp1, _parent_ijava_frame_abi(lr), R1_SP);
  2744     // Resize frame to get rid of a potential extension.
  2745     __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
  2747     __ bind(skip_no_state);
  2749     // Update LR with return pc.
  2750     __ mtlr(R21_tmp1);
  2752   // Jump to the osr entry point.
  2753   __ bctr();
  2757   //=============================================================================
  2758   // Interpreted method "returned" with an exception, pass it on.
  2759   // Pass no result, unwind activation and continue/return to
  2760   // interpreter/call_stub/c2.
  2762   __ BIND(throwing_exception);
  2764   // Check if this is the initial invocation of the frame manager.  If
  2765   // so, previous interpreter state in R15_prev_state will be null.
  2767   // New tos of caller is callee's first parameter address, that is
  2768   // callee's incoming arguments are popped.
  2769   __ ld(R3_RET, state_(_locals));
  2771   // Check whether this is an initial call.
  2772   __ cmpdi(CCR0, R15_prev_state, 0);
  2773   // Yes, called from the call stub or from generated code via a c2i frame.
  2774   __ beq(CCR0, unwind_initial_activation_pending_exception);
  2776   // Send resume message, interpreter will see the exception first.
  2778   __ li(msg, BytecodeInterpreter::method_resume);
  2779   __ b(unwind_recursive_activation);
  2782   //=============================================================================
  2783   // Push the last instruction out to the code buffer.
  2786     __ unimplemented("end of InterpreterGenerator::generate_normal_entry", 128);
  2789   interpreter_frame_manager = entry;
  2790   return interpreter_frame_manager;
  2791 }
  2793 // Generate code for various sorts of method entries
  2794 //
  2795 address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
  2796   address entry_point = NULL;
  2798   switch (kind) {
  2799     case Interpreter::zerolocals                 :                                                                              break;
  2800     case Interpreter::zerolocals_synchronized    :                                                                              break;
  2801     case Interpreter::native                     : // Fall thru
  2802     case Interpreter::native_synchronized        : entry_point = ((CppInterpreterGenerator*)this)->generate_native_entry();     break;
  2803     case Interpreter::empty                      :                                                                              break;
  2804     case Interpreter::accessor                   : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();      break;
  2805     case Interpreter::abstract                   : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();      break;
  2806     // These are special interpreter intrinsics which we don't support so far.
  2807     case Interpreter::java_lang_math_sin         :                                                                              break;
  2808     case Interpreter::java_lang_math_cos         :                                                                              break;
  2809     case Interpreter::java_lang_math_tan         :                                                                              break;
  2810     case Interpreter::java_lang_math_abs         :                                                                              break;
  2811     case Interpreter::java_lang_math_log         :                                                                              break;
  2812     case Interpreter::java_lang_math_log10       :                                                                              break;
  2813     case Interpreter::java_lang_math_sqrt        :                                                                              break;
  2814     case Interpreter::java_lang_math_pow         :                                                                              break;
  2815     case Interpreter::java_lang_math_exp         :                                                                              break;
  2816     case Interpreter::java_lang_ref_reference_get: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
  2817     default                                      : ShouldNotReachHere();                                                        break;
  2818   }
  2820   if (entry_point) {
  2821     return entry_point;
  2822   }
  2823   return ((InterpreterGenerator*)this)->generate_normal_entry();
  2824 }
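// Note on the dispatch above: kinds that leave entry_point NULL (the
// zerolocals variants, empty, and the java_lang_math_* intrinsics, which this
// port does not specialize) all fall back to the generic frame-manager entry
// produced by generate_normal_entry(); only the native, accessor, abstract and
// Reference.get kinds get dedicated entries here.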
  2826 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  2827  : CppInterpreterGenerator(code) {
  2828    generate_all(); // down here so it can be "virtual"
  2829 }
  2831 // How much stack a topmost interpreter method activation needs in words.
  2832 int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  2833   // Computation is in bytes, not words, to match layout_activation_impl
  2834   // below, but the return value is in words.
  2836   //
  2837   //  0       [TOP_IJAVA_FRAME_ABI]                                                    \
  2838   //          alignment (optional)                                             \       |
  2839   //          [operand stack / Java parameters] > stack                        |       |
  2840   //          [monitors] (optional)             > monitors                     |       |
  2841   //          [PARENT_IJAVA_FRAME_ABI]                                \        |       |
  2842   //          [BytecodeInterpreter object]      > interpreter \       |        |       |
  2843   //          alignment (optional)                            | round | parent | round | top
  2844   //          [Java result] (2 slots)           > result      |       |        |       |
  2845   //          [Java non-arg locals]             \ locals      |       |        |       |
  2846   //          [arg locals]                      /             /       /        /       /
  2847   //
  2849   int locals = method->max_locals() * BytesPerWord;
  2850   int interpreter = frame::interpreter_frame_cinterpreterstate_size_in_bytes();
  2851   int result = 2 * BytesPerWord;
  2853   int parent = round_to(interpreter + result + locals, 16) + frame::parent_ijava_frame_abi_size;
  2855   int stack = method->max_stack() * BytesPerWord;
  2856   int monitors = method->is_synchronized() ? frame::interpreter_frame_monitor_size_in_bytes() : 0;
  2857   int top = round_to(parent + monitors + stack, 16) + frame::top_ijava_frame_abi_size;
  2859   return (top / BytesPerWord);
  2860 }
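// A rough worked example of the computation above (the numbers are made up for
// illustration; the interpreter-state and ABI sizes are left symbolic): for an
// unsynchronized method with max_locals = 4 and max_stack = 6 on a 64-bit word
// size,
//
//   locals   = 4 * 8 = 32 bytes
//   result   = 2 * 8 = 16 bytes
//   parent   = round_to(interpreter + 16 + 32, 16) + parent_ijava_frame_abi_size
//   stack    = 6 * 8 = 48 bytes
//   monitors = 0
//   top      = round_to(parent + 0 + 48, 16) + top_ijava_frame_abi_size
//
// and the returned value is top / 8 words.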
  2862 void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
  2863                                                   frame* caller,
  2864                                                   frame* current,
  2865                                                   Method* method,
  2866                                                   intptr_t* locals,
  2867                                                   intptr_t* stack,
  2868                                                   intptr_t* stack_base,
  2869                                                   intptr_t* monitor_base,
  2870                                                   intptr_t* frame_sp,
  2871                                                   bool is_top_frame) {
  2872   // What about any vtable?
  2873   //
  2874   to_fill->_thread = JavaThread::current();
  2875   // This gets filled in later but make it something recognizable for now.
  2876   to_fill->_bcp = method->code_base();
  2877   to_fill->_locals = locals;
  2878   to_fill->_constants = method->constants()->cache();
  2879   to_fill->_method = method;
  2880   to_fill->_mdx = NULL;
  2881   to_fill->_stack = stack;
  2883   if (is_top_frame && JavaThread::current()->popframe_forcing_deopt_reexecution()) {
  2884     to_fill->_msg = deopt_resume2;
  2885   } else {
  2886     to_fill->_msg = method_resume;
  2887   }
  2888   to_fill->_result._to_call._bcp_advance = 0;
  2889   to_fill->_result._to_call._callee_entry_point = NULL; // doesn't matter to anyone
  2890   to_fill->_result._to_call._callee = NULL; // doesn't matter to anyone
  2891   to_fill->_prev_link = NULL;
  2893   if (caller->is_interpreted_frame()) {
  2894     interpreterState prev  = caller->get_interpreterState();
  2896     // Support MH calls. Make sure the interpreter will return the right address:
  2897     // 1. Caller did an ordinary interpreted->compiled call: Set a prev_state
  2898     //    which makes the CPP interpreter return to the frame manager's
  2899     //    "return_from_interpreted_method" entry after finishing execution.
  2900     // 2. Caller did a MH call: If the caller has a MethodHandleInvoke in its
  2901     //    state (invariant: it must be the caller of the bottom vframe) we used
  2902     //    the "call_special" entry to do the call, meaning the arguments have not
  2903     //    been popped from the stack. Therefore, don't set a prev_state in this
  2904     //    case, so that we return to the "return_from_native" frame manager entry,
  2905     //    which takes care of popping the arguments. Also, don't overwrite the
  2906     //    MH.invoke Method in the prev_state, so that the number of arguments to
  2907     //    pop can still be determined.
  2908     // The parameter method can represent MethodHandle.invokeExact(...).
  2909     // The MethodHandleCompiler generates these synthetic Methods,
  2910     // including bytecodes, if an invokedynamic call gets inlined. In
  2911     // this case we want to return like from any other interpreted
  2912     // Java call, so we set _prev_link.
  2913     to_fill->_prev_link = prev;
  2915     if (*prev->_bcp == Bytecodes::_invokeinterface || *prev->_bcp == Bytecodes::_invokedynamic) {
  2916       prev->_result._to_call._bcp_advance = 5;
  2917     } else {
  2918       prev->_result._to_call._bcp_advance = 3;
  2919     }
  2920   }
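  // For reference: the _bcp_advance values above correspond to the encoded
  // length of the caller's invoke bytecode. invokeinterface and invokedynamic
  // occupy 5 bytes (opcode, a 2-byte index and 2 further operand bytes), while
  // the other invokes occupy 3 bytes (opcode plus a 2-byte index), so the
  // caller's bcp is stepped past the call site when it resumes.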
  2921   to_fill->_oop_temp = NULL;
  2922   to_fill->_stack_base = stack_base;
  2923   // Need +1 here because stack_base points to the word just above the
  2924   // first expr stack entry and stack_limit is supposed to point to
  2925   // the word just below the last expr stack entry. See
  2926   // generate_compute_interpreter_state.
  2927   to_fill->_stack_limit = stack_base - (method->max_stack() + 1);
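  // Illustration: with max_stack = 3, _stack_limit ends up at stack_base - 4,
  // leaving stack_base[-1] .. stack_base[-3] for the expression stack and
  // pointing one word below the last possible entry, as described above.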
  2928   to_fill->_monitor_base = (BasicObjectLock*) monitor_base;
  2930   to_fill->_frame_bottom = frame_sp;
  2932   // PPC64 specific
  2933   to_fill->_last_Java_pc = NULL;
  2934   to_fill->_last_Java_fp = NULL;
  2935   to_fill->_last_Java_sp = frame_sp;
  2936 #ifdef ASSERT
  2937   to_fill->_self_link = to_fill;
  2938   to_fill->_native_fresult = 123456.789;
  2939   to_fill->_native_lresult = CONST64(0xdeafcafedeadc0de);
  2940 #endif
  2941 }
  2943 void BytecodeInterpreter::pd_layout_interpreterState(interpreterState istate,
  2944                                                      address last_Java_pc,
  2945                                                      intptr_t* last_Java_fp) {
  2946   istate->_last_Java_pc = last_Java_pc;
  2947   istate->_last_Java_fp = last_Java_fp;
  2948 }
  2950 int AbstractInterpreter::layout_activation(Method* method,
  2951                                            int temps,        // Number of slots on java expression stack in use.
  2952                                            int popframe_args,
  2953                                            int monitors,     // Number of active monitors.
  2954                                            int caller_actual_parameters,
  2955                                            int callee_params,// Number of slots for callee parameters.
  2956                                            int callee_locals,// Number of slots for locals.
  2957                                            frame* caller,
  2958                                            frame* interpreter_frame,
  2959                                            bool is_top_frame,
  2960                                            bool is_bottom_frame) {
  2962   // NOTE this code must exactly mimic what
  2963   // InterpreterGenerator::generate_compute_interpreter_state() does
  2964   // as far as allocating an interpreter frame. However there is an
  2965   // exception: with the C++ based interpreter only the topmost frame
  2966   // has a full-sized expression stack. The 16 byte slop factor is
  2967   // both the ABI scratch area and a place to hold a result from a
  2968   // callee on its way to the caller's stack.
  2970   int monitor_size = frame::interpreter_frame_monitor_size_in_bytes() * monitors;
  2971   int frame_size;
  2972   int top_frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
  2973                                 + monitor_size
  2974                                 + (method->max_stack() * Interpreter::stackElementWords * BytesPerWord)
  2975                                 + 2*BytesPerWord,
  2976                                 frame::alignment_in_bytes)
  2977                       + frame::top_ijava_frame_abi_size;
  2978   if (is_top_frame) {
  2979     frame_size = top_frame_size;
  2980   } else {
  2981     frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
  2982                           + monitor_size
  2983                           + ((temps - callee_params + callee_locals) *
  2984                              Interpreter::stackElementWords * BytesPerWord)
  2985                           + 2*BytesPerWord,
  2986                           frame::alignment_in_bytes)
  2987                  + frame::parent_ijava_frame_abi_size;
  2988     assert(popframe_args == 0, "non-zero for top_frame only");
  2989   }
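  // Rough illustration with hypothetical numbers: only the top frame reserves
  // room for the full max_stack(); a parent frame is resized to what is
  // actually live. E.g. with temps = 5 expression-stack slots, of which
  // callee_params = 2 are the callee's outgoing arguments, and callee_locals = 3,
  // it keeps (5 - 2 + 3) = 6 stack-element slots, plus the interpreter state,
  // monitors and the 2-word result area, rounded to the frame alignment, plus
  // the parent ABI area.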
  2991   // If we actually have a frame to layout we must now fill in all the pieces.
  2992   if (interpreter_frame != NULL) {
  2994     intptr_t sp = (intptr_t)interpreter_frame->sp();
  2995     intptr_t fp = *(intptr_t *)sp;
  2996     assert(fp == (intptr_t)caller->sp(), "fp must match");
  2997     interpreterState cur_state =
  2998       (interpreterState)(fp - frame::interpreter_frame_cinterpreterstate_size_in_bytes());
  3000     // Now fill in the interpreterState object.
  3002     intptr_t* locals;
  3003     if (caller->is_interpreted_frame()) {
  3004       // Locals must agree with the caller because it will be used to set the
  3005       // caller's tos when we return.
  3006       interpreterState prev  = caller->get_interpreterState();
  3007       // Calculate start of "locals" for MH calls.  For MH calls, the
  3008       // current method() (= MH target) and prev->callee() (=
  3009       // MH.invoke*()) are different and, in particular, have different
  3010       // signatures. To pop the arguments of the caller, we must use
  3011       // the prev->callee()->size_of_arguments() because that's what
  3012       // the caller actually pushed.  Currently, for synthetic MH
  3013       // calls (deoptimized from inlined MH calls), detected by
  3014       // is_method_handle_invoke(), we use the callee's arguments
  3015       // because here, the caller's and callee's signatures match.
  3016       if (true /*!caller->is_at_mh_callsite()*/) {
  3017         locals = prev->stack() + method->size_of_parameters();
  3018       } else {
  3019         // Normal MH call.
  3020         locals = prev->stack() + prev->callee()->size_of_parameters();
  3021       }
  3022     } else {
  3023       bool is_deopted;
  3024       locals = (intptr_t*) (fp + ((method->max_locals() - 1) * BytesPerWord) +
  3025                             frame::parent_ijava_frame_abi_size);
  3026     }
  3028     intptr_t* monitor_base = (intptr_t*) cur_state;
  3029     intptr_t* stack_base   = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
  3031     // Provide pop_frame capability on PPC64, add popframe_args.
  3032     // +1 because stack is always prepushed.
  3033     intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (temps + popframe_args + 1) * BytesPerWord);
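    // Illustration: with temps = 2 and popframe_args = 0 this yields
    // stack = stack_base - 3 words, i.e. two live expression-stack slots plus
    // the one always-prepushed slot mentioned above.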
  3035     BytecodeInterpreter::layout_interpreterState(cur_state,
  3036                                                  caller,
  3037                                                  interpreter_frame,
  3038                                                  method,
  3039                                                  locals,
  3040                                                  stack,
  3041                                                  stack_base,
  3042                                                  monitor_base,
  3043                                                  (intptr_t*)(((intptr_t)fp)-top_frame_size),
  3044                                                  is_top_frame);
  3046     BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address,
  3047                                                     interpreter_frame->fp());
  3048   }
  3049   return frame_size/BytesPerWord;
  3050 }
  3052 #endif // CC_INTERP
