src/cpu/mips/vm/templateInterpreter_mips_64.cpp

author:     aoqi
date:       Wed, 09 Jan 2019 14:47:56 +0800
changeset:  9454:9f319eefe17b

#7808 Save return values on the top of the stack when calling notify_method_exit
Contributed-by: wanghaomin, aoqi

/*
 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2018, Loongson Technology. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#define __ _masm->

#ifndef CC_INTERP
// asm based interpreter deoptimization helpers
int AbstractInterpreter::size_activation(int max_stack,
                                         int temps,
                                         int extra_args,
                                         int monitors,
                                         int callee_params,
                                         int callee_locals,
                                         bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in AbstractInterpreterGenerator::generate_method_entry.

  // fixed size of an interpreter frame:
  int overhead = frame::sender_sp_offset -
                 frame::interpreter_frame_initial_sp_offset;
  // Our locals were accounted for by the caller (or last_frame_adjust
  // on the transition). Since the callee parameters already account
  // for the callee's params, we only need to account for the extra
  // locals.
  int size = overhead +
         (callee_locals - callee_params) * Interpreter::stackElementWords +
         monitors * frame::interpreter_frame_monitor_size() +
         temps * Interpreter::stackElementWords + extra_args;

  return size;
}
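
// A worked example of the formula above (illustrative only; the real offsets
// come from frame_mips.hpp): with an overhead of, say, 6 words, 2 extra
// callee locals beyond the parameters, 1 monitor, 3 temps and no extra_args,
// the activation needs
//   6 + 2 * Interpreter::stackElementWords
//     + 1 * frame::interpreter_frame_monitor_size()
//     + 3 * Interpreter::stackElementWords
// words, exactly mirroring the frame layout built by the method entry code.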
const int Interpreter::return_sentinel = 0xfeedbeed;
const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bci_offset    = frame::interpreter_frame_bcx_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;

//-----------------------------------------------------------------------------
address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ addi(T1, FP, frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ sub(T1, T1, SP); // T1 = maximal sp for current fp
    __ bgez(T1, L);     // check if frame is complete
    __ delayed()->nop();
    __ stop("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  // FIXME: please change the func restore_bcp
  // S0 is the conventional register for bcp
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  // FIXME: why is the thread parameter not passed here?
  __ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  return entry;
}
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  __ li(A1, (long)name);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
             InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), A1, A2);
  return entry;
}
address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  __ empty_FPU_stack();
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException), FSR);
  return entry;
}
address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // setup parameters
  __ li(A1, (long)name);
  if (pass_oop) {
    __ call_VM(V0,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), A1, FSR);
  } else {
    __ li(A2, (long)message);
    __ call_VM(V0,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), A1, A2);
  }
  // throw exception
  __ jmp(Interpreter::throw_exception_entry(), relocInfo::none);
  __ delayed()->nop();
  return entry;
}
address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ sd(R0, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  __ dispatch_next(state);
  return entry;
}
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {

  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ ld(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ sd(R0, FP, frame::interpreter_frame_last_sp_offset * wordSize);

  __ restore_bcp();
  __ restore_locals();

  // 2014/11/24 Fu
  // mdp: T8
  // ret: FSR
  // tmp: T9
  if (state == atos) {
    Register mdp = T8;
    Register tmp = T9;
    __ profile_return_type(mdp, FSR, tmp);
  }

  const Register cache = T9;
  const Register index = T3;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ dsll(AT, index, Address::times_ptr);
  __ daddu(AT, cache, AT);
  __ lw(flags, AT, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andi(flags, flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ dsll(AT, flags, Interpreter::stackElementScale());
  __ daddu(SP, SP, AT);
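  // Illustrative note (not in the original source): the parameter size cached
  // in the ConstantPoolCacheEntry flags is the number of stack slots the call
  // pushed, so the two instructions above pop the arguments. For example, a
  // call that pushed 3 slots advances SP by 3 * wordSize bytes (the shift by
  // Interpreter::stackElementScale() multiplies by the slot size).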

  __ dispatch_next(state, step);

  return entry;
}
address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
                                                               int step) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ sd(R0, FP, frame::interpreter_frame_last_sp_offset * wordSize);
  __ restore_bcp();
  __ restore_locals();
  // handle exceptions
  {
    Label L;
    const Register thread = TREG;
#ifndef OPT_THREAD
    __ get_thread(thread);
#endif
    __ lw(AT, thread, in_bytes(Thread::pending_exception_offset()));
    __ beq(AT, R0, L);
    __ delayed()->nop();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}
int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : // fall through
    case T_LONG   : // fall through
    case T_VOID   : i = 4; break;
    case T_FLOAT  : i = 5; break;
    case T_DOUBLE : i = 6; break;
    case T_OBJECT : // fall through
    case T_ARRAY  : i = 7; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
         "index out of bounds");
  return i;
}
address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
    case T_BOOLEAN: __ c2bool(V0);             break;
    case T_CHAR   : __ andi(V0, V0, 0xFFFF);   break;
    case T_BYTE   : __ sign_extend_byte (V0);  break;
    case T_SHORT  : __ sign_extend_short(V0);  break;
    case T_INT    : /* nothing to do */        break;
    case T_FLOAT  : /* nothing to do */        break;
    case T_DOUBLE : /* nothing to do */        break;
    case T_OBJECT :
    {
      __ ld(V0, FP, frame::interpreter_frame_oop_temp_offset * wordSize);
      __ verify_oop(V0);         // and verify it
    }
                    break;
    default       : ShouldNotReachHere();
  }
  __ jr(RA);                                  // return from result handler
  __ delayed()->nop();
  return entry;
}
address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}
// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Rmethod: method
// T3     : invocation counter
//
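// Illustrative note (not in the original source): "sticky" means that once
// the counter word crosses the limit, the comparison keeps reporting overflow
// on every later invocation rather than only at the exact crossing point, so
// an overflow notification cannot be missed.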
void InterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ ld(FSR, Address(Rmethod, Method::method_data_offset()));
      __ beq(FSR, R0, no_mdo);
      __ delayed()->nop();
      // Increment counter in the MDO
      const Address mdo_invocation_counter(FSR, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, T3, false, Assembler::zero, overflow);
      __ beq(R0, R0, done);
      __ delayed()->nop();
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(FSR,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());
    __ get_method_counters(Rmethod, FSR, done);
    __ increment_mask_and_jump(invocation_counter, increment, mask, T3, false, Assembler::zero, overflow);
    __ bind(done);
  } else {
    const Address invocation_counter(FSR, in_bytes(MethodCounters::invocation_counter_offset())
        + in_bytes(InvocationCounter::counter_offset()));
    const Address backedge_counter  (FSR, in_bytes(MethodCounters::backedge_counter_offset())
        + in_bytes(InvocationCounter::counter_offset()));

    __ get_method_counters(Rmethod, FSR, done);

    if (ProfileInterpreter) { // %%% Merge this into methodDataOop
      __ lw(T9, FSR, in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
      __ incrementl(T9, 1);
      __ sw(T9, FSR, in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ lw(T3, invocation_counter);
    __ increment(T3, InvocationCounter::count_increment);
    __ sw(T3, invocation_counter);  // save invocation count

    __ lw(FSR, backedge_counter);  // load backedge counter
    __ li(AT, InvocationCounter::count_mask_value);   // mask out the status bits
    __ andr(FSR, FSR, AT);

    __ dadd(T3, T3, FSR);          // add both counters

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      if (Assembler::is_simm16(InvocationCounter::InterpreterProfileLimit)) {
        __ slti(AT, T3, InvocationCounter::InterpreterProfileLimit);
      } else {
        __ li(AT, (long)&InvocationCounter::InterpreterProfileLimit);
        __ lw(AT, AT, 0);
        __ slt(AT, T3, AT);
      }

      __ bne_far(AT, R0, *profile_method_continue);
      __ delayed()->nop();

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(FSR, *profile_method);
    }

    if (Assembler::is_simm16(CompileThreshold)) {
      __ srl(AT, T3, InvocationCounter::count_shift);
      __ slti(AT, AT, CompileThreshold);
    } else {
      __ li(AT, (long)&InvocationCounter::InterpreterInvocationLimit);
      __ lw(AT, AT, 0);
      __ slt(AT, T3, AT);
    }

    __ beq_far(AT, R0, *overflow);
    __ delayed()->nop();
    __ bind(done);
  }
}
void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // S7 - locals
  // S0 - bcp
  // Rmethod - method
  // FP - interpreter frame

  // On return (i.e. jump to entry_point)
  // Rmethod - method
  // RA - return address of interpreter caller
  // tos - the last parameter to Java method
  // SP - sender_sp

  //const Address size_of_parameters(Rmethod, in_bytes(Method::size_of_parameters_offset()));

  // the bcp is valid if and only if it's not null
  __ call_VM(NOREG, CAST_FROM_FN_PTR(address,
      InterpreterRuntime::frequency_counter_overflow), R0);
  __ ld(Rmethod, FP, method_offset);
  // Preserve invariant that esi/edi contain bcp/locals of sender frame
  __ b_far(*do_continue);
  __ delayed()->nop();
}
// See if we've got enough room on the stack for locals plus overhead.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: The additional locals are always pushed as well (this wasn't
// obvious in generate_method_entry), so the guard works for them too.
//
// Args:
//      T2: number of additional locals this frame needs (what we must check)
//      T0: Method*
//
// Kills:
//      AT, T3
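//
// Rough sketch of the check below (illustrative only, simplified):
//   frame fits  <=>  SP - (T2 * Interpreter::stackElementSize + overhead_size)
//                    > stack_base - stack_size + (red + yellow zone bytes)
// Frames no larger than one page skip the explicit test, because the guard
// pages already catch those overflows.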
void InterpreterGenerator::generate_stack_overflow_check(void) {
  // see if we've got enough room on the stack for locals plus overhead.
  // the expression stack grows down incrementally, so the normal guard
  // page mechanism will work for that.
  //
  // Registers live on entry:
  //
  // T0: Method*
  // T2: number of additional locals this frame needs (what we must check)

  // NOTE: since the additional locals are also always pushed (wasn't obvious in
  // generate_method_entry) the guard should work for them too.
  //

  // monitor entry size: see picture of stack in generate_method_entry and frame_mips.hpp
  const int entry_size    = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved fp thru expr stack bottom).
  // be sure to change this if you add/subtract anything to/from the overhead area
  const int overhead_size = -(frame::interpreter_frame_initial_sp_offset*wordSize)
    + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ move(AT, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ slt(AT, AT, T2);
  __ beq(AT, R0, after_frame_check);
  __ delayed()->nop();

  // compute sp as if this were going to be the last frame on
  // the stack before the red zone
#ifndef OPT_THREAD
  Register thread = T1;
  __ get_thread(thread);
#else
  Register thread = TREG;
#endif

  // locals + overhead, in bytes
  __ dsll(T3, T2, Interpreter::stackElementScale());
  __ daddiu(T3, T3, overhead_size);   // locals size in bytes + overhead_size --> T3

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ ld(AT, thread, in_bytes(Thread::stack_base_offset()));
  __ bne(AT, R0, stack_base_okay);
  __ delayed()->nop();
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ ld(AT, thread, in_bytes(Thread::stack_size_offset()));
  __ bne(AT, R0, stack_size_okay);
  __ delayed()->nop();
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ ld(AT, thread, in_bytes(Thread::stack_base_offset())); // stack_base --> AT
  __ dadd(T3, T3, AT);   // locals size + overhead_size + stack_base --> T3
  __ ld(AT, thread, in_bytes(Thread::stack_size_offset()));  // stack_size --> AT
  __ dsub(T3, T3, AT);  // locals size + overhead_size + stack_base - stack_size --> T3

  // add in the redzone and yellow size
  __ move(AT, (StackRedPages+StackYellowPages) * page_size);
  __ add(T3, T3, AT);

  // check against the current stack bottom
  __ slt(AT, T3, SP);
  __ bne(AT, R0, after_frame_check);
  __ delayed()->nop();

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  __ move(SP, Rsender);
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jmp(StubRoutines::throw_StackOverflowError_entry(), relocInfo::runtime_call_type);
  __ delayed()->nop();

  // all done with frame size check
  __ bind(after_frame_check);
}
// Allocate monitor and lock method (asm interpreter)
// Rmethod - Method*
void InterpreterGenerator::lock_method(void) {
  // synchronize method
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  { Label L;
    __ lw(T0, Rmethod, in_bytes(Method::access_flags_offset()));
    __ andi(T0, T0, JVM_ACC_SYNCHRONIZED);
    __ bne(T0, R0, L);
    __ delayed()->nop();
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT
  // get synchronization object
  {
    Label done;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ lw(T0, Rmethod, in_bytes(Method::access_flags_offset()));
    __ andi(T2, T0, JVM_ACC_STATIC);
    __ ld(T0, LVP, Interpreter::local_offset_in_bytes(0));
    __ beq(T2, R0, done);
    __ delayed()->nop();
    __ ld(T0, Rmethod, in_bytes(Method::const_offset()));
    __ ld(T0, T0, in_bytes(ConstMethod::constants_offset()));
    __ ld(T0, T0, ConstantPool::pool_holder_offset_in_bytes());
    __ ld(T0, T0, mirror_offset);
    __ bind(done);
  }
  // add space for monitor & lock
  __ daddi(SP, SP, (-1) * entry_size);      // add space for a monitor entry
  __ sd(SP, FP, frame::interpreter_frame_monitor_block_top_offset * wordSize); // set new monitor block top
  __ sd(T0, SP, BasicObjectLock::obj_offset_in_bytes());   // store object
  // FIXME: I do not know what lock_object will do and what it will need
  __ move(c_rarg0, SP);      // object address
  __ lock_object(c_rarg0);
}
// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {

  // [ local var m-1      ] <--- sp
  //   ...
  // [ local var 0        ]
  // [ argument word n-1  ] <--- T0(sender's sp)
  //   ...
  // [ argument word 0    ] <--- S7

  // initialize fixed part of activation frame
  // sender's sp in Rsender
  int i = 0;
  __ sd(RA, SP, (-1) * wordSize);         // save return address
  __ sd(FP, SP, (-2) * wordSize);         // save sender's fp
  __ daddiu(FP, SP, (-2) * wordSize);
  __ sd(Rsender, FP, (-++i) * wordSize);  // save sender's sp
  __ sd(R0, FP, (-++i) * wordSize);       // save last_sp as null
  __ sd(LVP, FP, (-++i) * wordSize);      // save locals offset
  __ ld(BCP, Rmethod, in_bytes(Method::const_offset())); // get constMethodOop
  __ daddiu(BCP, BCP, in_bytes(ConstMethod::codes_offset())); // get codebase
  __ sd(Rmethod, FP, (-++i) * wordSize);  // save Method*
#ifndef CORE
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ ld(AT, Rmethod, in_bytes(Method::method_data_offset()));
    __ beq(AT, R0, method_data_continue);
    __ delayed()->nop();
    __ daddi(AT, AT, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ sd(AT, FP, (-++i) * wordSize);
  } else {
    __ sd(R0, FP, (-++i) * wordSize);
  }
#endif // !CORE

  __ ld(T2, Rmethod, in_bytes(Method::const_offset()));
  __ ld(T2, T2, in_bytes(ConstMethod::constants_offset()));
  __ ld(T2, T2, ConstantPool::cache_offset_in_bytes());
  __ sd(T2, FP, (-++i) * wordSize);             // set constant pool cache
  if (native_call) {
    __ sd(R0, FP, (-++i) * wordSize);           // no bcp
  } else {
    __ sd(BCP, FP, (-++i) * wordSize);          // set bcp
  }
  __ daddiu(SP, FP, (-++i) * wordSize);
  __ sd(SP, FP, (-i) * wordSize);               // reserve word for pointer to expression stack bottom
}
// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Call an accessor method (assuming it is resolved, otherwise drop
// into vanilla (slow path) entry)
address InterpreterGenerator::generate_accessor_entry(void) {

  // Rmethod: Method*
  // V0: receiver (preserve for slow entry into asm interpreter)
  // Rsender: senderSP must be preserved for slow path; set SP to it on fast path

  address entry_point = __ pc();
  Label xreturn_path;
  // do fastpath for resolved accessor methods
  if (UseFastAccessorMethods) {
    Label slow_path;
    __ li(T2, SafepointSynchronize::address_of_state());
    __ lw(AT, T2, 0);
    __ daddi(AT, AT, -(SafepointSynchronize::_not_synchronized));
    __ bne(AT, R0, slow_path);
    __ delayed()->nop();
    // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
    // parameter size = 1
    // Note: We can only use this code if the getfield has been resolved
    //       and if we don't have a null-pointer exception => check for
    //       these conditions first and use slow path if necessary.
    // Rmethod: method
    // V0: receiver

    // [ receiver  ] <-- sp
    __ ld(T0, SP, 0);

    // check if local 0 != NULL and read field
    __ beq(T0, R0, slow_path);
    __ delayed()->nop();
    __ ld(T2, Rmethod, in_bytes(Method::const_offset()));
    __ ld(T2, T2, in_bytes(ConstMethod::constants_offset()));
    // read first instruction word and extract bytecode @ 1 and index @ 2
    __ ld(T3, Rmethod, in_bytes(Method::const_offset()));
    __ lw(T3, T3, in_bytes(ConstMethod::codes_offset()));
    // Shift codes right to get the index on the right.
    // The bytecode fetched looks like <index><0xb4><0x2a>
    __ dsrl(T3, T3, 2 * BitsPerByte);
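    // Illustrative decoding (not in the original source): 0x2a is the
    // aload_0 opcode and 0xb4 is getfield, so the first four code bytes of a
    // resolved accessor are aload_0, getfield, plus the 2-byte constant pool
    // cache index; the shift just above leaves that index in T3.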
    // FIXME: maybe it's wrong
    __ dsll(T3, T3, exact_log2(in_words(ConstantPoolCacheEntry::size())));
    __ ld(T2, T2, ConstantPool::cache_offset_in_bytes());

    // T0: local 0 (x86: eax)
    // Rmethod: method (x86: ebx)
    // V0: receiver - do not destroy since it is needed for slow path! (x86: ecx)
    // T1: scratch (x86: ecx)
    // T3: constant pool cache index (x86: edx)
    // T2: constant pool cache (x86: edi)
    // Rsender: sender's sp (x86: esi)
    // check if getfield has been resolved and read constant pool cache entry
    // check the validity of the cache entry by testing whether _indices field
    // contains Bytecode::_getfield in b1 byte.
    assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
    //    __ movl(esi,
    //      Address(edi,
    //        edx,
    //        Address::times_4, ConstantPoolCache::base_offset()
    //        + ConstantPoolCacheEntry::indices_offset()));

    __ dsll(T8, T3, Address::times_8);
    __ move(T1, in_bytes(ConstantPoolCache::base_offset()
                         + ConstantPoolCacheEntry::indices_offset()));
    __ dadd(T1, T8, T1);
    __ dadd(T1, T1, T2);
    __ lw(T1, T1, 0);
    __ dsrl(T1, T1, 2 * BitsPerByte);
    __ andi(T1, T1, 0xFF);
    __ daddi(T1, T1, (-1) * Bytecodes::_getfield);
    __ bne(T1, R0, slow_path);
    __ delayed()->nop();

    // Note: constant pool entry is not valid before bytecode is resolved

    __ move(T1, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
    __ dadd(T1, T1, T8);
    __ dadd(T1, T1, T2);
    __ lw(AT, T1, 0);

    __ move(T1, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
    __ dadd(T1, T1, T8);
    __ dadd(T1, T1, T2);
    __ lw(T3, T1, 0);

    Label notByte, notBool, notShort, notChar, notObj;
    //    const Address field_address (eax, esi, Address::times_1);

    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Use the type from the constant pool cache
    __ srl(T3, T3, ConstantPoolCacheEntry::tos_state_shift);
    // Make sure we don't need to mask T3 for tosBits after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();
    // btos = 0
    __ bne(T3, R0, notByte);
    __ delayed()->dadd(T0, T0, AT);

    __ lb(V0, T0, 0);
    __ b(xreturn_path);
    __ delayed()->nop();

    // ztos
    __ bind(notByte);
    __ daddi(T1, T3, (-1) * ztos);
    __ bne(T1, R0, notBool);
    __ delayed()->nop();
    __ lb(V0, T0, 0);
    __ b(xreturn_path);
    __ delayed()->nop();

    // stos
    __ bind(notBool);
    __ daddi(T1, T3, (-1) * stos);
    __ bne(T1, R0, notShort);
    __ delayed()->nop();
    __ lh(V0, T0, 0);
    __ b(xreturn_path);
    __ delayed()->nop();

    // ctos
    __ bind(notShort);
    __ daddi(T1, T3, (-1) * ctos);
    __ bne(T1, R0, notChar);
    __ delayed()->nop();
    __ lhu(V0, T0, 0);
    __ b(xreturn_path);
    __ delayed()->nop();

    // atos
    __ bind(notChar);
    __ daddi(T1, T3, (-1) * atos);
    __ bne(T1, R0, notObj);
    __ delayed()->nop();
    // add for compressed oops
    __ load_heap_oop(V0, Address(T0, 0));
    __ b(xreturn_path);
    __ delayed()->nop();

    // itos
    __ bind(notObj);
#ifdef ASSERT
    Label okay;
    __ daddi(T1, T3, (-1) * itos);
    __ beq(T1, R0, okay);
    __ delayed()->nop();
    __ stop("what type is this?");
    __ bind(okay);
#endif // ASSERT
    __ lw(V0, T0, 0);

    __ bind(xreturn_path);

    // _ireturn/_areturn
    // FIXME
    __ move(SP, Rsender); // restore sender's sp into SP
    __ jr(RA);
    __ delayed()->nop();

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    (void) generate_normal_entry(false);
  } else {
    (void) generate_normal_entry(false);
  }

  return entry_point;
}
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.
  //
  // rbx: Method* (Rmethod)
  // r13: senderSP must be preserved for slow path, set SP to it on fast path (Rsender)
  //
  // rax: V0
  // rbx: Rmethod
  // r13: Rsender
  // rdi: T9

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ld(V0, SP, 0);

    __ beq(V0, R0, slow_path);
    __ delayed()->nop();

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.

    // Load the value of the referent field.
    const Address field_address(V0, referent_offset);
    __ load_heap_oop(V0, field_address);

    __ push(RA);
    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ g1_write_barrier_pre(noreg /* obj */,
                            V0 /* pre_val */,
                            TREG /* thread */,
                            Rmethod /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);
    __ pop(RA);

    __ jr(RA);
    __ delayed()->daddu(SP, Rsender, R0);      // set sp to sender sp

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls;
  // Rsender: sender's sp
  // Rmethod: Method*
  address entry_point = __ pc();

#ifndef CORE
  const Address invocation_counter(Rmethod, in_bytes(MethodCounters::invocation_counter_offset() +   // Fu: 20130814
                                   InvocationCounter::counter_offset()));
#endif

  // get parameter size (always needed)
  // the size in the java stack
  __ ld(V0, Rmethod, in_bytes(Method::const_offset()));
  __ lhu(V0, V0, in_bytes(ConstMethod::size_of_parameters_offset()));   // Fu: 20130814

  // native calls don't need the stack size check since they have no expression stack
  // and the arguments are already on the stack and we only add a handful of words
  // to the stack

  // Rmethod: Method*
  // V0: size of parameters
  // Layout of frame at this point
  //
  // [ argument word n-1  ] <--- sp
  //   ...
  // [ argument word 0    ]

  // for natives the size of locals is zero

  // compute beginning of parameters (S7)
  __ dsll(LVP, V0, Address::times_8);
  __ daddiu(LVP, LVP, (-1) * wordSize);
  __ dadd(LVP, LVP, SP);

  // add 2 zero-initialized slots for native calls
  __ daddi(SP, SP, (-2) * wordSize);
  __ sd(R0, SP, 1 * wordSize);  // slot for native oop temp offset (setup via runtime)
  __ sd(R0, SP, 0 * wordSize);  // slot for static native result handler (setup via runtime)

  // Layout of frame at this point
  // [ method holder mirror  ] <--- sp
  // [ result type info      ]
  // [ argument word n-1     ] <--- T0
  //   ...
  // [ argument word 0       ] <--- LVP

#ifndef CORE
  if (inc_counter) __ lw(T3, invocation_counter);  // (pre-)fetch invocation count
#endif

  // initialize fixed part of activation frame
  generate_fixed_frame(true);
  // after this function, the layout of frame is as following
  //
  // [ monitor block top        ] <--- sp ( the top monitor entry )
  // [ byte code pointer (0)    ] (if native, bcp = 0)
  // [ constant pool cache      ]
  // [ Method*                  ]
  // [ locals offset            ]
  // [ sender's sp              ]
  // [ sender's fp              ]
  // [ return address           ] <--- fp
  // [ method holder mirror     ]
  // [ result type info         ]
  // [ argument word n-1        ] <--- sender's sp
  //   ...
  // [ argument word 0          ] <--- S7

  // make sure method is native & not abstract
#ifdef ASSERT
  __ lw(T0, Rmethod, in_bytes(Method::access_flags_offset()));
  {
    Label L;
    __ andi(AT, T0, JVM_ACC_NATIVE);
    __ bne(AT, R0, L);
    __ delayed()->nop();
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ andi(AT, T0, JVM_ACC_ABSTRACT);
    __ beq(AT, R0, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.
  Register thread = TREG;
#ifndef OPT_THREAD
  __ get_thread(thread);
#endif
  __ move(AT, (int)true);
  __ sb(AT, thread, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));

#ifndef CORE
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);
#endif // CORE

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
#ifndef OPT_THREAD
  __ get_thread(thread);
#endif
  __ sb(R0, thread, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ lw(T0, Rmethod, in_bytes(Method::access_flags_offset()));
      __ andi(AT, T0, JVM_ACC_SYNCHRONIZED);
      __ beq(AT, R0, L);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // after method_lock, the layout of frame is as following
  //
  // [ monitor entry            ] <--- sp
  //   ...
  // [ monitor entry            ]
  // [ monitor block top        ] ( the top monitor entry )
  // [ byte code pointer (0)    ] (if native, bcp = 0)
  // [ constant pool cache      ]
  // [ Method*                  ]
  // [ locals offset            ]
  // [ sender's sp              ]
  // [ sender's fp              ]
  // [ return address           ] <--- fp
  // [ method holder mirror     ]
  // [ result type info         ]
  // [ argument word n-1        ] <--- ( sender's sp )
  //   ...
  // [ argument word 0          ] <--- S7

  // start execution
#ifdef ASSERT
  {
    Label L;
    __ ld(AT, FP, frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ beq(AT, SP, L);
    __ delayed()->nop();
    __ stop("broken stack frame setup in interpreter in asm");
    __ bind(L);
  }
#endif

  // jvmti/jvmpi support
  __ notify_method_entry();

  // work registers
  const Register method = Rmethod;
  //const Register thread = T2;
  const Register t      = RT4;

  __ get_method(method);
  __ verify_oop(method);
  {
    Label L, Lstatic;
    __ ld(t, method, in_bytes(Method::const_offset()));
    __ lhu(t, t, in_bytes(ConstMethod::size_of_parameters_offset()));
    // MIPS n64 ABI: caller does not reserve space for the register arguments.
    // A0 and A1 (if needed)
    __ lw(AT, Rmethod, in_bytes(Method::access_flags_offset()));
    __ andi(AT, AT, JVM_ACC_STATIC);
    __ beq(AT, R0, Lstatic);
    __ delayed()->nop();
    __ daddiu(t, t, 1);
    __ bind(Lstatic);
    __ daddiu(t, t, -7);
    __ blez(t, L);
    __ delayed()->nop();
    __ dsll(t, t, Address::times_8);
    __ dsub(SP, SP, t);
    __ bind(L);
  }
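  // Illustrative arithmetic for the adjustment above (not in the original
  // source): the n64 ABI passes the first 8 integer arguments in A0-A7. The
  // native call receives JNIEnv (A0) plus, for static methods, the mirror
  // handle (A1), so after the static adjustment t - 7 equals the number of
  // argument words that must spill to the stack. For example, a static
  // native method with 9 Java parameter slots spills (9 + 1 + 1) - 8 = 3
  // words, and the code above grows SP by 3 * 8 bytes.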

  __ move(AT, -(StackAlignmentInBytes));
  __ andr(SP, SP, AT);
  __ move(AT, SP);
  // [                          ] <--- sp
  //   ...                        (size of parameters - 8 )
  // [ monitor entry            ]
  //   ...
  // [ monitor entry            ]
  // [ monitor block top        ] ( the top monitor entry )
  // [ byte code pointer (0)    ] (if native, bcp = 0)
  // [ constant pool cache      ]
  // [ Method*                  ]
  // [ locals offset            ]
  // [ sender's sp              ]
  // [ sender's fp              ]
  // [ return address           ] <--- fp
  // [ method holder mirror     ]
  // [ result type info         ]
  // [ argument word n-1        ] <--- ( sender's sp )
  //   ...
  // [ argument word 0          ] <--- LVP

  // get signature handler
  {
    Label L;
    __ ld(T9, method, in_bytes(Method::signature_handler_offset()));
    __ bne(T9, R0, L);
    __ delayed()->nop();
    __ call_VM(NOREG, CAST_FROM_FN_PTR(address,
               InterpreterRuntime::prepare_native_call), method);
    __ get_method(method);
    __ ld(T9, method, in_bytes(Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  // FIXME: when changing code in InterpreterRuntime, note this point
  // from: begin of parameters
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == LVP, "adjust this code");
  // to: current sp
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == SP, "adjust this code");
  // temp: T3
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t, "adjust this code");

  __ jalr(T9);
  __ delayed()->nop();
  __ get_method(method);  // slow path call blows EBX on DevStudio 5.0

  /*
     If the native function is static and its second parameter has the length
     of a double word while the first parameter has the length of a word, we
     have to reserve one word for the first parameter, according to the MIPS
     o32 ABI. If the native function is not static, the same applies to the
     second and third parameters.
   */

  // result handler is in V0
  // set result handler
  __ sd(V0, FP, (frame::interpreter_frame_result_handler_offset)*wordSize);

#define FIRSTPARA_SHIFT_COUNT 5
#define SECONDPARA_SHIFT_COUNT 9
#define THIRDPARA_SHIFT_COUNT 13
#define PARA_MASK  0xf

  // pass mirror handle if static call
  {
    Label L;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ lw(t, method, in_bytes(Method::access_flags_offset()));
    __ andi(AT, t, JVM_ACC_STATIC);
    __ beq(AT, R0, L);
    __ delayed()->nop();

    // get mirror
    __ ld(t, method, in_bytes(Method::const_offset()));
    __ ld(t, t, in_bytes(ConstMethod::constants_offset()));
    __ ld(t, t, ConstantPool::pool_holder_offset_in_bytes());
    __ ld(t, t, mirror_offset);
    // copy mirror into activation frame
    //__ sw(t, FP, frame::interpreter_frame_oop_temp_offset * wordSize);
    // pass handle to mirror
    __ sd(t, FP, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ daddi(t, FP, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ move(A1, t);
    __ bind(L);
  }

  // [ mthd holder mirror ptr   ] <--- sp  --------------------| (only for static method)
  // [                          ]                              |
  //   ...                        size of parameters(or +1)    |
  // [ monitor entry            ]                              |
  //   ...                                                     |
  // [ monitor entry            ]                              |
  // [ monitor block top        ] ( the top monitor entry )    |
  // [ byte code pointer (0)    ] (if native, bcp = 0)         |
  // [ constant pool cache      ]                              |
  // [ Method*                  ]                              |
  // [ locals offset            ]                              |
  // [ sender's sp              ]                              |
  // [ sender's fp              ]                              |
  // [ return address           ] <--- fp                      |
  // [ method holder mirror     ] <----------------------------|
  // [ result type info         ]
  // [ argument word n-1        ] <--- ( sender's sp )
  //   ...
  // [ argument word 0          ] <--- S7

  // get native function entry point
  {
    Label L;
    __ ld(T9, method, in_bytes(Method::native_function_offset()));
    __ li(V1, SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ bne(V1, T9, L);
    __ delayed()->nop();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
    __ get_method(method);
    __ verify_oop(method);
    __ ld(T9, method, in_bytes(Method::native_function_offset()));
    __ bind(L);
  }
  /*
  __ pushad();
  __ move(A0, T9);
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::func_debug), relocInfo::runtime_call_type);
  __ popad();
  */

  // pass JNIEnv
  // native function in T9
#ifndef OPT_THREAD
  __ get_thread(thread);
#endif
  __ daddi(t, thread, in_bytes(JavaThread::jni_environment_offset()));
  __ move(A0, t);
  // [ jni environment          ] <--- sp
  // [ mthd holder mirror ptr   ] ---------------------------->| (only for static method)
  // [                          ]                              |
  //   ...                        size of parameters           |
  // [ monitor entry            ]                              |
  //   ...                                                     |
  // [ monitor entry            ]                              |
  // [ monitor block top        ] ( the top monitor entry )    |
  // [ byte code pointer (0)    ] (if native, bcp = 0)         |
  // [ constant pool cache      ]                              |
  // [ Method*                  ]                              |
  // [ locals offset            ]                              |
  // [ sender's sp              ]                              |
  // [ sender's fp              ]                              |
  // [ return address           ] <--- fp                      |
  // [ method holder mirror     ] <----------------------------|
  // [ result type info         ]
  // [ argument word n-1        ] <--- ( sender's sp )
  //   ...
  // [ argument word 0          ] <--- S7

  // set_last_Java_frame_before_call
  __ sd(FP, thread, in_bytes(JavaThread::last_Java_fp_offset()));
  // Change state to native (we save the return address in the thread, since it might not
  // be pushed on the stack when we do a stack traversal). It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  __ li(t, __ pc());
  __ sd(t, thread, in_bytes(JavaThread::last_Java_pc_offset()));
  __ sd(SP, thread, in_bytes(JavaThread::last_Java_sp_offset()));

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ lw(t, thread, in_bytes(JavaThread::thread_state_offset()));
    __ daddi(t, t, (-1) * _thread_in_Java);
    __ beq(t, R0, L);
    __ delayed()->nop();
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  __ move(t, _thread_in_native);
  __ sw(t, thread, in_bytes(JavaThread::thread_state_offset()));

  // call native method
  __ jalr(T9);
  __ delayed()->nop();
  // result potentially in V0 or F0

  // It is safe to push the result here: the thread state is _thread_in_native
  // and the return address will be found via _last_native_pc and not
  // via _last_java_sp.
  // NOTE: the order of these pushes is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.
  // FIXME, should modify here
  // save return value to keep the value from being destroyed by other calls
  __ push(dtos);
  __ push(ltos);

  // change thread state
  __ get_thread(thread);
  __ move(t, _thread_in_native_trans);
  __ sw(t, thread, in_bytes(JavaThread::thread_state_offset()));

  if (os::is_MP()) __ sync(); // Force this write out before the read below

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;

    // Don't use call_VM as it will see a possible pending exception and forward it
    // and never return here preventing us from clearing _last_native_pc down below.
    // Also can't use call_VM_leaf either as it will check to see if BCP & LVP are
    // preserved and correspond to the bcp/locals pointers. So we do a runtime call
    // by hand.
    //
    Label L;
    __ li(AT, SafepointSynchronize::address_of_state());
    __ lw(AT, AT, 0);
    __ bne(AT, R0, L);
    __ delayed()->nop();
    __ lw(AT, thread, in_bytes(JavaThread::suspend_flags_offset()));
    __ beq(AT, R0, Continue);
    __ delayed()->nop();
    __ bind(L);
    __ move(A0, thread);
    __ call(CAST_FROM_FN_PTR(address,
            JavaThread::check_special_condition_for_native_trans),
            relocInfo::runtime_call_type);
    __ delayed()->nop();

#ifndef OPT_THREAD
    __ get_thread(thread);
#endif
    // add for compressed oops
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  __ move(t, _thread_in_Java);
  __ sw(t, thread, in_bytes(JavaThread::thread_state_offset()));
  __ reset_last_Java_frame(thread, true);

  // reset handle block
  __ ld(t, thread, in_bytes(JavaThread::active_handles_offset()));
  __ sw(R0, t, JNIHandleBlock::top_offset_in_bytes());

  // If result was an oop then unbox and save it in the frame
  {
    Label no_oop, store_result;
    // FIXME, addi only supports a 16-bit immediate
    __ ld(AT, FP, frame::interpreter_frame_result_handler_offset*wordSize);
    __ li(T0, AbstractInterpreter::result_handler(T_OBJECT));
    __ bne(AT, T0, no_oop);
    __ delayed()->nop();
    __ pop(ltos);
    __ beq(V0, R0, store_result);
    __ delayed()->nop();
    // unbox
    __ ld(V0, V0, 0);
    __ bind(store_result);
    __ sd(V0, FP, (frame::interpreter_frame_oop_temp_offset)*wordSize);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }
  {
    Label no_reguard;
    __ lw(t, thread, in_bytes(JavaThread::stack_guard_state_offset()));
    __ move(AT, (int)JavaThread::stack_guard_yellow_disabled);
    __ bne(t, AT, no_reguard);
    __ delayed()->nop();
    __ pushad();
    __ move(S5_heapbase, SP);
    __ move(AT, -StackAlignmentInBytes);
    __ andr(SP, SP, AT);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::runtime_call_type);
    __ delayed()->nop();
    __ move(SP, S5_heapbase);
    __ popad();
    // add for compressed oops
    __ reinit_heapbase();
    __ bind(no_reguard);
  }
  1368   // restore esi to have legal interpreter frame,
  1369   // i.e., bci == 0 <=> esi == code_base()
  1370   // Can't call_VM until bcp is within reasonable.
  1371   __ get_method(method);      // method is junk from thread_in_native to now.
  1372   __ verify_oop(method);
  1373   __ ld(BCP, method, in_bytes(Method::const_offset()));
  1374   __ lea(BCP, Address(BCP, in_bytes(ConstMethod::codes_offset())));

  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ ld(t, thread, in_bytes(Thread::pending_exception_offset()));  // _pending_exception is a 64-bit oop field
    __ beq(t, R0, L);
    __ delayed()->nop();
    // Note: At some point we may want to unify this with the code used in
    // call_VM_base(); i.e., we should use the StubRoutines::forward_exception
    // code. For now this doesn't work here because the esp is not correctly
    // set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ lw(t, method, in_bytes(Method::access_flags_offset()));
    __ andi(t, t, JVM_ACC_SYNCHRONIZED);
    __ beq(t, R0, L);
    // the code below should be shared with interpreter macro assembler implementation
    {
      Label unlock;
      // The BasicObjectLock will be first in the list, since this is a
      // synchronized method. However, we need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      __ delayed()->daddi(c_rarg0, FP, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
      // address of first monitor

      __ ld(t, c_rarg0, BasicObjectLock::obj_offset_in_bytes());
      __ bne(t, R0, unlock);
      __ delayed()->nop();

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(NOREG,
                                 CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg0);
    }
    __ bind(L);
  }

  // jvmti/jvmpi support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(false, vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in V0, then call the result handler to
  // restore a potential floating-point result and handle the result
  __ pop(ltos);
  __ pop(dtos);

  __ ld(t, FP, (frame::interpreter_frame_result_handler_offset) * wordSize);
  __ jalr(t);
  __ delayed()->nop();

  // remove activation
  __ ld(SP, FP, frame::interpreter_frame_sender_sp_offset * wordSize);   // get sender sp
  __ ld(RA, FP, frame::interpreter_frame_return_addr_offset * wordSize); // get return address
  __ ld(FP, FP, frame::interpreter_frame_sender_fp_offset * wordSize);   // restore sender's fp
  __ jr(RA);
  __ delayed()->nop();

#ifndef CORE
  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
    // entry_point is the beginning of this
    // function and checks again for compiled code
  }
#endif
  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
// Layout of frame just at the entry
//
//   [ argument word n-1  ] <--- sp
//     ...
//   [ argument word 0    ]
//
// assume Method* is in Rmethod before calling this entry.
// prerequisite to the generated stub: the callee Method* in Rmethod
// note: you must save the caller's bcp before calling the generated stub
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // Rmethod: Method*
  // Rsender: sender's sp
  address entry_point = __ pc();

  const Address invocation_counter(Rmethod,
      in_bytes(MethodCounters::invocation_counter_offset() + InvocationCounter::counter_offset()));

  // get parameter size (always needed)
  __ ld(T3, Rmethod, in_bytes(Method::const_offset()));  // T3 --> Rmethod._constMethod
  __ lhu(V0, T3, in_bytes(ConstMethod::size_of_parameters_offset()));

  // Rmethod: Method*
  // V0: size of parameters
  // Rsender: sender's sp; may differ from sp + wordSize if we call via c2i
  // get size of locals in words to T2
  __ lhu(T2, T3, in_bytes(ConstMethod::size_of_locals_offset()));
  // T2 = number of additional locals (size_of_locals includes the parameters)
  __ dsub(T2, T2, V0);
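
  // For example (illustrative numbers only): size_of_locals() == 5 and
  // size_of_parameters() == 2 leave T2 == 3, i.e. three non-parameter
  // local slots for the initialization loop further down to zero.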

  // see if we've got enough room on the stack for locals plus overhead.
  // Layout of frame at this point
  //
  // [ argument word n-1  ] <--- sp
  //   ...
  // [ argument word 0    ]
  generate_stack_overflow_check();
  // after this function, the layout of frame does not change

  // compute beginning of parameters (LVP)
  __ dsll(LVP, V0, LogBytesPerWord);
  __ daddiu(LVP, LVP, (-1) * wordSize);
  __ dadd(LVP, LVP, SP);
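
  // The three instructions above compute (a sketch):
  //
  //   LVP = SP + V0 * wordSize - wordSize;
  //
  // e.g. with V0 == 3 parameters and wordSize == 8, LVP = SP + 16, the
  // address of argument word 0 in the layout above; locals then grow
  // downwards from LVP.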

  // T2 - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ beq(T2, R0, exit);
    __ delayed()->nop();

    __ bind(loop);
    __ sd(R0, SP, -1 * wordSize);       // initialize local variables
    __ daddiu(T2, T2, -1);              // until everything initialized
    __ bne(T2, R0, loop);
    __ delayed();

    __ daddiu(SP, SP, (-1) * wordSize); // fill delay slot

    __ bind(exit);
  }
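
  // Equivalent pseudocode for the loop above (a sketch; the SP decrement
  // sits in the branch delay slot):
  //
  //   while (T2-- != 0) {
  //     *(SP - wordSize) = 0;  // zero the next local slot
  //     SP -= wordSize;
  //   }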

  //
  // [ local var m-1       ] <--- sp
  //   ...
  // [ local var 0         ]
  // [ argument word n-1   ] <--- T0?
  //   ...
  // [ argument word 0     ] <--- LVP

  // initialize fixed part of activation frame

  generate_fixed_frame(false);

  // after this function, the layout of frame is as follows
  //
  // [ monitor block top   ] <--- sp ( the top monitor entry )
  // [ byte code pointer   ] (if native, bcp = 0)
  // [ constant pool cache ]
  // [ Method*             ]
  // [ locals offset       ]
  // [ sender's sp         ]
  // [ sender's fp         ] <--- fp
  // [ return address      ]
  // [ local var m-1       ]
  //   ...
  // [ local var 0         ]
  // [ argument word n-1   ] <--- ( sender's sp )
  //   ...
  // [ argument word 0     ] <--- LVP

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ ld(AT, Rmethod, in_bytes(Method::access_flags_offset()));
  {
    Label L;
    __ andi(T2, AT, JVM_ACC_NATIVE);
    __ beq(T2, R0, L);
    __ delayed()->nop();
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ andi(T2, AT, JVM_ACC_ABSTRACT);
    __ beq(T2, R0, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which has not
  // been entered yet, we set the thread-local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

#ifndef OPT_THREAD
  Register thread = T8;
  __ get_thread(thread);
#else
  Register thread = TREG;
#endif
  __ move(AT, (int)true);
  __ sb(AT, thread, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));

#ifndef CORE

  // 2014/11/24 Fu
  // mdp : T8
  // tmp1: T9
  // tmp2: T2
  __ profile_parameters_type(T8, T9, T2);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

#endif // CORE

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
#ifndef OPT_THREAD
  __ get_thread(thread);
#endif
  __ sb(R0, thread, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  //
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ lw(AT, Rmethod, in_bytes(Method::access_flags_offset()));
      __ andi(T2, AT, JVM_ACC_SYNCHRONIZED);
      __ beq(T2, R0, L);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // layout of frame after lock_method
  // [ monitor entry       ] <--- sp
  //   ...
  // [ monitor entry       ]
  // [ monitor block top   ] ( the top monitor entry )
  // [ byte code pointer   ] (if native, bcp = 0)
  // [ constant pool cache ]
  // [ Method*             ]
  // [ locals offset       ]
  // [ sender's sp         ]
  // [ sender's fp         ]
  // [ return address      ] <--- fp
  // [ local var m-1       ]
  //   ...
  // [ local var 0         ]
  // [ argument word n-1   ] <--- ( sender's sp )
  //   ...
  // [ argument word 0     ] <--- LVP

  // start execution
#ifdef ASSERT
  {
    Label L;
    __ ld(AT, FP, frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ beq(AT, SP, L);
    __ delayed()->nop();
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti/jvmpi support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                 InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(Rmethod);
      __ b(profile_method_continue);
      __ delayed()->nop();
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call
// methods. These both come in synchronized and non-synchronized
// versions but the frame layout they create is very similar. The
// other method entry types are really just special purpose entries
// that are really entry and interpretation all in one. These are for
// trivial methods like accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// Rmethod: Method*
// V0: receiver
//
//
// Stack layout immediately at entry
//
// [ parameter n-1      ] <--- sp
//   ...
// [ parameter 0        ]
// [ expression stack   ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized entries,
// the stack will look like below when we are ready to execute the
// first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects.
//
// local variables follow incoming parameters immediately; i.e.
// the return address is moved to the end of the locals.
//
// [ monitor entry       ] <--- sp
//   ...
// [ monitor entry       ]
// [ monitor block top   ] ( the top monitor entry )
// [ byte code pointer   ] (if native, bcp = 0)
// [ constant pool cache ]
// [ Method*             ]
// [ locals offset       ]
// [ sender's sp         ]
// [ sender's fp         ]
// [ return address      ] <--- fp
// [ local var m-1       ]
//   ...
// [ local var 0         ]
// [ argument word n-1   ] <--- ( sender's sp )
//   ...
// [ argument word 0     ] <--- S7

address AbstractInterpreterGenerator::generate_method_entry(
                                        AbstractInterpreter::MethodKind kind) {
  // determine code generation flags
  bool synchronized = false;
  address entry_point = NULL;
  switch (kind) {
    case Interpreter::zerolocals             :
      break;
    case Interpreter::zerolocals_synchronized:
      synchronized = true;
      break;
    case Interpreter::native                 :
      entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false);
      break;
    case Interpreter::native_synchronized    :
      entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true);
      break;
    case Interpreter::empty                  :
      entry_point = ((InterpreterGenerator*)this)->generate_empty_entry();
      break;
    case Interpreter::accessor               :
      entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();
      break;
    case Interpreter::abstract               :
      entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();
      break;

    case Interpreter::java_lang_math_sin     : // fall thru
    case Interpreter::java_lang_math_cos     : // fall thru
    case Interpreter::java_lang_math_tan     : // fall thru
    case Interpreter::java_lang_math_log     : // fall thru
    case Interpreter::java_lang_math_log10   : // fall thru
    case Interpreter::java_lang_math_pow     : // fall thru
    case Interpreter::java_lang_math_exp     : break;
    case Interpreter::java_lang_math_abs     : // fall thru
    case Interpreter::java_lang_math_sqrt    :
      entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind);      break;
    case Interpreter::java_lang_ref_reference_get:
      entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
    default:
      fatal(err_msg("unexpected method kind: %d", kind));
      break;
  }

  if (entry_point) return entry_point;

  return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
}
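
// Note: kinds that leave entry_point == NULL above (zerolocals and the math
// kinds without a specialized entry here, e.g. sin/cos/exp) fall through to
// the generic generate_normal_entry() path.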

// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  switch (method_kind(m)) {
    case Interpreter::java_lang_math_sin     : // fall thru
    case Interpreter::java_lang_math_cos     : // fall thru
    case Interpreter::java_lang_math_tan     : // fall thru
    case Interpreter::java_lang_math_abs     : // fall thru
    case Interpreter::java_lang_math_log     : // fall thru
    case Interpreter::java_lang_math_log10   : // fall thru
    case Interpreter::java_lang_math_sqrt    : // fall thru
    case Interpreter::java_lang_math_pow     : // fall thru
    case Interpreter::java_lang_math_exp     :
      return false;
    default:
      return true;
  }
}

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {

  const int entry_size    = frame::interpreter_frame_monitor_size();

  // total overhead size: entry_size + (saved fp through expr stack bottom).
  // be sure to change this if you add/subtract anything to/from the overhead area
  const int overhead_size = -(frame::interpreter_frame_initial_sp_offset) + entry_size;

  const int stub_code = 6;  // see generate_call_stub
  // return overhead_size + method->max_locals() + method->max_stack() + stub_code;
  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords;
  return overhead_size + method_stack + stub_code;
}
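
// Worked example for the computation above (illustrative numbers only; the
// real values come from the frame offsets for this port): with
// entry_size == 2, interpreter_frame_initial_sp_offset == -7,
// max_locals == 4, max_stack == 3 and one-word stack elements, this
// returns (7 + 2) + 7 + 6 == 22 words.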

void AbstractInterpreter::layout_activation(Method* method,
                                            int tempcount,
                                            int popframe_extra_args,
                                            int moncount,
                                            int caller_actual_parameters,
                                            int callee_param_count,
                                            int callee_locals,
                                            frame* caller,
                                            frame* interpreter_frame,
                                            bool is_top_frame,
                                            bool is_bottom_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in AbstractInterpreterGenerator::generate_method_entry.
  // If interpreter_frame!=NULL, set up the method, locals, and monitors.
  // The frame interpreter_frame, if not NULL, is guaranteed to be the
  // right size, as determined by a previous call to this method.
  // It is also guaranteed to be walkable even though it is in a skeletal state.

  // fixed size of an interpreter frame:

  int max_locals = method->max_locals() * Interpreter::stackElementWords;
  int extra_locals = (method->max_locals() - method->size_of_parameters()) * Interpreter::stackElementWords;

#ifdef ASSERT
  if (!EnableInvokeDynamic) {
    // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
    // Probably, since deoptimization doesn't work yet.
    assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
  }
  assert(caller->sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable(2)");
#endif

  interpreter_frame->interpreter_frame_set_method(method);
  // NOTE the difference between sender_sp and interpreter_frame_sender_sp:
  // interpreter_frame_sender_sp is the original sp of the caller (the unextended_sp)
  // while sender_sp is fp + 8
  intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;

#ifdef ASSERT
  if (caller->is_interpreted_frame()) {
    assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
  }
#endif

  interpreter_frame->interpreter_frame_set_locals(locals);
  BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
  BasicObjectLock* monbot = montop - moncount;
  interpreter_frame->interpreter_frame_set_monitor_end(monbot);

  // set last sp
  intptr_t* esp = (intptr_t*) monbot - tempcount*Interpreter::stackElementWords -
                  popframe_extra_args;
  interpreter_frame->interpreter_frame_set_last_sp(esp);
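
  // e.g. (illustrative): with tempcount == 2 live expression-stack slots,
  // popframe_extra_args == 0 and one-word stack elements, the last sp ends
  // up two words below the monitor area: esp == (intptr_t*)monbot - 2.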

  // All frames but the initial interpreter frame we fill in have a
  // value for sender_sp that allows walking the stack but isn't
  // truly correct. Correct the value here.
  if (extra_locals != 0 &&
      interpreter_frame->sender_sp() == interpreter_frame->interpreter_frame_sender_sp()) {
    interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + extra_locals);
  }
  *interpreter_frame->interpreter_frame_cache_addr() = method->constants()->cache();
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ sd(R0, FP, frame::interpreter_frame_last_sp_offset * wordSize);

  // V0: exception
  // V1: return address/pc that threw exception
  __ restore_bcp();                              // BCP points to call/send
  __ restore_locals();

  // add for compressed oops
  __ reinit_heapbase();
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // V0: exception
  // BCP: exception bcp
  __ verify_oop(V0);

  // expression stack must be empty before entering the VM in case of an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ move(A1, V0);
  __ call_VM(V1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), A1);
  // V0: exception handler entry point
  // V1: preserved exception oop
  // S0: bcp for exception handler
  __ daddi(SP, SP, (-1) * wordSize);
  __ sd(V1, SP, 0);  // push exception which is now the only value on the stack
  __ jr(V0);         // jump to exception handler (may be _remove_activation_entry!)
  __ delayed()->nop();
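
  // In effect (a sketch): the exception oop is re-pushed as the sole stack
  // value and control transfers to whatever handler address the VM call
  // returned:
  //
  //   *--SP = exception_oop;   // preserved in V1
  //   goto *handler_entry;     // returned in V0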

  // If the exception is not handled in the current frame the frame is removed and
  // the exception is rethrown (i.e. exception continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction which caused
  //       the exception and the expression stack is empty. Thus, for any VM calls
  //       at this point, GC will find a legal oop map (with empty expression stack).

  // In current activation
  // V0: exception
  // BCP: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition indicating that we are
  // currently handling popframe, so that call_VMs that may happen later do not trigger new
  // popframe handling cycles.
#ifndef OPT_THREAD
  Register thread = T2;
  __ get_thread(T2);
#else
  Register thread = TREG;
#endif
  __ lw(T3, thread, in_bytes(JavaThread::popframe_condition_offset()));
  __ ori(T3, T3, JavaThread::popframe_processing_bit);
  __ sw(T3, thread, in_bytes(JavaThread::popframe_condition_offset()));

#ifndef CORE
  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ ld(A0, FP, frame::return_addr_offset * wordSize);
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), A0);
    __ bne(V0, R0, caller_not_deoptimized);
    __ delayed()->nop();

    // Compute size of arguments for saving when returning to deoptimized caller
    __ get_method(A1);
    __ verify_oop(A1);
    __ ld(A1, A1, in_bytes(Method::const_offset()));
    __ lhu(A1, A1, in_bytes(ConstMethod::size_of_parameters_offset()));
    __ shl(A1, Interpreter::logStackElementSize);
    __ restore_locals();
    __ dsub(A2, LVP, A1);
    __ daddiu(A2, A2, wordSize);
    // Save these arguments
#ifndef OPT_THREAD
    __ get_thread(A0);
#else
    __ move(A0, TREG);
#endif
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), A0, A1, A2);

    __ remove_activation(vtos, T9, false, false, false);

    // Inform deoptimization that it is responsible for restoring these arguments
#ifndef OPT_THREAD
    __ get_thread(thread);
#endif
    __ move(AT, JavaThread::popframe_force_deopt_reexecution_bit);
    __ sw(AT, thread, in_bytes(JavaThread::popframe_condition_offset()));
    // Continue in deoptimization handler
    __ jr(T9);
    __ delayed()->nop();

    __ bind(caller_not_deoptimized);
  }
#endif /* !CORE */

  __ remove_activation(vtos, T3,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling.
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
  __ move(T8, SP);
  __ ld(A2, FP, frame::interpreter_frame_last_sp_offset * wordSize);
#ifndef OPT_THREAD
  __ get_thread(thread);
#endif
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, FP, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, T8, A2);
  __ reset_last_Java_frame(thread, true);
  // Restore the last_sp and null it out
  __ ld(SP, FP, frame::interpreter_frame_last_sp_offset * wordSize);
  __ sd(R0, FP, frame::interpreter_frame_last_sp_offset * wordSize);

  // Clear the popframe condition flag
  __ move(AT, JavaThread::popframe_inactive);
  __ sw(AT, thread, in_bytes(JavaThread::popframe_condition_offset()));

  // Finish with popframe handling
  __ restore_bcp();
  __ restore_locals();
#ifndef CORE
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }
#endif // !CORE
  // Clear the popframe condition flag
#ifndef OPT_THREAD
  __ get_thread(thread);
#endif
  __ move(AT, JavaThread::popframe_inactive);
  __ sw(AT, thread, in_bytes(JavaThread::popframe_condition_offset()));
  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ ld(T0, SP, 0);
  __ daddi(SP, SP, wordSize);
#ifndef OPT_THREAD
  __ get_thread(thread);
#endif
  __ sd(T0, thread, in_bytes(JavaThread::vm_result_offset()));
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, T3, false, true, false);
  // restore exception
  __ get_vm_result(T0, thread);
  __ verify_oop(T0);

  // In between activations - previous activation type unknown yet.
  // Compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // T0: exception                                (x86: eax)
  // T3: return address/pc that threw exception   (x86: edx)
  // SP: expression stack of caller               (x86: esp)
  // FP: fp of caller                             (x86: ebp)
  __ daddi(SP, SP, (-2) * wordSize);
  __ sd(T0, SP, wordSize);  // save exception
  __ sd(T3, SP, 0);         // save return address
  __ move(A1, T3);
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, A1);
  __ move(T9, V0);          // save exception handler
  __ ld(V0, SP, wordSize);  // restore exception
  __ ld(V1, SP, 0);         // restore return address
  __ daddi(SP, SP, 2 * wordSize);

  // Note that an "issuing PC" is actually the next PC after the call
  __ jr(T9);                // jump to exception handler of caller
  __ delayed()->nop();
}

//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();
  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ empty_FPU_stack();
  __ load_earlyret_value(state);

#ifndef OPT_THREAD
  __ get_thread(TREG);
#endif
  __ ld_ptr(T9, TREG, in_bytes(JavaThread::jvmti_thread_state_offset()));
  const Address cond_addr(T9, in_bytes(JvmtiThreadState::earlyret_state_offset()));
  // Clear the earlyret state
  __ move(AT, JvmtiThreadState::earlyret_inactive);
  __ sw(AT, cond_addr);
  __ sync();

  __ remove_activation(state, T0,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ sync();
  __ jr(T0);
  __ delayed()->nop();
  return entry;
} // end of ForceEarlyReturn support

//-----------------------------------------------------------------------------
// Helper for vtos entry point generation
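//
// Each non-vtos entry below pushes the value cached in the corresponding
// top-of-stack register onto the expression stack and then falls into the
// common vtos path, roughly (a sketch of the generated code):
//
//   fep: push(ftos); goto L;   // spill cached float TOS
//   dep: push(dtos); goto L;   // spill cached double TOS
//   lep: push(ltos); goto L;   // spill cached long TOS
//   aep: push(atos); goto L;   // spill cached object TOS
//   bep = cep = sep = iep: push(itos);  // byte/char/short/int share itos
//   vep: L:                    // nothing cached, nothing to push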

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  fep = __ pc(); __ push(ftos); __ b(L); __ delayed()->nop();
  dep = __ pc(); __ push(dtos); __ b(L); __ delayed()->nop();
  lep = __ pc(); __ push(ltos); __ b(L); __ delayed()->nop();
  aep = __ pc(); __ push(atos); __ b(L); __ delayed()->nop();
  bep = cep = sep =
  iep = __ pc(); __ push(itos);
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch

InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
  generate_all(); // down here so it can be "virtual"
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  // prepare expression stack
  __ push(state);       // save tosca

  // tos & tos2
  // trace_bytecode actually needs 4 arguments; the last two are tos & tos2.
  // That works fine on x86, but with the MIPS o32 calling convention A2-A3
  // are spilled to the stack slots the callee expects to hold tos & tos2,
  // so an error occurs when the expression stack holds fewer than 2 values.
  __ ld(A2, SP, 0);
  __ ld(A3, SP, 1 * wordSize);

  // pass arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), RA, A2, A3);
  __ move(RA, V0);      // make sure return address is not destroyed by pop(state)

  // restore expression stack
  __ pop(state);        // restore tosca

  // return
  __ jr(RA);
  __ delayed()->nop();

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ li(T8, (long)&BytecodeCounter::_counter_value);
  __ lw(AT, T8, 0);
  __ daddi(AT, AT, 1);
  __ sw(AT, T8, 0);
}
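
// The generated code above is simply the (non-atomic, debug-only) increment:
//
//   BytecodeCounter::_counter_value++;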

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ li(T8, (long)&BytecodeHistogram::_counters[t->bytecode()]);
  __ lw(AT, T8, 0);
  __ daddi(AT, AT, 1);
  __ sw(AT, T8, 0);
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  __ li(T8, (long)&BytecodePairHistogram::_index);
  __ lw(T9, T8, 0);
  __ dsrl(T9, T9, BytecodePairHistogram::log2_number_of_codes);
  __ li(T8, ((long)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ orr(T9, T9, T8);
  __ li(T8, (long)&BytecodePairHistogram::_index);
  __ sw(T9, T8, 0);
  __ dsll(T9, T9, 2);
  __ li(T8, (long)BytecodePairHistogram::_counters);
  __ dadd(T8, T8, T9);
  __ lw(AT, T8, 0);
  __ daddi(AT, AT, 1);
  __ sw(AT, T8, 0);
}
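
// Equivalent C for the pair histogram above (a sketch; the counter update is
// not atomic):
//
//   _index = (_index >> log2_number_of_codes) |
//            ((int)t->bytecode() << log2_number_of_codes);
//   _counters[_index]++;   // the dsll by 2 scales the index by sizeof(int)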

void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  address entry = Interpreter::trace_code(t->tos_in());
  assert(entry != NULL, "entry must have been generated");
  __ call(entry, relocInfo::none);
  __ delayed()->nop();
  // add for compressed oops
  __ reinit_heapbase();
}

void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ li(T8, long(&BytecodeCounter::_counter_value));
  __ lw(T8, T8, 0);
  __ move(AT, StopInterpreterAt);
  __ bne(T8, AT, L);
  __ delayed()->nop();
  __ call(CAST_FROM_FN_PTR(address, os::breakpoint), relocInfo::runtime_call_type);
  __ delayed()->nop();
  __ bind(L);
}
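
// i.e. break into the debugger once the global bytecode counter reaches the
// value of the develop flag StopInterpreterAt (a sketch):
//
//   if (BytecodeCounter::_counter_value == StopInterpreterAt)
//     os::breakpoint();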

#endif // !PRODUCT
#endif // !CC_INTERP