src/cpu/sparc/vm/templateInterpreter_sparc.cpp

author:      dlong
date:        Wed, 24 Apr 2013 20:55:28 -0400
changeset:   5000:a6e09d6dd8e5
parent:      4936:aeaca88565e6
child:       5225:603ca7e51354
permissions: -rw-r--r--

8003853: specify offset of IC load in java_to_interp stub
Summary: refactored code to allow platform-specific differences
Reviewed-by: dlong, twisti
Contributed-by: Goetz Lindenmaier <goetz.lindenmaier@sap.com>

/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH

// Generation of Interpreter
//
// The InterpreterGenerator generates the interpreter into Interpreter::_code.

#define __ _masm->
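// ("__" expanding to "_masm->" is the usual HotSpot shorthand; it lets the
//  generator calls below read like an assembly listing.)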
//----------------------------------------------------------------------------------------------------
void InterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;

  // result potentially in F0/F1: save it across calls
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // save and restore any potential method result value around the unlocking operation
  __ stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
  __ stx(O0, l_tmp);
#else
  __ std(O0, l_tmp);
#endif
}

void InterpreterGenerator::restore_native_result(void) {
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
  __ ldx(l_tmp, O0);
#else
  __ ldd(l_tmp, O0);
#endif
}
address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // load exception object
  __ set((intptr_t)name, G3_scratch);
  if (pass_oop) {
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
  } else {
    __ set((intptr_t)message, G4_scratch);
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
  }
  // throw exception
  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
  AddressLiteral thrower(Interpreter::throw_exception_entry());
  __ jump_to(thrower, G3_scratch);
  __ delayed()->nop();
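  // (Every SPARC branch has a delay slot: the instruction after the jump is
  //  executed before control actually transfers, so a nop fills the slot when
  //  nothing useful can be hoisted into it.)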
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             Otos_i);
  __ should_not_reach_here();
  return entry;
}
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // convention: expect aberrant index in register G3_scratch, then shuffle the
  // index to G4_scratch for the VM call
  __ mov(G3_scratch, G4_scratch);
  __ set((intptr_t)name, G3_scratch);
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
  __ should_not_reach_here();
  return entry;
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ should_not_reach_here();
  return entry;
}
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
  TosState incoming_state = state;

  Label cont;
  address compiled_entry = __ pc();

  address entry = __ pc();
#if !defined(_LP64) && defined(COMPILER2)
  // All return values are where we want them, except for Longs.  C2 returns
  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
  // Since the interpreter will return longs in G1 and O0/O1 in the 32-bit
  // build even if we are returning from interpreted we just do a little
  // stupid shuffling.
  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
  // do this here. Unfortunately if we did a rethrow we'd see a machepilog node
  // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
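  // (srl by 0 zero-extends the low 32 bits of G1 into O1; srlx by 32 leaves
  //  the high 32 bits in O0.)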
  if (incoming_state == ltos) {
    __ srl (G1,  0, O1);
    __ srlx(G1, 32, O0);
  }
#endif // !_LP64 && COMPILER2

  __ bind(cont);

  // The callee returns with the stack possibly adjusted by adapter transition
  // We remove that possible adjustment here.
  // All interpreter local registers are untouched. Any result is passed back
  // in the O0/O1 or float registers. Before continuing, the arguments must be
  // popped from the java expression stack; i.e., Lesp must be adjusted.

  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.

  Label L_got_cache, L_giant_index;
  const Register cache = G3_scratch;
  const Register size  = G1_scratch;
  if (EnableInvokeDynamic) {
    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode.
    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokedynamic, Assembler::equal, Assembler::pn, L_giant_index);
  }
  __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
  __ bind(L_got_cache);
  __ ld_ptr(cache, ConstantPoolCache::base_offset() +
                   ConstantPoolCacheEntry::flags_offset(), size);
  __ and3(size, 0xFF, size);                   // argument size in words
  __ sll(size, Interpreter::logStackElementSize, size); // each argument size in bytes
  __ add(Lesp, size, Lesp);                    // pop arguments
  __ dispatch_next(state, step);

  // out of the main line of code...
  if (EnableInvokeDynamic) {
    __ bind(L_giant_index);
    __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4));
    __ ba_short(L_got_cache);
  }

  return entry;
}
address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
    __ br_null_short(Gtemp, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}
// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
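  // (after_save() maps each Otos out-register to the matching in-register, so
  //  values stored in Itos_* reappear as Otos_* in the caller's window once the
  //  restore in the delay slot of the ret below unwinds this frame.)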
  switch (type) {
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
    case T_LONG   :
#ifndef _LP64
                    __ mov(O1, Itos_l2);  // move other half of long
#endif              // ifdef or no ifdef, fall through to the T_INT case
    case T_INT    : __ mov(O0, Itos_i);                         break;
    case T_VOID   : /* nothing to do */                         break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );     break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );     break;
    case T_OBJECT :
      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
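  // (The T_BOOLEAN case above normalizes O0 to 0/1: subcc G0 - O0 sets the
  //  carry flag exactly when O0 is nonzero, and addc materializes that flag.)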
  __ ret();                           // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_int32(0);)       // marker for disassembly
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
  return entry;
}

address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  __ dispatch_next(state);
  return entry;
}
//
// Helpers for commoning out cases in the various type of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
// ??: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in MethodCounters* or in
  // MDO depending on whether we're profiling or not.
  const Register Rcounters = G3_scratch;
  Label done;

  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo;
    if (ProfileInterpreter) {
      // If no method data exists, go to profile_continue.
      __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
      __ br_null_short(G4_scratch, Assembler::pn, no_mdo);
      // Increment counter
      Address mdo_invocation_counter(G4_scratch,
                                     in_bytes(MethodData::invocation_counter_offset()) +
                                     in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                 G3_scratch, Lscratch,
                                 Assembler::zero, overflow);
      __ ba_short(done);
    }

    // Increment counter in MethodCounters*
    __ bind(no_mdo);
    Address invocation_counter(Rcounters,
            in_bytes(MethodCounters::invocation_counter_offset()) +
            in_bytes(InvocationCounter::counter_offset()));
    __ get_method_counters(Lmethod, Rcounters, done);
    __ increment_mask_and_jump(invocation_counter, increment, mask,
                               G4_scratch, Lscratch,
                               Assembler::zero, overflow);
    __ bind(done);
  } else {
    // Update standard invocation counters
    __ get_method_counters(Lmethod, Rcounters, done);
    __ increment_invocation_counter(Rcounters, O0, G4_scratch);
    if (ProfileInterpreter) {
      Address interpreter_invocation_counter(Rcounters,
            in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
      __ ld(interpreter_invocation_counter, G4_scratch);
      __ inc(G4_scratch);
      __ st(G4_scratch, interpreter_invocation_counter);
    }

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      AddressLiteral profile_limit((address)&InvocationCounter::InterpreterProfileLimit);
      __ load_contents(profile_limit, G3_scratch);
      __ cmp_and_br_short(O0, G3_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(*profile_method);
    }

    AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
    __ load_contents(invocation_limit, G3_scratch);
    __ cmp(O0, G3_scratch);
    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
    __ delayed()->nop();
    __ bind(done);
  }
}
// Allocate monitor and lock method (asm interpreter)
// Lmethod - Method*
//
void InterpreterGenerator::lock_method(void) {
  __ ld(Lmethod, in_bytes(Method::access_flags_offset()), O0);  // Load access flags.

#ifdef ASSERT
 { Label ok;
   __ btst(JVM_ACC_SYNCHRONIZED, O0);
   __ br( Assembler::notZero, false, Assembler::pt, ok);
   __ delayed()->nop();
   __ stop("method doesn't need synchronization");
   __ bind(ok);
  }
#endif // ASSERT

  // get synchronization object to O0
  { Label done;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, true, Assembler::pt, done);
    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case

    __ ld_ptr( Lmethod, in_bytes(Method::const_offset()), O0);
    __ ld_ptr( O0, in_bytes(ConstMethod::constants_offset()), O0);
    __ ld_ptr( O0, ConstantPool::pool_holder_offset_in_bytes(), O0);

    // lock the mirror, not the Klass*
    __ ld_ptr( O0, mirror_offset, O0);

#ifdef ASSERT
    __ tst(O0);
    __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
#endif // ASSERT

    __ bind(done);
  }

  __ add_monitor_to_stack(true, noreg, noreg);  // allocate monitor elem
  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes());   // store object
  // __ untested("lock_object from method entry");
  __ lock_object(Lmonitors, O0);
}
void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
                                                         Register Rscratch,
                                                         Register Rscratch2) {
  const int page_size = os::vm_page_size();
  Label after_frame_check;

  assert_different_registers(Rframe_size, Rscratch, Rscratch2);

  __ set(page_size, Rscratch);
  __ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);

  // get the stack base, and in debug, verify it is non-zero
  __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
#ifdef ASSERT
  Label base_not_zero;
  __ br_notnull_short(Rscratch, Assembler::pn, base_not_zero);
  __ stop("stack base is zero in generate_stack_overflow_check");
  __ bind(base_not_zero);
#endif

  // get the stack size, and in debug, verify it is non-zero
  assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
  __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
#ifdef ASSERT
  Label size_not_zero;
  __ br_notnull_short(Rscratch2, Assembler::pn, size_not_zero);
  __ stop("stack size is zero in generate_stack_overflow_check");
  __ bind(size_not_zero);
#endif

  // compute the beginning of the protected zone minus the requested frame size
  __ sub( Rscratch, Rscratch2,   Rscratch );
  __ set( (StackRedPages+StackYellowPages) * page_size, Rscratch2 );
  __ add( Rscratch, Rscratch2,   Rscratch );
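  // (Rscratch = stack_base - stack_size is the lowest stack address; adding
  //  the red/yellow guard area leaves it pointing at the first usable byte
  //  above the protected pages.)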
  // Add in the size of the frame (which is the same as subtracting it from the
  // SP, which would take another register).
  __ add( Rscratch, Rframe_size, Rscratch );

  // the frame is greater than one page in size, so check against
  // the bottom of the stack
  __ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt, after_frame_check);

  // the stack will overflow, throw an exception

  // Note that SP is restored to sender's sp (in the delay slot). This
  // is necessary if the sender's frame is an extended compiled frame
  // (see gen_c2i_adapter()) and safer anyway in case of JSR292
  // adaptations.

  // Note also that the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
  __ jump_to(stub, Rscratch);
  __ delayed()->mov(O5_savedSP, SP);

  // if you get to here, then there is enough stack space
  __ bind( after_frame_check );
}
//
// Generate a fixed interpreter frame. This is identical setup for interpreted
// methods and for native methods hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  //
  //
  // The entry code sets up a new interpreter frame in 4 steps:
  //
  // 1) Increase caller's SP by the extra local space needed:
  //    (check for overflow)
  //    Efficient implementation of xload/xstore bytecodes requires
  //    that arguments and non-argument locals are in a contiguously
  //    addressable memory block => non-argument locals must be
  //    allocated in the caller's frame.
  //
  // 2) Create a new stack frame and register window:
  //    The new stack frame must provide space for the standard
  //    register save area, the maximum java expression stack size,
  //    the monitor slots (0 slots initially), and some frame local
  //    scratch locations.
  //
  // 3) The following interpreter activation registers must be setup:
  //    Lesp       : expression stack pointer
  //    Lbcp       : bytecode pointer
  //    Lmethod    : method
  //    Llocals    : locals pointer
  //    Lmonitors  : monitor pointer
  //    LcpoolCache: constant pool cache
  //
  // 4) Initialize the non-argument locals if necessary:
  //    Non-argument locals may need to be initialized to NULL
  //    for GC to work. If the oop-map information is accurate
  //    (in the absence of the JSR problem), no initialization
  //    is necessary.
  //
  // (gri - 2/25/2000)
  int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );

  const int extra_space =
    rounded_vm_local_words +                   // frame local scratch space
    //6815692//Method::extra_stack_words() +       // extra push slots for MH adapters
    frame::memory_parameter_word_sp_offset +   // register save area
    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);

  const Register Glocals_size = G3;
  const Register RconstMethod = Glocals_size;
  const Register Otmp1 = O3;
  const Register Otmp2 = O4;
  // Lscratch can't be used as a temporary because the call_stub uses
  // it to assert that the stack frame was setup correctly.
  const Address constMethod       (G5_method, Method::const_offset());
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());

  __ ld_ptr( constMethod, RconstMethod );
  __ lduh( size_of_parameters, Glocals_size);

  // Gargs points to first local + BytesPerWord
  // Set the saved SP after the register window save
  //
  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
  __ add(Gargs, Otmp1, Gargs);

  if (native_call) {
    __ calc_mem_param_words( Glocals_size, Gframe_size );
    __ add( Gframe_size,  extra_space, Gframe_size);
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );
  } else {

    //
    // Compute number of locals in method apart from incoming parameters
    //
    const Address size_of_locals    (Otmp1, ConstMethod::size_of_locals_offset());
    __ ld_ptr( constMethod, Otmp1 );
    __ lduh( size_of_locals, Otmp1 );
    __ sub( Otmp1, Glocals_size, Glocals_size );
    __ round_to( Glocals_size, WordsPerLong );
    __ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );

    // see if the frame is greater than one page in size. If so,
    // then we need to verify there is enough stack space remaining
    // Frame_size = (max_stack + extra_space) * BytesPerWord;
    __ ld_ptr( constMethod, Gframe_size );
    __ lduh( Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size );
    __ add( Gframe_size, extra_space, Gframe_size );
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);

    // Add in java locals size for stack overflow check only
    __ add( Gframe_size, Glocals_size, Gframe_size );

    const Register Otmp2 = O4;
    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
    generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2);

    __ sub( Gframe_size, Glocals_size, Gframe_size);

    //
    // bump SP to accommodate the extra locals
    //
    __ sub( SP, Glocals_size, SP );
  }

  //
  // now set up a stack frame with the size computed above
  //
  __ neg( Gframe_size );
  __ save( SP, Gframe_size, SP );

  //
  // now set up all the local cache registers
  //
  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
  // that all present references to Lbyte_code initialize the register
  // immediately before use
  if (native_call) {
    __ mov(G0, Lbcp);
  } else {
    __ ld_ptr(G5_method, Method::const_offset(), Lbcp);
    __ add(Lbcp, in_bytes(ConstMethod::codes_offset()), Lbcp);
  }
  __ mov( G5_method, Lmethod);                 // set Lmethod
  __ get_constant_pool_cache( LcpoolCache );   // set LcpoolCache
  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
#ifdef _LP64
  __ add( Lmonitors, STACK_BIAS, Lmonitors );   // Account for 64 bit stack bias
#endif
  __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp

  // setup interpreter activation registers
  __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals

  if (ProfileInterpreter) {
#ifdef FAST_DISPATCH
    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
    // they both use I2.
    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
#endif // FAST_DISPATCH
    __ set_method_data_pointer();
  }
}
// Empty method, generate a very fast return.

address InterpreterGenerator::generate_empty_entry(void) {

  // A method that does nothing but return...

  address entry = __ pc();
  Label slow_path;

  // do nothing for empty methods (do not even increment invocation counter)
  if ( UseFastEmptyMethods) {
    // If we need a safepoint check, generate full interpreter entry.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ set(sync_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);

    // Code: _return
    __ retl();
    __ delayed()->mov(O5_savedSP, SP);

    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
  return NULL;
}
// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry).

// Generates code to elide accessor methods
// Uses G3_scratch and G1_scratch as scratch
address InterpreterGenerator::generate_accessor_entry(void) {

  // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
  // parameter size = 1
  // Note: We can only use this code if the getfield has been resolved
  //       and if we don't have a null-pointer exception => check for
  //       these conditions first and use slow path if necessary.
  address entry = __ pc();
  Label slow_path;

  // XXX: for compressed oops pointer loading and decoding doesn't fit in
  // delay slot and damages G1
  if ( UseFastAccessorMethods && !UseCompressedOops ) {
    // Check if we need to reach a safepoint and generate full interpreter
    // frame if so.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);

    // Check if local 0 != NULL
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    // check if local 0 == NULL and go the slow path
    __ br_null_short(Otos_i, Assembler::pn, slow_path);

    // read first instruction word and extract bytecode @ 1 and index @ 2
    // get first 4 bytes of the bytecodes (big endian!)
    __ ld_ptr(G5_method, Method::const_offset(), G1_scratch);
    __ ld(G1_scratch, ConstMethod::codes_offset(), G1_scratch);

    // move index @ 2 far left then to the right most two bytes.
    __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
                      ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
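    // (The left shift discards the two opcode bytes; the right shift brings
    //  the 16-bit constant pool cache index back down, pre-scaled by the size
    //  in bytes of a cache entry so it can be used directly as an offset.)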
    // get constant pool cache
    __ ld_ptr(G5_method, Method::const_offset(), G3_scratch);
    __ ld_ptr(G3_scratch, ConstMethod::constants_offset(), G3_scratch);
    __ ld_ptr(G3_scratch, ConstantPool::cache_offset_in_bytes(), G3_scratch);

    // get specific constant pool cache entry
    __ add(G3_scratch, G1_scratch, G3_scratch);

    // Check the constant Pool cache entry to see if it has been resolved.
    // If not, need the slow path.
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ and3(G1_scratch, 0xFF, G1_scratch);
    __ cmp_and_br_short(G1_scratch, Bytecodes::_getfield, Assembler::notEqual, Assembler::pn, slow_path);

    // Get the type and return field offset from the constant pool cache
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);

    Label xreturn_path;
    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Get the type from the constant pool cache
    __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
    // Make sure we don't need to mask G1_scratch after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();
    __ cmp(G1_scratch, atos );
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, itos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, stos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, ctos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
#ifdef ASSERT
    __ cmp(G1_scratch, btos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
    __ should_not_reach_here();
#endif
    __ ldsb(Otos_i, G3_scratch, Otos_i);
    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ retl();                      // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    (void) generate_normal_entry(false);
    return entry;
  }
  return NULL;
}
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");
  if (UseG1GC) {
    Label slow_path;

    // In the G1 code we don't check if we need to reach a safepoint. We
    // continue and the thread will safepoint at the next bytecode dispatch.

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    // check if local 0 == NULL and go the slow path
    __ cmp_and_brx_short(Otos_i, 0, Assembler::equal, Assembler::pn, slow_path);

    // Load the value of the referent field.
    if (Assembler::is_simm13(referent_offset)) {
      __ load_heap_oop(Otos_i, referent_offset, Otos_i);
    } else {
      __ set(referent_offset, G3_scratch);
      __ load_heap_oop(Otos_i, G3_scratch, Otos_i);
    }

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer. Note with
    // these parameters the pre-barrier does not generate
    // the load of the previous value

    __ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */,
                            Otos_i /* pre_val */,
                            G3_scratch /* tmp */,
                            true /* preserve_o_regs */);

    // _areturn
    __ retl();                      // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    (void) generate_normal_entry(false);
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}
//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch ;
  const Register Gtmp2 = G1_scratch;
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address Laccess_flags(Lmethod, Method::access_flags_offset());

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT
  // generate the code to allocate the interpreter stack frame
  generate_fixed_frame(true);

  //
  // No locals to initialize for native method
  //

  // this slot will be set later, we initialize it to null here just in
  // case we get a GC before the actual value is stored later
  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which haven't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // runtime, exception handling i.e. unlock_if_synchronized_method will
  // check this thread local flag.
  // This flag has two effects, one is to force an unwind in the topmost
  // interpreter frame and not perform an unlock while doing so.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }
  __ bind(Lcontinue);
  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(Laccess_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution
  __ verify_thread();

  // JVMTI support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.

  // (note that the space for outgoing params is preallocated)

  // get signature handler
  { Label L;
    Address signature_handler(Lmethod, Method::signature_handler_offset());
    __ ld_ptr(signature_handler, G3_scratch);
    __ br_notnull_short(G3_scratch, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
    __ ld_ptr(signature_handler, G3_scratch);
    __ bind(L);
  }
  // Push a new frame so that the args will really be stored in it.
  // Copy a few locals across so the new frame has the variables
  // we need but these values will be dead at the jni call and
  // therefore not gc volatile like the values in the current
  // frame (Lmethod in particular)

  // Flush the method pointer to the register save area
  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
  __ mov(Llocals, O1);

  // calculate where the mirror handle body is allocated in the interpreter frame:
  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);

  // Calculate current frame size
  __ sub(SP, FP, O3);         // Calculate negative of current frame size
  __ save(SP, O3, SP);        // Allocate an identical sized frame

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).

  // Load interpreter frame's Lmethod into same register here

  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  __ mov(I1, Llocals);
  __ mov(I2, Lscratch2);     // save the address of the mirror

  // ONLY Lmethod and Llocals are valid here!

  // call signature handler. It will move the arg properly since Llocals in current frame
  // matches that in outer frame
  __ callr(G3_scratch, 0);
  __ delayed()->nop();

  // Result handler is in Lscratch

  // Reload interpreter frame's Lmethod since slow signature handler may block
  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  { Label not_static;

    __ ld(Laccess_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, false, Assembler::pt, not_static);
    // get native function entry point (O0 is a good temp until the very end)
    __ delayed()->ld_ptr(Lmethod, in_bytes(Method::native_function_offset()), O0);
    // for static methods insert the mirror argument
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());

    __ ld_ptr(Lmethod, Method::const_offset(), O1);
    __ ld_ptr(O1, ConstMethod::constants_offset(), O1);
    __ ld_ptr(O1, ConstantPool::pool_holder_offset_in_bytes(), O1);
    __ ld_ptr(O1, mirror_offset, O1);
#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ br_notnull_short(O1, Assembler::pt, L);
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, Lscratch2, 0);
    __ mov(Lscratch2, O1);
    __ bind(not_static);
  }
  // At this point, arguments have been copied off of stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch.  O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
  { Label L;
    __ br_notnull_short(O0, Assembler::pt, L);
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT
  //
  // setup the frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code, it doesn't need to know the exact
  // PC and hence we can use O7 which points to the return address from the
  // previous call in the code stream (signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //

  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flush_windows();

  // mark windows as flushed
  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

  Address thread_state(G2_thread, JavaThread::thread_state_offset());
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, _thread_in_Java, Assembler::equal, Assembler::pt, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
  __ save_thread(L7_thread_cache); // save Gthread
  __ callr(O0, 0);
  __ delayed()->
     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);
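  // (The add executes in the branch delay slot of the callr, so O0 holds the
  //  address of the thread's JNI environment, the JNIEnv* first argument, by
  //  the time the native code starts running.)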
  // Back from jni method Lmethod in this frame is DEAD, DEAD, DEAD

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reinit_heapbase();

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, thread_state);
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }

    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
    __ bind(L);

    // Block.  Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed.
    save_native_result();
    __ call_VM_leaf(L7_thread_cache,
                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                    G2_thread);

    // Restore any method result value
    restore_native_result();
    __ bind(no_block);
  }
  // Clear the frame anchor now

  __ reset_last_Java_frame();

  // Move the result handler address
  __ mov(Lscratch, G3_scratch);
  // return possible result to the outer frame
#ifndef _LP64
  __ mov(O0, I0);
  __ restore(O1, G0, O1);
#else
  __ restore(O0, G0, O0);
#endif /* _LP64 */
  // Move result handler to expected register
  __ mov(G3_scratch, Lscratch);

  // Back in normal (native) interpreter frame. State is thread_in_native_trans
  // switch to thread_in_Java.

  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, thread_state);

  // reset handle block
  __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
  __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
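  // (Zeroing the top offset of the active JNIHandleBlock abandons every local
  //  handle created during the native call with a single store.)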
  // If we have an oop result store it where it will be safe for any further gc
  // until we return now that we've released the handle it might be protected by

  { Label no_oop, store_result;

    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
    __ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
    __ addcc(G0, O0, O0);
    __ brx(Assembler::notZero, true, Assembler::pt, store_result);     // if result is not NULL:
    __ delayed()->ld_ptr(O0, 0, O0);                                   // unbox it
    __ mov(G0, O0);

    __ bind(store_result);
    // Store it where gc will look for it and result handler expects it.
    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);

    __ bind(no_oop);
  }
  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);
    __ br_null_short(Gtemp, Assembler::pt, L);
    // Note: This could be handled more efficiently since we know that the native
    //       method doesn't have an exception handler. We could directly return
    //       to the exception handler for the caller.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  // JVMTI support (preserves thread register)
  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);

  if (synchronized) {
    // save and restore any potential method result value around the unlocking operation
    save_native_result();

    __ add( __ top_most_monitor(), O1);
    __ unlock_object(O1);

    restore_native_result();
  }
#if defined(COMPILER2) && !defined(_LP64)

  // C2 expects long results in G1 we can't tell if we're returning to interpreted
  // or compiled so just be safe.

  __ sllx(O0, 32, G1);          // Shift bits into high G1
  __ srl (O1, 0, O1);           // Zero extend O1
  __ or3 (O1, G1, G1);          // OR 64 bits into G1

#endif /* COMPILER2 && !_LP64 */

  // dispose of return address and remove activation
#ifdef ASSERT
  {
    Label ok;
    __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
    __ stop("bad I5_savedSP value");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif
  if (TraceJumps) {
    // Move target to register that is recordable
    __ mov(Lscratch, G3_scratch);
    __ JMP(G3_scratch, 0);
  } else {
    __ jmp(Lscratch, 0);
  }
  __ delayed()->nop();

  if (inc_counter) {
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }

  return entry;
}
// Generic method entry to (asm) interpreter
//------------------------------------------------------------------------------------------------------------------------
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  address entry = __ pc();

  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch ;
  const Register Gtmp2 = G1_scratch;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address constMethod       (G5_method, Method::const_offset());
  // Seems like G5_method is live at the point this is used. So we could make this look consistent
  // and use it in the asserts.
  const Address access_flags      (Lmethod,   Method::access_flags_offset());

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
  // make sure method is not native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame

  generate_fixed_frame(false);

#ifdef FAST_DISPATCH
  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
                                          // set bytecode dispatch table base
#endif
  1309   //
  1310   // Code to initialize the extra (i.e. non-parm) locals
  1311   //
  1312   Register init_value = noreg;    // will be G0 if we must clear locals
  1313   // The way the code was setup before zerolocals was always true for vanilla java entries.
  1314   // It could only be false for the specialized entries like accessor or empty which have
  1315   // no extra locals so the testing was a waste of time and the extra locals were always
  1316   // initialized. We removed this extra complication to already over complicated code.
  1318   init_value = G0;
  Label clear_loop;

  const Register RconstMethod = O1;
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
  const Address size_of_locals    (RconstMethod, ConstMethod::size_of_locals_offset());

  // NOTE: If you change the frame layout, this code will need to
  // be updated!
  __ ld_ptr( constMethod, RconstMethod );
  __ lduh( size_of_locals, O2 );
  __ lduh( size_of_parameters, O1 );
  __ sll( O2, Interpreter::logStackElementSize, O2);
  __ sll( O1, Interpreter::logStackElementSize, O1 );
  __ sub( Llocals, O2, O2 );
  __ sub( Llocals, O1, O1 );

  __ bind( clear_loop );
  __ inc( O2, wordSize );

  __ cmp( O2, O1 );
  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
  __ delayed()->st_ptr( init_value, O2, 0 );
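  // Illustrative walk-through (not in the original source): on LP64 with
  // max_locals == 5 and size_of_parameters == 2, O2 starts at Llocals - 40 and
  // O1 at Llocals - 16; each iteration bumps O2 by wordSize and stores G0, so
  // exactly the three non-parameter local slots (Llocals - 32, - 24, - 16)
  // are cleared.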
  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which hasn't
  // been entered yet, we set the thread-local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // the runtime, exception handling (i.e. unlock_if_synchronized_method) will
  // check this thread-local flag.
  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER the invocation counter check and stack overflow check,
  // so the method is not locked if either overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    {
      Label ok;
      __ ld(access_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution

  __ verify_thread();

  // jvmti support
  __ notify_method_entry();

  // start executing instructions
  __ dispatch_next(vtos);

  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ ba_short(profile_method_continue);
    }
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }

  return entry;
}
//----------------------------------------------------------------------------------------------------
// Entry points & stack frame layout
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call methods.
// These both come in synchronized and non-synchronized versions, but the
// frame layout they create is very similar. The other method entry
// types are really just special-purpose entries that combine entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
//    G5_method: holds the Method* of the method to call
//    Lesp:    points to the TOS of the caller's expression stack
//             after having pushed all the parameters
//
// The entry code does the following to set up an interpreter frame:
//   pop parameters from the caller's stack by adjusting Lesp
//   set O0 to Lesp
//   compute X = (max_locals - num_parameters)
//   bump SP up by X to accommodate the extra locals
//   compute X = max_expression_stack
//               + vm_local_words
//               + 16 words of register save area
//   save frame doing a save sp, -X, sp growing towards lower addresses
//   set Lbcp, Lmethod, LcpoolCache
//   set Llocals to i0
//   set Lmonitors to FP - rounded_vm_local_words
//   set Lesp to Lmonitors - 4
//
//  The frame has now been set up to do the rest of the entry code.
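// Illustrative sizing (not in the original source): for a method with
// max_locals == 4 and num_parameters == 2, the entry code above bumps SP by
// X == 2 extra local slots, and the save instruction then carves out
// max_expression_stack + vm_local_words + 16 register-save words for the
// new frame.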
// Try this optimization:  Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations.  It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.

//-----------------------------------------------------------------------------------------------
// C1 Calling conventions
//
// Upon method entry, the following registers are set up:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs  : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+ <--- Gargs
// |               |
// :   arguments   :
// |               |
// +---------------+
// |               |
//
//
//
// AFTER FRAME HAS BEEN SET UP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :               :
// |               | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// |   VM locals   |
// +---------------+ <--- fp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- fp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- fp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+
// |               |
// : nonarg locals :
// |               |
// +---------------+
// |               |
// :   arguments   :
// |               | <--- Llocals
// +---------------+ <--- Gargs
// |               |
static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {

  // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
  // expression stack, the callee will have callee_extra_locals (so we can account for
  // frame extension) and monitor_size for monitors. Basically we need to calculate
  // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
  //
  // The big complicating thing here is that we must ensure that the stack stays properly
  // aligned. This would be even uglier if monitor size weren't a multiple of what the stack
  // needs to be aligned to. We are given that the sp (fp) is already aligned by
  // the caller, so we must ensure that it is properly aligned for our callee.
  //
  const int rounded_vm_local_words =
       round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
  // callee_extra_locals and max_stack are element counts, not frame sizes.
  const int locals_size =
       round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
  const int max_stack_words = max_stack * Interpreter::stackElementWords;
  return (round_to((max_stack_words
                   //6815692//+ Method::extra_stack_words()
                   + rounded_vm_local_words
                   + frame::memory_parameter_word_sp_offset), WordsPerLong)
                   // already rounded
                   + locals_size + monitor_size);
}
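// Illustrative arithmetic (not in the original source), using assumed values
// of Interpreter::stackElementWords == 1 (LP64), WordsPerLong == 2,
// rounded_vm_local_words == 8 and memory_parameter_word_sp_offset == 23:
// a method with max_stack == 5, callee_extra_locals == 3 and no monitors needs
//   round_to(5 + 8 + 23, 2) + round_to(3, 2) + 0 == 36 + 4 == 40 words.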
// How much stack a method's top interpreter activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {

  // See call_stub code
  int call_stub_size  = round_to(7 + frame::memory_parameter_word_sp_offset,
                                 WordsPerLong);    // 7 + register save area

  // Save space for one monitor to get into the interpreted method in case
  // the method is synchronized
  int monitor_size    = method->is_synchronized() ?
                                1*frame::interpreter_frame_monitor_size() : 0;
  return size_activation_helper(method->max_locals(), method->max_stack(),
                                 monitor_size) + call_stub_size;
}
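// Illustrative (not in the original source), again assuming
// memory_parameter_word_sp_offset == 23 and WordsPerLong == 2:
// call_stub_size == round_to(7 + 23, 2) == 30 words, and a synchronized method
// additionally reserves one interpreter_frame_monitor_size() block of words.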
int AbstractInterpreter::layout_activation(Method* method,
                                           int tempcount,
                                           int popframe_extra_args,
                                           int moncount,
                                           int caller_actual_parameters,
                                           int callee_param_count,
                                           int callee_local_count,
                                           frame* caller,
                                           frame* interpreter_frame,
                                           bool is_top_frame,
                                           bool is_bottom_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in InterpreterGenerator::generate_fixed_frame.
  // If f != NULL, set up the following variables:
  //   - Lmethod
  //   - Llocals
  //   - Lmonitors (to the indicated number of monitors)
  //   - Lesp (to the indicated number of temps)
  // The frame f (if not NULL) on entry is a description of the caller of the frame
  // we are about to lay out. We are guaranteed that we will be able to fill in a
  // new interpreter frame as its callee (i.e. the stack space is allocated and
  // the amount was determined by an earlier call to this method with f == NULL).
  // On return, f (if not NULL) will describe the interpreter frame we just laid out.

  int monitor_size           = moncount * frame::interpreter_frame_monitor_size();
  int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);

  assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
  //
  // Note: if you look closely this appears to be doing something much different
  // than generate_fixed_frame. What is happening is this. On sparc we have to do
  // this dance with interpreter_sp_adjustment because the window save area would
  // appear just below the bottom (tos) of the caller's java expression stack. Because
  // the interpreter wants the locals completely contiguous, generate_fixed_frame
  // will adjust the caller's sp for the "extra locals" (max_locals - parameter_size).
  // Now in generate_fixed_frame the extension of the caller's sp happens in the callee.
  // In this code the opposite occurs: the caller adjusts its own stack based on the callee.
  // This is mostly ok, but it does cause a problem when we get to the initial frame (the oldest),
  // because the oldest frame would have adjusted its caller's frame, and yet that frame
  // already exists and isn't part of this array of frames we are unpacking. So at first
  // glance this would seem to mess up that frame. However, Deoptimization::fetch_unroll_info_helper(),
  // after it calculates all of the frames' on_stack_size()'s, will figure out the
  // amount to adjust the caller of the initial (oldest) frame, and the calculation will all
  // add up. It does seem like it would be simpler to account for the adjustment here (and remove
  // the callee... parameters here). However, this would mean that this routine would have to take
  // the caller frame as input so we could adjust its sp (and set its interpreter_sp_adjustment),
  // and run the calling loop in the reverse order. It would also appear to mean making
  // this code aware of what the interactions are when the initial caller frame was an osr or
  // other adapter frame. Deoptimization is complicated enough, and hard enough to debug, that
  // there is no sense in messing with working code.
  //

  int rounded_cls = round_to((callee_local_count - callee_param_count), WordsPerLong);
  assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");

  int raw_frame_size = size_activation_helper(rounded_cls, method->max_stack(),
                                              monitor_size);
  if (interpreter_frame != NULL) {
    // The skeleton frame must already look like an interpreter frame
    // even if not fully filled out.
    assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");

    intptr_t* fp = interpreter_frame->fp();

    JavaThread* thread = JavaThread::current();
    RegisterMap map(thread, false);
    // More verification that the skeleton frame is properly walkable
    assert(fp == caller->sp(), "fp must match");

    intptr_t* montop     = fp - rounded_vm_local_words;

    // preallocate monitors (cf. __ add_monitor_to_stack)
    intptr_t* monitors = montop - monitor_size;

    // preallocate stack space
    intptr_t*  esp = monitors - 1 -
                     (tempcount * Interpreter::stackElementWords) -
                     popframe_extra_args;

    int local_words = method->max_locals() * Interpreter::stackElementWords;
    NEEDS_CLEANUP;
    intptr_t* locals;
    if (caller->is_interpreted_frame()) {
      // Can force the locals area to end up properly overlapping the top of the expression stack.
      intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
      // Note that this computation means we replace size_of_parameters() values from the caller
      // interpreter frame's expression stack with our argument locals
      int parm_words  = caller_actual_parameters * Interpreter::stackElementWords;
      locals = Lesp_ptr + parm_words;
      int delta = local_words - parm_words;
      int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
      *interpreter_frame->register_addr(I5_savedSP)    = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
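      // Illustrative (not in the original source): with max_locals == 6,
      // caller_actual_parameters == 2 and stackElementWords == 1 (LP64),
      // delta == 4 and computed_sp_adjustment == round_to(4, WordsPerLong) == 4,
      // i.e. four extra words of space for the non-argument locals.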
      if (!is_bottom_frame) {
        // Llast_SP is set below for the current frame to SP (with the
        // extra space for the callee's locals). Here we adjust
        // Llast_SP for the caller's frame, removing the extra space
        // for the current method's locals.
        *caller->register_addr(Llast_SP) = *interpreter_frame->register_addr(I5_savedSP);
      } else {
        assert(*caller->register_addr(Llast_SP) >= *interpreter_frame->register_addr(I5_savedSP), "strange Llast_SP");
      }
    } else {
      assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
      // Don't have Lesp available; lay out locals block in the caller
      // adjacent to the register window save area.
      //
      // Compiled frames do not allocate a varargs area, which is why this if
      // statement is needed.
      //
      if (caller->is_compiled_frame()) {
        locals = fp + frame::register_save_words + local_words - 1;
      } else {
        locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
      }
      if (!caller->is_entry_frame()) {
        // Caller wants its own SP back
        int caller_frame_size = caller->cb()->frame_size();
        *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
      }
    }
    if (TraceDeoptimization) {
      if (caller->is_entry_frame()) {
        // make sure I5_savedSP and the entry frame's notion of saved SP
        // agree.  This assertion duplicates a check in entry frame code
        // but catches the failure earlier.
        assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
               "would change caller's SP");
      }
      if (caller->is_entry_frame()) {
        tty->print("entry ");
      }
      if (caller->is_compiled_frame()) {
        tty->print("compiled ");
        if (caller->is_deoptimized_frame()) {
          tty->print("(deopt) ");
        }
      }
      if (caller->is_interpreted_frame()) {
        tty->print("interpreted ");
      }
      tty->print_cr("caller fp=0x%x sp=0x%x", caller->fp(), caller->sp());
      tty->print_cr("save area = 0x%x, 0x%x", caller->sp(), caller->sp() + 16);
      tty->print_cr("save area = 0x%x, 0x%x", caller->fp(), caller->fp() + 16);
      tty->print_cr("interpreter fp=0x%x sp=0x%x", interpreter_frame->fp(), interpreter_frame->sp());
      tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->sp(), interpreter_frame->sp() + 16);
      tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->fp(), interpreter_frame->fp() + 16);
      tty->print_cr("Llocals = 0x%x", locals);
      tty->print_cr("Lesp = 0x%x", esp);
      tty->print_cr("Lmonitors = 0x%x", monitors);
    }
    if (method->max_locals() > 0) {
      assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
      assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
      assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
      assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
    }
#ifdef _LP64
    assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
#endif

    *interpreter_frame->register_addr(Lmethod)     = (intptr_t) method;
    *interpreter_frame->register_addr(Llocals)     = (intptr_t) locals;
    *interpreter_frame->register_addr(Lmonitors)   = (intptr_t) monitors;
    *interpreter_frame->register_addr(Lesp)        = (intptr_t) esp;
    // Llast_SP will be the same as SP as there is no adapter space
    *interpreter_frame->register_addr(Llast_SP)    = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
    *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
#ifdef FAST_DISPATCH
    *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
#endif

#ifdef ASSERT
    BasicObjectLock* mp = (BasicObjectLock*)monitors;

    assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
    assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
    assert(interpreter_frame->interpreter_frame_monitor_end()   == mp, "monitor_end matches");
    assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
    assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");

    // check bounds
    intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1);
    intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words;
    assert(lo < monitors && montop <= hi, "monitors in bounds");
    assert(lo <= esp && esp < monitors, "esp in bounds");
#endif // ASSERT
  }

  return raw_frame_size;
}
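// Hedged sketch of the calling protocol (inferred from the comments above, not
// code in this file): deoptimization first calls layout_activation with
// interpreter_frame == NULL just to obtain raw_frame_size, allocates a skeleton
// frame of that size, then calls it again with the real frame* so the register
// slots (Lmethod, Llocals, Lmonitors, Lesp, ...) get filled in.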
//----------------------------------------------------------------------------------------------------
// Exceptions
void TemplateInterpreterGenerator::generate_throw_exception() {

  // Entry point in previous activation (i.e., if the caller was interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // O0: exception

  // entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  __ verify_thread();
  // expression stack is undefined here
  // O0: exception, i.e. Oexception
  // Lbcp: exception bcx
  __ verify_oop(Oexception);

  // expression stack must be empty before entering the VM in case of an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  // call C routine to find handler and jump to it
  __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
  __ push_ptr(O1); // push exception for exception handler bytecodes

  __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
  __ delayed()->nop();
  // if the exception is not handled in the current frame,
  // the frame is removed and the exception is rethrown
  // (i.e. exception continuation is _rethrow_exception)
  //
  // Note: At this point the bci is still the bci for the instruction which caused
  //       the exception, and the expression stack is empty. Thus, for any VM calls
  //       at this point, GC will find a legal oop map (with empty expression stack).

  // in current activation
  // tos: exception
  // Lbcp: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
  // Set the popframe_processing bit in popframe_condition, indicating that we are
  // currently handling popframe, so that call_VMs that may happen later do not trigger new
  // popframe handling cycles.

  __ ld(popframe_condition_addr, G3_scratch);
  __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
  __ stw(G3_scratch, popframe_condition_addr);

  // Empty the expression stack, as in normal exception handling
  __ empty_expression_stack();
  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
    __ br_notnull_short(O0, Assembler::pt, caller_not_deoptimized);

    const Register Gtmp1 = G3_scratch;
    const Register Gtmp2 = G1_scratch;
    const Register RconstMethod = Gtmp1;
    const Address constMethod(Lmethod, Method::const_offset());
    const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());

    // Compute the size of the arguments for saving when returning to the deoptimized caller
    __ ld_ptr(constMethod, RconstMethod);
    __ lduh(size_of_parameters, Gtmp1);
    __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
    __ sub(Llocals, Gtmp1, Gtmp2);
    __ add(Gtmp2, wordSize, Gtmp2);
    // Save these arguments
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
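    // Illustrative (not in the original source): with size_of_parameters == 3
    // on LP64 (logStackElementSize == 3), Gtmp1 == 24 is the argument area size
    // in bytes and Gtmp2 == Llocals - 24 + wordSize is its lowest-addressed
    // slot, so popframe_preserve_args copies exactly the three argument words.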
    // Inform deoptimization that it is responsible for restoring these arguments
    __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
    Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
    __ st(Gtmp1, popframe_condition_addr);

    // Return from the current method
    // The caller's SP was adjusted upon method entry to accommodate
    // the callee's non-argument locals. Undo that adjustment.
    __ ret();
    __ delayed()->restore(I5_savedSP, G0, SP);

    __ bind(caller_not_deoptimized);
  }
  // Clear the popframe condition flag
  __ stw(G0 /* popframe_inactive */, popframe_condition_addr);

  // Get out of the current method (how this is done depends on the particular compiler calling
  // convention that the interpreter currently follows)
  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ restore(I5_savedSP, G0, SP);
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }
  // Resume bytecode interpretation at the current bcp
  __ dispatch_next(vtos);
  // end of JVMTI PopFrame support
  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
  __ pop_ptr(Oexception);                                  // get exception

  // Intel has the following comment:
  //// remove the activation (without doing throws on illegalMonitorExceptions)
  // They remove the activation without checking for a bad monitor state.
  // %%% We should make sure this is the right semantics before implementing.

  __ set_vm_result(Oexception);
  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);

  __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);

  __ get_vm_result(Oexception);
  __ verify_oop(Oexception);

  const int return_reg_adjustment = frame::pc_return_offset;
  Address issuing_pc_addr(I7, return_reg_adjustment);

  // We are done with this activation frame; find out where to go next.
  // The continuation point will be an exception handler, which expects
  // the following registers to be set up:
  //
  // Oexception: exception
  // Oissuing_pc: the local call that threw exception
  // Other On: garbage
  // In/Ln:  the contents of the caller's register window
  //
  // We do the required restore at the last possible moment, because we
  // need to preserve some state across a runtime call.
  // (Remember that the caller activation is unknown--it might not be
  // interpreted, so things like Lscratch are useless in the caller.)

  // Although the Intel version uses call_C, we can use the more
  // compact call_VM.  (The only real difference on SPARC is a
  // harmlessly ignored [re]set_last_Java_frame, compared with
  // the Intel code which lacks this.)
  __ mov(Oexception,      Oexception ->after_save());  // get exception in I0 so it will be on O0 after restore
  __ add(issuing_pc_addr, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
  __ super_call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                        G2_thread, Oissuing_pc->after_save());

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ JMP(O0, 0);                         // return exception handler in caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  // (same old exception object is already in Oexception; see above)
  // Note that an "issuing PC" is actually the next PC after the call
}
//
// JVMTI ForceEarlyReturn support
//

address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
  Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);

  __ remove_activation(state,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false);

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ ret();                             // return to caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  return entry;
} // end of JVMTI ForceEarlyReturn support
//------------------------------------------------------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc(); __ push_ptr(); __ ba_short(L);
  fep = __ pc(); __ push_f();   __ ba_short(L);
  dep = __ pc(); __ push_d();   __ ba_short(L);
  lep = __ pc(); __ push_l();   __ ba_short(L);
  iep = __ pc(); __ push_i();
  bep = cep = sep = iep;                        // there aren't any
  vep = __ pc(); __ bind(L);                    // fall through
  generate_and_dispatch(t);
}
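// Illustrative note (not in the original source): for a vtos template, a
// caller with, say, an int on TOS enters at iep, which pushes Otos_i onto the
// expression stack and falls through to vep, where the template body itself
// is generated; bep/cep/sep alias iep because byte, char and short values are
// already ints on the stack.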
// --------------------------------------------------------------------------------

InterpreterGenerator::InterpreterGenerator(StubQueue* code)
 : TemplateInterpreterGenerator(code) {
   generate_all(); // down here so it can be "virtual"
}

// --------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(state);
  __ mov(O7, Lscratch); // protect return address within interpreter

  // Pass a 0 (not used in sparc) and the top of stack to the bytecode tracer
  __ mov( Otos_l2, G3_scratch );
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
  __ mov(Lscratch, O7); // restore return address
  __ pop(state);
  __ retl();
  __ delayed()->nop();

  return entry;
}
// helpers for generate_and_dispatch

void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
}


void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
}


void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  AddressLiteral index   (&BytecodePairHistogram::_index);
  AddressLiteral counters((address) &BytecodePairHistogram::_counters);

  // get index, shift out old bytecode, bring in new bytecode, and store it
  // _index = (_index >> log2_number_of_codes) |
  //          (bytecode << log2_number_of_codes);

  __ load_contents(index, G4_scratch);
  __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
  __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes,  G3_scratch );
  __ or3( G3_scratch,  G4_scratch, G4_scratch );
  __ store_contents(G4_scratch, index, G3_scratch);

  // bump bucket contents
  // _counters[_index] ++;

  __ set(counters, G3_scratch);                       // load counters base address into G3_scratch
  __ sll( G4_scratch, LogBytesPerWord, G4_scratch );  // convert word index to byte offset
  __ add (G3_scratch, G4_scratch, G3_scratch);        // add in index
  __ ld (G3_scratch, 0, G4_scratch);
  __ inc (G4_scratch);
  __ st (G4_scratch, 0, G3_scratch);
}
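// Illustrative index update (not in the original source), assuming
// log2_number_of_codes == 8: an old _index of 0x00c2 (previous pair ending in
// bytecode 0xc2) combined with a new bytecode 0x1b gives
//   _index = (0x00c2 >> 8) | (0x1b << 8) = 0x1b00,
// so each consecutive bytecode pair maps to its own histogram bucket.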
void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
  address entry = Interpreter::trace_code(t->tos_in());
  guarantee(entry != NULL, "entry must have been generated");
  __ call(entry, relocInfo::none);
  __ delayed()->nop();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  AddressLiteral counter(&BytecodeCounter::_counter_value);
  __ load_contents(counter, G3_scratch);
  AddressLiteral stop_at(&StopInterpreterAt);
  __ load_ptr_contents(stop_at, G4_scratch);
  __ cmp(G3_scratch, G4_scratch);
  __ breakpoint_trap(Assembler::equal, Assembler::icc);
}
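// Illustrative usage (not in the original source): in a debug build run with
// -XX:StopInterpreterAt=100000, the code above compares the global bytecode
// counter against StopInterpreterAt before each bytecode and hits a breakpoint
// trap on the 100000th, which is handy for replaying execution to a
// deterministic point under a debugger.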
#endif // not PRODUCT
#endif // !CC_INTERP
