duke@435: /* xdono@631: * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. duke@435: * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. duke@435: * duke@435: * This code is free software; you can redistribute it and/or modify it duke@435: * under the terms of the GNU General Public License version 2 only, as duke@435: * published by the Free Software Foundation. duke@435: * duke@435: * This code is distributed in the hope that it will be useful, but WITHOUT duke@435: * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or duke@435: * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License duke@435: * version 2 for more details (a copy is included in the LICENSE file that duke@435: * accompanied this code). duke@435: * duke@435: * You should have received a copy of the GNU General Public License version duke@435: * 2 along with this work; if not, write to the Free Software Foundation, duke@435: * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. duke@435: * duke@435: * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, duke@435: * CA 95054 USA or visit www.sun.com if you need additional information or duke@435: * have any questions. duke@435: * duke@435: */ duke@435: duke@435: #include "incls/_precompiled.incl" duke@435: #include "incls/_templateInterpreter_sparc.cpp.incl" duke@435: duke@435: #ifndef CC_INTERP duke@435: #ifndef FAST_DISPATCH duke@435: #define FAST_DISPATCH 1 duke@435: #endif duke@435: #undef FAST_DISPATCH duke@435: duke@435: duke@435: // Generation of Interpreter duke@435: // duke@435: // The InterpreterGenerator generates the interpreter into Interpreter::_code. duke@435: duke@435: duke@435: #define __ _masm-> duke@435: duke@435: duke@435: //---------------------------------------------------------------------------------------------------- duke@435: duke@435: duke@435: void InterpreterGenerator::save_native_result(void) { duke@435: // result potentially in O0/O1: save it across calls duke@435: const Address& l_tmp = InterpreterMacroAssembler::l_tmp; duke@435: duke@435: // result potentially in F0/F1: save it across calls duke@435: const Address& d_tmp = InterpreterMacroAssembler::d_tmp; duke@435: duke@435: // save and restore any potential method result value around the unlocking operation duke@435: __ stf(FloatRegisterImpl::D, F0, d_tmp); duke@435: #ifdef _LP64 duke@435: __ stx(O0, l_tmp); duke@435: #else duke@435: __ std(O0, l_tmp); duke@435: #endif duke@435: } duke@435: duke@435: void InterpreterGenerator::restore_native_result(void) { duke@435: const Address& l_tmp = InterpreterMacroAssembler::l_tmp; duke@435: const Address& d_tmp = InterpreterMacroAssembler::d_tmp; duke@435: duke@435: // Restore any method result value duke@435: __ ldf(FloatRegisterImpl::D, d_tmp, F0); duke@435: #ifdef _LP64 duke@435: __ ldx(l_tmp, O0); duke@435: #else duke@435: __ ldd(l_tmp, O0); duke@435: #endif duke@435: } duke@435: duke@435: address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) { duke@435: assert(!pass_oop || message == NULL, "either oop or message but not both"); duke@435: address entry = __ pc(); duke@435: // expression stack must be empty before entering the VM if an exception happened duke@435: __ empty_expression_stack(); duke@435: // load exception object duke@435: __ set((intptr_t)name, G3_scratch); duke@435: if (pass_oop) { duke@435: __ call_VM(Oexception, CAST_FROM_FN_PTR(address, 
        InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
duke@435:   } else {
duke@435:     __ set((intptr_t)message, G4_scratch);
duke@435:     __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
duke@435:   }
duke@435:   // throw exception
duke@435:   assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
duke@435:   Address thrower(G3_scratch, Interpreter::throw_exception_entry());
duke@435:   __ jump_to (thrower);
duke@435:   __ delayed()->nop();
duke@435:   return entry;
duke@435: }
duke@435:
duke@435: address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
duke@435:   address entry = __ pc();
duke@435:   // expression stack must be empty before entering the VM if an exception
duke@435:   // happened
duke@435:   __ empty_expression_stack();
duke@435:   // load exception object
duke@435:   __ call_VM(Oexception,
duke@435:              CAST_FROM_FN_PTR(address,
duke@435:                               InterpreterRuntime::throw_ClassCastException),
duke@435:              Otos_i);
duke@435:   __ should_not_reach_here();
duke@435:   return entry;
duke@435: }
duke@435:
duke@435:
duke@435: address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
duke@435:   address entry = __ pc();
duke@435:   // expression stack must be empty before entering the VM if an exception happened
duke@435:   __ empty_expression_stack();
duke@435:   // convention: expect aberrant index in register G3_scratch, then shuffle the
duke@435:   // index to G4_scratch for the VM call
duke@435:   __ mov(G3_scratch, G4_scratch);
duke@435:   __ set((intptr_t)name, G3_scratch);
duke@435:   __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
duke@435:   __ should_not_reach_here();
duke@435:   return entry;
duke@435: }
duke@435:
duke@435:
duke@435: address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
duke@435:   address entry = __ pc();
duke@435:   // expression stack must be empty before entering the VM if an exception happened
duke@435:   __ empty_expression_stack();
duke@435:   __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
duke@435:   __ should_not_reach_here();
duke@435:   return entry;
duke@435: }
duke@435:
duke@435:
duke@435: address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
duke@435:   address compiled_entry = __ pc();
duke@435:   Label cont;
duke@435:
duke@435:   address entry = __ pc();
duke@435: #if !defined(_LP64) && defined(COMPILER2)
duke@435:   // All return values are where we want them, except for Longs. C2 returns
duke@435:   // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
duke@435:   // Since the interpreter will return longs in G1 and O0/O1 in the 32-bit
duke@435:   // build even if we are returning from interpreted code, we just do a little
duke@435:   // shuffling.
duke@435:   // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
duke@435:   // do this here. Unfortunately, if we did a rethrow we'd see a MachEpilog node
duke@435:   // first, which would move g1 -> O0/O1 and destroy the exception we were throwing.
duke@435:
duke@435:   if( state == ltos ) {
duke@435:     __ srl (G1, 0,O1);
duke@435:     __ srlx(G1,32,O0);
duke@435:   }
duke@435: #endif /* !_LP64 && COMPILER2 */
duke@435:
duke@435:
duke@435:   __ bind(cont);
duke@435:
duke@435:   // The callee returns with the stack possibly adjusted by an adapter transition.
duke@435:   // We remove that possible adjustment here.
duke@435: // All interpreter local registers are untouched. Any result is passed back duke@435: // in the O0/O1 or float registers. Before continuing, the arguments must be duke@435: // popped from the java expression stack; i.e., Lesp must be adjusted. duke@435: duke@435: __ mov(Llast_SP, SP); // Remove any adapter added stack space. duke@435: duke@435: duke@435: const Register cache = G3_scratch; duke@435: const Register size = G1_scratch; duke@435: __ get_cache_and_index_at_bcp(cache, G1_scratch, 1); duke@435: __ ld_ptr(Address(cache, 0, in_bytes(constantPoolCacheOopDesc::base_offset()) + duke@435: in_bytes(ConstantPoolCacheEntry::flags_offset())), size); duke@435: __ and3(size, 0xFF, size); // argument size in words duke@435: __ sll(size, Interpreter::logStackElementSize(), size); // each argument size in bytes duke@435: __ add(Lesp, size, Lesp); // pop arguments duke@435: __ dispatch_next(state, step); duke@435: duke@435: return entry; duke@435: } duke@435: duke@435: duke@435: address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) { duke@435: address entry = __ pc(); duke@435: __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache duke@435: { Label L; duke@435: Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset())); duke@435: duke@435: __ ld_ptr(exception_addr, Gtemp); duke@435: __ tst(Gtemp); duke@435: __ brx(Assembler::equal, false, Assembler::pt, L); duke@435: __ delayed()->nop(); duke@435: __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception)); duke@435: __ should_not_reach_here(); duke@435: __ bind(L); duke@435: } duke@435: __ dispatch_next(state, step); duke@435: return entry; duke@435: } duke@435: duke@435: // A result handler converts/unboxes a native call result into duke@435: // a java interpreter/compiler result. The current frame is an duke@435: // interpreter frame. The activation frame unwind code must be duke@435: // consistent with that of TemplateTable::_return(...). In the duke@435: // case of native methods, the caller's SP was not modified. duke@435: address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) { duke@435: address entry = __ pc(); duke@435: Register Itos_i = Otos_i ->after_save(); duke@435: Register Itos_l = Otos_l ->after_save(); duke@435: Register Itos_l1 = Otos_l1->after_save(); duke@435: Register Itos_l2 = Otos_l2->after_save(); duke@435: switch (type) { duke@435: case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false duke@435: case T_CHAR : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i); break; // cannot use and3, 0xFFFF too big as immediate value! 
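  // Illustrative note (comment only, not generated code), assuming the usual
  // SPARC semantics of the instruction pairs above and below:
  //   T_BOOLEAN: subcc/addc yield (O0 != 0) ? 1 : 0 via the carry flag,
  //   T_CHAR   : sll 16 / srl 16 zero-extends the low 16 bits,
  //   T_BYTE   : sll 24 / sra 24 sign-extends the low 8 bits,
  //   T_SHORT  : sll 16 / sra 16 sign-extends the low 16 bits.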
duke@435: case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i); break; duke@435: case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i); break; duke@435: case T_LONG : duke@435: #ifndef _LP64 duke@435: __ mov(O1, Itos_l2); // move other half of long duke@435: #endif // ifdef or no ifdef, fall through to the T_INT case duke@435: case T_INT : __ mov(O0, Itos_i); break; duke@435: case T_VOID : /* nothing to do */ break; duke@435: case T_FLOAT : assert(F0 == Ftos_f, "fix this code" ); break; duke@435: case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" ); break; duke@435: case T_OBJECT : duke@435: __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i); duke@435: __ verify_oop(Itos_i); duke@435: break; duke@435: default : ShouldNotReachHere(); duke@435: } duke@435: __ ret(); // return from interpreter activation duke@435: __ delayed()->restore(I5_savedSP, G0, SP); // remove interpreter frame duke@435: NOT_PRODUCT(__ emit_long(0);) // marker for disassembly duke@435: return entry; duke@435: } duke@435: duke@435: address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) { duke@435: address entry = __ pc(); duke@435: __ push(state); duke@435: __ call_VM(noreg, runtime_entry); duke@435: __ dispatch_via(vtos, Interpreter::normal_table(vtos)); duke@435: return entry; duke@435: } duke@435: duke@435: duke@435: address TemplateInterpreterGenerator::generate_continuation_for(TosState state) { duke@435: address entry = __ pc(); duke@435: __ dispatch_next(state); duke@435: return entry; duke@435: } duke@435: duke@435: // duke@435: // Helpers for commoning out cases in the various type of method entries. duke@435: // duke@435: duke@435: // increment invocation count & check for overflow duke@435: // duke@435: // Note: checking for negative value instead of overflow duke@435: // so we have a 'sticky' overflow test duke@435: // duke@435: // Lmethod: method duke@435: // ??: invocation counter duke@435: // duke@435: void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { duke@435: // Update standard invocation counters duke@435: __ increment_invocation_counter(O0, G3_scratch); duke@435: if (ProfileInterpreter) { // %%% Merge this into methodDataOop duke@435: Address interpreter_invocation_counter(Lmethod, 0, in_bytes(methodOopDesc::interpreter_invocation_counter_offset())); duke@435: __ ld(interpreter_invocation_counter, G3_scratch); duke@435: __ inc(G3_scratch); duke@435: __ st(G3_scratch, interpreter_invocation_counter); duke@435: } duke@435: duke@435: if (ProfileInterpreter && profile_method != NULL) { duke@435: // Test to see if we should create a method data oop duke@435: Address profile_limit(G3_scratch, (address)&InvocationCounter::InterpreterProfileLimit); duke@435: __ sethi(profile_limit); duke@435: __ ld(profile_limit, G3_scratch); duke@435: __ cmp(O0, G3_scratch); duke@435: __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue); duke@435: __ delayed()->nop(); duke@435: duke@435: // if no method data exists, go to profile_method duke@435: __ test_method_data_pointer(*profile_method); duke@435: } duke@435: duke@435: Address invocation_limit(G3_scratch, (address)&InvocationCounter::InterpreterInvocationLimit); duke@435: __ sethi(invocation_limit); duke@435: __ ld(invocation_limit, G3_scratch); duke@435: __ cmp(O0, G3_scratch); duke@435: __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); duke@435: __ 
delayed()->nop(); duke@435: duke@435: } duke@435: duke@435: // Allocate monitor and lock method (asm interpreter) duke@435: // ebx - methodOop duke@435: // duke@435: void InterpreterGenerator::lock_method(void) { duke@435: const Address access_flags (Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset())); duke@435: __ ld(access_flags, O0); duke@435: duke@435: #ifdef ASSERT duke@435: { Label ok; duke@435: __ btst(JVM_ACC_SYNCHRONIZED, O0); duke@435: __ br( Assembler::notZero, false, Assembler::pt, ok); duke@435: __ delayed()->nop(); duke@435: __ stop("method doesn't need synchronization"); duke@435: __ bind(ok); duke@435: } duke@435: #endif // ASSERT duke@435: duke@435: // get synchronization object to O0 duke@435: { Label done; duke@435: const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); duke@435: __ btst(JVM_ACC_STATIC, O0); duke@435: __ br( Assembler::zero, true, Assembler::pt, done); duke@435: __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case duke@435: duke@435: __ ld_ptr( Lmethod, in_bytes(methodOopDesc::constants_offset()), O0); duke@435: __ ld_ptr( O0, constantPoolOopDesc::pool_holder_offset_in_bytes(), O0); duke@435: duke@435: // lock the mirror, not the klassOop duke@435: __ ld_ptr( O0, mirror_offset, O0); duke@435: duke@435: #ifdef ASSERT duke@435: __ tst(O0); duke@435: __ breakpoint_trap(Assembler::zero); duke@435: #endif // ASSERT duke@435: duke@435: __ bind(done); duke@435: } duke@435: duke@435: __ add_monitor_to_stack(true, noreg, noreg); // allocate monitor elem duke@435: __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes()); // store object duke@435: // __ untested("lock_object from method entry"); duke@435: __ lock_object(Lmonitors, O0); duke@435: } duke@435: duke@435: duke@435: void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size, duke@435: Register Rscratch, duke@435: Register Rscratch2) { duke@435: const int page_size = os::vm_page_size(); duke@435: Address saved_exception_pc(G2_thread, 0, duke@435: in_bytes(JavaThread::saved_exception_pc_offset())); duke@435: Label after_frame_check; duke@435: duke@435: assert_different_registers(Rframe_size, Rscratch, Rscratch2); duke@435: duke@435: __ set( page_size, Rscratch ); duke@435: __ cmp( Rframe_size, Rscratch ); duke@435: duke@435: __ br( Assembler::lessEqual, false, Assembler::pt, after_frame_check ); duke@435: __ delayed()->nop(); duke@435: duke@435: // get the stack base, and in debug, verify it is non-zero duke@435: __ ld_ptr( G2_thread, in_bytes(Thread::stack_base_offset()), Rscratch ); duke@435: #ifdef ASSERT duke@435: Label base_not_zero; duke@435: __ cmp( Rscratch, G0 ); duke@435: __ brx( Assembler::notEqual, false, Assembler::pn, base_not_zero ); duke@435: __ delayed()->nop(); duke@435: __ stop("stack base is zero in generate_stack_overflow_check"); duke@435: __ bind(base_not_zero); duke@435: #endif duke@435: duke@435: // get the stack size, and in debug, verify it is non-zero duke@435: assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" ); duke@435: __ ld_ptr( G2_thread, in_bytes(Thread::stack_size_offset()), Rscratch2 ); duke@435: #ifdef ASSERT duke@435: Label size_not_zero; duke@435: __ cmp( Rscratch2, G0 ); duke@435: __ brx( Assembler::notEqual, false, Assembler::pn, size_not_zero ); duke@435: __ delayed()->nop(); duke@435: __ stop("stack size is zero in generate_stack_overflow_check"); duke@435: __ bind(size_not_zero); duke@435: #endif 
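  // Sketch of the test generated below (comment only, not generated code):
  // the frame fits if
  //   SP > (stack_base - stack_size)
  //          + (StackRedPages + StackYellowPages) * page_size
  //          + frame_size
  // i.e. the new frame must still lie above the red/yellow guard pages at the
  // bottom of the stack; otherwise a StackOverflowError is thrown.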
duke@435:
duke@435:   // compute the beginning of the protected zone minus the requested frame size
duke@435:   __ sub( Rscratch, Rscratch2, Rscratch );
duke@435:   __ set( (StackRedPages+StackYellowPages) * page_size, Rscratch2 );
duke@435:   __ add( Rscratch, Rscratch2, Rscratch );
duke@435:
duke@435:   // Add in the size of the frame (which is the same as subtracting it from the
duke@435:   // SP, which would take another register).
duke@435:   __ add( Rscratch, Rframe_size, Rscratch );
duke@435:
duke@435:   // the frame is greater than one page in size, so check against
duke@435:   // the bottom of the stack
duke@435:   __ cmp( SP, Rscratch );
duke@435:   __ brx( Assembler::greater, false, Assembler::pt, after_frame_check );
duke@435:   __ delayed()->nop();
duke@435:
duke@435:   // Save the return address as the exception pc
duke@435:   __ st_ptr(O7, saved_exception_pc);
duke@435:
duke@435:   // the stack will overflow, throw an exception
duke@435:   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
duke@435:
duke@435:   // if you get here, then there is enough stack space
duke@435:   __ bind( after_frame_check );
duke@435: }
duke@435:
duke@435:
duke@435: //
duke@435: // Generate a fixed interpreter frame. The setup is identical for interpreted
duke@435: // methods and for native methods, hence the shared code.
duke@435:
duke@435: void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
duke@435:   //
duke@435:   //
duke@435:   // The entry code sets up a new interpreter frame in 4 steps:
duke@435:   //
duke@435:   // 1) Increase the caller's SP for the extra local space needed:
duke@435:   //    (check for overflow)
duke@435:   //    Efficient implementation of xload/xstore bytecodes requires
duke@435:   //    that arguments and non-argument locals are in a contiguously
duke@435:   //    addressable memory block => non-argument locals must be
duke@435:   //    allocated in the caller's frame.
duke@435:   //
duke@435:   // 2) Create a new stack frame and register window:
duke@435:   //    The new stack frame must provide space for the standard
duke@435:   //    register save area, the maximum java expression stack size,
duke@435:   //    the monitor slots (0 slots initially), and some frame local
duke@435:   //    scratch locations.
duke@435:   //
duke@435:   // 3) The following interpreter activation registers must be set up:
duke@435:   //    Lesp       : expression stack pointer
duke@435:   //    Lbcp       : bytecode pointer
duke@435:   //    Lmethod    : method
duke@435:   //    Llocals    : locals pointer
duke@435:   //    Lmonitors  : monitor pointer
duke@435:   //    LcpoolCache: constant pool cache
duke@435:   //
duke@435:   // 4) Initialize the non-argument locals if necessary:
duke@435:   //    Non-argument locals may need to be initialized to NULL
duke@435:   //    for GC to work. If the oop-map information is accurate
duke@435:   //    (in the absence of the JSR problem), no initialization
duke@435:   //    is necessary.
duke@435: // duke@435: // (gri - 2/25/2000) duke@435: duke@435: duke@435: const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())); duke@435: const Address size_of_locals (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset())); duke@435: const Address max_stack (G5_method, 0, in_bytes(methodOopDesc::max_stack_offset())); duke@435: int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong ); duke@435: duke@435: const int extra_space = duke@435: rounded_vm_local_words + // frame local scratch space duke@435: frame::memory_parameter_word_sp_offset + // register save area duke@435: (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0); duke@435: duke@435: const Register Glocals_size = G3; duke@435: const Register Otmp1 = O3; duke@435: const Register Otmp2 = O4; duke@435: // Lscratch can't be used as a temporary because the call_stub uses duke@435: // it to assert that the stack frame was setup correctly. duke@435: duke@435: __ lduh( size_of_parameters, Glocals_size); duke@435: duke@435: // Gargs points to first local + BytesPerWord duke@435: // Set the saved SP after the register window save duke@435: // duke@435: assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP); duke@435: __ sll(Glocals_size, Interpreter::logStackElementSize(), Otmp1); duke@435: __ add(Gargs, Otmp1, Gargs); duke@435: duke@435: if (native_call) { duke@435: __ calc_mem_param_words( Glocals_size, Gframe_size ); duke@435: __ add( Gframe_size, extra_space, Gframe_size); duke@435: __ round_to( Gframe_size, WordsPerLong ); duke@435: __ sll( Gframe_size, LogBytesPerWord, Gframe_size ); duke@435: } else { duke@435: duke@435: // duke@435: // Compute number of locals in method apart from incoming parameters duke@435: // duke@435: __ lduh( size_of_locals, Otmp1 ); duke@435: __ sub( Otmp1, Glocals_size, Glocals_size ); duke@435: __ round_to( Glocals_size, WordsPerLong ); duke@435: __ sll( Glocals_size, Interpreter::logStackElementSize(), Glocals_size ); duke@435: duke@435: // see if the frame is greater than one page in size. If so, duke@435: // then we need to verify there is enough stack space remaining duke@435: // Frame_size = (max_stack + extra_space) * BytesPerWord; duke@435: __ lduh( max_stack, Gframe_size ); duke@435: __ add( Gframe_size, extra_space, Gframe_size ); duke@435: __ round_to( Gframe_size, WordsPerLong ); duke@435: __ sll( Gframe_size, Interpreter::logStackElementSize(), Gframe_size); duke@435: duke@435: // Add in java locals size for stack overflow check only duke@435: __ add( Gframe_size, Glocals_size, Gframe_size ); duke@435: duke@435: const Register Otmp2 = O4; duke@435: assert_different_registers(Otmp1, Otmp2, O5_savedSP); duke@435: generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2); duke@435: duke@435: __ sub( Gframe_size, Glocals_size, Gframe_size); duke@435: duke@435: // duke@435: // bump SP to accomodate the extra locals duke@435: // duke@435: __ sub( SP, Glocals_size, SP ); duke@435: } duke@435: duke@435: // duke@435: // now set up a stack frame with the size computed above duke@435: // duke@435: __ neg( Gframe_size ); duke@435: __ save( SP, Gframe_size, SP ); duke@435: duke@435: // duke@435: // now set up all the local cache registers duke@435: // duke@435: // NOTE: At this point, Lbyte_code/Lscratch has been modified. 
Note duke@435: // that all present references to Lbyte_code initialize the register duke@435: // immediately before use duke@435: if (native_call) { duke@435: __ mov(G0, Lbcp); duke@435: } else { duke@435: __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::const_offset())), Lbcp ); duke@435: __ add(Address(Lbcp, 0, in_bytes(constMethodOopDesc::codes_offset())), Lbcp ); duke@435: } duke@435: __ mov( G5_method, Lmethod); // set Lmethod duke@435: __ get_constant_pool_cache( LcpoolCache ); // set LcpoolCache duke@435: __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors duke@435: #ifdef _LP64 duke@435: __ add( Lmonitors, STACK_BIAS, Lmonitors ); // Account for 64 bit stack bias duke@435: #endif duke@435: __ sub(Lmonitors, BytesPerWord, Lesp); // set Lesp duke@435: duke@435: // setup interpreter activation registers duke@435: __ sub(Gargs, BytesPerWord, Llocals); // set Llocals duke@435: duke@435: if (ProfileInterpreter) { duke@435: #ifdef FAST_DISPATCH duke@435: // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since duke@435: // they both use I2. duke@435: assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive"); duke@435: #endif // FAST_DISPATCH duke@435: __ set_method_data_pointer(); duke@435: } duke@435: duke@435: } duke@435: duke@435: // Empty method, generate a very fast return. duke@435: duke@435: address InterpreterGenerator::generate_empty_entry(void) { duke@435: duke@435: // A method that does nother but return... duke@435: duke@435: address entry = __ pc(); duke@435: Label slow_path; duke@435: duke@435: __ verify_oop(G5_method); duke@435: duke@435: // do nothing for empty methods (do not even increment invocation counter) duke@435: if ( UseFastEmptyMethods) { duke@435: // If we need a safepoint check, generate full interpreter entry. duke@435: Address sync_state(G3_scratch, SafepointSynchronize::address_of_state()); duke@435: __ load_contents(sync_state, G3_scratch); duke@435: __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized); duke@435: __ br(Assembler::notEqual, false, Assembler::pn, slow_path); duke@435: __ delayed()->nop(); duke@435: duke@435: // Code: _return duke@435: __ retl(); duke@435: __ delayed()->mov(O5_savedSP, SP); duke@435: duke@435: __ bind(slow_path); duke@435: (void) generate_normal_entry(false); duke@435: duke@435: return entry; duke@435: } duke@435: return NULL; duke@435: } duke@435: duke@435: // Call an accessor method (assuming it is resolved, otherwise drop into duke@435: // vanilla (slow path) entry duke@435: duke@435: // Generates code to elide accessor methods duke@435: // Uses G3_scratch and G1_scratch as scratch duke@435: address InterpreterGenerator::generate_accessor_entry(void) { duke@435: duke@435: // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; duke@435: // parameter size = 1 duke@435: // Note: We can only use this code if the getfield has been resolved duke@435: // and if we don't have a null-pointer exception => check for duke@435: // these conditions first and use slow path if necessary. duke@435: address entry = __ pc(); duke@435: Label slow_path; duke@435: coleenp@548: coleenp@548: // XXX: for compressed oops pointer loading and decoding doesn't fit in coleenp@548: // delay slot and damages G1 coleenp@548: if ( UseFastAccessorMethods && !UseCompressedOops ) { duke@435: // Check if we need to reach a safepoint and generate full interpreter duke@435: // frame if so. 
duke@435: Address sync_state(G3_scratch, SafepointSynchronize::address_of_state()); duke@435: __ load_contents(sync_state, G3_scratch); duke@435: __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized); duke@435: __ br(Assembler::notEqual, false, Assembler::pn, slow_path); duke@435: __ delayed()->nop(); duke@435: duke@435: // Check if local 0 != NULL duke@435: __ ld_ptr(Gargs, G0, Otos_i ); // get local 0 duke@435: __ tst(Otos_i); // check if local 0 == NULL and go the slow path duke@435: __ brx(Assembler::zero, false, Assembler::pn, slow_path); duke@435: __ delayed()->nop(); duke@435: duke@435: duke@435: // read first instruction word and extract bytecode @ 1 and index @ 2 duke@435: // get first 4 bytes of the bytecodes (big endian!) duke@435: __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::const_offset())), G1_scratch); duke@435: __ ld(Address(G1_scratch, 0, in_bytes(constMethodOopDesc::codes_offset())), G1_scratch); duke@435: duke@435: // move index @ 2 far left then to the right most two bytes. duke@435: __ sll(G1_scratch, 2*BitsPerByte, G1_scratch); duke@435: __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words( duke@435: ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch); duke@435: duke@435: // get constant pool cache duke@435: __ ld_ptr(G5_method, in_bytes(methodOopDesc::constants_offset()), G3_scratch); duke@435: __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch); duke@435: duke@435: // get specific constant pool cache entry duke@435: __ add(G3_scratch, G1_scratch, G3_scratch); duke@435: duke@435: // Check the constant Pool cache entry to see if it has been resolved. duke@435: // If not, need the slow path. duke@435: ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); duke@435: __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::indices_offset()), G1_scratch); duke@435: __ srl(G1_scratch, 2*BitsPerByte, G1_scratch); duke@435: __ and3(G1_scratch, 0xFF, G1_scratch); duke@435: __ cmp(G1_scratch, Bytecodes::_getfield); duke@435: __ br(Assembler::notEqual, false, Assembler::pn, slow_path); duke@435: __ delayed()->nop(); duke@435: duke@435: // Get the type and return field offset from the constant pool cache duke@435: __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()), G1_scratch); duke@435: __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()), G3_scratch); duke@435: duke@435: Label xreturn_path; duke@435: // Need to differentiate between igetfield, agetfield, bgetfield etc. duke@435: // because they are different sizes. 
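  // Rough C-like sketch of the dispatch below (comment only, not generated
  // code): G1_scratch holds the field's TosState after the shift, G3_scratch
  // the field offset, so the selected load is roughly
  //   switch (tos_state) {
  //     case atos: value = *(oop*)   (obj + offset); break;
  //     case itos: value = *(jint*)  (obj + offset); break;
  //     case stos: value = *(jshort*)(obj + offset); break;  // sign-extended
  //     case ctos: value = *(jchar*) (obj + offset); break;  // zero-extended
  //     default  : value = *(jbyte*) (obj + offset); break;  // btos
  //   }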
duke@435: // Get the type from the constant pool cache duke@435: __ srl(G1_scratch, ConstantPoolCacheEntry::tosBits, G1_scratch); duke@435: // Make sure we don't need to mask G1_scratch for tosBits after the above shift duke@435: ConstantPoolCacheEntry::verify_tosBits(); duke@435: __ cmp(G1_scratch, atos ); duke@435: __ br(Assembler::equal, true, Assembler::pt, xreturn_path); duke@435: __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i); duke@435: __ cmp(G1_scratch, itos); duke@435: __ br(Assembler::equal, true, Assembler::pt, xreturn_path); duke@435: __ delayed()->ld(Otos_i, G3_scratch, Otos_i); duke@435: __ cmp(G1_scratch, stos); duke@435: __ br(Assembler::equal, true, Assembler::pt, xreturn_path); duke@435: __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i); duke@435: __ cmp(G1_scratch, ctos); duke@435: __ br(Assembler::equal, true, Assembler::pt, xreturn_path); duke@435: __ delayed()->lduh(Otos_i, G3_scratch, Otos_i); duke@435: #ifdef ASSERT duke@435: __ cmp(G1_scratch, btos); duke@435: __ br(Assembler::equal, true, Assembler::pt, xreturn_path); duke@435: __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i); duke@435: __ should_not_reach_here(); duke@435: #endif duke@435: __ ldsb(Otos_i, G3_scratch, Otos_i); duke@435: __ bind(xreturn_path); duke@435: duke@435: // _ireturn/_areturn duke@435: __ retl(); // return from leaf routine duke@435: __ delayed()->mov(O5_savedSP, SP); duke@435: duke@435: // Generate regular method entry duke@435: __ bind(slow_path); duke@435: (void) generate_normal_entry(false); duke@435: return entry; duke@435: } duke@435: return NULL; duke@435: } duke@435: duke@435: // duke@435: // Interpreter stub for calling a native method. (asm interpreter) duke@435: // This sets up a somewhat different looking stack for calling the native method duke@435: // than the typical interpreter frame setup. duke@435: // duke@435: duke@435: address InterpreterGenerator::generate_native_entry(bool synchronized) { duke@435: address entry = __ pc(); duke@435: duke@435: // the following temporary registers are used during frame creation duke@435: const Register Gtmp1 = G3_scratch ; duke@435: const Register Gtmp2 = G1_scratch; duke@435: bool inc_counter = UseCompiler || CountCompiledCalls; duke@435: duke@435: // make sure registers are different! 
duke@435: assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2); duke@435: duke@435: const Address Laccess_flags (Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset())); duke@435: duke@435: __ verify_oop(G5_method); duke@435: duke@435: const Register Glocals_size = G3; duke@435: assert_different_registers(Glocals_size, G4_scratch, Gframe_size); duke@435: duke@435: // make sure method is native & not abstract duke@435: // rethink these assertions - they can be simplified and shared (gri 2/25/2000) duke@435: #ifdef ASSERT duke@435: __ ld(G5_method, in_bytes(methodOopDesc::access_flags_offset()), Gtmp1); duke@435: { duke@435: Label L; duke@435: __ btst(JVM_ACC_NATIVE, Gtmp1); duke@435: __ br(Assembler::notZero, false, Assembler::pt, L); duke@435: __ delayed()->nop(); duke@435: __ stop("tried to execute non-native method as native"); duke@435: __ bind(L); duke@435: } duke@435: { Label L; duke@435: __ btst(JVM_ACC_ABSTRACT, Gtmp1); duke@435: __ br(Assembler::zero, false, Assembler::pt, L); duke@435: __ delayed()->nop(); duke@435: __ stop("tried to execute abstract method as non-abstract"); duke@435: __ bind(L); duke@435: } duke@435: #endif // ASSERT duke@435: duke@435: // generate the code to allocate the interpreter stack frame duke@435: generate_fixed_frame(true); duke@435: duke@435: // duke@435: // No locals to initialize for native method duke@435: // duke@435: duke@435: // this slot will be set later, we initialize it to null here just in duke@435: // case we get a GC before the actual value is stored later duke@435: __ st_ptr(G0, Address(FP, 0, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS)); duke@435: duke@435: const Address do_not_unlock_if_synchronized(G2_thread, 0, duke@435: in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); duke@435: // Since at this point in the method invocation the exception handler duke@435: // would try to exit the monitor of synchronized methods which hasn't duke@435: // been entered yet, we set the thread local variable duke@435: // _do_not_unlock_if_synchronized to true. If any exception was thrown by duke@435: // runtime, exception handling i.e. unlock_if_synchronized_method will duke@435: // check this thread local flag. duke@435: // This flag has two effects, one is to force an unwind in the topmost duke@435: // interpreter frame and not perform an unlock while doing so. duke@435: duke@435: __ movbool(true, G3_scratch); duke@435: __ stbool(G3_scratch, do_not_unlock_if_synchronized); duke@435: duke@435: // increment invocation counter and check for overflow duke@435: // duke@435: // Note: checking for negative value instead of overflow duke@435: // so we have a 'sticky' overflow test (may be of duke@435: // importance as soon as we have true MT/MP) duke@435: Label invocation_counter_overflow; duke@435: Label Lcontinue; duke@435: if (inc_counter) { duke@435: generate_counter_incr(&invocation_counter_overflow, NULL, NULL); duke@435: duke@435: } duke@435: __ bind(Lcontinue); duke@435: duke@435: bang_stack_shadow_pages(true); duke@435: duke@435: // reset the _do_not_unlock_if_synchronized flag duke@435: __ stbool(G0, do_not_unlock_if_synchronized); duke@435: duke@435: // check for synchronized methods duke@435: // Must happen AFTER invocation_counter check and stack overflow check, duke@435: // so method is not locked if overflows. 
duke@435: duke@435: if (synchronized) { duke@435: lock_method(); duke@435: } else { duke@435: #ifdef ASSERT duke@435: { Label ok; duke@435: __ ld(Laccess_flags, O0); duke@435: __ btst(JVM_ACC_SYNCHRONIZED, O0); duke@435: __ br( Assembler::zero, false, Assembler::pt, ok); duke@435: __ delayed()->nop(); duke@435: __ stop("method needs synchronization"); duke@435: __ bind(ok); duke@435: } duke@435: #endif // ASSERT duke@435: } duke@435: duke@435: duke@435: // start execution duke@435: __ verify_thread(); duke@435: duke@435: // JVMTI support duke@435: __ notify_method_entry(); duke@435: duke@435: // native call duke@435: duke@435: // (note that O0 is never an oop--at most it is a handle) duke@435: // It is important not to smash any handles created by this call, duke@435: // until any oop handle in O0 is dereferenced. duke@435: duke@435: // (note that the space for outgoing params is preallocated) duke@435: duke@435: // get signature handler duke@435: { Label L; duke@435: __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::signature_handler_offset())), G3_scratch); duke@435: __ tst(G3_scratch); duke@435: __ brx(Assembler::notZero, false, Assembler::pt, L); duke@435: __ delayed()->nop(); duke@435: __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod); duke@435: __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::signature_handler_offset())), G3_scratch); duke@435: __ bind(L); duke@435: } duke@435: duke@435: // Push a new frame so that the args will really be stored in duke@435: // Copy a few locals across so the new frame has the variables duke@435: // we need but these values will be dead at the jni call and duke@435: // therefore not gc volatile like the values in the current duke@435: // frame (Lmethod in particular) duke@435: duke@435: // Flush the method pointer to the register save area duke@435: __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS); duke@435: __ mov(Llocals, O1); duke@435: // calculate where the mirror handle body is allocated in the interpreter frame: duke@435: duke@435: Address mirror(FP, 0, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS); duke@435: __ add(mirror, O2); duke@435: duke@435: // Calculate current frame size duke@435: __ sub(SP, FP, O3); // Calculate negative of current frame size duke@435: __ save(SP, O3, SP); // Allocate an identical sized frame duke@435: duke@435: // Note I7 has leftover trash. Slow signature handler will fill it in duke@435: // should we get there. Normal jni call will set reasonable last_Java_pc duke@435: // below (and fix I7 so the stack trace doesn't have a meaningless frame duke@435: // in it). duke@435: duke@435: // Load interpreter frame's Lmethod into same register here duke@435: duke@435: __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod); duke@435: duke@435: __ mov(I1, Llocals); duke@435: __ mov(I2, Lscratch2); // save the address of the mirror duke@435: duke@435: duke@435: // ONLY Lmethod and Llocals are valid here! 
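  // Illustrative summary (comment only) of what the signature handler call
  // below is expected to produce, per the JNI convention assumed in this file:
  //   O0            - left free; later loaded with the JNIEnv* in the call's
  //                   delay slot
  //   O1            - first Java argument (receiver handle, or the mirror
  //                   handle for static methods)
  //   O2..O5, stack - remaining arguments, with oops passed as handles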
duke@435: duke@435: // call signature handler, It will move the arg properly since Llocals in current frame duke@435: // matches that in outer frame duke@435: duke@435: __ callr(G3_scratch, 0); duke@435: __ delayed()->nop(); duke@435: duke@435: // Result handler is in Lscratch duke@435: duke@435: // Reload interpreter frame's Lmethod since slow signature handler may block duke@435: __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod); duke@435: duke@435: { Label not_static; duke@435: duke@435: __ ld(Laccess_flags, O0); duke@435: __ btst(JVM_ACC_STATIC, O0); duke@435: __ br( Assembler::zero, false, Assembler::pt, not_static); duke@435: __ delayed()-> duke@435: // get native function entry point(O0 is a good temp until the very end) duke@435: ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::native_function_offset())), O0); duke@435: // for static methods insert the mirror argument duke@435: const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); duke@435: duke@435: __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc:: constants_offset())), O1); duke@435: __ ld_ptr(Address(O1, 0, constantPoolOopDesc::pool_holder_offset_in_bytes()), O1); duke@435: __ ld_ptr(O1, mirror_offset, O1); duke@435: #ifdef ASSERT duke@435: if (!PrintSignatureHandlers) // do not dirty the output with this duke@435: { Label L; duke@435: __ tst(O1); duke@435: __ brx(Assembler::notZero, false, Assembler::pt, L); duke@435: __ delayed()->nop(); duke@435: __ stop("mirror is missing"); duke@435: __ bind(L); duke@435: } duke@435: #endif // ASSERT duke@435: __ st_ptr(O1, Lscratch2, 0); duke@435: __ mov(Lscratch2, O1); duke@435: __ bind(not_static); duke@435: } duke@435: duke@435: // At this point, arguments have been copied off of stack into duke@435: // their JNI positions, which are O1..O5 and SP[68..]. duke@435: // Oops are boxed in-place on the stack, with handles copied to arguments. duke@435: // The result handler is in Lscratch. O0 will shortly hold the JNIEnv*. duke@435: duke@435: #ifdef ASSERT duke@435: { Label L; duke@435: __ tst(O0); duke@435: __ brx(Assembler::notZero, false, Assembler::pt, L); duke@435: __ delayed()->nop(); duke@435: __ stop("native entry point is missing"); duke@435: __ bind(L); duke@435: } duke@435: #endif // ASSERT duke@435: duke@435: // duke@435: // setup the frame anchor duke@435: // duke@435: // The scavenge function only needs to know that the PC of this frame is duke@435: // in the interpreter method entry code, it doesn't need to know the exact duke@435: // PC and hence we can use O7 which points to the return address from the duke@435: // previous call in the code stream (signature handler function) duke@435: // duke@435: // The other trick is we set last_Java_sp to FP instead of the usual SP because duke@435: // we have pushed the extra frame in order to protect the volatile register(s) duke@435: // in that frame when we return from the jni call duke@435: // duke@435: duke@435: __ set_last_Java_frame(FP, O7); duke@435: __ mov(O7, I7); // make dummy interpreter frame look like one above, duke@435: // not meaningless information that'll confuse me. duke@435: duke@435: // flush the windows now. 
We don't care about the current (protection) frame duke@435: // only the outer frames duke@435: duke@435: __ flush_windows(); duke@435: duke@435: // mark windows as flushed duke@435: Address flags(G2_thread, duke@435: 0, duke@435: in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset())); duke@435: __ set(JavaFrameAnchor::flushed, G3_scratch); duke@435: __ st(G3_scratch, flags); duke@435: duke@435: // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready. duke@435: duke@435: Address thread_state(G2_thread, 0, in_bytes(JavaThread::thread_state_offset())); duke@435: #ifdef ASSERT duke@435: { Label L; duke@435: __ ld(thread_state, G3_scratch); duke@435: __ cmp(G3_scratch, _thread_in_Java); duke@435: __ br(Assembler::equal, false, Assembler::pt, L); duke@435: __ delayed()->nop(); duke@435: __ stop("Wrong thread state in native stub"); duke@435: __ bind(L); duke@435: } duke@435: #endif // ASSERT duke@435: __ set(_thread_in_native, G3_scratch); duke@435: __ st(G3_scratch, thread_state); duke@435: duke@435: // Call the jni method, using the delay slot to set the JNIEnv* argument. duke@435: __ save_thread(L7_thread_cache); // save Gthread duke@435: __ callr(O0, 0); duke@435: __ delayed()-> duke@435: add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0); duke@435: duke@435: // Back from jni method Lmethod in this frame is DEAD, DEAD, DEAD duke@435: duke@435: __ restore_thread(L7_thread_cache); // restore G2_thread coleenp@548: __ reinit_heapbase(); duke@435: duke@435: // must we block? duke@435: duke@435: // Block, if necessary, before resuming in _thread_in_Java state. duke@435: // In order for GC to work, don't clear the last_Java_sp until after blocking. duke@435: { Label no_block; duke@435: Address sync_state(G3_scratch, SafepointSynchronize::address_of_state()); duke@435: duke@435: // Switch thread to "native transition" state before reading the synchronization state. duke@435: // This additional state is necessary because reading and testing the synchronization duke@435: // state is not atomic w.r.t. GC, as this scenario demonstrates: duke@435: // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted. duke@435: // VM thread changes sync state to synchronizing and suspends threads for GC. duke@435: // Thread A is resumed to finish this native method, but doesn't block here since it duke@435: // didn't see any synchronization is progress, and escapes. duke@435: __ set(_thread_in_native_trans, G3_scratch); duke@435: __ st(G3_scratch, thread_state); duke@435: if(os::is_MP()) { duke@435: if (UseMembar) { duke@435: // Force this write out before the read below duke@435: __ membar(Assembler::StoreLoad); duke@435: } else { duke@435: // Write serialization page so VM thread can do a pseudo remote membar. duke@435: // We use the current thread pointer to calculate a thread specific duke@435: // offset to write to within the page. This minimizes bus traffic duke@435: // due to cache line collision. 
duke@435: __ serialize_memory(G2_thread, G1_scratch, G3_scratch); duke@435: } duke@435: } duke@435: __ load_contents(sync_state, G3_scratch); duke@435: __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized); duke@435: duke@435: Label L; duke@435: Address suspend_state(G2_thread, 0, in_bytes(JavaThread::suspend_flags_offset())); duke@435: __ br(Assembler::notEqual, false, Assembler::pn, L); duke@435: __ delayed()-> duke@435: ld(suspend_state, G3_scratch); duke@435: __ cmp(G3_scratch, 0); duke@435: __ br(Assembler::equal, false, Assembler::pt, no_block); duke@435: __ delayed()->nop(); duke@435: __ bind(L); duke@435: duke@435: // Block. Save any potential method result value before the operation and duke@435: // use a leaf call to leave the last_Java_frame setup undisturbed. duke@435: save_native_result(); duke@435: __ call_VM_leaf(L7_thread_cache, duke@435: CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), duke@435: G2_thread); duke@435: duke@435: // Restore any method result value duke@435: restore_native_result(); duke@435: __ bind(no_block); duke@435: } duke@435: duke@435: // Clear the frame anchor now duke@435: duke@435: __ reset_last_Java_frame(); duke@435: duke@435: // Move the result handler address duke@435: __ mov(Lscratch, G3_scratch); duke@435: // return possible result to the outer frame duke@435: #ifndef __LP64 duke@435: __ mov(O0, I0); duke@435: __ restore(O1, G0, O1); duke@435: #else duke@435: __ restore(O0, G0, O0); duke@435: #endif /* __LP64 */ duke@435: duke@435: // Move result handler to expected register duke@435: __ mov(G3_scratch, Lscratch); duke@435: duke@435: // Back in normal (native) interpreter frame. State is thread_in_native_trans duke@435: // switch to thread_in_Java. duke@435: duke@435: __ set(_thread_in_Java, G3_scratch); duke@435: __ st(G3_scratch, thread_state); duke@435: duke@435: // reset handle block duke@435: __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), G3_scratch); duke@435: __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes()); duke@435: duke@435: // If we have an oop result store it where it will be safe for any further gc duke@435: // until we return now that we've released the handle it might be protected by duke@435: duke@435: { duke@435: Label no_oop, store_result; duke@435: duke@435: __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch); duke@435: __ cmp(G3_scratch, Lscratch); duke@435: __ brx(Assembler::notEqual, false, Assembler::pt, no_oop); duke@435: __ delayed()->nop(); duke@435: __ addcc(G0, O0, O0); duke@435: __ brx(Assembler::notZero, true, Assembler::pt, store_result); // if result is not NULL: duke@435: __ delayed()->ld_ptr(O0, 0, O0); // unbox it duke@435: __ mov(G0, O0); duke@435: duke@435: __ bind(store_result); duke@435: // Store it where gc will look for it and result handler expects it. duke@435: __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS); duke@435: duke@435: __ bind(no_oop); duke@435: duke@435: } duke@435: duke@435: duke@435: // handle exceptions (exception handling will handle unlocking!) 
duke@435: { Label L; duke@435: Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset())); duke@435: duke@435: __ ld_ptr(exception_addr, Gtemp); duke@435: __ tst(Gtemp); duke@435: __ brx(Assembler::equal, false, Assembler::pt, L); duke@435: __ delayed()->nop(); duke@435: // Note: This could be handled more efficiently since we know that the native duke@435: // method doesn't have an exception handler. We could directly return duke@435: // to the exception handler for the caller. duke@435: __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception)); duke@435: __ should_not_reach_here(); duke@435: __ bind(L); duke@435: } duke@435: duke@435: // JVMTI support (preserves thread register) duke@435: __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI); duke@435: duke@435: if (synchronized) { duke@435: // save and restore any potential method result value around the unlocking operation duke@435: save_native_result(); duke@435: duke@435: __ add( __ top_most_monitor(), O1); duke@435: __ unlock_object(O1); duke@435: duke@435: restore_native_result(); duke@435: } duke@435: duke@435: #if defined(COMPILER2) && !defined(_LP64) duke@435: duke@435: // C2 expects long results in G1 we can't tell if we're returning to interpreted duke@435: // or compiled so just be safe. duke@435: duke@435: __ sllx(O0, 32, G1); // Shift bits into high G1 duke@435: __ srl (O1, 0, O1); // Zero extend O1 duke@435: __ or3 (O1, G1, G1); // OR 64 bits into G1 duke@435: duke@435: #endif /* COMPILER2 && !_LP64 */ duke@435: duke@435: // dispose of return address and remove activation duke@435: #ifdef ASSERT duke@435: { duke@435: Label ok; duke@435: __ cmp(I5_savedSP, FP); duke@435: __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, ok); duke@435: __ delayed()->nop(); duke@435: __ stop("bad I5_savedSP value"); duke@435: __ should_not_reach_here(); duke@435: __ bind(ok); duke@435: } duke@435: #endif duke@435: if (TraceJumps) { duke@435: // Move target to register that is recordable duke@435: __ mov(Lscratch, G3_scratch); duke@435: __ JMP(G3_scratch, 0); duke@435: } else { duke@435: __ jmp(Lscratch, 0); duke@435: } duke@435: __ delayed()->nop(); duke@435: duke@435: duke@435: if (inc_counter) { duke@435: // handle invocation counter overflow duke@435: __ bind(invocation_counter_overflow); duke@435: generate_counter_overflow(Lcontinue); duke@435: } duke@435: duke@435: duke@435: duke@435: return entry; duke@435: } duke@435: duke@435: duke@435: // Generic method entry to (asm) interpreter duke@435: //------------------------------------------------------------------------------------------------------------------------ duke@435: // duke@435: address InterpreterGenerator::generate_normal_entry(bool synchronized) { duke@435: address entry = __ pc(); duke@435: duke@435: bool inc_counter = UseCompiler || CountCompiledCalls; duke@435: duke@435: // the following temporary registers are used during frame creation duke@435: const Register Gtmp1 = G3_scratch ; duke@435: const Register Gtmp2 = G1_scratch; duke@435: duke@435: // make sure registers are different! duke@435: assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2); duke@435: duke@435: const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())); duke@435: const Address size_of_locals (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset())); duke@435: // Seems like G5_method is live at the point this is used. 
So we could make this look consistent duke@435: // and use in the asserts. duke@435: const Address access_flags (Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset())); duke@435: duke@435: __ verify_oop(G5_method); duke@435: duke@435: const Register Glocals_size = G3; duke@435: assert_different_registers(Glocals_size, G4_scratch, Gframe_size); duke@435: duke@435: // make sure method is not native & not abstract duke@435: // rethink these assertions - they can be simplified and shared (gri 2/25/2000) duke@435: #ifdef ASSERT duke@435: __ ld(G5_method, in_bytes(methodOopDesc::access_flags_offset()), Gtmp1); duke@435: { duke@435: Label L; duke@435: __ btst(JVM_ACC_NATIVE, Gtmp1); duke@435: __ br(Assembler::zero, false, Assembler::pt, L); duke@435: __ delayed()->nop(); duke@435: __ stop("tried to execute native method as non-native"); duke@435: __ bind(L); duke@435: } duke@435: { Label L; duke@435: __ btst(JVM_ACC_ABSTRACT, Gtmp1); duke@435: __ br(Assembler::zero, false, Assembler::pt, L); duke@435: __ delayed()->nop(); duke@435: __ stop("tried to execute abstract method as non-abstract"); duke@435: __ bind(L); duke@435: } duke@435: #endif // ASSERT duke@435: duke@435: // generate the code to allocate the interpreter stack frame duke@435: duke@435: generate_fixed_frame(false); duke@435: duke@435: #ifdef FAST_DISPATCH duke@435: __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables); duke@435: // set bytecode dispatch table base duke@435: #endif duke@435: duke@435: // duke@435: // Code to initialize the extra (i.e. non-parm) locals duke@435: // duke@435: Register init_value = noreg; // will be G0 if we must clear locals duke@435: // The way the code was setup before zerolocals was always true for vanilla java entries. duke@435: // It could only be false for the specialized entries like accessor or empty which have duke@435: // no extra locals so the testing was a waste of time and the extra locals were always duke@435: // initialized. We removed this extra complication to already over complicated code. duke@435: duke@435: init_value = G0; duke@435: Label clear_loop; duke@435: duke@435: // NOTE: If you change the frame layout, this code will need to duke@435: // be updated! duke@435: __ lduh( size_of_locals, O2 ); duke@435: __ lduh( size_of_parameters, O1 ); duke@435: __ sll( O2, Interpreter::logStackElementSize(), O2); duke@435: __ sll( O1, Interpreter::logStackElementSize(), O1 ); duke@435: __ sub( Llocals, O2, O2 ); duke@435: __ sub( Llocals, O1, O1 ); duke@435: duke@435: __ bind( clear_loop ); duke@435: __ inc( O2, wordSize ); duke@435: duke@435: __ cmp( O2, O1 ); duke@435: __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop ); duke@435: __ delayed()->st_ptr( init_value, O2, 0 ); duke@435: duke@435: const Address do_not_unlock_if_synchronized(G2_thread, 0, duke@435: in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); duke@435: // Since at this point in the method invocation the exception handler duke@435: // would try to exit the monitor of synchronized methods which hasn't duke@435: // been entered yet, we set the thread local variable duke@435: // _do_not_unlock_if_synchronized to true. If any exception was thrown by duke@435: // runtime, exception handling i.e. unlock_if_synchronized_method will duke@435: // check this thread local flag. 
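  // Rough equivalent of the flag protocol used here (comment only, not
  // generated code):
  //   thread->_do_not_unlock_if_synchronized = true;   // before anything that
  //                                                     // can throw pre-lock
  //   ... increment invocation counter, bang shadow pages ...
  //   thread->_do_not_unlock_if_synchronized = false;  // cleared just before
  //                                                     // the method is locked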
duke@435: __ movbool(true, G3_scratch); duke@435: __ stbool(G3_scratch, do_not_unlock_if_synchronized); duke@435: duke@435: // increment invocation counter and check for overflow duke@435: // duke@435: // Note: checking for negative value instead of overflow duke@435: // so we have a 'sticky' overflow test (may be of duke@435: // importance as soon as we have true MT/MP) duke@435: Label invocation_counter_overflow; duke@435: Label profile_method; duke@435: Label profile_method_continue; duke@435: Label Lcontinue; duke@435: if (inc_counter) { duke@435: generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue); duke@435: if (ProfileInterpreter) { duke@435: __ bind(profile_method_continue); duke@435: } duke@435: } duke@435: __ bind(Lcontinue); duke@435: duke@435: bang_stack_shadow_pages(false); duke@435: duke@435: // reset the _do_not_unlock_if_synchronized flag duke@435: __ stbool(G0, do_not_unlock_if_synchronized); duke@435: duke@435: // check for synchronized methods duke@435: // Must happen AFTER invocation_counter check and stack overflow check, duke@435: // so method is not locked if overflows. duke@435: duke@435: if (synchronized) { duke@435: lock_method(); duke@435: } else { duke@435: #ifdef ASSERT duke@435: { Label ok; duke@435: __ ld(access_flags, O0); duke@435: __ btst(JVM_ACC_SYNCHRONIZED, O0); duke@435: __ br( Assembler::zero, false, Assembler::pt, ok); duke@435: __ delayed()->nop(); duke@435: __ stop("method needs synchronization"); duke@435: __ bind(ok); duke@435: } duke@435: #endif // ASSERT duke@435: } duke@435: duke@435: // start execution duke@435: duke@435: __ verify_thread(); duke@435: duke@435: // jvmti support duke@435: __ notify_method_entry(); duke@435: duke@435: // start executing instructions duke@435: __ dispatch_next(vtos); duke@435: duke@435: duke@435: if (inc_counter) { duke@435: if (ProfileInterpreter) { duke@435: // We have decided to profile this method in the interpreter duke@435: __ bind(profile_method); duke@435: duke@435: __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), Lbcp, true); duke@435: duke@435: #ifdef ASSERT duke@435: __ tst(O0); duke@435: __ breakpoint_trap(Assembler::notEqual); duke@435: #endif duke@435: duke@435: __ set_method_data_pointer(); duke@435: duke@435: __ ba(false, profile_method_continue); duke@435: __ delayed()->nop(); duke@435: } duke@435: duke@435: // handle invocation counter overflow duke@435: __ bind(invocation_counter_overflow); duke@435: generate_counter_overflow(Lcontinue); duke@435: } duke@435: duke@435: duke@435: return entry; duke@435: } duke@435: duke@435: duke@435: //---------------------------------------------------------------------------------------------------- duke@435: // Entry points & stack frame layout duke@435: // duke@435: // Here we generate the various kind of entries into the interpreter. duke@435: // The two main entry type are generic bytecode methods and native call method. duke@435: // These both come in synchronized and non-synchronized versions but the duke@435: // frame layout they create is very similar. The other method entry duke@435: // types are really just special purpose entries that are really entry duke@435: // and interpretation all in one. These are for trivial methods like duke@435: // accessor, empty, or special math methods. 
duke@435: //
duke@435: // When control flow reaches any of the entry types for the interpreter
duke@435: // the following holds ->
duke@435: //
duke@435: // C2 Calling Conventions:
duke@435: //
duke@435: // The entry code below assumes that the following registers are set
duke@435: // when coming in:
duke@435: //    G5_method: holds the methodOop of the method to call
duke@435: //    Lesp:      points to the TOS of the caller's expression stack
duke@435: //               after having pushed all the parameters
duke@435: //
duke@435: // The entry code does the following to set up an interpreter frame:
duke@435: //   pop parameters from the caller's stack by adjusting Lesp
duke@435: //   set O0 to Lesp
duke@435: //   compute X = (max_locals - num_parameters)
duke@435: //   bump SP up by X to accommodate the extra locals
duke@435: //   compute X = max_expression_stack
duke@435: //               + vm_local_words
duke@435: //               + 16 words of register save area
duke@435: //   save frame doing a save sp, -X, sp growing towards lower addresses
duke@435: //   set Lbcp, Lmethod, LcpoolCache
duke@435: //   set Llocals to i0
duke@435: //   set Lmonitors to FP - rounded_vm_local_words
duke@435: //   set Lesp to Lmonitors - 4
duke@435: //
duke@435: // The frame has now been set up to do the rest of the entry code
duke@435:
duke@435: // Try this optimization: Most method entries could live in a
duke@435: // "one size fits all" stack frame without all the dynamic size
duke@435: // calculations. It might be profitable to do all this calculation
duke@435: // statically and approximately for "small enough" methods.
duke@435:
duke@435: //-----------------------------------------------------------------------------------------------
duke@435:
duke@435: // C1 Calling conventions
duke@435: //
duke@435: // Upon method entry, the following registers are set up:
duke@435: //
duke@435: // g2 G2_thread: current thread
duke@435: // g5 G5_method: method to activate
duke@435: // g4 Gargs    : pointer to last argument
duke@435: //
duke@435: //
duke@435: // Stack:
duke@435: //
duke@435: // +---------------+ <--- sp
duke@435: // |               |
duke@435: // : reg save area :
duke@435: // |               |
duke@435: // +---------------+ <--- sp + 0x40
duke@435: // |               |
duke@435: // : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
duke@435: // |               |
duke@435: // +---------------+ <--- sp + 0x5c
duke@435: // |               |
duke@435: // :     free      :
duke@435: // |               |
duke@435: // +---------------+ <--- Gargs
duke@435: // |               |
duke@435: // :   arguments   :
duke@435: // |               |
duke@435: // +---------------+
duke@435: // |               |
duke@435: //
duke@435: //
duke@435: //
duke@435: // AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
duke@435: //
duke@435: // +---------------+ <--- sp
duke@435: // |               |
duke@435: // : reg save area :
duke@435: // |               |
duke@435: // +---------------+ <--- sp + 0x40
duke@435: // |               |
duke@435: // : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
duke@435: // |               |
duke@435: // +---------------+ <--- sp + 0x5c
duke@435: // |               |
duke@435: // :               :
duke@435: // |               | <--- Lesp
duke@435: // +---------------+ <--- Lmonitors (fp - 0x18)
duke@435: // |   VM locals   |
duke@435: // +---------------+ <--- fp
duke@435: // |               |
duke@435: // : reg save area :
duke@435: // |               |
duke@435: // +---------------+ <--- fp + 0x40
duke@435: // |               |
duke@435: // : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
duke@435: // |               |
duke@435: // +---------------+ <--- fp + 0x5c
duke@435: // |               |
duke@435: // :     free      :
duke@435: // |               |
duke@435: // +---------------+
duke@435: // |               |
duke@435: // : nonarg locals :
duke@435: // |               |
duke@435: // +---------------+
duke@435: // |               |
duke@435: // :   arguments   :
duke@435: // |               | <--- Llocals
duke@435: // +---------------+ <--- Gargs
duke@435: // |               |
duke@435:
duke@435: static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
duke@435:
duke@435:   // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
duke@435:   // expression stack, the callee will have callee_extra_locals (so we can account for
duke@435:   // frame extension) and monitor_size for monitors. Basically we need to calculate
duke@435:   // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
duke@435:   //
duke@435:   //
duke@435:   // The big complicating thing here is that we must ensure that the stack stays properly
duke@435:   // aligned. This would be even uglier if monitor size wasn't a multiple of what the stack
duke@435:   // needs to be aligned to. We are given that the sp (fp) is already aligned by
duke@435:   // the caller so we must ensure that it is properly aligned for our callee.
duke@435:   //
duke@435:   const int rounded_vm_local_words =
duke@435:        round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
duke@435:   // callee_locals and max_stack are counts, not the size in frame.
duke@435:   const int locals_size =
duke@435:        round_to(callee_extra_locals * Interpreter::stackElementWords(), WordsPerLong);
duke@435:   const int max_stack_words = max_stack * Interpreter::stackElementWords();
duke@435:   return (round_to((max_stack_words
duke@435:                    + rounded_vm_local_words
duke@435:                    + frame::memory_parameter_word_sp_offset), WordsPerLong)
duke@435:          // already rounded
duke@435:          + locals_size + monitor_size);
duke@435: }
duke@435:
duke@435: // How much stack a top interpreter activation for a method needs, in words.
duke@435: int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
duke@435:
duke@435:   // See call_stub code
duke@435:   int call_stub_size = round_to(7 + frame::memory_parameter_word_sp_offset,
duke@435:                                 WordsPerLong);    // 7 + register save area
duke@435:
duke@435:   // Save space for one monitor to get into the interpreted method in case
duke@435:   // the method is synchronized
duke@435:   int monitor_size = method->is_synchronized() ?
duke@435:                                 1*frame::interpreter_frame_monitor_size() : 0;
duke@435:   return size_activation_helper(method->max_locals(), method->max_stack(),
duke@435:                                 monitor_size) + call_stub_size;
duke@435: }
duke@435:
duke@435: int AbstractInterpreter::layout_activation(methodOop method,
duke@435:                                            int tempcount,
duke@435:                                            int popframe_extra_args,
duke@435:                                            int moncount,
duke@435:                                            int callee_param_count,
duke@435:                                            int callee_local_count,
duke@435:                                            frame* caller,
duke@435:                                            frame* interpreter_frame,
duke@435:                                            bool is_top_frame) {
duke@435:   // Note: This calculation must exactly parallel the frame setup
duke@435:   // in InterpreterGenerator::generate_fixed_frame.
duke@435:   // If f!=NULL, set up the following variables:
duke@435:   //   - Lmethod
duke@435:   //   - Llocals
duke@435:   //   - Lmonitors (to the indicated number of monitors)
duke@435:   //   - Lesp (to the indicated number of temps)
duke@435:   // The frame f (if not NULL) on entry is a description of the caller of the frame
duke@435:   // we are about to lay out. We are guaranteed that we will be able to fill in a
duke@435:   // new interpreter frame as its callee (i.e. the stack space is allocated and
duke@435:   // the amount was determined by an earlier call to this method with f == NULL).
duke@435:   // On return f (if not NULL) will describe the interpreter frame we just laid out.
duke@435:
duke@435:   int monitor_size = moncount * frame::interpreter_frame_monitor_size();
duke@435:   int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
duke@435:
duke@435:   assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
duke@435:   //
duke@435:   // Note: if you look closely this appears to be doing something much different
duke@435:   // than generate_fixed_frame. What is happening is this. On sparc we have to do
duke@435:   // this dance with interpreter_sp_adjustment because the window save area would
duke@435:   // appear just below the bottom (tos) of the caller's java expression stack. Because
duke@435:   // the interpreter wants to have the locals completely contiguous, generate_fixed_frame
duke@435:   // will adjust the caller's sp for the "extra locals" (max_locals - parameter_size).
duke@435:   // Now in generate_fixed_frame the extension of the caller's sp happens in the callee.
duke@435:   // In this code the opposite occurs: the caller adjusts its own stack based on the callee.
duke@435:   // This is mostly ok but it does cause a problem when we get to the initial frame (the oldest),
duke@435:   // because the oldest frame would have adjusted its caller's frame and yet that frame
duke@435:   // already exists and isn't part of this array of frames we are unpacking. So at first
duke@435:   // glance this would seem to mess up that frame. However Deoptimization::fetch_unroll_info_helper(),
duke@435:   // after it calculates all of the frames' on_stack_size()s, will then figure out the
duke@435:   // amount to adjust the caller of the initial (oldest) frame and the calculation will all
duke@435:   // add up. It does seem like it would be simpler to account for the adjustment here (and remove the
duke@435:   // callee... parameters here). However this would mean that this routine would have to take
duke@435:   // the caller frame as input so we could adjust its sp (and set its interpreter_sp_adjustment)
duke@435:   // and run the calling loop in the reverse order. This would also appear to mean making
duke@435:   // this code aware of what the interactions are when that initial caller frame was an osr or
duke@435:   // other adapter frame. Deoptimization is complicated enough and hard enough to debug that
duke@435:   // there is no sense in messing with working code.
duke@435:   //
duke@435:
duke@435:   int rounded_cls = round_to((callee_local_count - callee_param_count), WordsPerLong);
duke@435:   assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");
duke@435:
duke@435:   int raw_frame_size = size_activation_helper(rounded_cls, method->max_stack(),
duke@435:                                               monitor_size);
duke@435:
duke@435:   if (interpreter_frame != NULL) {
duke@435:     // The skeleton frame must already look like an interpreter frame
duke@435:     // even if not fully filled out.
duke@435:     assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");
duke@435:
duke@435:     intptr_t* fp = interpreter_frame->fp();
duke@435:
duke@435:     JavaThread* thread = JavaThread::current();
duke@435:     RegisterMap map(thread, false);
duke@435:     // More verification that skeleton frame is properly walkable
duke@435:     assert(fp == caller->sp(), "fp must match");
duke@435:
duke@435:     intptr_t* montop = fp - rounded_vm_local_words;
duke@435:
duke@435:     // preallocate monitors (cf. __ add_monitor_to_stack)
duke@435:     intptr_t* monitors = montop - monitor_size;
duke@435:
duke@435:     // preallocate stack space
duke@435:     intptr_t* esp = monitors - 1 -
duke@435:                     (tempcount * Interpreter::stackElementWords()) -
duke@435:                     popframe_extra_args;
duke@435:
duke@435:     int local_words = method->max_locals() * Interpreter::stackElementWords();
duke@435:     int parm_words  = method->size_of_parameters() * Interpreter::stackElementWords();
duke@435:     NEEDS_CLEANUP;
duke@435:     intptr_t* locals;
duke@435:     if (caller->is_interpreted_frame()) {
duke@435:       // We can force the locals area to end up properly overlapping the top of the expression stack.
duke@435:       intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
duke@435:       // Note that this computation means we replace size_of_parameters() values from the caller
duke@435:       // interpreter frame's expression stack with our argument locals
duke@435:       locals = Lesp_ptr + parm_words;
duke@435:       int delta = local_words - parm_words;
duke@435:       int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
duke@435:       *interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
duke@435:     } else {
duke@435:       assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
duke@435:       // Don't have Lesp available; lay out locals block in the caller
duke@435:       // adjacent to the register window save area.
duke@435:       //
duke@435:       // Compiled frames do not allocate a varargs area, which is why this if
duke@435:       // statement is needed.
duke@435:       //
duke@435:       if (caller->is_compiled_frame()) {
duke@435:         locals = fp + frame::register_save_words + local_words - 1;
duke@435:       } else {
duke@435:         locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
duke@435:       }
duke@435:       if (!caller->is_entry_frame()) {
duke@435:         // Caller wants its own SP back
duke@435:         int caller_frame_size = caller->cb()->frame_size();
duke@435:         *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
duke@435:       }
duke@435:     }
duke@435:     if (TraceDeoptimization) {
duke@435:       if (caller->is_entry_frame()) {
duke@435:         // make sure I5_savedSP and the entry frame's notion of saved SP
duke@435:         // agree. This assertion duplicates a check in entry frame code
duke@435:         // but catches the failure earlier.
duke@435:         assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
duke@435:                "would change callers SP");
duke@435:       }
duke@435:       if (caller->is_entry_frame()) {
duke@435:         tty->print("entry ");
duke@435:       }
duke@435:       if (caller->is_compiled_frame()) {
duke@435:         tty->print("compiled ");
duke@435:         if (caller->is_deoptimized_frame()) {
duke@435:           tty->print("(deopt) ");
duke@435:         }
duke@435:       }
duke@435:       if (caller->is_interpreted_frame()) {
duke@435:         tty->print("interpreted ");
duke@435:       }
duke@435:       tty->print_cr("caller fp=0x%x sp=0x%x", caller->fp(), caller->sp());
duke@435:       tty->print_cr("save area = 0x%x, 0x%x", caller->sp(), caller->sp() + 16);
duke@435:       tty->print_cr("save area = 0x%x, 0x%x", caller->fp(), caller->fp() + 16);
duke@435:       tty->print_cr("interpreter fp=0x%x sp=0x%x", interpreter_frame->fp(), interpreter_frame->sp());
duke@435:       tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->sp(), interpreter_frame->sp() + 16);
duke@435:       tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->fp(), interpreter_frame->fp() + 16);
duke@435:       tty->print_cr("Llocals = 0x%x", locals);
duke@435:       tty->print_cr("Lesp = 0x%x", esp);
duke@435:       tty->print_cr("Lmonitors = 0x%x", monitors);
duke@435:     }
duke@435:
duke@435:     if (method->max_locals() > 0) {
duke@435:       assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
duke@435:       assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
duke@435:       assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
duke@435:       assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
duke@435:     }
duke@435: #ifdef _LP64
duke@435:     assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
duke@435: #endif
duke@435:
duke@435:     *interpreter_frame->register_addr(Lmethod)     = (intptr_t) method;
duke@435:     *interpreter_frame->register_addr(Llocals)     = (intptr_t) locals;
duke@435:     *interpreter_frame->register_addr(Lmonitors)   = (intptr_t) monitors;
duke@435:     *interpreter_frame->register_addr(Lesp)        = (intptr_t) esp;
duke@435:     // Llast_SP will be same as SP as there is no adapter space
duke@435:     *interpreter_frame->register_addr(Llast_SP)    = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
duke@435:     *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
duke@435: #ifdef FAST_DISPATCH
duke@435:     *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
duke@435: #endif
duke@435:
duke@435:
duke@435: #ifdef ASSERT
duke@435:     BasicObjectLock* mp = (BasicObjectLock*)monitors;
duke@435:
duke@435:     assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
duke@435:     assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize())+Interpreter::value_offset_in_bytes()), "locals match");
duke@435:     assert(interpreter_frame->interpreter_frame_monitor_end() == mp, "monitor_end matches");
duke@435:     assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
duke@435:     assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");
duke@435:
duke@435:     // check bounds
duke@435:     intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1);
duke@435:     intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words;
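    // Illustrative sketch (comment only) of the layout implied by the asserts below,
    // with addresses decreasing as we go down this picture:
    //
    //   hi  = fp - rounded_vm_local_words            // just below the VM locals
    //   ... montop / monitors ...                    // must satisfy  lo < monitors && montop <= hi
    //   ... esp (expression stack top) ...           // must satisfy  lo <= esp && esp < monitors
    //   lo  = sp + memory_parameter_word_sp_offset - 1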
duke@435:     assert(lo < monitors && montop <= hi, "monitors in bounds");
duke@435:     assert(lo <= esp && esp < monitors, "esp in bounds");
duke@435: #endif // ASSERT
duke@435:   }
duke@435:
duke@435:   return raw_frame_size;
duke@435: }
duke@435:
duke@435: //----------------------------------------------------------------------------------------------------
duke@435: // Exceptions
duke@435: void TemplateInterpreterGenerator::generate_throw_exception() {
duke@435:
duke@435:   // Entry point in previous activation (i.e., if the caller was interpreted)
duke@435:   Interpreter::_rethrow_exception_entry = __ pc();
duke@435:   // O0: exception
duke@435:
duke@435:   // entry point for exceptions thrown within interpreter code
duke@435:   Interpreter::_throw_exception_entry = __ pc();
duke@435:   __ verify_thread();
duke@435:   // expression stack is undefined here
duke@435:   // O0: exception, i.e. Oexception
duke@435:   // Lbcp: exception bcp
duke@435:   __ verify_oop(Oexception);
duke@435:
duke@435:
duke@435:   // expression stack must be empty before entering the VM in case of an exception
duke@435:   __ empty_expression_stack();
duke@435:   // find exception handler address and preserve exception oop
duke@435:   // call C routine to find handler and jump to it
duke@435:   __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
duke@435:   __ push_ptr(O1); // push exception for exception handler bytecodes
duke@435:
duke@435:   __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
duke@435:   __ delayed()->nop();
duke@435:
duke@435:
duke@435:   // if the exception is not handled in the current frame
duke@435:   // the frame is removed and the exception is rethrown
duke@435:   // (i.e. exception continuation is _rethrow_exception)
duke@435:   //
duke@435:   // Note: At this point the bci still refers to the instruction which caused
duke@435:   //       the exception and the expression stack is empty. Thus, for any VM calls
duke@435:   //       at this point, GC will find a legal oop map (with empty expression stack).
duke@435:
duke@435:   // in current activation
duke@435:   // tos: exception
duke@435:   // Lbcp: exception bcp
duke@435:
duke@435:   //
duke@435:   // JVMTI PopFrame support
duke@435:   //
duke@435:
duke@435:   Interpreter::_remove_activation_preserving_args_entry = __ pc();
duke@435:   Address popframe_condition_addr (G2_thread, 0, in_bytes(JavaThread::popframe_condition_offset()));
duke@435:   // Set the popframe_processing bit in popframe_condition indicating that we are
duke@435:   // currently handling popframe, so that call_VMs that may happen later do not trigger new
duke@435:   // popframe handling cycles.
duke@435:
duke@435:   __ ld(popframe_condition_addr, G3_scratch);
duke@435:   __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
duke@435:   __ stw(G3_scratch, popframe_condition_addr);
duke@435:
duke@435:   // Empty the expression stack, as in normal exception handling
duke@435:   __ empty_expression_stack();
duke@435:   __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
duke@435:
duke@435:   {
duke@435:     // Check to see whether we are returning to a deoptimized frame.
duke@435:     // (The PopFrame call ensures that the caller of the popped frame is
duke@435:     // either interpreted or compiled and deoptimizes it if compiled.)
duke@435:     // In this case, we can't call dispatch_next() after the frame is
duke@435:     // popped, but instead must save the incoming arguments and restore
duke@435:     // them after deoptimization has occurred.
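    // Illustrative sketch only (hypothetical pseudo-code, not the generated code):
    // the test performed below is roughly
    //
    //   if (Interpreter::contains(return_address /* I7 */)) {
    //     // caller is interpreted, hence not deoptimized: just return normally
    //   } else {
    //     // caller is compiled and (per the PopFrame contract) deoptimized:
    //     // preserve the argument block and let the deopt machinery restore it
    //   }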
duke@435:     //
duke@435:     // Note that we don't compare the return PC against the
duke@435:     // deoptimization blob's unpack entry because of the presence of
duke@435:     // adapter frames in C2.
duke@435:     Label caller_not_deoptimized;
duke@435:     __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
duke@435:     __ tst(O0);
duke@435:     __ brx(Assembler::notEqual, false, Assembler::pt, caller_not_deoptimized);
duke@435:     __ delayed()->nop();
duke@435:
duke@435:     const Register Gtmp1 = G3_scratch;
duke@435:     const Register Gtmp2 = G1_scratch;
duke@435:
duke@435:     // Compute size of arguments for saving when returning to deoptimized caller
duke@435:     __ lduh(Lmethod, in_bytes(methodOopDesc::size_of_parameters_offset()), Gtmp1);
duke@435:     __ sll(Gtmp1, Interpreter::logStackElementSize(), Gtmp1);
duke@435:     __ sub(Llocals, Gtmp1, Gtmp2);
duke@435:     __ add(Gtmp2, wordSize, Gtmp2);
duke@435:     // Save these arguments
duke@435:     __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
duke@435:     // Inform deoptimization that it is responsible for restoring these arguments
duke@435:     __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
duke@435:     Address popframe_condition_addr(G2_thread, 0, in_bytes(JavaThread::popframe_condition_offset()));
duke@435:     __ st(Gtmp1, popframe_condition_addr);
duke@435:
duke@435:     // Return from the current method
duke@435:     // The caller's SP was adjusted upon method entry to accommodate
duke@435:     // the callee's non-argument locals. Undo that adjustment.
duke@435:     __ ret();
duke@435:     __ delayed()->restore(I5_savedSP, G0, SP);
duke@435:
duke@435:     __ bind(caller_not_deoptimized);
duke@435:   }
duke@435:
duke@435:   // Clear the popframe condition flag
duke@435:   __ stw(G0 /* popframe_inactive */, popframe_condition_addr);
duke@435:
duke@435:   // Get out of the current method (how this is done depends on the particular compiler calling
duke@435:   // convention that the interpreter currently follows)
duke@435:   // The caller's SP was adjusted upon method entry to accommodate
duke@435:   // the callee's non-argument locals. Undo that adjustment.
duke@435:   __ restore(I5_savedSP, G0, SP);
duke@435:   // The method data pointer was incremented already during
duke@435:   // call profiling. We have to restore the mdp for the current bcp.
duke@435:   if (ProfileInterpreter) {
duke@435:     __ set_method_data_pointer_for_bcp();
duke@435:   }
duke@435:   // Resume bytecode interpretation at the current bcp
duke@435:   __ dispatch_next(vtos);
duke@435:   // end of JVMTI PopFrame support
duke@435:
duke@435:   Interpreter::_remove_activation_entry = __ pc();
duke@435:
duke@435:   // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
duke@435:   __ pop_ptr(Oexception); // get exception
duke@435:
duke@435:   // Intel has the following comment:
duke@435:   //// remove the activation (without doing throws on illegalMonitorExceptions)
duke@435:   // They remove the activation without checking for bad monitor state.
duke@435:   // %%% We should make sure this is the right semantics before implementing.
duke@435:
duke@435:   // %%% changed set_vm_result_2 to set_vm_result and get_vm_result_2 to get_vm_result. Is there a bug here?
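  // Illustrative sketch only (hypothetical pseudo-code, not the generated code):
  // the preservation pattern used below is roughly
  //
  //   thread->set_vm_result(exception_oop);   // stash the oop where GC can find and update it
  //   unlock_if_synchronized_method(...);     // may call into the VM
  //   notify_method_exit(...);                // may call into the VM
  //   exception_oop = thread->vm_result();    // reload the (possibly moved) oop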
duke@435:   __ set_vm_result(Oexception);
duke@435:   __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);
duke@435:
duke@435:   __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);
duke@435:
duke@435:   __ get_vm_result(Oexception);
duke@435:   __ verify_oop(Oexception);
duke@435:
duke@435:   const int return_reg_adjustment = frame::pc_return_offset;
duke@435:   Address issuing_pc_addr(I7, 0, return_reg_adjustment);
duke@435:
duke@435:   // We are done with this activation frame; find out where to go next.
duke@435:   // The continuation point will be an exception handler, which expects
duke@435:   // the following registers set up:
duke@435:   //
duke@435:   // Oexception: exception
duke@435:   // Oissuing_pc: the local call that threw the exception
duke@435:   // Other On: garbage
duke@435:   // In/Ln: the contents of the caller's register window
duke@435:   //
duke@435:   // We do the required restore at the last possible moment, because we
duke@435:   // need to preserve some state across a runtime call.
duke@435:   // (Remember that the caller activation is unknown--it might not be
duke@435:   // interpreted, so things like Lscratch are useless in the caller.)
duke@435:
duke@435:   // Although the Intel version uses call_C, we can use the more
duke@435:   // compact call_VM. (The only real difference on SPARC is a
duke@435:   // harmlessly ignored [re]set_last_Java_frame, compared with
duke@435:   // the Intel code which lacks this.)
duke@435:   __ mov(Oexception, Oexception ->after_save());      // get exception in I0 so it will be on O0 after restore
duke@435:   __ add(issuing_pc_addr, Oissuing_pc->after_save()); // likewise set I1 to a value local to the caller
duke@435:   __ super_call_VM_leaf(L7_thread_cache,
duke@435:                         CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
duke@435:                         Oissuing_pc->after_save());
duke@435:
duke@435:   // The caller's SP was adjusted upon method entry to accommodate
duke@435:   // the callee's non-argument locals. Undo that adjustment.
duke@435:   __ JMP(O0, 0); // return exception handler in caller
duke@435:   __ delayed()->restore(I5_savedSP, G0, SP);
duke@435:
duke@435:   // (same old exception object is already in Oexception; see above)
duke@435:   // Note that an "issuing PC" is actually the next PC after the call
duke@435: }
duke@435:
duke@435:
duke@435: //
duke@435: // JVMTI ForceEarlyReturn support
duke@435: //
duke@435:
duke@435: address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
duke@435:   address entry = __ pc();
duke@435:
duke@435:   __ empty_expression_stack();
duke@435:   __ load_earlyret_value(state);
duke@435:
duke@435:   __ ld_ptr(Address(G2_thread, 0, in_bytes(JavaThread::jvmti_thread_state_offset())), G3_scratch);
duke@435:   Address cond_addr(G3_scratch, 0, in_bytes(JvmtiThreadState::earlyret_state_offset()));
duke@435:
duke@435:   // Clear the earlyret state
duke@435:   __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);
duke@435:
duke@435:   __ remove_activation(state,
duke@435:                        /* throw_monitor_exception */ false,
duke@435:                        /* install_monitor_exception */ false);
duke@435:
duke@435:   // The caller's SP was adjusted upon method entry to accommodate
duke@435:   // the callee's non-argument locals. Undo that adjustment.
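  // A note on the idiom that follows (sketch only, not generated code): on SPARC,
  // 'restore rs1, rs2, rd' pops the register window and writes rs1 + rs2 into rd
  // of the caller's window, so placing
  //
  //   restore(I5_savedSP, G0, SP);   // caller's SP = I5_savedSP + 0
  //
  // in the delay slot of the return both restores the window and undoes the
  // extra-locals adjustment in a single instruction.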
duke@435:   __ ret();                      // return to caller
duke@435:   __ delayed()->restore(I5_savedSP, G0, SP);
duke@435:
duke@435:   return entry;
duke@435: } // end of JVMTI ForceEarlyReturn support
duke@435:
duke@435:
duke@435: //------------------------------------------------------------------------------------------------------------------------
duke@435: // Helper for vtos entry point generation
duke@435:
duke@435: void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
duke@435:   assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
duke@435:   Label L;
duke@435:   aep = __ pc(); __ push_ptr(); __ ba(false, L); __ delayed()->nop();
duke@435:   fep = __ pc(); __ push_f();   __ ba(false, L); __ delayed()->nop();
duke@435:   dep = __ pc(); __ push_d();   __ ba(false, L); __ delayed()->nop();
duke@435:   lep = __ pc(); __ push_l();   __ ba(false, L); __ delayed()->nop();
duke@435:   iep = __ pc(); __ push_i();
duke@435:   bep = cep = sep = iep;         // there aren't any
duke@435:   vep = __ pc(); __ bind(L);     // fall through
duke@435:   generate_and_dispatch(t);
duke@435: }
duke@435:
duke@435: // --------------------------------------------------------------------------------
duke@435:
duke@435:
duke@435: InterpreterGenerator::InterpreterGenerator(StubQueue* code)
duke@435:  : TemplateInterpreterGenerator(code) {
duke@435:    generate_all(); // down here so it can be "virtual"
duke@435: }
duke@435:
duke@435: // --------------------------------------------------------------------------------
duke@435:
duke@435: // Non-product code
duke@435: #ifndef PRODUCT
duke@435: address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
duke@435:   address entry = __ pc();
duke@435:
duke@435:   __ push(state);
duke@435:   __ mov(O7, Lscratch); // protect return address within interpreter
duke@435:
duke@435:   // Pass a 0 (not used in sparc) and the top of stack to the bytecode tracer
duke@435:   __ mov( Otos_l2, G3_scratch );
duke@435:   __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
duke@435:   __ mov(Lscratch, O7); // restore return address
duke@435:   __ pop(state);
duke@435:   __ retl();
duke@435:   __ delayed()->nop();
duke@435:
duke@435:   return entry;
duke@435: }
duke@435:
duke@435:
duke@435: // helpers for generate_and_dispatch
duke@435:
duke@435: void TemplateInterpreterGenerator::count_bytecode() {
duke@435:   Address c(G3_scratch, (address)&BytecodeCounter::_counter_value);
duke@435:   __ load_contents(c, G4_scratch);
duke@435:   __ inc(G4_scratch);
duke@435:   __ st(G4_scratch, c);
duke@435: }
duke@435:
duke@435:
duke@435: void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
duke@435:   Address bucket( G3_scratch, (address) &BytecodeHistogram::_counters[t->bytecode()] );
duke@435:   __ load_contents(bucket, G4_scratch);
duke@435:   __ inc(G4_scratch);
duke@435:   __ st(G4_scratch, bucket);
duke@435: }
duke@435:
duke@435:
duke@435: void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
duke@435:   address index_addr = (address)&BytecodePairHistogram::_index;
duke@435:   Address index(G3_scratch, index_addr);
duke@435:
duke@435:   address counters_addr = (address)&BytecodePairHistogram::_counters;
duke@435:   Address counters(G3_scratch, counters_addr);
duke@435:
duke@435:   // get index, shift out old bytecode, bring in new bytecode, and store it
duke@435:   // _index = (_index >> log2_number_of_codes) |
duke@435:   //          (bytecode << log2_number_of_codes);
duke@435:
duke@435:
duke@435:   __ load_contents( index, G4_scratch );
duke@435:   __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
duke@435:   __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch );
duke@435:   __ or3( G3_scratch, G4_scratch, G4_scratch );
duke@435:   __ store_contents( G4_scratch, index );
duke@435:
duke@435:   // bump bucket contents
duke@435:   // _counters[_index] ++;
duke@435:
duke@435:   __ load_address( counters );                          // loads into G3_scratch
duke@435:   __ sll( G4_scratch, LogBytesPerWord, G4_scratch );    // Index is word address
duke@435:   __ add (G3_scratch, G4_scratch, G3_scratch);          // Add in index
duke@435:   __ ld (G3_scratch, 0, G4_scratch);
duke@435:   __ inc (G4_scratch);
duke@435:   __ st (G4_scratch, 0, G3_scratch);
duke@435: }
duke@435:
duke@435:
duke@435: void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
duke@435:   // Call a little run-time stub to avoid blow-up for each bytecode.
duke@435:   // The run-time stub saves the right registers, depending on
duke@435:   // the tosca in-state for the given template.
duke@435:   address entry = Interpreter::trace_code(t->tos_in());
duke@435:   guarantee(entry != NULL, "entry must have been generated");
duke@435:   __ call(entry, relocInfo::none);
duke@435:   __ delayed()->nop();
duke@435: }
duke@435:
duke@435:
duke@435: void TemplateInterpreterGenerator::stop_interpreter_at() {
duke@435:   Address counter(G3_scratch , (address)&BytecodeCounter::_counter_value);
duke@435:   __ load_contents (counter, G3_scratch );
duke@435:   Address stop_at(G4_scratch, (address)&StopInterpreterAt);
duke@435:   __ load_ptr_contents(stop_at, G4_scratch);
duke@435:   __ cmp(G3_scratch, G4_scratch);
duke@435:   __ breakpoint_trap(Assembler::equal);
duke@435: }
duke@435: #endif // not PRODUCT
duke@435: #endif // !CC_INTERP