/*
 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/locknode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/node.hpp"
#include "opto/parse.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"

//--------------------gen_stub-------------------------------
// Build the ideal graph for a stub wrapping the C function C_function:
// set up the C calling convention, record last_Java_sp so the stack stays
// walkable, emit the runtime call, then check for a pending exception and
// exit via a return, tail-call or tail-jump as selected by is_fancy_jump.
void GraphKit::gen_stub(address C_function,
                        const char *name,
                        int is_fancy_jump,
                        bool pass_tls,
                        bool return_pc) {
  ResourceMark rm;

  const TypeTuple *jdomain = C->tf()->domain();
  const TypeTuple *jrange  = C->tf()->range();

  // The procedure start
  StartNode* start = new (C) StartNode(root(), jdomain);
  _gvn.set_type_bottom(start);

  // Make a map, with JVM state
  uint parm_cnt = jdomain->cnt();
  uint max_map = MAX2(2*parm_cnt+1, jrange->cnt());
  // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
  assert(SynchronizationEntryBCI == InvocationEntryBci, "");
  JVMState* jvms = new (C) JVMState(0);
  jvms->set_bci(InvocationEntryBci);
  jvms->set_monoff(max_map);
  jvms->set_scloff(max_map);
  jvms->set_endoff(max_map);
  {
    SafePointNode *map = new (C) SafePointNode( max_map, jvms );
    jvms->set_map(map);
    set_jvms(jvms);
    assert(map == this->map(), "kit.map is set");
  }

  // Make up the parameters
  uint i;
  for( i = 0; i < parm_cnt; i++ )
    map()->init_req(i, _gvn.transform(new (C) ParmNode(start, i)));
  for( ; i < map()->req(); i++ )
    map()->init_req(i, top());      // For nicer debugging

  // GraphKit requires memory to be a MergeMemNode:
  set_all_memory(map()->memory());

  // Get base of thread-local storage area
  Node* thread = _gvn.transform( new (C) ThreadLocalNode() );

  const int NoAlias = Compile::AliasIdxBot;

  Node* adr_last_Java_pc = basic_plus_adr(top(),
                                          thread,
                                          in_bytes(JavaThread::frame_anchor_offset()) +
                                          in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
#if defined(SPARC)
  Node* adr_flags = basic_plus_adr(top(),
                                   thread,
                                   in_bytes(JavaThread::frame_anchor_offset()) +
                                   in_bytes(JavaFrameAnchor::flags_offset()));
#endif /* defined(SPARC) */


  // Drop in the last_Java_sp.  last_Java_fp is not touched.
  // Always do this after the other "last_Java_frame" fields are set, since
  // as soon as last_Java_sp != NULL, has_last_Java_frame is true and
  // users will look at the other fields.
  //
  Node *adr_sp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_sp_offset()));
  Node *last_sp = basic_plus_adr(top(), frameptr(), (intptr_t) STACK_BIAS);
  store_to_memory(NULL, adr_sp, last_sp, T_ADDRESS, NoAlias);

  // Set _thread_in_native
  // The order of stores into TLS is critical!  Setting _thread_in_native MUST
  // be last, because a GC is allowed at any time after setting it and the GC
  // will require last_Java_pc and last_Java_sp.
  Node* adr_state = basic_plus_adr(top(), thread, in_bytes(JavaThread::thread_state_offset()));

  //-----------------------------
  // Compute signature for C call.  Varies from the Java signature!
  const Type **fields = TypeTuple::fields(2*parm_cnt+2);
  uint cnt = TypeFunc::Parms;
  // The C routine gets the base of thread-local storage passed in as an
  // extra argument.  Not all calls need it, but it's cheap to add here.
  for (uint pcnt = cnt; pcnt < parm_cnt; pcnt++, cnt++) {
    // Convert ints to longs if required.
    if (CCallingConventionRequiresIntsAsLongs && jdomain->field_at(pcnt)->isa_int()) {
      fields[cnt++] = TypeLong::LONG;
      fields[cnt]   = Type::HALF; // must add an additional half for a long
    } else {
      fields[cnt] = jdomain->field_at(pcnt);
    }
  }

  fields[cnt++] = TypeRawPtr::BOTTOM; // Thread-local storage
  // Also pass in the caller's PC, if asked for.
  if( return_pc )
    fields[cnt++] = TypeRawPtr::BOTTOM; // Return PC

  const TypeTuple* domain = TypeTuple::make(cnt,fields);
  // The C routine we are about to call cannot return an oop; it can block on
  // exit and a GC will trash the oop while it sits in C-land.  Instead, we
  // return the oop through TLS for runtime calls.
  // Also, C routines returning integer subword values leave the high
  // order bits dirty; these must be cleaned up by explicit sign extension.
  const Type* retval = (jrange->cnt() == TypeFunc::Parms) ?
    Type::TOP : jrange->field_at(TypeFunc::Parms);
  // Make a private copy of jrange->fields();
  const Type **rfields = TypeTuple::fields(jrange->cnt() - TypeFunc::Parms);
  // Fixup oop returns
  int retval_ptr = retval->isa_oop_ptr();
  if( retval_ptr ) {
    assert( pass_tls, "Oop must be returned thru TLS" );
    // Fancy-jumps return address; others return void
    rfields[TypeFunc::Parms] = is_fancy_jump ? TypeRawPtr::BOTTOM : Type::TOP;

  } else if( retval->isa_int() ) { // Returning any integer subtype?
    // "Fatten" byte, char & short return types to 'int' to show that
    // the native C code can return values with junk high order bits.
    // We'll sign-extend it below later.
    rfields[TypeFunc::Parms] = TypeInt::INT; // It's "dirty" and needs sign-ext

  } else if( jrange->cnt() >= TypeFunc::Parms+1 ) { // Else copy other types
    rfields[TypeFunc::Parms] = jrange->field_at(TypeFunc::Parms);
    if( jrange->cnt() == TypeFunc::Parms+2 )
      rfields[TypeFunc::Parms+1] = jrange->field_at(TypeFunc::Parms+1);
  }
  const TypeTuple* range = TypeTuple::make(jrange->cnt(),rfields);

  // Final C signature
  const TypeFunc *c_sig = TypeFunc::make(domain,range);

  //-----------------------------
  // Make the call node
  CallRuntimeNode *call = new (C)
    CallRuntimeNode(c_sig, C_function, name, TypePtr::BOTTOM);
  //-----------------------------

  // Fix-up the debug info for the call
  call->set_jvms( new (C) JVMState(0) );
  call->jvms()->set_bci(0);
  call->jvms()->set_offsets(cnt);

  // Set fixed predefined input arguments
  cnt = 0;
  for (i = 0; i < TypeFunc::Parms; i++)
    call->init_req(cnt++, map()->in(i));
  // A little too aggressive on the parm copy; return address is not an input
  call->set_req(TypeFunc::ReturnAdr, top());
  for (; i < parm_cnt; i++) { // Regular input arguments
    // Convert ints to longs if required.
    if (CCallingConventionRequiresIntsAsLongs && jdomain->field_at(i)->isa_int()) {
      Node* int_as_long = _gvn.transform(new (C) ConvI2LNode(map()->in(i)));
      call->init_req(cnt++, int_as_long); // long
      call->init_req(cnt++, top());       // half
    } else {
      call->init_req(cnt++, map()->in(i));
    }
  }

  call->init_req( cnt++, thread );
  if( return_pc )             // Return PC, if asked for
    call->init_req( cnt++, returnadr() );
  _gvn.transform_no_reclaim(call);


  //-----------------------------
  // Now set up the return results
  set_control( _gvn.transform( new (C) ProjNode(call,TypeFunc::Control)) );
  set_i_o(     _gvn.transform( new (C) ProjNode(call,TypeFunc::I_O    )) );
  set_all_memory_call(call);
  if (range->cnt() > TypeFunc::Parms) {
    Node* retnode = _gvn.transform( new (C) ProjNode(call,TypeFunc::Parms) );
    // C-land is allowed to return sub-word values.  Convert to integer type.
    assert( retval != Type::TOP, "" );
    if (retval == TypeInt::BOOL) {
      retnode = _gvn.transform( new (C) AndINode(retnode, intcon(0xFF)) );
    } else if (retval == TypeInt::CHAR) {
      retnode = _gvn.transform( new (C) AndINode(retnode, intcon(0xFFFF)) );
    } else if (retval == TypeInt::BYTE) {
      retnode = _gvn.transform( new (C) LShiftINode(retnode, intcon(24)) );
      retnode = _gvn.transform( new (C) RShiftINode(retnode, intcon(24)) );
    } else if (retval == TypeInt::SHORT) {
      retnode = _gvn.transform( new (C) LShiftINode(retnode, intcon(16)) );
      retnode = _gvn.transform( new (C) RShiftINode(retnode, intcon(16)) );
    }
    map()->set_req( TypeFunc::Parms, retnode );
  }

  //-----------------------------

  // Clear last_Java_sp
  store_to_memory(NULL, adr_sp, null(), T_ADDRESS, NoAlias);
  // Clear last_Java_pc and (optionally) _flags
  store_to_memory(NULL, adr_last_Java_pc, null(), T_ADDRESS, NoAlias);
#if defined(SPARC)
  store_to_memory(NULL, adr_flags, intcon(0), T_INT, NoAlias);
#endif /* defined(SPARC) */
#if (defined(IA64) && !defined(AIX))
  Node* adr_last_Java_fp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_fp_offset()));
  if( os::is_MP() ) insert_mem_bar(Op_MemBarRelease);
  store_to_memory(NULL, adr_last_Java_fp, null(), T_ADDRESS, NoAlias);
#endif

  // For is-fancy-jump, the C-return value is also the branch target
  Node* target = map()->in(TypeFunc::Parms);
  // Runtime call returning oop in TLS?  Fetch it out
  if( pass_tls ) {
    Node* adr = basic_plus_adr(top(), thread, in_bytes(JavaThread::vm_result_offset()));
    Node* vm_result = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, false);
    map()->set_req(TypeFunc::Parms, vm_result); // vm_result passed as result
    // clear thread-local storage (TLS)
    store_to_memory(NULL, adr, null(), T_ADDRESS, NoAlias);
  }

  //-----------------------------
  // check exception
  Node* adr = basic_plus_adr(top(), thread, in_bytes(Thread::pending_exception_offset()));
  Node* pending = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, false);

  Node* exit_memory = reset_memory();

  Node* cmp = _gvn.transform( new (C) CmpPNode(pending, null()) );
  Node* bo  = _gvn.transform( new (C) BoolNode(cmp, BoolTest::ne) );
  IfNode *iff = create_and_map_if(control(), bo, PROB_MIN, COUNT_UNKNOWN);

  Node* if_null     = _gvn.transform( new (C) IfFalseNode(iff) );
  Node* if_not_null = _gvn.transform( new (C) IfTrueNode(iff)  );

  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
  Node *exc_target = makecon(TypeRawPtr::make( StubRoutines::forward_exception_entry() ));
  Node *to_exc = new (C) TailCallNode(if_not_null,
                                      i_o(),
                                      exit_memory,
                                      frameptr(),
                                      returnadr(),
                                      exc_target, null());
  root()->add_req(_gvn.transform(to_exc));  // bind to root to keep live
  C->init_start(start);

  //-----------------------------
  // If this is a normal subroutine return, issue the return and be done.
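  // is_fancy_jump selects the exit shape: 0 issues an ordinary return to the
  // caller, 1 tail-calls the address computed by the C function (keeping the
  // original return address), and 2 discards the return address and jumps to
  // the computed address (used for rethrow).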
  Node *ret;
  switch( is_fancy_jump ) {
  case 0:                       // Make a return instruction
    // Return to caller, free any space for return address
    ret = new (C) ReturnNode(TypeFunc::Parms, if_null,
                             i_o(),
                             exit_memory,
                             frameptr(),
                             returnadr());
    if (C->tf()->range()->cnt() > TypeFunc::Parms)
      ret->add_req( map()->in(TypeFunc::Parms) );
    break;
  case 1:    // This is a fancy tail-call jump.  Jump to computed address.
    // Jump to new callee; leave old return address alone.
    ret = new (C) TailCallNode(if_null,
                               i_o(),
                               exit_memory,
                               frameptr(),
                               returnadr(),
                               target, map()->in(TypeFunc::Parms));
    break;
  case 2:                       // Pop return address & jump
    // Throw away old return address; jump to new computed address
    //assert(C_function == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C), "fancy_jump==2 only for rethrow");
    ret = new (C) TailJumpNode(if_null,
                               i_o(),
                               exit_memory,
                               frameptr(),
                               target, map()->in(TypeFunc::Parms));
    break;
  default:
    ShouldNotReachHere();
  }
  root()->add_req(_gvn.transform(ret));
}