src/share/vm/opto/generateOptoStub.cpp

Fri, 15 Nov 2013 11:05:32 -0800

author
goetz
date
Fri, 15 Nov 2013 11:05:32 -0800
changeset 6479
2113136690bc
parent 6472
2b8e28fdf503
child 6503
a9becfeecd1b
permissions
-rw-r--r--

8024921: PPC64 (part 113): Extend Load and Store nodes to know about memory ordering
Summary: Add a field to C2 LoadNode and StoreNode classes which indicates whether the load/store should do an acquire/release on platforms which support it.
Reviewed-by: kvn

duke@435 1 /*
mikael@4153 2 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "opto/addnode.hpp"
stefank@2314 27 #include "opto/callnode.hpp"
stefank@2314 28 #include "opto/cfgnode.hpp"
stefank@2314 29 #include "opto/compile.hpp"
stefank@2314 30 #include "opto/connode.hpp"
stefank@2314 31 #include "opto/locknode.hpp"
stefank@2314 32 #include "opto/memnode.hpp"
stefank@2314 33 #include "opto/mulnode.hpp"
stefank@2314 34 #include "opto/node.hpp"
stefank@2314 35 #include "opto/parse.hpp"
stefank@2314 36 #include "opto/phaseX.hpp"
stefank@2314 37 #include "opto/rootnode.hpp"
stefank@2314 38 #include "opto/runtime.hpp"
stefank@2314 39 #include "opto/type.hpp"
duke@435 40
duke@435 41 //--------------------gen_stub-------------------------------
// Build the Ideal graph for a VM runtime stub: publish the thread anchor
// (last_Java_sp, plus last_Java_pc/flags on platforms that need them),
// call the C routine 'C_function' with a C-adjusted signature, clear the
// anchor again, check for a pending exception, and finish with a return
// or tail-call shape selected by 'is_fancy_jump'.
//
//   C_function    - entry point of the C runtime routine to call
//   name          - stub name recorded on the CallRuntimeNode
//   is_fancy_jump - 0: normal return to the caller
//                   1: tail-call to the address the C routine returned,
//                      leaving the old return address alone
//                   2: pop the return address and tail-jump (rethrow style)
//   pass_tls      - the C routine returns its oop result via TLS
//                   (JavaThread::vm_result) instead of in a register
//   return_pc     - also pass the caller's PC as an extra trailing argument
duke@435 42 void GraphKit::gen_stub(address C_function,
duke@435 43 const char *name,
duke@435 44 int is_fancy_jump,
duke@435 45 bool pass_tls,
duke@435 46 bool return_pc) {
duke@435 47 ResourceMark rm;
duke@435 48
duke@435 49 const TypeTuple *jdomain = C->tf()->domain();
duke@435 50 const TypeTuple *jrange = C->tf()->range();
duke@435 51
duke@435 52 // The procedure start
kvn@4115 53 StartNode* start = new (C) StartNode(root(), jdomain);
duke@435 54 _gvn.set_type_bottom(start);
duke@435 55
duke@435 56 // Make a map, with JVM state
duke@435 57 uint parm_cnt = jdomain->cnt();
duke@435 58 uint max_map = MAX2(2*parm_cnt+1, jrange->cnt());
duke@435 59 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
duke@435 60 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
duke@435 61 JVMState* jvms = new (C) JVMState(0);
duke@435 62 jvms->set_bci(InvocationEntryBci);
duke@435 63 jvms->set_monoff(max_map);
kvn@5626 64 jvms->set_scloff(max_map);
duke@435 65 jvms->set_endoff(max_map);
duke@435 66 {
kvn@4115 67 SafePointNode *map = new (C) SafePointNode( max_map, jvms );
duke@435 68 jvms->set_map(map);
duke@435 69 set_jvms(jvms);
duke@435 70 assert(map == this->map(), "kit.map is set");
duke@435 71 }
duke@435 72
duke@435 73 // Make up the parameters
duke@435 74 uint i;
duke@435 75 for( i = 0; i < parm_cnt; i++ )
kvn@4115 76 map()->init_req(i, _gvn.transform(new (C) ParmNode(start, i)));
duke@435 77 for( ; i<map()->req(); i++ )
duke@435 78 map()->init_req(i, top()); // For nicer debugging
duke@435 79
duke@435 80 // GraphKit requires memory to be a MergeMemNode:
duke@435 81 set_all_memory(map()->memory());
duke@435 82
duke@435 83 // Get base of thread-local storage area
kvn@4115 84 Node* thread = _gvn.transform( new (C) ThreadLocalNode() );
duke@435 85
duke@435 86 const int NoAlias = Compile::AliasIdxBot;
duke@435 87
duke@435 88 Node* adr_last_Java_pc = basic_plus_adr(top(),
duke@435 89 thread,
duke@435 90 in_bytes(JavaThread::frame_anchor_offset()) +
duke@435 91 in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
morris@4535 92 #if defined(SPARC)
duke@435 93 Node* adr_flags = basic_plus_adr(top(),
duke@435 94 thread,
duke@435 95 in_bytes(JavaThread::frame_anchor_offset()) +
duke@435 96 in_bytes(JavaFrameAnchor::flags_offset()));
morris@4535 97 #endif /* defined(SPARC) */
duke@435 98
duke@435 99
duke@435 100 // Drop in the last_Java_sp. last_Java_fp is not touched.
duke@435 101 // Always do this after the other "last_Java_frame" fields are set since
duke@435 102 // as soon as last_Java_sp != NULL the has_last_Java_frame is true and
duke@435 103 // users will look at the other fields.
duke@435 104 //
duke@435 105 Node *adr_sp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_sp_offset()));
duke@435 106 Node *last_sp = basic_plus_adr(top(), frameptr(), (intptr_t) STACK_BIAS);
// NOTE(review): all raw TLS stores/loads in this stub use MemNode::unordered;
// any acquire/release semantics needed are presumably supplied elsewhere by
// the platform (cf. the 8024921 memory-ordering change) — confirm per port.
goetz@6479 107 store_to_memory(NULL, adr_sp, last_sp, T_ADDRESS, NoAlias, MemNode::unordered);
duke@435 108
duke@435 109 // Set _thread_in_native
duke@435 110 // The order of stores into TLS is critical! Setting _thread_in_native MUST
duke@435 111 // be last, because a GC is allowed at any time after setting it and the GC
duke@435 112 // will require last_Java_pc and last_Java_sp.
duke@435 113
duke@435 114 //-----------------------------
duke@435 115 // Compute signature for C call. Varies from the Java signature!
duke@435 116 const Type **fields = TypeTuple::fields(2*parm_cnt+2);
duke@435 117 uint cnt = TypeFunc::Parms;
duke@435 118 // The C routines gets the base of thread-local storage passed in as an
duke@435 119 // extra argument. Not all calls need it, but its cheap to add here.
goetz@6468 120 for (uint pcnt = cnt; pcnt < parm_cnt; pcnt++, cnt++) {
goetz@6468 121 // Convert ints to longs if required.
goetz@6468 122 if (CCallingConventionRequiresIntsAsLongs && jdomain->field_at(pcnt)->isa_int()) {
goetz@6468 123 fields[cnt++] = TypeLong::LONG;
goetz@6468 124 fields[cnt] = Type::HALF; // must add an additional half for a long
goetz@6468 125 } else {
goetz@6468 126 fields[cnt] = jdomain->field_at(pcnt);
goetz@6468 127 }
goetz@6468 128 }
goetz@6468 129
duke@435 130 fields[cnt++] = TypeRawPtr::BOTTOM; // Thread-local storage
duke@435 131 // Also pass in the caller's PC, if asked for.
duke@435 132 if( return_pc )
duke@435 133 fields[cnt++] = TypeRawPtr::BOTTOM; // Return PC
duke@435 134
duke@435 135 const TypeTuple* domain = TypeTuple::make(cnt,fields);
duke@435 136 // The C routine we are about to call cannot return an oop; it can block on
duke@435 137 // exit and a GC will trash the oop while it sits in C-land. Instead, we
duke@435 138 // return the oop through TLS for runtime calls.
duke@435 139 // Also, C routines returning integer subword values leave the high
duke@435 140 // order bits dirty; these must be cleaned up by explicit sign extension.
duke@435 141 const Type* retval = (jrange->cnt() == TypeFunc::Parms) ? Type::TOP : jrange->field_at(TypeFunc::Parms);
duke@435 142 // Make a private copy of jrange->fields();
duke@435 143 const Type **rfields = TypeTuple::fields(jrange->cnt() - TypeFunc::Parms);
duke@435 144 // Fixup oop returns
duke@435 145 int retval_ptr = retval->isa_oop_ptr();
duke@435 146 if( retval_ptr ) {
duke@435 147 assert( pass_tls, "Oop must be returned thru TLS" );
duke@435 148 // Fancy-jumps return address; others return void
duke@435 149 rfields[TypeFunc::Parms] = is_fancy_jump ? TypeRawPtr::BOTTOM : Type::TOP;
duke@435 150
duke@435 151 } else if( retval->isa_int() ) { // Returning any integer subtype?
duke@435 152 // "Fatten" byte, char & short return types to 'int' to show that
duke@435 153 // the native C code can return values with junk high order bits.
duke@435 154 // We'll sign-extend it below later.
duke@435 155 rfields[TypeFunc::Parms] = TypeInt::INT; // It's "dirty" and needs sign-ext
duke@435 156
duke@435 157 } else if( jrange->cnt() >= TypeFunc::Parms+1 ) { // Else copy other types
duke@435 158 rfields[TypeFunc::Parms] = jrange->field_at(TypeFunc::Parms);
duke@435 159 if( jrange->cnt() == TypeFunc::Parms+2 )
duke@435 160 rfields[TypeFunc::Parms+1] = jrange->field_at(TypeFunc::Parms+1);
duke@435 161 }
duke@435 162 const TypeTuple* range = TypeTuple::make(jrange->cnt(),rfields);
duke@435 163
duke@435 164 // Final C signature
duke@435 165 const TypeFunc *c_sig = TypeFunc::make(domain,range);
duke@435 166
duke@435 167 //-----------------------------
duke@435 168 // Make the call node
kvn@4115 169 CallRuntimeNode *call = new (C)
duke@435 170 CallRuntimeNode(c_sig, C_function, name, TypePtr::BOTTOM);
duke@435 171 //-----------------------------
duke@435 172
duke@435 173 // Fix-up the debug info for the call
duke@435 174 call->set_jvms( new (C) JVMState(0) );
duke@435 175 call->jvms()->set_bci(0);
duke@435 176 call->jvms()->set_offsets(cnt);
duke@435 177
duke@435 178 // Set fixed predefined input arguments
// 'cnt' is reused from here on as the running input index on the call node;
// it must track the argument layout computed for 'domain' above (including
// the extra HALF slots for int-as-long arguments).
duke@435 179 cnt = 0;
goetz@6468 180 for (i = 0; i < TypeFunc::Parms; i++)
goetz@6468 181 call->init_req(cnt++, map()->in(i));
duke@435 182 // A little too aggressive on the parm copy; return address is not an input
duke@435 183 call->set_req(TypeFunc::ReturnAdr, top());
goetz@6468 184 for (; i < parm_cnt; i++) { // Regular input arguments
goetz@6468 185 // Convert ints to longs if required.
goetz@6468 186 if (CCallingConventionRequiresIntsAsLongs && jdomain->field_at(i)->isa_int()) {
goetz@6468 187 Node* int_as_long = _gvn.transform(new (C) ConvI2LNode(map()->in(i)));
goetz@6468 188 call->init_req(cnt++, int_as_long); // long
goetz@6468 189 call->init_req(cnt++, top()); // half
goetz@6468 190 } else {
goetz@6468 191 call->init_req(cnt++, map()->in(i));
goetz@6468 192 }
goetz@6468 193 }
duke@435 194
duke@435 195 call->init_req( cnt++, thread );
duke@435 196 if( return_pc ) // Return PC, if asked for
duke@435 197 call->init_req( cnt++, returnadr() );
duke@435 198 _gvn.transform_no_reclaim(call);
duke@435 199
duke@435 200
duke@435 201 //-----------------------------
duke@435 202 // Now set up the return results
kvn@4115 203 set_control( _gvn.transform( new (C) ProjNode(call,TypeFunc::Control)) );
kvn@4115 204 set_i_o( _gvn.transform( new (C) ProjNode(call,TypeFunc::I_O )) );
duke@435 205 set_all_memory_call(call);
duke@435 206 if (range->cnt() > TypeFunc::Parms) {
kvn@4115 207 Node* retnode = _gvn.transform( new (C) ProjNode(call,TypeFunc::Parms) );
duke@435 208 // C-land is allowed to return sub-word values. Convert to integer type.
duke@435 209 assert( retval != Type::TOP, "" );
duke@435 210 if (retval == TypeInt::BOOL) {
kvn@4115 211 retnode = _gvn.transform( new (C) AndINode(retnode, intcon(0xFF)) );
duke@435 212 } else if (retval == TypeInt::CHAR) {
kvn@4115 213 retnode = _gvn.transform( new (C) AndINode(retnode, intcon(0xFFFF)) );
duke@435 214 } else if (retval == TypeInt::BYTE) {
kvn@4115 215 retnode = _gvn.transform( new (C) LShiftINode(retnode, intcon(24)) );
kvn@4115 216 retnode = _gvn.transform( new (C) RShiftINode(retnode, intcon(24)) );
duke@435 217 } else if (retval == TypeInt::SHORT) {
kvn@4115 218 retnode = _gvn.transform( new (C) LShiftINode(retnode, intcon(16)) );
kvn@4115 219 retnode = _gvn.transform( new (C) RShiftINode(retnode, intcon(16)) );
duke@435 220 }
duke@435 221 map()->set_req( TypeFunc::Parms, retnode );
duke@435 222 }
duke@435 223
duke@435 224 //-----------------------------
duke@435 225
duke@435 226 // Clear last_Java_sp
goetz@6479 227 store_to_memory(NULL, adr_sp, null(), T_ADDRESS, NoAlias, MemNode::unordered);
duke@435 228 // Clear last_Java_pc and (optionally)_flags
goetz@6479 229 store_to_memory(NULL, adr_last_Java_pc, null(), T_ADDRESS, NoAlias, MemNode::unordered);
morris@4535 230 #if defined(SPARC)
goetz@6479 231 store_to_memory(NULL, adr_flags, intcon(0), T_INT, NoAlias, MemNode::unordered);
morris@4535 232 #endif /* defined(SPARC) */
goetz@6453 233 #if (defined(IA64) && !defined(AIX))
duke@435 234 Node* adr_last_Java_fp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_fp_offset()));
goetz@6479 235 store_to_memory(NULL, adr_last_Java_fp, null(), T_ADDRESS, NoAlias, MemNode::unordered);
duke@435 236 #endif
duke@435 237
duke@435 238 // For is-fancy-jump, the C-return value is also the branch target
duke@435 239 Node* target = map()->in(TypeFunc::Parms);
duke@435 240 // Runtime call returning oop in TLS? Fetch it out
duke@435 241 if( pass_tls ) {
duke@435 242 Node* adr = basic_plus_adr(top(), thread, in_bytes(JavaThread::vm_result_offset()));
goetz@6479 243 Node* vm_result = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, MemNode::unordered);
duke@435 244 map()->set_req(TypeFunc::Parms, vm_result); // vm_result passed as result
duke@435 245 // clear thread-local-storage(tls)
goetz@6479 246 store_to_memory(NULL, adr, null(), T_ADDRESS, NoAlias, MemNode::unordered);
duke@435 247 }
duke@435 248
duke@435 249 //-----------------------------
duke@435 250 // check exception
duke@435 251 Node* adr = basic_plus_adr(top(), thread, in_bytes(Thread::pending_exception_offset()));
goetz@6479 252 Node* pending = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, MemNode::unordered);
duke@435 253
duke@435 254 Node* exit_memory = reset_memory();
duke@435 255
kvn@4115 256 Node* cmp = _gvn.transform( new (C) CmpPNode(pending, null()) );
kvn@4115 257 Node* bo = _gvn.transform( new (C) BoolNode(cmp, BoolTest::ne) );
duke@435 258 IfNode *iff = create_and_map_if(control(), bo, PROB_MIN, COUNT_UNKNOWN);
duke@435 259
kvn@4115 260 Node* if_null = _gvn.transform( new (C) IfFalseNode(iff) );
kvn@4115 261 Node* if_not_null = _gvn.transform( new (C) IfTrueNode(iff) );
duke@435 262
// Pending exception: tail-call into the shared forward-exception stub,
// passing along the current frame pointer and return address.
duke@435 263 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
duke@435 264 Node *exc_target = makecon(TypeRawPtr::make( StubRoutines::forward_exception_entry() ));
kvn@4115 265 Node *to_exc = new (C) TailCallNode(if_not_null,
kvn@4115 266 i_o(),
kvn@4115 267 exit_memory,
kvn@4115 268 frameptr(),
kvn@4115 269 returnadr(),
kvn@4115 270 exc_target, null());
duke@435 271 root()->add_req(_gvn.transform(to_exc)); // bind to root to keep live
duke@435 272 C->init_start(start);
duke@435 273
duke@435 274 //-----------------------------
duke@435 275 // If this is a normal subroutine return, issue the return and be done.
duke@435 276 Node *ret;
duke@435 277 switch( is_fancy_jump ) {
duke@435 278 case 0: // Make a return instruction
duke@435 279 // Return to caller, free any space for return address
kvn@4115 280 ret = new (C) ReturnNode(TypeFunc::Parms, if_null,
kvn@4115 281 i_o(),
kvn@4115 282 exit_memory,
kvn@4115 283 frameptr(),
kvn@4115 284 returnadr());
duke@435 285 if (C->tf()->range()->cnt() > TypeFunc::Parms)
duke@435 286 ret->add_req( map()->in(TypeFunc::Parms) );
duke@435 287 break;
duke@435 288 case 1: // This is a fancy tail-call jump. Jump to computed address.
duke@435 289 // Jump to new callee; leave old return address alone.
kvn@4115 290 ret = new (C) TailCallNode(if_null,
kvn@4115 291 i_o(),
kvn@4115 292 exit_memory,
kvn@4115 293 frameptr(),
kvn@4115 294 returnadr(),
kvn@4115 295 target, map()->in(TypeFunc::Parms));
duke@435 296 break;
duke@435 297 case 2: // Pop return address & jump
duke@435 298 // Throw away old return address; jump to new computed address
duke@435 299 //assert(C_function == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C), "fancy_jump==2 only for rethrow");
kvn@4115 300 ret = new (C) TailJumpNode(if_null,
kvn@4115 301 i_o(),
kvn@4115 302 exit_memory,
kvn@4115 303 frameptr(),
kvn@4115 304 target, map()->in(TypeFunc::Parms));
duke@435 305 break;
duke@435 306 default:
duke@435 307 ShouldNotReachHere();
duke@435 308 }
duke@435 309 root()->add_req(_gvn.transform(ret));
duke@435 310 }

mercurial