/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLNODE_HPP
#define SHARE_VM_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class BoxLockNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
  static const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site. Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode. Restores registers and
// unwinds stack. Rethrow happens in the caller's method.
class RethrowNode : public Node {
public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit. (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
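// For example, if a() inlines b() and b() inlines c(), then a safepoint
// inside the inlined body of c() carries a three-link chain: the youngest
// JVMState (depth 3, method c) points via caller() to the state of b()
// (depth 2), which in turn points to the root state of a() (depth 1).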
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; // Reexecute state

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
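  //
  // Illustrative example (hypothetical numbers): for a youngest frame with
  // locoff() == 5, three locals, a max stack of four slots, sp() == 2, one
  // monitor, and no scalarized fields:
  //   stkoff() == 5 + 3 == 8       (locals occupy inputs [5,8))
  //   argoff() == 8 + 2 == 10      (live stack / outgoing args are [8,10))
  //   monoff() == 8 + 4 == 12      (the box/obj pair occupies [12,14))
  //   scloff() == endoff() == 14   (empty scalar-replaced-object section)
  // and debug_size() == 3 + 2 + 2 + 0 == 7.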
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }
  uint              monoff() const { return _monoff; }
  uint              scloff() const { return _scloff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int            loc_size() const { return stkoff() - locoff(); }
  int            stk_size() const { return monoff() - stkoff(); }
  int            mon_size() const { return scloff() - monoff(); }
  int            scl_size() const { return endoff() - scloff(); }

  bool        is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool        is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool        is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool        is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint                      sp() const { return _sp; }
  int                      bci() const { return _bci; }
  bool        should_reexecute() const { return _reexecute == Reexecute_True; }
  bool  is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool              has_method() const { return _method != NULL; }
  ciMethod*             method() const { assert(has_method(), ""); return _method; }
  JVMState*             caller() const { return _caller; }
  SafePointNode*           map() const { return _map; }
  uint                   depth() const { return _depth; }
  uint             debug_start() const; // returns locoff of root caller
  uint               debug_end() const; // returns endoff of self
  uint              debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint             debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                                  || (caller() && caller()->is_monitor_use(off)); }
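  //
  // Example (using the functions above): with logMonitorEdges == 1, the
  // box/obj pair of monitor 0 lives at inputs monoff()+0 and monoff()+1,
  // i.e. monitor_box_offset(0) and monitor_obj_offset(0); a second monitor
  // would occupy monoff()+2 and monoff()+3. is_monitor_box() simply tests
  // the low bit of (off - monoff()).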

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_scloff(uint off) { _scloff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void              set_map(SafePointNode *map) { _map = map; }
  void              set_sp(uint sp) { _sp = sp; }
  // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void              set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void      set_map_deep(SafePointNode *map);// reset map for all callers

#ifndef PRODUCT
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;  // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;     // Pointer to list of JVM State objects
  const TypePtr*  _adr_type; // What type of memory does this node produce?

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.
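  // For example, a node that only touches raw memory can advertise
  // TypeRawPtr::BOTTOM here (as the lock nodes below do), so that loads
  // from unrelated alias classes are free to bypass it.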

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s; // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void set_local(JVMState* jvms, uint idx, Node *c);

  void set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0) grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o    ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here. (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode* next_exception() const;
  void           set_next_exception(SafePointNode* n);
  bool           has_exceptions() const { return next_exception() != NULL; }

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type    *Value( PhaseTransform *phase ) const;
  virtual const Type    *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node          *Identity( PhaseTransform *phase );
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  static bool needs_polling_address_input();

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
                     // It is relative to the last (youngest) jvms->_scloff.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const ; // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != NULL, "missed JVMS");
    return jvms->scloff() + _first_index;
  }
  uint n_fields() const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s". Produce and return
  // a SafePointScalarObjectNode that corresponds appropriately to "this"
  // in "new_call". "sosn_map" is a map, specific to the translation of
  // "s" to "new_call", mapping old SafePointScalarObjectNodes to new ones,
  // to avoid multiple copies of a shared scalarized object.
  SafePointScalarObjectNode* clone(Dict* sosn_map) const;
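  // Sketch of the intended use: during the translation of "s" each
  // SafePointScalarObjectNode is cloned at most once; the first clone is
  // recorded in sosn_map, and later lookups return that same clone, so a
  // scalarized object shared by several debug edges stays shared.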

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


// Simple container for the outgoing projections of a call. Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;
public:
  const TypeFunc *_tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()          const { return _tf; }
  const address   entry_point() const { return _entry_point; }
  const float     cnt()         const { return _cnt; }
  CallGenerator*  generator()   const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type *bottom_type() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase ) { return this; }
  virtual uint  cmp( const Node &n ) const;
  virtual uint  size_of() const = 0;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint  ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint? Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool  guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion, so when cloning
  // the node the JVMState must be cloned.
  virtual void  clone_jvms(Compile* C) { } // default is not to clone

  // Returns true if the call may modify memory overlapping the given type
  virtual bool  may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool          has_non_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call, or the result projection if
  // there are several CheckCastPPs, or NULL if there is none.
  Node         *result_cast();
  // Does this node return a pointer?
  bool          returns_pointer() const {
    const TypeTuple *r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else. Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj);
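  // A minimal usage sketch (assuming an already-built CallNode* call):
  //   CallProjections projs;
  //   call->extract_projections(&projs, true /*separate_io_proj*/);
  //   // projs.fallthrough_catchproj, projs.catchall_catchproj, etc. now
  //   // name the users that have to be rewired to the replacement.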

  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention. (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool      _optimized_virtual;
  bool      _method_handle_invoke;
  ciMethod* _method;            // Method being direct called
public:
  const int _bci;               // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method), _bci(bci),
      _optimized_virtual(false),
      _method_handle_invoke(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int Opcode() const;
  ciMethod* method() const               { return _method; }
  void  set_method(ciMethod *m)          { _method = m; }
  void  set_optimized_virtual(bool f)    { _optimized_virtual = f; }
  bool  is_optimized_virtual() const     { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f) { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const  { return _method_handle_invoke; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  const char *_name; // Runtime wrapper name

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != NULL) && method()->is_boxing_method();
  }
  // Late inlining modifies the JVMState, so we need to clone it
  // when the call node is cloned (because it is a macro node).
  virtual void clone_jvms(Compile* C) {
    if ((jvms() != NULL) && is_boxing_method()) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf , address addr, ciMethod* method, int vtable_index, int bci )
    : CallJavaNode(tf,addr,method,bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name; // Printable name, if _method is NULL
  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int Opcode() const;
  virtual bool guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// A CallLeafNode that either does not use floating point at all, or uses
// it only in the same manner as the generated code around it.
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int Opcode() const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
// AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
// get expanded into a code sequence containing a call. Unlike other CallNodes,
// they have 2 memory projections and 2 i_o projections (which are distinguished by
// the _is_io_use flag in the projection.) This is needed when expanding the node in
// order to differentiate the uses of the projection on the normal control path from
// those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress  = TypeFunc::Parms, // the newly-allocated raw address
    // Inputs:
    AllocSize   = TypeFunc::Parms, // size (in bytes) of the new object
    KlassNode,                     // type (maybe dynamic) of the obj.
    InitialTest,                   // slow-path test (may be constant)
    ALength,                       // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = t; // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }
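
  // The caller chooses the static type of the length input through "t":
  // typically an instance allocation passes Type::TOP (no length, matching
  // the "TOP if none" convention above), while an array allocation passes
  // an integer type for the incoming element count.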

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note: This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note: AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
  }
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary.
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0, // Normal lock
    NonEscObj,   // Lock is used for a non-escaping object
    Coarsened,   // Lock was coarsened
    Nested       // Nested lock
  } _kind;
#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node *obj_node() const      { return in(TypeFunc::Parms + 0); }
  Node *box_node() const      { return in(TypeFunc::Parms + 1); }
  Node *fastlock_node() const { return in(TypeFunc::Parms + 2); }
  void  set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested;    set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call. This node takes 3 "parameters":
//    0 - object to lock
//    1 - a BoxLockNode
//    2 - a FastLockNode
//
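// These three parameters map onto the AbstractLockNode accessors above:
// obj_node() is in(TypeFunc::Parms + 0), box_node() is in(Parms + 1), and
// fastlock_node() is in(Parms + 2); lock_type() below spells out their types.
//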
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;   // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;        // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

    return TypeFunc::make(domain,range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  bool is_nested_lock_region(); // Is this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool guaranteed_safepoint() { return false; }
};

#endif // SHARE_VM_OPTO_CALLNODE_HPP