Fri, 07 Mar 2008 11:09:13 -0800
6667605: (Escape Analysis) inline java constructors when EA is on
Summary: java constructors should be inlined to be able to scalar-replace a new object
Reviewed-by: rasbold
duke@435 | 1 | /* |
duke@435 | 2 | * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | // Portions of code courtesy of Clifford Click |
duke@435 | 26 | |
duke@435 | 27 | // Optimization - Graph Style |
duke@435 | 28 | |
// Forward declarations for the call-node class hierarchy and its support
// classes, so the definitions below may refer to one another by pointer
// or reference without needing full definitions.
class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;
duke@435 | 50 | |
//------------------------------StartNode--------------------------------------
// The method start node.  Incoming parameters and initial control/memory
// state are supplied as projections off this node (see ParmNode).
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;   // Types of the incoming parameters
  // Takes the root node and the method's parameter type tuple.
  // Edge 0 is a self loop (this node produces its own control);
  // edge 1 is the root node.
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_flags(Flag_is_block_start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; };
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Fills in the register/stack assignment for each incoming parameter.
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
duke@435 | 78 | |
//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
  // The parameter type tuple used for all OSR entry points.
  static const TypeTuple *osr_domain();
};
duke@435 | 87 | |
duke@435 | 88 | |
//------------------------------ParmNode---------------------------------------
// Incoming parameters.  A projection off a StartNode; the projection
// constant (_con) selects which parameter or TypeFunc special slot
// (Control, I_O, Memory, ...) this node carries.
class ParmNode : public ProjNode {
  // Printable names for the TypeFunc special slots, used by dump_spec.
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  // Only the control projection participates in the CFG.
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
duke@435 | 104 | |
duke@435 | 105 | |
//------------------------------ReturnNode-------------------------------------
// Return from subroutine node.  Ends the current basic block; takes the
// control, i/o, memory, return-address and frame-pointer state plus (via
// 'edges') any return value edges appended by subclasses.
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req() const;
#endif
};
duke@435 | 123 | |
duke@435 | 124 | |
//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site.  Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode.  Restores registers and
// unwinds stack.  Rethrow happens in the caller's method.
class RethrowNode : public Node {
public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req() const;
#endif
};
duke@435 | 144 | |
duke@435 | 145 | |
//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect.  Extends ReturnNode with two extra
// edges: the jump target address and the method oop ("moop") of the callee.
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};
duke@435 | 159 | |
//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect.  Like TailCallNode, but carries the
// exception oop instead of a method oop, and has no meaningful return
// address (the retadr slot is filled with top()).
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};
duke@435 | 173 | |
//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit. (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
//
// The offsets (_locoff, _stkoff, _monoff, _endoff) index into the input
// edge array of the associated SafePointNode map.
class JVMState : public ResourceObj {
private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size);  // root state; has a null method

  // Access functions for the JVM
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }  // start of stack args = top of expression stack
  uint              monoff() const { return _monoff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int            loc_size() const { return _stkoff - _locoff; }
  int            stk_size() const { return _monoff - _stkoff; }
  int            mon_size() const { return _endoff - _monoff; }

  // Classify a map-input index by which region of the edge mapping it hits.
  bool        is_loc(uint i) const { return i >= _locoff && i < _stkoff; }
  bool        is_stk(uint i) const { return i >= _stkoff && i < _monoff; }
  bool        is_mon(uint i) const { return i >= _monoff && i < _endoff; }

  uint              sp()     const { return _sp; }
  int               bci()    const { return _bci; }
  bool          has_method() const { return _method != NULL; }
  ciMethod*         method() const { assert(has_method(), ""); return _method; }
  JVMState*         caller() const { return _caller; }
  SafePointNode*    map()    const { return _map; }
  uint              depth()  const { return _depth; }
  uint        debug_start()  const; // returns locoff of root caller
  uint        debug_end()    const; // returns endoff of self
  uint        debug_size()   const { return loc_size() + sp() + mon_size(); }
  uint        debug_depth()  const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  // True if 'off' names a monitor edge anywhere in this state or a caller's.
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                                  || (caller() && caller()->is_monitor_use(off)); }

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) { _locoff = _stkoff = _monoff = _endoff = off; }
  void              set_map(SafePointNode *map) { _map = map; }
  void              set_sp(uint sp) { _sp = sp; }
  void              set_bci(int bci) { _bci = bci; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller

#ifndef PRODUCT
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};
duke@435 | 276 | |
//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint           cmp( const Node &n ) const;
  virtual uint           size_of() const;       // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;   // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;      // Pointer to list of JVM State objects
  const TypePtr*  _adr_type;  // What type of memory does this node produce?

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

  // Functionality from old debug nodes which has changed.
  // These accessors index into this node's inputs using the offsets
  // recorded in the given JVMState (which must describe this map).
  Node *local(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void  set_local(JVMState* jvms, uint idx, Node *c);

  void  set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void  set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  // Grow the stack region of the edge mapping to at least stk_size slots.
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0)  grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here.  (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode*         next_exception() const;
  void               set_next_exception(SafePointNode* n);
  bool                   has_exceptions() const { return next_exception() != NULL; }

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type    *Value( PhaseTransform *phase ) const;
  virtual const Type    *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node          *Identity( PhaseTransform *phase );
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  static  bool           needs_polling_address_input();

#ifndef PRODUCT
  virtual void              dump_spec(outputStream *st) const;
#endif
};
duke@435 | 401 | |
duke@435 | 402 | //------------------------------CallNode--------------------------------------- |
duke@435 | 403 | // Call nodes now subsume the function of debug nodes at callsites, so they |
duke@435 | 404 | // contain the functionality of a full scope chain of debug nodes. |
duke@435 | 405 | class CallNode : public SafePointNode { |
duke@435 | 406 | public: |
duke@435 | 407 | const TypeFunc *_tf; // Function type |
duke@435 | 408 | address _entry_point; // Address of method being called |
duke@435 | 409 | float _cnt; // Estimate of number of times called |
duke@435 | 410 | PointsToNode::EscapeState _escape_state; |
duke@435 | 411 | |
duke@435 | 412 | CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type) |
duke@435 | 413 | : SafePointNode(tf->domain()->cnt(), NULL, adr_type), |
duke@435 | 414 | _tf(tf), |
duke@435 | 415 | _entry_point(addr), |
duke@435 | 416 | _cnt(COUNT_UNKNOWN) |
duke@435 | 417 | { |
duke@435 | 418 | init_class_id(Class_Call); |
duke@435 | 419 | init_flags(Flag_is_Call); |
duke@435 | 420 | _escape_state = PointsToNode::UnknownEscape; |
duke@435 | 421 | } |
duke@435 | 422 | |
duke@435 | 423 | const TypeFunc* tf() const { return _tf; } |
duke@435 | 424 | const address entry_point() const { return _entry_point; } |
duke@435 | 425 | const float cnt() const { return _cnt; } |
duke@435 | 426 | |
duke@435 | 427 | void set_tf(const TypeFunc* tf) { _tf = tf; } |
duke@435 | 428 | void set_entry_point(address p) { _entry_point = p; } |
duke@435 | 429 | void set_cnt(float c) { _cnt = c; } |
duke@435 | 430 | |
duke@435 | 431 | virtual const Type *bottom_type() const; |
duke@435 | 432 | virtual const Type *Value( PhaseTransform *phase ) const; |
duke@435 | 433 | virtual Node *Identity( PhaseTransform *phase ) { return this; } |
duke@435 | 434 | virtual uint cmp( const Node &n ) const; |
duke@435 | 435 | virtual uint size_of() const = 0; |
duke@435 | 436 | virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const; |
duke@435 | 437 | virtual Node *match( const ProjNode *proj, const Matcher *m ); |
duke@435 | 438 | virtual uint ideal_reg() const { return NotAMachineReg; } |
duke@435 | 439 | // Are we guaranteed that this node is a safepoint? Not true for leaf calls and |
duke@435 | 440 | // for some macro nodes whose expansion does not have a safepoint on the fast path. |
duke@435 | 441 | virtual bool guaranteed_safepoint() { return true; } |
duke@435 | 442 | // For macro nodes, the JVMState gets modified during expansion, so when cloning |
duke@435 | 443 | // the node the JVMState must be cloned. |
duke@435 | 444 | virtual void clone_jvms() { } // default is not to clone |
duke@435 | 445 | |
duke@435 | 446 | virtual uint match_edge(uint idx) const; |
duke@435 | 447 | |
duke@435 | 448 | #ifndef PRODUCT |
duke@435 | 449 | virtual void dump_req() const; |
duke@435 | 450 | virtual void dump_spec(outputStream *st) const; |
duke@435 | 451 | #endif |
duke@435 | 452 | }; |
duke@435 | 453 | |
duke@435 | 454 | //------------------------------CallJavaNode----------------------------------- |
duke@435 | 455 | // Make a static or dynamic subroutine call node using Java calling |
duke@435 | 456 | // convention. (The "Java" calling convention is the compiler's calling |
duke@435 | 457 | // convention, as opposed to the interpreter's or that of native C.) |
duke@435 | 458 | class CallJavaNode : public CallNode { |
duke@435 | 459 | protected: |
duke@435 | 460 | virtual uint cmp( const Node &n ) const; |
duke@435 | 461 | virtual uint size_of() const; // Size is bigger |
duke@435 | 462 | |
duke@435 | 463 | bool _optimized_virtual; |
duke@435 | 464 | ciMethod* _method; // Method being direct called |
duke@435 | 465 | public: |
duke@435 | 466 | const int _bci; // Byte Code Index of call byte code |
duke@435 | 467 | CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int bci) |
duke@435 | 468 | : CallNode(tf, addr, TypePtr::BOTTOM), |
duke@435 | 469 | _method(method), _bci(bci), _optimized_virtual(false) |
duke@435 | 470 | { |
duke@435 | 471 | init_class_id(Class_CallJava); |
duke@435 | 472 | } |
duke@435 | 473 | |
duke@435 | 474 | virtual int Opcode() const; |
duke@435 | 475 | ciMethod* method() const { return _method; } |
duke@435 | 476 | void set_method(ciMethod *m) { _method = m; } |
duke@435 | 477 | void set_optimized_virtual(bool f) { _optimized_virtual = f; } |
duke@435 | 478 | bool is_optimized_virtual() const { return _optimized_virtual; } |
duke@435 | 479 | |
duke@435 | 480 | #ifndef PRODUCT |
duke@435 | 481 | virtual void dump_spec(outputStream *st) const; |
duke@435 | 482 | #endif |
duke@435 | 483 | }; |
duke@435 | 484 | |
//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  // Direct call to a known Java method.
  CallStaticJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
  }
  // Call to a named runtime wrapper instead of a Java method (no ciMethod).
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
  }
  const char *_name;      // Runtime wrapper name

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  virtual int         Opcode() const;
#ifndef PRODUCT
  virtual void        dump_spec(outputStream *st) const;
#endif
};
duke@435 | 515 | |
//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.  Carries the
// vtable index used to dispatch the call.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf , address addr, ciMethod* method, int vtable_index, int bci ) : CallJavaNode(tf,addr,method,bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;    // Index into the receiver's vtable for dispatch
  virtual int   Opcode() const;
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};
duke@435 | 532 | |
//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name;            // Printable name, if _method is NULL
  virtual int   Opcode() const;
  // Runtime calls use the native C calling convention, not the Java one.
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};
duke@435 | 555 | |
//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int   Opcode() const;
  // Leaf calls never stop at a safepoint.
  virtual bool        guaranteed_safepoint()  { return false; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};
duke@435 | 573 | |
//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code, so FP registers need not be saved around the call.
// Adds no state of its own; the distinct Opcode lets the matcher/backends
// treat it specially.
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int Opcode() const;
};
duke@435 | 586 | |
duke@435 | 587 | |
//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
// AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
// get expanded into a code sequence containing a call.  Unlike other CallNodes,
// they have 2 memory projections and 2 i_o projections (which are distinguished by
// the _is_io_use flag in the projection.) This is needed when expanding the node in
// order to differentiate the uses of the projection on the normal control path from
// those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  // Edge indices.  Note that RawAddress (an output projection index) and
  // AllocSize (an input index) intentionally share TypeFunc::Parms.
  enum {
    // Output:
    RawAddress = TypeFunc::Parms,    // the newly-allocated raw address
    // Inputs:
    AllocSize = TypeFunc::Parms,    // size (in bytes) of the new object
    KlassNode,                      // type (maybe dynamic) of the obj.
    InitialTest,                    // slow-path test (may be constant)
    ALength,                        // array length (or TOP if none)
    ParmLimit
  };

  // Build the TypeFunc shared by all allocation nodes:
  // (size, klass, slow-path test, length) -> not-null raw oop.
  static const TypeFunc* alloc_type() {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = TypeInt::INT;  // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  // Result of Escape Analysis: true if this allocation is a candidate for
  // scalar replacement (presumably it does not escape its compilation unit;
  // the precise setting happens in escape-analysis code — not visible here).
  bool _is_scalar_replaceable;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  // Allocations do not guarantee a safepoint (the fast path never stops).
  virtual bool        guaranteed_safepoint()  { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? sizeof(arrayOopDesc) : sizeof(oopDesc);
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);
};
duke@435 | 677 | |
//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
// Same edge layout as AllocateNode, but the ALength input carries the real
// element count instead of TOP.
class AllocateArrayNode : public AllocateNode {
public:
  // count_val - array length; wired into the AllocateNode::ALength slot.
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength,        count_val);
  }
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }

  // Dig the length operand out of a (possible) array allocation site.
  static Node* Ideal_length(Node* ptr, PhaseTransform* phase) {
    AllocateArrayNode* allo = Ideal_array_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(AllocateNode::ALength);
  }
};
duke@435 | 711 | |
//------------------------------AbstractLockNode-----------------------------------
// Common base of LockNode and UnlockNode.  Holds the elimination state and
// the helper machinery used to pair locks with their matching unlocks so
// that redundant lock/unlock sequences can be removed.
class AbstractLockNode: public CallNode {
private:
 bool     _eliminate;    // indicates this lock can be safely eliminated
#ifndef PRODUCT
  NamedCounter* _counter; // debug-build statistics counter for this lock site
#endif

protected:
  // helper functions for lock elimination
  //

  // Search backward from ctrl for an unlock of the same object as 'lock';
  // matches found along the way are collected in lock_ops.
  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  // Follow both arms of an If looking for a lock/unlock pair for 'lock'.
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  // Require an unlock on every path entering 'region' for 'lock'.
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);


public:
  // Lock/unlock sites have no fixed target address and touch raw memory.
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _eliminate(false)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  // "Parameters" of a lock operation (see LockNode class comment):
  Node *   obj_node() const       {return in(TypeFunc::Parms + 0); }
  Node *   box_node() const       {return in(TypeFunc::Parms + 1); }
  Node *   fastlock_node() const  {return in(TypeFunc::Parms + 2); }
  // Always produces a condition-code type.
  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  {return _eliminate; }
  // mark node as eliminated and update the counter if there is one
  void set_eliminated();

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
#endif
};
duke@435 | 759 | |
//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0  -  object to lock
//    1 -   a BoxLockNode
//    2 -   a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  // Signature shared by all LockNodes:
  // (object, box-lock address, fast-lock result) -> void.
  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

    return TypeFunc::make(domain,range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  // Registers itself on the compiler's macro-node list for later expansion.
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool        guaranteed_safepoint()  { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void  clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }
};
duke@435 | 803 | |
//------------------------------Unlock---------------------------------------
// High-level unlock operation.  Same "parameter" layout as LockNode
// (object, BoxLockNode, FastLockNode) via AbstractLockNode accessors.
class UnlockNode : public AbstractLockNode {
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  // Registers itself on the compiler's macro-node list for later expansion.
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool        guaranteed_safepoint()  { return false; }
};