/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLNODE_HPP
#define SHARE_VM_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class BoxLockNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; };
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
  static const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site.  Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode.  Restores registers and
// unwinds stack.  Rethrow happens in the caller's method.
class RethrowNode : public Node {
 public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit.  (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false       -- do not reexecute
    Reexecute_True      =  1  // true        -- reexecute the bytecode
  } ReexecuteState; // Reexecute state

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size);  // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
  uint locoff() const { return _locoff; }
  uint stkoff() const { return _stkoff; }
  uint argoff() const { return _stkoff + _sp; }
  uint monoff() const { return _monoff; }
  uint scloff() const { return _scloff; }
  uint endoff() const { return _endoff; }
  uint oopoff() const { return debug_end(); }

  int  loc_size() const { return stkoff() - locoff(); }
  int  stk_size() const { return monoff() - stkoff(); }
  int  mon_size() const { return scloff() - monoff(); }
  int  scl_size() const { return endoff() - scloff(); }

  bool is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint      sp()  const { return _sp; }
  int       bci() const { return _bci; }
  bool      should_reexecute() const { return _reexecute == Reexecute_True; }
  bool      is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool      has_method() const { return _method != NULL; }
  ciMethod* method() const { assert(has_method(), ""); return _method; }
  JVMState* caller() const { return _caller; }
  SafePointNode* map() const { return _map; }
  uint      depth() const { return _depth; }
  uint      debug_start() const; // returns locoff of root caller
  uint      debug_end()   const; // returns endoff of self
  uint      debug_size()  const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint      debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                             || (caller() && caller()->is_monitor_use(off)); }

  // Initialization functions for the JVM
  void set_locoff(uint off) { _locoff = off; }
  void set_stkoff(uint off) { _stkoff = off; }
  void set_monoff(uint off) { _monoff = off; }
  void set_scloff(uint off) { _scloff = off; }
  void set_endoff(uint off) { _endoff = off; }
  void set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void set_map(SafePointNode *map) { _map = map; }
  void set_sp(uint sp) { _sp = sp; }
  // _reexecute is initialized to "undefined" for a new bci
  void set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void      set_map_deep(SafePointNode *map); // reset map for all callers
  void      adapt_position(int delta);        // Adapt offsets in in-array after adding an edge.
  int       interpreter_frame_size() const;

#ifndef PRODUCT
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;   // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;      // Pointer to list of JVM State objects
  const TypePtr*  _adr_type;  // What type of memory does this node produce?

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

 private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

 public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void set_local(JVMState* jvms, uint idx, Node *c);

  void set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0) grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here.  (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode* next_exception() const;
  void           set_next_exception(SafePointNode* n);
  bool           has_exceptions() const { return next_exception() != NULL; }

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type    *Value( PhaseTransform *phase ) const;
  virtual const Type    *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node          *Identity( PhaseTransform *phase );
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  static bool needs_polling_address_input();

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
                     // It is relative to the last (youngest) jvms->_scloff.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const ; // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != NULL, "missed JVMS");
    return jvms->scloff() + _first_index;
  }
  uint n_fields() const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;
public:
  const TypeFunc* _tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()         const { return _tf; }
  const address   entry_point() const { return _entry_point; }
  const float     cnt()        const { return _cnt; }
  CallGenerator*  generator()  const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type *bottom_type() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase ) { return this; }
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const = 0;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. So when cloning
  // the node the JVMState must be cloned. Default is not to clone.
  virtual void clone_jvms(Compile* C) {
    if (C->needs_clone_jvms() && jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  // Returns true if the call may modify n
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool has_non_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call or result projection,
  // if there are several CheckCastPPs or none returns NULL.
  Node *result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple *r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj);

  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention.  (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool      _optimized_virtual;
  bool      _method_handle_invoke;
  ciMethod* _method;            // Method being direct called
public:
  const int _bci;               // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method), _bci(bci),
      _optimized_virtual(false),
      _method_handle_invoke(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int Opcode() const;
  ciMethod* method() const                { return _method; }
  void  set_method(ciMethod *m)           { _method = m; }
  void  set_optimized_virtual(bool f)     { _optimized_virtual = f; }
  bool  is_optimized_virtual() const      { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f)  { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const   { return _method_handle_invoke; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  const char *_name;      // Runtime wrapper name

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != NULL) && method()->is_boxing_method();
  }
  // Late inlining modifies the JVMState, so we need to clone it
  // when the call node is cloned (because it is a macro node).
  virtual void clone_jvms(Compile* C) {
    if ((jvms() != NULL) && is_boxing_method()) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf , address addr, ciMethod* method, int vtable_index, int bci )
    : CallJavaNode(tf,addr,method,bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name;            // Printable name, if _method is NULL
  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int Opcode() const;
  virtual bool guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int Opcode() const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
//  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
//  get expanded into a code sequence containing a call.  Unlike other CallNodes,
//  they have 2 memory projections and 2 i_o projections (which are distinguished by
//  the _is_io_use flag in the projection.)  This is needed when expanding the node in
//  order to differentiate the uses of the projection on the normal control path from
//  those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress = TypeFunc::Parms,   // the newly-allocated raw address
    // Inputs:
    AllocSize = TypeFunc::Parms,    // size (in bytes) of the new object
    KlassNode,                      // type (maybe dynamic) of the obj.
    InitialTest,                    // slow-path test (may be constant)
    ALength,                        // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = t;  // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false;}

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
  }
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for non escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;
#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node * obj_node() const      { return in(TypeFunc::Parms + 0); }
  Node * box_node() const      { return in(TypeFunc::Parms + 1); }
  Node * fastlock_node() const { return in(TypeFunc::Parms + 2); }
  void   set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested;    set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase){ return false;}

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.
// This node takes 3 "parameters":
//    0  -  object to lock
//    1  -  a BoxLockNode
//    2  -  a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

    return TypeFunc::make(domain,range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  bool is_nested_lock_region(); // Is this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool guaranteed_safepoint() { return false; }
};

#endif // SHARE_VM_OPTO_CALLNODE_HPP