aoqi@0: /* aoqi@0: * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. aoqi@0: * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. aoqi@0: * aoqi@0: * This code is free software; you can redistribute it and/or modify it aoqi@0: * under the terms of the GNU General Public License version 2 only, as aoqi@0: * published by the Free Software Foundation. aoqi@0: * aoqi@0: * This code is distributed in the hope that it will be useful, but WITHOUT aoqi@0: * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or aoqi@0: * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License aoqi@0: * version 2 for more details (a copy is included in the LICENSE file that aoqi@0: * accompanied this code). aoqi@0: * aoqi@0: * You should have received a copy of the GNU General Public License version aoqi@0: * 2 along with this work; if not, write to the Free Software Foundation, aoqi@0: * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. aoqi@0: * aoqi@0: * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA aoqi@0: * or visit www.oracle.com if you need additional information or have any aoqi@0: * questions. 
 *
 */

#ifndef SHARE_VM_OPTO_CALLNODE_HPP
#define SHARE_VM_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class BoxLockNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node: root of the ideal graph for a compiled method.
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;   // Signature of the method being compiled
  // Edge 0 is a self-loop; edge 1 is the root node.
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; };
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Fill in parm_reg with the incoming-argument register assignment
  // for the method's signature (sig_bt, length entries).
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
  // Signature used by OSR entry points (defined in the .cpp file).
  static const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters: projections off the StartNode.
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  // Only the control projection participates in the CFG.
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site.  Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode.  Restores registers and
// unwinds stack.  Rethrow happens in the caller's method.
class RethrowNode : public Node {
 public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  // Two extra edges beyond the standard return inputs:
  // Parms+0 is the jump target, Parms+1 is the method oop.
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  // Like TailCallNode, but the return address input is top and the two
  // extra edges are the jump target and the in-flight exception oop.
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit. (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; //Reexecute State

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode need to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size);  // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }
  uint              monoff() const { return _monoff; }
  uint              scloff() const { return _scloff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int            loc_size() const { return stkoff() - locoff(); }
  int            stk_size() const { return monoff() - stkoff(); }
  int            mon_size() const { return scloff() - monoff(); }
  int            scl_size() const { return endoff() - scloff(); }

  bool        is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool        is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool        is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool        is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint                      sp() const { return _sp; }
  int                      bci() const { return _bci; }
  bool        should_reexecute() const { return _reexecute==Reexecute_True; }
  bool  is_reexecute_undefined() const { return _reexecute==Reexecute_Undefined; }
  bool              has_method() const { return _method != NULL; }
  ciMethod*             method() const { assert(has_method(), ""); return _method; }
  JVMState*             caller() const { return _caller; }
  SafePointNode*           map() const { return _map; }
  uint                   depth() const { return _depth; }
  uint             debug_start() const; // returns locoff of root caller
  uint               debug_end() const; // returns endoff of self
  uint              debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint        debug_depth()  const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                             || (caller() && caller()->is_monitor_use(off)); }

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_scloff(uint off) { _scloff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void              set_map(SafePointNode *map) { _map = map; }
  void              set_sp(uint sp) { _sp = sp; }
                    // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci) {if(_bci != bci)_reexecute=Reexecute_Undefined; _bci = bci; }
  void              set_should_reexecute(bool reexec) {_reexecute = reexec ? Reexecute_True : Reexecute_False;}

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void      set_map_deep(SafePointNode *map);// reset map for all callers
  void      adapt_position(int delta);       // Adapt offsets in in-array after adding an edge.
  int       interpreter_frame_size() const;

#ifndef PRODUCT
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint           cmp( const Node &n ) const;
  virtual uint           size_of() const;       // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;   // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;      // Pointer to list of JVM State objects
  const TypePtr*  _adr_type;  // What type of memory does this node produce?

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

 private:
  // Sanity-check one debug-info input: a long/double value must be
  // followed by a top node occupying its second half.
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

 public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void  set_local(JVMState* jvms, uint idx, Node *c);

  void  set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void  set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0)  grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here.  (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode*         next_exception() const;
  void               set_next_exception(SafePointNode* n);
  bool                   has_exceptions() const { return next_exception() != NULL; }

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type    *Value( PhaseTransform *phase ) const;
  virtual const Type    *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node          *Identity( PhaseTransform *phase );
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  static  bool           needs_polling_address_input();

#ifndef PRODUCT
  virtual void           dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
                     // It is relative to the last (youngest) jvms->_scloff.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const ; // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint           ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  // Absolute input-edge index of the first field, computed against
  // the youngest scloff of the supplied JVMS.
  uint first_index(JVMState* jvms) const {
    assert(jvms != NULL, "missed JVMS");
    return jvms->scloff() + _first_index;
  }
  uint n_fields() const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void              dump_spec(outputStream *st) const;
#endif
};


// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
// Holds all the interesting projections hanging off one call node, as
// collected by CallNode::extract_projections(); entries may be NULL.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;
public:
  const TypeFunc *_tf;        // Function type
  address      _entry_point;  // Address of method being called
  float        _cnt;          // Estimate of number of times called
  CallGenerator* _generator;  // corresponding CallGenerator for some late inline calls

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()         const { return _tf; }
  const address   entry_point() const { return _entry_point; }
  const float     cnt()        const { return _cnt; }
  CallGenerator*  generator()  const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type *bottom_type() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase ) { return this; }
  virtual uint        cmp( const Node &n ) const;
  virtual uint        size_of() const = 0;
  virtual void        calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node       *match( const ProjNode *proj, const Matcher *m );
  virtual uint        ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool        guaranteed_safepoint()  { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. So when cloning
  // the node the JVMState must be cloned. Default is not to clone.
  virtual void clone_jvms(Compile* C) {
    if (C->needs_clone_jvms() && jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  // Returns true if the call may modify n
  virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool                has_non_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call
  // or result projection if there are several CheckCastPP
  // or returns NULL if there is no one.
  Node *result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple *r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj);

  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void        dump_req(outputStream *st = tty) const;
  virtual void        dump_spec(outputStream *st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention. (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool    _optimized_virtual;
  bool    _method_handle_invoke;
  ciMethod* _method;            // Method being direct called
public:
  const int       _bci;         // Byte Code Index of call byte code
  // NOTE: members are initialized in declaration order (_optimized_virtual,
  // _method_handle_invoke, _method, _bci), not in init-list order.
  CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method), _bci(bci),
      _optimized_virtual(false),
      _method_handle_invoke(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int   Opcode() const;
  ciMethod* method() const                 { return _method; }
  void  set_method(ciMethod *m)            { _method = m; }
  void  set_optimized_virtual(bool f)      { _optimized_virtual = f; }
  bool  is_optimized_virtual() const       { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f)   { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const    { return _method_handle_invoke; }

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
    // Boxing methods become macro nodes so they can be eliminated later.
    if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  const char *_name;      // Runtime wrapper name

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != NULL) && method()->is_boxing_method();
  }
  // Later inlining modifies the JVMState, so we need to clone it
  // when the call node is cloned (because it is macro node).
  virtual void  clone_jvms(Compile* C) {
    if ((jvms() != NULL) && is_boxing_method()) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  virtual int         Opcode() const;
#ifndef PRODUCT
  virtual void        dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf , address addr, ciMethod* method, int vtable_index, int bci ) : CallJavaNode(tf,addr,method,bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;    // Index into the receiver's vtable for dispatch
  virtual int   Opcode() const;
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name;            // Printable name, if _method is NULL
  virtual int   Opcode() const;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int   Opcode() const;
  // Leaf calls carry no JVM state and are never safepoints.
  virtual bool        guaranteed_safepoint()  { return false; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int   Opcode() const;
};


//------------------------------Allocate--------------------------------------- aoqi@0: // High-level memory allocation aoqi@0: // aoqi@0: // AllocateNode and AllocateArrayNode are subclasses of CallNode because they will aoqi@0: // get expanded into a code sequence containing a call. Unlike other CallNodes, aoqi@0: // they have 2 memory projections and 2 i_o projections (which are distinguished by aoqi@0: // the _is_io_use flag in the projection.) This is needed when expanding the node in aoqi@0: // order to differentiate the uses of the projection on the normal control path from aoqi@0: // those on the exception return path. aoqi@0: // aoqi@0: class AllocateNode : public CallNode { aoqi@0: public: aoqi@0: enum { aoqi@0: // Output: aoqi@0: RawAddress = TypeFunc::Parms, // the newly-allocated raw address aoqi@0: // Inputs: aoqi@0: AllocSize = TypeFunc::Parms, // size (in bytes) of the new object aoqi@0: KlassNode, // type (maybe dynamic) of the obj. aoqi@0: InitialTest, // slow-path test (may be constant) aoqi@0: ALength, // array length (or TOP if none) aoqi@0: ParmLimit aoqi@0: }; aoqi@0: aoqi@0: static const TypeFunc* alloc_type(const Type* t) { aoqi@0: const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms); aoqi@0: fields[AllocSize] = TypeInt::POS; aoqi@0: fields[KlassNode] = TypeInstPtr::NOTNULL; aoqi@0: fields[InitialTest] = TypeInt::BOOL; aoqi@0: fields[ALength] = t; // length (can be a bad length) aoqi@0: aoqi@0: const TypeTuple *domain = TypeTuple::make(ParmLimit, fields); aoqi@0: aoqi@0: // create result type (range) aoqi@0: fields = TypeTuple::fields(1); aoqi@0: fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop aoqi@0: aoqi@0: const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); aoqi@0: aoqi@0: return TypeFunc::make(domain, range); aoqi@0: } aoqi@0: aoqi@0: // Result of Escape Analysis aoqi@0: bool _is_scalar_replaceable; aoqi@0: bool _is_non_escaping; aoqi@0: aoqi@0: virtual uint size_of() const; // Size is 
bigger aoqi@0: AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio, aoqi@0: Node *size, Node *klass_node, Node *initial_test); aoqi@0: // Expansion modifies the JVMState, so we need to clone it aoqi@0: virtual void clone_jvms(Compile* C) { aoqi@0: if (jvms() != NULL) { aoqi@0: set_jvms(jvms()->clone_deep(C)); aoqi@0: jvms()->set_map_deep(this); aoqi@0: } aoqi@0: } aoqi@0: virtual int Opcode() const; aoqi@0: virtual uint ideal_reg() const { return Op_RegP; } aoqi@0: virtual bool guaranteed_safepoint() { return false; } aoqi@0: aoqi@0: // allocations do not modify their arguments aoqi@0: virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false;} aoqi@0: aoqi@0: // Pattern-match a possible usage of AllocateNode. aoqi@0: // Return null if no allocation is recognized. aoqi@0: // The operand is the pointer produced by the (possible) allocation. aoqi@0: // It must be a projection of the Allocate or its subsequent CastPP. aoqi@0: // (Note: This function is defined in file graphKit.cpp, near aoqi@0: // GraphKit::new_instance/new_array, whose output it recognizes.) aoqi@0: // The 'ptr' may not have an offset unless the 'offset' argument is given. aoqi@0: static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase); aoqi@0: aoqi@0: // Fancy version which uses AddPNode::Ideal_base_and_offset to strip aoqi@0: // an offset, which is reported back to the caller. aoqi@0: // (Note: AllocateNode::Ideal_allocation is defined in graphKit.cpp.) aoqi@0: static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase, aoqi@0: intptr_t& offset); aoqi@0: aoqi@0: // Dig the klass operand out of a (possible) allocation site. aoqi@0: static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) { aoqi@0: AllocateNode* allo = Ideal_allocation(ptr, phase); aoqi@0: return (allo == NULL) ? NULL : allo->in(KlassNode); aoqi@0: } aoqi@0: aoqi@0: // Conservatively small estimate of offset of first non-header byte. 
aoqi@0: int minimum_header_size() { aoqi@0: return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : aoqi@0: instanceOopDesc::base_offset_in_bytes(); aoqi@0: } aoqi@0: aoqi@0: // Return the corresponding initialization barrier (or null if none). aoqi@0: // Walks out edges to find it... aoqi@0: // (Note: Both InitializeNode::allocation and AllocateNode::initialization aoqi@0: // are defined in graphKit.cpp, which sets up the bidirectional relation.) aoqi@0: InitializeNode* initialization(); aoqi@0: aoqi@0: // Convenience for initialization->maybe_set_complete(phase) aoqi@0: bool maybe_set_complete(PhaseGVN* phase); aoqi@0: }; aoqi@0: aoqi@0: //------------------------------AllocateArray--------------------------------- aoqi@0: // aoqi@0: // High-level array allocation aoqi@0: // aoqi@0: class AllocateArrayNode : public AllocateNode { aoqi@0: public: aoqi@0: AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio, aoqi@0: Node* size, Node* klass_node, Node* initial_test, aoqi@0: Node* count_val aoqi@0: ) aoqi@0: : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node, aoqi@0: initial_test) aoqi@0: { aoqi@0: init_class_id(Class_AllocateArray); aoqi@0: set_req(AllocateNode::ALength, count_val); aoqi@0: } aoqi@0: virtual int Opcode() const; aoqi@0: virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); aoqi@0: aoqi@0: // Dig the length operand out of a array allocation site. aoqi@0: Node* Ideal_length() { aoqi@0: return in(AllocateNode::ALength); aoqi@0: } aoqi@0: aoqi@0: // Dig the length operand out of a array allocation site and narrow the aoqi@0: // type with a CastII, if necesssary aoqi@0: Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true); aoqi@0: aoqi@0: // Pattern-match a possible usage of AllocateArrayNode. aoqi@0: // Return null if no allocation is recognized. 
aoqi@0: static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) { aoqi@0: AllocateNode* allo = Ideal_allocation(ptr, phase); aoqi@0: return (allo == NULL || !allo->is_AllocateArray()) aoqi@0: ? NULL : allo->as_AllocateArray(); aoqi@0: } aoqi@0: }; aoqi@0: aoqi@0: //------------------------------AbstractLockNode----------------------------------- aoqi@0: class AbstractLockNode: public CallNode { aoqi@0: private: aoqi@0: enum { aoqi@0: Regular = 0, // Normal lock aoqi@0: NonEscObj, // Lock is used for non escaping object aoqi@0: Coarsened, // Lock was coarsened aoqi@0: Nested // Nested lock aoqi@0: } _kind; aoqi@0: #ifndef PRODUCT aoqi@0: NamedCounter* _counter; aoqi@0: #endif aoqi@0: aoqi@0: protected: aoqi@0: // helper functions for lock elimination aoqi@0: // aoqi@0: aoqi@0: bool find_matching_unlock(const Node* ctrl, LockNode* lock, aoqi@0: GrowableArray &lock_ops); aoqi@0: bool find_lock_and_unlock_through_if(Node* node, LockNode* lock, aoqi@0: GrowableArray &lock_ops); aoqi@0: bool find_unlocks_for_region(const RegionNode* region, LockNode* lock, aoqi@0: GrowableArray &lock_ops); aoqi@0: LockNode *find_matching_lock(UnlockNode* unlock); aoqi@0: aoqi@0: // Update the counter to indicate that this lock was eliminated. 
aoqi@0: void set_eliminated_lock_counter() PRODUCT_RETURN; aoqi@0: aoqi@0: public: aoqi@0: AbstractLockNode(const TypeFunc *tf) aoqi@0: : CallNode(tf, NULL, TypeRawPtr::BOTTOM), aoqi@0: _kind(Regular) aoqi@0: { aoqi@0: #ifndef PRODUCT aoqi@0: _counter = NULL; aoqi@0: #endif aoqi@0: } aoqi@0: virtual int Opcode() const = 0; aoqi@0: Node * obj_node() const {return in(TypeFunc::Parms + 0); } aoqi@0: Node * box_node() const {return in(TypeFunc::Parms + 1); } aoqi@0: Node * fastlock_node() const {return in(TypeFunc::Parms + 2); } aoqi@0: void set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); } aoqi@0: aoqi@0: const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;} aoqi@0: aoqi@0: virtual uint size_of() const { return sizeof(*this); } aoqi@0: aoqi@0: bool is_eliminated() const { return (_kind != Regular); } aoqi@0: bool is_non_esc_obj() const { return (_kind == NonEscObj); } aoqi@0: bool is_coarsened() const { return (_kind == Coarsened); } aoqi@0: bool is_nested() const { return (_kind == Nested); } aoqi@0: aoqi@0: void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); } aoqi@0: void set_coarsened() { _kind = Coarsened; set_eliminated_lock_counter(); } aoqi@0: void set_nested() { _kind = Nested; set_eliminated_lock_counter(); } aoqi@0: aoqi@0: // locking does not modify its arguments aoqi@0: virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase){ return false;} aoqi@0: aoqi@0: #ifndef PRODUCT aoqi@0: void create_lock_counter(JVMState* s); aoqi@0: NamedCounter* counter() const { return _counter; } aoqi@0: #endif aoqi@0: }; aoqi@0: aoqi@0: //------------------------------Lock--------------------------------------- aoqi@0: // High-level lock operation aoqi@0: // aoqi@0: // This is a subclass of CallNode because it is a macro node which gets expanded aoqi@0: // into a code sequence containing a call. 
This node takes 3 "parameters": aoqi@0: // 0 - object to lock aoqi@0: // 1 - a BoxLockNode aoqi@0: // 2 - a FastLockNode aoqi@0: // aoqi@0: class LockNode : public AbstractLockNode { aoqi@0: public: aoqi@0: aoqi@0: static const TypeFunc *lock_type() { aoqi@0: // create input type (domain) aoqi@0: const Type **fields = TypeTuple::fields(3); aoqi@0: fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked aoqi@0: fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock aoqi@0: fields[TypeFunc::Parms+2] = TypeInt::BOOL; // FastLock aoqi@0: const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields); aoqi@0: aoqi@0: // create result type (range) aoqi@0: fields = TypeTuple::fields(0); aoqi@0: aoqi@0: const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); aoqi@0: aoqi@0: return TypeFunc::make(domain,range); aoqi@0: } aoqi@0: aoqi@0: virtual int Opcode() const; aoqi@0: virtual uint size_of() const; // Size is bigger aoqi@0: LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) { aoqi@0: init_class_id(Class_Lock); aoqi@0: init_flags(Flag_is_macro); aoqi@0: C->add_macro_node(this); aoqi@0: } aoqi@0: virtual bool guaranteed_safepoint() { return false; } aoqi@0: aoqi@0: virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); aoqi@0: // Expansion modifies the JVMState, so we need to clone it aoqi@0: virtual void clone_jvms(Compile* C) { aoqi@0: if (jvms() != NULL) { aoqi@0: set_jvms(jvms()->clone_deep(C)); aoqi@0: jvms()->set_map_deep(this); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: bool is_nested_lock_region(); // Is this Lock nested? 
aoqi@0: }; aoqi@0: aoqi@0: //------------------------------Unlock--------------------------------------- aoqi@0: // High-level unlock operation aoqi@0: class UnlockNode : public AbstractLockNode { aoqi@0: public: aoqi@0: virtual int Opcode() const; aoqi@0: virtual uint size_of() const; // Size is bigger aoqi@0: UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) { aoqi@0: init_class_id(Class_Unlock); aoqi@0: init_flags(Flag_is_macro); aoqi@0: C->add_macro_node(this); aoqi@0: } aoqi@0: virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); aoqi@0: // unlock is never a safepoint aoqi@0: virtual bool guaranteed_safepoint() { return false; } aoqi@0: }; aoqi@0: aoqi@0: #endif // SHARE_VM_OPTO_CALLNODE_HPP