/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
protected:
#ifdef ASSERT
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
#endif
  virtual uint size_of() const; // Size is bigger (ASSERT only)
public:
  enum { Control,               // When is it safe to do this load?
         Memory,                // Chunk of memory is being loaded from
         Address,               // Actual address, derived from base
         ValueIn,               // Value to store
         OopStore               // Preceding oop store, only in StoreCM
  };
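  // Illustrative sketch of the fixed input layout implied by the enum above
  // (not a complete recipe; see the constructors below and memnode.cpp):
  //   in(Control) -- when it is safe to perform this access (may be NULL)
  //   in(Memory)  -- prior memory state for this address's alias slice
  //   in(Address) -- the address loaded from or stored to
  //   in(ValueIn) -- for stores only, the value being written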
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2   ) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  // Find any cast-away of null-ness and keep its control.
  static Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;      // What kind of value is loaded?
public:

  LoadNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt )
    : MemNode(c,mem,adr,at), _type(rt) {
    init_class_id(Class_Load);
  }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                     const TypePtr* at, const Type *rt, BasicType bt );
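  // A usage sketch for the factory above, assuming the caller supplies the
  // graph context ('gvn', 'ctl', 'mem', 'adr' are hypothetical locals):
  //   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type, TypeInt::INT, T_INT);
  //   ld = gvn.transform(ld);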
  virtual uint hash() const;  // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node *Identity( PhaseTransform *phase );

  // If the load is from Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type *klass_value_common( PhaseTransform *phase ) const;
  Node *klass_identity_common( PhaseTransform *phase );

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;  // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;
};

//------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadCNode--------------------------------------
// Load a char (16 bits unsigned) from memory
class LoadCNode : public LoadNode {
public:
  LoadCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode( Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS )
    : LoadINode(c,mem,adr,TypeAryPtr::RANGE,ti) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
};
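// Illustrative note: the length read feeding a range check on "a[i]" is a
// LoadRangeNode on the TypeAryPtr::RANGE slice; its Value() may fold to a
// constant when the array's length is statically known.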
//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode( Node *c, Node *mem, Node *adr, const TypePtr* at,
             const TypeLong *tl = TypeLong::LONG,
             bool require_atomic_access = false )
    : LoadNode(c,mem,adr,at,tl)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};
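// Illustrative note: on platforms where a 64-bit load is not naturally
// atomic, a Java volatile long must not be read piecewise; make_atomic above
// builds a LoadLNode with _require_atomic_access set so it stays one access.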
//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadLNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
public:
  LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadDNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, the control edge carries
  // the dependence preventing hoisting past a Safepoint instead of the memory
  // edge.  (An unfortunate consequence of having Safepoints not set Raw
  // Memory; itself an unfortunate consequence of having Nodes which produce
  // results (new raw memory state) inside of loops preventing all manner of
  // other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};
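// Illustrative note: the TLS top/end loads mentioned above have
// adr_type() == TypeRawPtr::BOTTOM, so depends_only_on_test() answers false
// and the control edge, not the memory edge, pins them below a Safepoint.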
//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, the control edge carries
  // the dependence preventing hoisting past a Safepoint instead of the memory
  // edge.  (An unfortunate consequence of having Safepoints not set Raw
  // Memory; itself an unfortunate consequence of having Nodes which produce
  // results (new raw memory state) inside of loops preventing all manner of
  // other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
public:
  LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk )
    : LoadPNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
                     const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowOop *tk )
    : LoadNNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }

  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }
};
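// Illustrative note: with compressed oops the klass field holds a narrow
// value, so the factory LoadKlassNode::make may emit a LoadNKlassNode
// (T_NARROWOOP) plus a decode rather than a full-width klass load.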
//------------------------------LoadSNode--------------------------------------
// Load a short (16 bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int num_bits);

public:
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val )
    : MemNode(c,mem,adr,at,val) {
    init_class_id(Class_Store);
  }
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store )
    : MemNode(c,mem,adr,at,val,oop_store) {
    init_class_id(Class_Store);
  }

  // Polymorphic factory method:
  static StoreNode* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                          const TypePtr* at, Node *val, BasicType bt );

  virtual uint hash() const;  // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node *Identity( PhaseTransform *phase );

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // Have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;
};
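// A usage sketch for StoreNode::make, with hypothetical caller-supplied
// inputs; the transformed store is itself the new memory state for its slice:
//   StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_INT);
//   Node* new_mem = gvn.transform(st);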
//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
              bool require_atomic_access = false )
    : StoreNode(c,mem,adr,at,val)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
public:
  StoreDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store.
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store ) : StoreNode(c,mem,adr,at,val,oop_store) {}
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
};
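// Illustrative note: after an oop store the write barrier emits a StoreCM of
// the matching card-table byte; its extra OopStore input (see MemNode's input
// enum) names that oop store, keeping the pair ordered across a SafePoint.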
//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode( Node *c, Node *mem, Node *adr )
    : LoadPNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------LoadLLockedNode---------------------------------
// Load-locked a long from memory (either object or array).
// On Sparc & Intel this is implemented as a normal long load.
class LoadLLockedNode : public LoadLNode {
public:
  LoadLLockedNode( Node *c, Node *mem, Node *adr )
    : LoadLNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeLong::LONG) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StoreLConditional; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return false; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type(); }
  virtual uint ideal_reg() const { return 0; } // memory projections don't have a register
  virtual const Type *Value( PhaseTransform *phase ) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};
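// Illustrative sketch: the store-conditional and compare-and-swap nodes below
// yield two results -- a flags/boolean value consumed by a branch, and a new
// memory state reached through an SCMemProjNode hung off the same node
// (hypothetical locals; the arena-new arity is an assumption):
//   Node* cas  = ...;  // e.g. a CompareAndSwapINode
//   Node* proj = gvn.transform(new (C, 1) SCMemProjNode(cas));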
//------------------------------LoadStoreNode---------------------------
class LoadStoreNode : public Node {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex);
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type *bottom_type() const { return TypeInt::BOOL; }
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public LoadStoreNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public LoadStoreNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public LoadStoreNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public LoadStoreNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};
//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base ) : Node(ctrl,arymem,word_cnt,base) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
};
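// Illustrative note on the three clear_memory overloads above: the first
// handles fully constant [start_offset, end_offset) extents; the Node*
// variants cover extents known only at runtime, e.g. zeroing the tail of a
// newly allocated array whose length is not a compile-time constant.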
//------------------------------StrComp-------------------------------------
class StrCompNode: public Node {
public:
  StrCompNode(Node *control,
              Node* char_array_mem,
              Node* value_mem,
              Node* count_mem,
              Node* offset_mem,
              Node* s1, Node* s2): Node(control,
                                        char_array_mem,
                                        value_mem,
                                        count_mem,
                                        offset_mem,
                                        s1, s2) {};
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::INT; }
  // a StrCompNode (conservatively) aliases with everything:
  virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load or FastLock.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store or FastUnLock.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};
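// Illustrative placement, following the flavor comments above:
//   volatile load:   LoadX;  MemBarAcquire
//   volatile store:  MemBarRelease;  StoreX;  MemBarVolatile
//   monitor enter:   FastLock;  MemBarAcquire
//   monitor exit:    MemBarRelease;  FastUnlock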
// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  bool _is_complete;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,   // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,  // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1   // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // initialization of the new memory to zero, then to any initializers.
  bool is_complete() { return _is_complete; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);

 private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, bool st_is_pinned, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};
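// Illustrative example of store capture: for Java code like
//   Point p = new Point(); p.x = 3;   // 'Point' is a hypothetical class
// the store to p.x may be captured by the allocation's InitializeNode via
// can_capture_store/capture_store, becoming an initializing raw store and
// allowing the explicit zeroing of that field to be elided.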
//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Compile* C, Node* base_memory);

  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node* empty_memory() const { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
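// A usage sketch for the sparse accessors above ('C', 'mem', 'st', and
// 'alias_idx' are hypothetical locals; an alias index typically comes from
// Compile::get_alias_index):
//   MergeMemNode* mm = MergeMemNode::make(C, mem);
//   Node* slice = mm->memory_at(alias_idx);   // fetch one memory slice
//   mm->set_memory_at(alias_idx, st);         // install a new store for it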
class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;      // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top())  _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;  // is_empty() == true
      }
    }
    return false;
  }
};

//------------------------------Prefetch---------------------------------------

// Non-faulting prefetch load.  Prefetch for many reads.
class PrefetchReadNode : public Node {
public:
  PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Non-faulting prefetch load.  Prefetch for many reads & many writes.
class PrefetchWriteNode : public Node {
public:
  PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};
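// Illustrative note (an assumption, not confirmed by this header): these
// prefetch nodes hang off the ABIO slice rather than memory or control, and
// may back explicit prefetch intrinsics such as sun.misc.Unsafe.prefetchRead.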