src/share/vm/opto/memnode.hpp

author:      roland
date:        Tue, 02 Aug 2011 18:36:40 +0200
changeset:   3047:f1c12354c3f7
parent:      2708:1d1603768966
child:       3052:1af104d6cf99
permissions: -rw-r--r--

7074017: Introduce MemBarAcquireLock/MemBarReleaseLock nodes for monitor enter/exit code paths
Summary: replace MemBarAcquire/MemBarRelease nodes on the monitor enter/exit code paths with new MemBarAcquireLock/MemBarReleaseLock nodes
Reviewed-by: kvn, twisti

/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_MEMNODE_HPP
#define SHARE_VM_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
protected:
#ifdef ASSERT
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
#endif
  virtual uint size_of() const; // Size is bigger (ASSERT only)
public:
  enum { Control,               // When is it safe to do this load?
         Memory,                // Chunk of memory is being loaded from
         Address,               // Actually address, derived from base
         ValueIn,               // Value to store
         OopStore               // Preceding oop store, only in StoreCM
  };
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2   ) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  // Find any cast-away of null-ness and keep its control.
  static  Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;
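
  // Conceptual sketch (not HotSpot code): find_previous_store and
  // can_see_stored_value together implement a store-to-load forwarding
  // search.  A minimal standalone analogue, assuming a singly-linked chain
  // of stores each tagged with an alias class, might look like:
  //
  //   struct Store { const void* adr; int alias_idx; long val; Store* prev; };
  //   bool forward_load(const Store* chain, const void* adr, int alias_idx, long& out) {
  //     for (const Store* s = chain; s != NULL; s = s->prev) {
  //       if (s->adr == adr)             { out = s->val; return true; }  // exact hit
  //       if (s->alias_idx == alias_idx) return false;  // may alias: stop searching
  //     }
  //     return false;  // start of chain reached; the load must be emitted
  //   }
  //
  // The real search walks the memory graph and uses the alias-type machinery
  // above to skip provably independent stores.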

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;      // What kind of value is loaded?
public:

  LoadNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt )
    : MemNode(c,mem,adr,at), _type(rt) {
    init_class_id(Class_Load);
  }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                     const TypePtr* at, const Type *rt, BasicType bt );

  virtual uint hash()   const;  // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node *Identity( PhaseTransform *phase );

  // If the load is from Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type *klass_value_common( PhaseTransform *phase ) const;
  Node *klass_identity_common( PhaseTransform *phase );

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;
};

//------------------------------LoadBNode--------------------------------------
// Load a byte (8bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti = TypeInt::UBYTE )
    : LoadNode(c, mem, adr, at, ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadUI2LNode-----------------------------------
// Load an unsigned integer into a long from memory
class LoadUI2LNode : public LoadNode {
public:
  LoadUI2LNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeLong* t = TypeLong::UINT)
    : LoadNode(c, mem, adr, at, t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode( Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS )
    : LoadINode(c,mem,adr,TypeAryPtr::RANGE,ti) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode( Node *c, Node *mem, Node *adr, const TypePtr* at,
             const TypeLong *tl = TypeLong::LONG,
             bool require_atomic_access = false )
    : LoadNode(c,mem,adr,at,tl)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadLNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
public:
  LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadDNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, the control edge carries
  // the dependence preventing hoisting past a Safepoint instead of the memory
  // edge.  (An unfortunate consequence of having Safepoints not set Raw
  // Memory; itself an unfortunate consequence of having Nodes which produce
  // results (new raw memory state) inside of loops preventing all manner of
  // other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};

//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, the control edge carries
  // the dependence preventing hoisting past a Safepoint instead of the memory
  // edge.  (An unfortunate consequence of having Safepoints not set Raw
  // Memory; itself an unfortunate consequence of having Nodes which produce
  // results (new raw memory state) inside of loops preventing all manner of
  // other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
public:
  LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk )
    : LoadPNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
                     const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowOop *tk )
    : LoadNNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }

  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);

public:
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val )
    : MemNode(c,mem,adr,at,val) {
    init_class_id(Class_Store);
  }
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store )
    : MemNode(c,mem,adr,at,val,oop_store) {
    init_class_id(Class_Store);
  }

  // Polymorphic factory method:
  static StoreNode* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                          const TypePtr* at, Node *val, BasicType bt );

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node *Identity( PhaseTransform *phase );

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
              bool require_atomic_access = false )
    : StoreNode(c,mem,adr,at,val)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
public:
  StoreDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
 private:
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual uint cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;   // The alias_idx of OopStore

public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c,mem,adr,at,val,oop_store),
    _oop_alias_idx(oop_alias_idx) {
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};
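
// Background sketch (hypothetical, not HotSpot code): a card mark is a single
// byte store into a side table telling the GC which "card" of the heap may now
// contain an old-to-young pointer.  Assuming a 512-byte card size, the store
// this node models is essentially:
//
//   #include <stdint.h>
//   static const int CARD_SHIFT = 9;  // log2(512)
//   void mark_card(unsigned char* card_table, const void* field_addr) {
//     card_table[(uintptr_t)field_addr >> CARD_SHIFT] = 0;  // 0 == dirty
//   }
//
// The _oop_alias_idx above records which oop store the mark belongs to, so the
// optimizer keeps the card mark ordered after that store.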

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode( Node *c, Node *mem, Node *adr )
    : LoadPNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------LoadLLockedNode---------------------------------
// Load-locked a long from memory (either object or array).
// On Sparc & Intel this is implemented as a normal long load.
class LoadLLockedNode : public LoadLNode {
public:
  LoadLLockedNode( Node *c, Node *mem, Node *adr )
    : LoadLNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeLong::LONG) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StoreLConditional; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool      is_CFG() const  { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type *Value( PhaseTransform *phase ) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex);
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type *bottom_type() const { return TypeInt::BOOL; }
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public LoadStoreNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public LoadStoreNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public LoadStoreNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public LoadStoreNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};
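
// Usage analogy (assumes C++11 std::atomic, shown only to illustrate the
// primitive these nodes model): a compare-and-swap either installs a new
// value or reports the current one, and the classic retry loop is:
//
//   #include <atomic>
//   void atomic_add(std::atomic<long>& cell, long delta) {
//     long old = cell.load();
//     // CAS fails if another thread changed 'cell' since 'old' was read;
//     // compare_exchange_weak reloads 'old' on failure, so just retry.
//     while (!cell.compare_exchange_weak(old, old + delta)) { }
//   }
//
// Each CompareAndSwap node likewise yields a success/failure flag: its
// bottom_type is TypeInt::BOOL, inherited from LoadStoreNode above.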

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
    : Node(ctrl,arymem,word_cnt,base) {
    init_class_id(Class_ClearArray);
  }
  virtual int         Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};
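
// Illustrative sketch (hypothetical, not the compiler's actual expansion):
// clearing a [start_offset, end_offset) range of a fresh object reduces to a
// zero-fill loop, which is why the alignment rules above exist:
//
//   #include <stdint.h>
//   void clear_range(char* dest, intptr_t start_offset, intptr_t end_offset) {
//     // start_offset is aligned mod BytesPerInt and end_offset mod
//     // BytesPerLong, so filling in 4-byte units is always legal.
//     for (intptr_t off = start_offset; off < end_offset; off += 4)
//       *(int*)(dest + off) = 0;
//   }
//
// The three clear_memory overloads cover the cases where either offset is a
// compile-time constant (intptr_t) or computed at run time (Node*).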

//------------------------------StrIntrinsic-------------------------------
// Base class for Ideal nodes used in String intrinsic code.
class StrIntrinsicNode: public Node {
public:
  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* c1, Node* s2, Node* c2):
    Node(control, char_array_mem, s1, c1, s2, c2) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2, Node* c):
    Node(control, char_array_mem, s1, s2, c) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2):
    Node(control, char_array_mem, s1, s2) {
  }

  virtual bool depends_only_on_test() const { return false; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------StrComp-------------------------------------
class StrCompNode: public StrIntrinsicNode {
public:
  StrCompNode(Node* control, Node* char_array_mem,
              Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------StrEquals-------------------------------------
class StrEqualsNode: public StrIntrinsicNode {
public:
  StrEqualsNode(Node* control, Node* char_array_mem,
                Node* s1, Node* s2, Node* c):
    StrIntrinsicNode(control, char_array_mem, s1, s2, c) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};

//------------------------------StrIndexOf-------------------------------------
class StrIndexOfNode: public StrIntrinsicNode {
public:
  StrIndexOfNode(Node* control, Node* char_array_mem,
              Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------AryEq---------------------------------------
class AryEqNode: public StrIntrinsicNode {
public:
  AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2):
    StrIntrinsicNode(control, char_array_mem, s1, s2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};
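
// Analogy (assumes C++11 atomics, used here only to illustrate the ordering
// these barriers enforce): a MemBarRelease before a volatile store and a
// MemBarAcquire after a volatile load pair up exactly like a release store
// with an acquire load:
//
//   #include <atomic>
//   int data;                      // plain field
//   std::atomic<int> flag(0);      // stands in for a Java volatile
//   void writer() { data = 42; flag.store(1, std::memory_order_release); }
//   int reader() {
//     while (flag.load(std::memory_order_acquire) == 0) { }
//     return data;                 // guaranteed to observe 42
//   }
//
// No reference after the acquire may move above it, and no reference before
// the release may move below it, matching the rules stated above.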

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnlock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  bool _is_complete;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // initialization of the new memory to zero, then to any initializers.
  bool is_complete() { return _is_complete; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);

 private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, bool st_is_pinned, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Compile* C, Node* base_memory);

  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const       { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node*   empty_memory() const    { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;  // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }
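  //
  // A slightly fuller sketch (hypothetical caller code; 'merged_memory' and
  // 'transform' stand in for whatever the caller supplies), rewriting every
  // non-empty slice through the stream:
  //
  //   for (MergeMemStream mms(merged_memory); mms.next_non_empty(); ) {
  //     Node* old_mem = mms.memory();        // current slice (never the sentinel)
  //     Node* new_mem = transform(old_mem);  // caller-specific rewrite
  //     if (new_mem != old_mem)  mms.set_memory(new_mem);
  //   }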

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top())  _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;   // is_empty() == true
      }
    }
    return false;
  }
};

//------------------------------Prefetch---------------------------------------

// Non-faulting prefetch load.  Prefetch for many reads.
class PrefetchReadNode : public Node {
public:
  PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Non-faulting prefetch load.  Prefetch for many reads & many writes.
class PrefetchWriteNode : public Node {
public:
  PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};
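
// Usage sketch (GCC/Clang __builtin_prefetch, shown as an analogy to the
// machine instruction these nodes are matched to; HotSpot emits the
// instruction directly in the .ad files):
//
//   void prefetch_example(const char* p) {
//     __builtin_prefetch(p, /*rw=*/0);  // prefetch for read  (PrefetchRead)
//     __builtin_prefetch(p, /*rw=*/1);  // prefetch for write (PrefetchWrite)
//   }
//
// Because the prefetch is non-faulting, a bogus address is simply ignored,
// which is why these nodes carry no control input (Node(0,abio,adr)).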

#endif // SHARE_VM_OPTO_MEMNODE_HPP
