src/share/vm/opto/memnode.hpp

author       dcubed
date         Mon, 01 Feb 2010 17:35:05 -0700
changeset    1648:6deeaebad47a
parent       1535:f96a1a986f7b
child        1633:8d9bfe6a446b
permissions  -rw-r--r--

6902182: 4/4 Starting with jdwp agent should not incur performance penalty
Summary: Rename can_post_exceptions support to can_post_on_exceptions. Add support for should_post_on_exceptions flag to permit per JavaThread optimizations.
Reviewed-by: never, kvn, dcubed
Contributed-by: tom.deneau@amd.com

     1 /*
     2  * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 // Portions of code courtesy of Clifford Click
    27 class MultiNode;
    28 class PhaseCCP;
    29 class PhaseTransform;
    31 //------------------------------MemNode----------------------------------------
    32 // Load or Store, possibly throwing a NULL pointer exception
    33 class MemNode : public Node {
    34 protected:
    35 #ifdef ASSERT
    36   const TypePtr* _adr_type;     // What kind of memory is being addressed?
    37 #endif
    38   virtual uint size_of() const; // Size is bigger (ASSERT only)
    39 public:
    40   enum { Control,               // When is it safe to do this load?
    41          Memory,                // Chunk of memory is being loaded from
     42          Address,               // Actual address, derived from base
    43          ValueIn,               // Value to store
     44          OopStore               // Preceding oop store, only in StoreCM
    45   };
    46 protected:
    47   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    48     : Node(c0,c1,c2   ) {
    49     init_class_id(Class_Mem);
    50     debug_only(_adr_type=at; adr_type();)
    51   }
    52   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    53     : Node(c0,c1,c2,c3) {
    54     init_class_id(Class_Mem);
    55     debug_only(_adr_type=at; adr_type();)
    56   }
    57   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    58     : Node(c0,c1,c2,c3,c4) {
    59     init_class_id(Class_Mem);
    60     debug_only(_adr_type=at; adr_type();)
    61   }
    63 public:
    64   // Helpers for the optimizer.  Documented in memnode.cpp.
    65   static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
    66                                       Node* p2, AllocateNode* a2,
    67                                       PhaseTransform* phase);
    68   static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);
    70   static Node *optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
    71   static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
    72   // This one should probably be a phase-specific function:
    73   static bool all_controls_dominate(Node* dom, Node* sub);
    75   // Find any cast-away of null-ness and keep its control.
    76   static  Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
    77   virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
    79   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
    81   // Shared code for Ideal methods:
    82   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.
    84   // Helper function for adr_type() implementations.
    85   static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);
    87   // Raw access function, to allow copying of adr_type efficiently in
    88   // product builds and retain the debug info for debug builds.
    89   const TypePtr *raw_adr_type() const {
    90 #ifdef ASSERT
    91     return _adr_type;
    92 #else
    93     return 0;
    94 #endif
    95   }
    97   // Map a load or store opcode to its corresponding store opcode.
    98   // (Return -1 if unknown.)
    99   virtual int store_Opcode() const { return -1; }
    101   // What is the type of the value in memory?  (T_VOID means "unspecified".)
   102   virtual BasicType memory_type() const = 0;
   103   virtual int memory_size() const {
   104 #ifdef ASSERT
   105     return type2aelembytes(memory_type(), true);
   106 #else
   107     return type2aelembytes(memory_type());
   108 #endif
   109   }
   111   // Search through memory states which precede this node (load or store).
   112   // Look for an exact match for the address, with no intervening
   113   // aliased stores.
   114   Node* find_previous_store(PhaseTransform* phase);
   116   // Can this node (load or store) accurately see a stored value in
   117   // the given memory state?  (The state may or may not be in(Memory).)
   118   Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;
   120 #ifndef PRODUCT
   121   static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
   122   virtual void dump_spec(outputStream *st) const;
   123 #endif
   124 };
   126 //------------------------------LoadNode---------------------------------------
   127 // Load value; requires Memory and Address
   128 class LoadNode : public MemNode {
   129 protected:
   130   virtual uint cmp( const Node &n ) const;
   131   virtual uint size_of() const; // Size is bigger
   132   const Type* const _type;      // What kind of value is loaded?
   133 public:
   135   LoadNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt )
   136     : MemNode(c,mem,adr,at), _type(rt) {
   137     init_class_id(Class_Load);
   138   }
   140   // Polymorphic factory method:
   141   static Node* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
   142                      const TypePtr* at, const Type *rt, BasicType bt );
   144   virtual uint hash()   const;  // Check the type
   146   // Handle algebraic identities here.  If we have an identity, return the Node
   147   // we are equivalent to.  We look for Load of a Store.
   148   virtual Node *Identity( PhaseTransform *phase );
   150   // If the load is from Field memory and the pointer is non-null, we can
   151   // zero out the control input.
   152   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   154   // Split instance field load through Phi.
   155   Node* split_through_phi(PhaseGVN *phase);
   157   // Recover original value from boxed values
   158   Node *eliminate_autobox(PhaseGVN *phase);
   160   // Compute a new Type for this node.  Basically we just do the pre-check,
   161   // then call the virtual add() to set the type.
   162   virtual const Type *Value( PhaseTransform *phase ) const;
   164   // Common methods for LoadKlass and LoadNKlass nodes.
   165   const Type *klass_value_common( PhaseTransform *phase ) const;
   166   Node *klass_identity_common( PhaseTransform *phase );
   168   virtual uint ideal_reg() const;
   169   virtual const Type *bottom_type() const;
   170   // Following method is copied from TypeNode:
   171   void set_type(const Type* t) {
   172     assert(t != NULL, "sanity");
   173     debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
   174     *(const Type**)&_type = t;   // cast away const-ness
   175     // If this node is in the hash table, make sure it doesn't need a rehash.
   176     assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
   177   }
   178   const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
   180   // Do not match memory edge
   181   virtual uint match_edge(uint idx) const;
   183   // Map a load opcode to its corresponding store opcode.
   184   virtual int store_Opcode() const = 0;
   186   // Check if the load's memory input is a Phi node with the same control.
   187   bool is_instance_field_load_with_local_phi(Node* ctrl);
   189 #ifndef PRODUCT
   190   virtual void dump_spec(outputStream *st) const;
   191 #endif
   192 protected:
   193   const Type* load_array_final_field(const TypeKlassPtr *tkls,
   194                                      ciKlass* klass) const;
   195 };
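// The "Polymorphic factory method" above (LoadNode::make) picks a concrete
// LoadXNode subclass from the element's BasicType.  Below is a minimal,
// self-contained sketch of that dispatch shape in plain C++; it is not
// HotSpot code, and the sketch enum and classes are hypothetical stand-ins.
namespace load_factory_sketch {
  enum ElemType { kByte, kChar, kInt, kLong };

  struct LoadSketch     { virtual ~LoadSketch() {} };
  struct LoadByteSketch : LoadSketch {};
  struct LoadCharSketch : LoadSketch {};
  struct LoadIntSketch  : LoadSketch {};
  struct LoadLongSketch : LoadSketch {};

  // One entry point chooses the concrete class from the element type, the way
  // callers of LoadNode::make pass a BasicType instead of naming a subclass.
  // The caller owns the returned object.
  inline LoadSketch* make(ElemType bt) {
    switch (bt) {
      case kByte: return new LoadByteSketch();
      case kChar: return new LoadCharSketch();
      case kInt:  return new LoadIntSketch();
      default:    return new LoadLongSketch();
    }
  }
}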
   197 //------------------------------LoadBNode--------------------------------------
    198 // Load a byte (8 bits signed) from memory
   199 class LoadBNode : public LoadNode {
   200 public:
   201   LoadBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE )
   202     : LoadNode(c,mem,adr,at,ti) {}
   203   virtual int Opcode() const;
   204   virtual uint ideal_reg() const { return Op_RegI; }
   205   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   206   virtual int store_Opcode() const { return Op_StoreB; }
   207   virtual BasicType memory_type() const { return T_BYTE; }
   208 };
   210 //------------------------------LoadUBNode-------------------------------------
    211 // Load an unsigned byte (8 bits unsigned) from memory
   212 class LoadUBNode : public LoadNode {
   213 public:
   214   LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti = TypeInt::UBYTE )
   215     : LoadNode(c, mem, adr, at, ti) {}
   216   virtual int Opcode() const;
   217   virtual uint ideal_reg() const { return Op_RegI; }
   218   virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
   219   virtual int store_Opcode() const { return Op_StoreB; }
   220   virtual BasicType memory_type() const { return T_BYTE; }
   221 };
   223 //------------------------------LoadUSNode-------------------------------------
    224 // Load an unsigned short/char (16 bits unsigned) from memory
   225 class LoadUSNode : public LoadNode {
   226 public:
   227   LoadUSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
   228     : LoadNode(c,mem,adr,at,ti) {}
   229   virtual int Opcode() const;
   230   virtual uint ideal_reg() const { return Op_RegI; }
   231   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   232   virtual int store_Opcode() const { return Op_StoreC; }
   233   virtual BasicType memory_type() const { return T_CHAR; }
   234 };
   236 //------------------------------LoadINode--------------------------------------
   237 // Load an integer from memory
   238 class LoadINode : public LoadNode {
   239 public:
   240   LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT )
   241     : LoadNode(c,mem,adr,at,ti) {}
   242   virtual int Opcode() const;
   243   virtual uint ideal_reg() const { return Op_RegI; }
   244   virtual int store_Opcode() const { return Op_StoreI; }
   245   virtual BasicType memory_type() const { return T_INT; }
   246 };
   248 //------------------------------LoadUI2LNode-----------------------------------
    249 // Load an unsigned integer into a long from memory
   250 class LoadUI2LNode : public LoadNode {
   251 public:
   252   LoadUI2LNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeLong* t = TypeLong::UINT)
   253     : LoadNode(c, mem, adr, at, t) {}
   254   virtual int Opcode() const;
   255   virtual uint ideal_reg() const { return Op_RegL; }
   256   virtual int store_Opcode() const { return Op_StoreL; }
   257   virtual BasicType memory_type() const { return T_LONG; }
   258 };
   260 //------------------------------LoadRangeNode----------------------------------
   261 // Load an array length from the array
   262 class LoadRangeNode : public LoadINode {
   263 public:
   264   LoadRangeNode( Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS )
   265     : LoadINode(c,mem,adr,TypeAryPtr::RANGE,ti) {}
   266   virtual int Opcode() const;
   267   virtual const Type *Value( PhaseTransform *phase ) const;
   268   virtual Node *Identity( PhaseTransform *phase );
   269   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   270 };
   272 //------------------------------LoadLNode--------------------------------------
   273 // Load a long from memory
   274 class LoadLNode : public LoadNode {
   275   virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
   276   virtual uint cmp( const Node &n ) const {
   277     return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
   278       && LoadNode::cmp(n);
   279   }
   280   virtual uint size_of() const { return sizeof(*this); }
   281   const bool _require_atomic_access;  // is piecewise load forbidden?
   283 public:
   284   LoadLNode( Node *c, Node *mem, Node *adr, const TypePtr* at,
   285              const TypeLong *tl = TypeLong::LONG,
   286              bool require_atomic_access = false )
   287     : LoadNode(c,mem,adr,at,tl)
   288     , _require_atomic_access(require_atomic_access)
   289   {}
   290   virtual int Opcode() const;
   291   virtual uint ideal_reg() const { return Op_RegL; }
   292   virtual int store_Opcode() const { return Op_StoreL; }
   293   virtual BasicType memory_type() const { return T_LONG; }
   294   bool require_atomic_access() { return _require_atomic_access; }
   295   static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt);
   296 #ifndef PRODUCT
   297   virtual void dump_spec(outputStream *st) const {
   298     LoadNode::dump_spec(st);
   299     if (_require_atomic_access)  st->print(" Atomic!");
   300   }
   301 #endif
   302 };
   304 //------------------------------LoadL_unalignedNode----------------------------
   305 // Load a long from unaligned memory
   306 class LoadL_unalignedNode : public LoadLNode {
   307 public:
   308   LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
   309     : LoadLNode(c,mem,adr,at) {}
   310   virtual int Opcode() const;
   311 };
   313 //------------------------------LoadFNode--------------------------------------
    314 // Load a float (32 bits) from memory
   315 class LoadFNode : public LoadNode {
   316 public:
   317   LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT )
   318     : LoadNode(c,mem,adr,at,t) {}
   319   virtual int Opcode() const;
   320   virtual uint ideal_reg() const { return Op_RegF; }
   321   virtual int store_Opcode() const { return Op_StoreF; }
   322   virtual BasicType memory_type() const { return T_FLOAT; }
   323 };
   325 //------------------------------LoadDNode--------------------------------------
   326 // Load a double (64 bits) from memory
   327 class LoadDNode : public LoadNode {
   328 public:
   329   LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE )
   330     : LoadNode(c,mem,adr,at,t) {}
   331   virtual int Opcode() const;
   332   virtual uint ideal_reg() const { return Op_RegD; }
   333   virtual int store_Opcode() const { return Op_StoreD; }
   334   virtual BasicType memory_type() const { return T_DOUBLE; }
   335 };
   337 //------------------------------LoadD_unalignedNode----------------------------
   338 // Load a double from unaligned memory
   339 class LoadD_unalignedNode : public LoadDNode {
   340 public:
   341   LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
   342     : LoadDNode(c,mem,adr,at) {}
   343   virtual int Opcode() const;
   344 };
   346 //------------------------------LoadPNode--------------------------------------
   347 // Load a pointer from memory (either object or array)
   348 class LoadPNode : public LoadNode {
   349 public:
   350   LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t )
   351     : LoadNode(c,mem,adr,at,t) {}
   352   virtual int Opcode() const;
   353   virtual uint ideal_reg() const { return Op_RegP; }
   354   virtual int store_Opcode() const { return Op_StoreP; }
   355   virtual BasicType memory_type() const { return T_ADDRESS; }
   356   // depends_only_on_test is almost always true, and needs to be almost always
   357   // true to enable key hoisting & commoning optimizations.  However, for the
   358   // special case of RawPtr loads from TLS top & end, the control edge carries
   359   // the dependence preventing hoisting past a Safepoint instead of the memory
   360   // edge.  (An unfortunate consequence of having Safepoints not set Raw
   361   // Memory; itself an unfortunate consequence of having Nodes which produce
   362   // results (new raw memory state) inside of loops preventing all manner of
   363   // other optimizations).  Basically, it's ugly but so is the alternative.
   364   // See comment in macro.cpp, around line 125 expand_allocate_common().
   365   virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
   366 };
   369 //------------------------------LoadNNode--------------------------------------
   370 // Load a narrow oop from memory (either object or array)
   371 class LoadNNode : public LoadNode {
   372 public:
   373   LoadNNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t )
   374     : LoadNode(c,mem,adr,at,t) {}
   375   virtual int Opcode() const;
   376   virtual uint ideal_reg() const { return Op_RegN; }
   377   virtual int store_Opcode() const { return Op_StoreN; }
   378   virtual BasicType memory_type() const { return T_NARROWOOP; }
   379   // depends_only_on_test is almost always true, and needs to be almost always
   380   // true to enable key hoisting & commoning optimizations.  However, for the
   381   // special case of RawPtr loads from TLS top & end, the control edge carries
   382   // the dependence preventing hoisting past a Safepoint instead of the memory
   383   // edge.  (An unfortunate consequence of having Safepoints not set Raw
   384   // Memory; itself an unfortunate consequence of having Nodes which produce
   385   // results (new raw memory state) inside of loops preventing all manner of
   386   // other optimizations).  Basically, it's ugly but so is the alternative.
   387   // See comment in macro.cpp, around line 125 expand_allocate_common().
   388   virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
   389 };
   391 //------------------------------LoadKlassNode----------------------------------
   392 // Load a Klass from an object
   393 class LoadKlassNode : public LoadPNode {
   394 public:
   395   LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk )
   396     : LoadPNode(c,mem,adr,at,tk) {}
   397   virtual int Opcode() const;
   398   virtual const Type *Value( PhaseTransform *phase ) const;
   399   virtual Node *Identity( PhaseTransform *phase );
   400   virtual bool depends_only_on_test() const { return true; }
   402   // Polymorphic factory method:
   403   static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
   404                      const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
   405 };
   407 //------------------------------LoadNKlassNode---------------------------------
   408 // Load a narrow Klass from an object.
   409 class LoadNKlassNode : public LoadNNode {
   410 public:
   411   LoadNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowOop *tk )
   412     : LoadNNode(c,mem,adr,at,tk) {}
   413   virtual int Opcode() const;
   414   virtual uint ideal_reg() const { return Op_RegN; }
   415   virtual int store_Opcode() const { return Op_StoreN; }
   416   virtual BasicType memory_type() const { return T_NARROWOOP; }
   418   virtual const Type *Value( PhaseTransform *phase ) const;
   419   virtual Node *Identity( PhaseTransform *phase );
   420   virtual bool depends_only_on_test() const { return true; }
   421 };
   424 //------------------------------LoadSNode--------------------------------------
    425 // Load a short (16 bits signed) from memory
   426 class LoadSNode : public LoadNode {
   427 public:
   428   LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
   429     : LoadNode(c,mem,adr,at,ti) {}
   430   virtual int Opcode() const;
   431   virtual uint ideal_reg() const { return Op_RegI; }
   432   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   433   virtual int store_Opcode() const { return Op_StoreC; }
   434   virtual BasicType memory_type() const { return T_SHORT; }
   435 };
   437 //------------------------------StoreNode--------------------------------------
    438 // Store value; requires Memory, Address and Value
   439 class StoreNode : public MemNode {
   440 protected:
   441   virtual uint cmp( const Node &n ) const;
   442   virtual bool depends_only_on_test() const { return false; }
   444   Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
   445   Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);
   447 public:
   448   StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val )
   449     : MemNode(c,mem,adr,at,val) {
   450     init_class_id(Class_Store);
   451   }
   452   StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store )
   453     : MemNode(c,mem,adr,at,val,oop_store) {
   454     init_class_id(Class_Store);
   455   }
   457   // Polymorphic factory method:
   458   static StoreNode* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
   459                           const TypePtr* at, Node *val, BasicType bt );
   461   virtual uint hash() const;    // Check the type
   463   // If the store is to Field memory and the pointer is non-null, we can
   464   // zero out the control input.
   465   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   467   // Compute a new Type for this node.  Basically we just do the pre-check,
   468   // then call the virtual add() to set the type.
   469   virtual const Type *Value( PhaseTransform *phase ) const;
   471   // Check for identity function on memory (Load then Store at same address)
   472   virtual Node *Identity( PhaseTransform *phase );
   474   // Do not match memory edge
   475   virtual uint match_edge(uint idx) const;
   477   virtual const Type *bottom_type() const;  // returns Type::MEMORY
   479   // Map a store opcode to its corresponding own opcode, trivially.
   480   virtual int store_Opcode() const { return Opcode(); }
   482   // have all possible loads of the value stored been optimized away?
   483   bool value_never_loaded(PhaseTransform *phase) const;
   484 };
   486 //------------------------------StoreBNode-------------------------------------
   487 // Store byte to memory
   488 class StoreBNode : public StoreNode {
   489 public:
   490   StoreBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   491   virtual int Opcode() const;
   492   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   493   virtual BasicType memory_type() const { return T_BYTE; }
   494 };
   496 //------------------------------StoreCNode-------------------------------------
   497 // Store char/short to memory
   498 class StoreCNode : public StoreNode {
   499 public:
   500   StoreCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   501   virtual int Opcode() const;
   502   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   503   virtual BasicType memory_type() const { return T_CHAR; }
   504 };
   506 //------------------------------StoreINode-------------------------------------
   507 // Store int to memory
   508 class StoreINode : public StoreNode {
   509 public:
   510   StoreINode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   511   virtual int Opcode() const;
   512   virtual BasicType memory_type() const { return T_INT; }
   513 };
   515 //------------------------------StoreLNode-------------------------------------
   516 // Store long to memory
   517 class StoreLNode : public StoreNode {
   518   virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
   519   virtual uint cmp( const Node &n ) const {
   520     return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
   521       && StoreNode::cmp(n);
   522   }
   523   virtual uint size_of() const { return sizeof(*this); }
   524   const bool _require_atomic_access;  // is piecewise store forbidden?
   526 public:
   527   StoreLNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
   528               bool require_atomic_access = false )
   529     : StoreNode(c,mem,adr,at,val)
   530     , _require_atomic_access(require_atomic_access)
   531   {}
   532   virtual int Opcode() const;
   533   virtual BasicType memory_type() const { return T_LONG; }
   534   bool require_atomic_access() { return _require_atomic_access; }
   535   static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val);
   536 #ifndef PRODUCT
   537   virtual void dump_spec(outputStream *st) const {
   538     StoreNode::dump_spec(st);
   539     if (_require_atomic_access)  st->print(" Atomic!");
   540   }
   541 #endif
   542 };
   544 //------------------------------StoreFNode-------------------------------------
   545 // Store float to memory
   546 class StoreFNode : public StoreNode {
   547 public:
   548   StoreFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   549   virtual int Opcode() const;
   550   virtual BasicType memory_type() const { return T_FLOAT; }
   551 };
   553 //------------------------------StoreDNode-------------------------------------
   554 // Store double to memory
   555 class StoreDNode : public StoreNode {
   556 public:
   557   StoreDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   558   virtual int Opcode() const;
   559   virtual BasicType memory_type() const { return T_DOUBLE; }
   560 };
   562 //------------------------------StorePNode-------------------------------------
   563 // Store pointer to memory
   564 class StorePNode : public StoreNode {
   565 public:
   566   StorePNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   567   virtual int Opcode() const;
   568   virtual BasicType memory_type() const { return T_ADDRESS; }
   569 };
   571 //------------------------------StoreNNode-------------------------------------
   572 // Store narrow oop to memory
   573 class StoreNNode : public StoreNode {
   574 public:
   575   StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   576   virtual int Opcode() const;
   577   virtual BasicType memory_type() const { return T_NARROWOOP; }
   578 };
   580 //------------------------------StoreCMNode-----------------------------------
   581 // Store card-mark byte to memory for CM
   582 // The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
    583 // Preceding equivalent StoreCMs may be eliminated.
   584 class StoreCMNode : public StoreNode {
   585  private:
   586   int _oop_alias_idx;   // The alias_idx of OopStore
   587 public:
   588   StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) : StoreNode(c,mem,adr,at,val,oop_store), _oop_alias_idx(oop_alias_idx) {}
   589   virtual int Opcode() const;
   590   virtual Node *Identity( PhaseTransform *phase );
   591   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   592   virtual const Type *Value( PhaseTransform *phase ) const;
   593   virtual BasicType memory_type() const { return T_VOID; } // unspecific
   594   int oop_alias_idx() const { return _oop_alias_idx; }
   595 };
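// StoreCM models the card-mark byte written after an oop store for the
// card-table write barrier.  The following is a minimal standalone sketch of
// that barrier in plain C++, assuming the conventional 512-byte cards; the
// names, toy heap size, and dirty value are assumptions of this sketch, not
// values taken from HotSpot.
#include <cstddef>
#include <cstdint>

namespace cardmark_sketch {
  const int    CARD_SHIFT = 9;                        // 512-byte cards (assumed)
  const size_t HEAP_BYTES = 1u << 16;                 // toy heap for the sketch
  static char    heap[HEAP_BYTES];
  static uint8_t card_table[HEAP_BYTES >> CARD_SHIFT];

  // Post-write barrier: after an oop is stored at 'field_addr', dirty the card
  // covering that address.  A StoreCM node corresponds to this byte store and,
  // as documented above, must stay after its matching oop store.
  inline void post_oop_store(void* field_addr) {
    size_t offset = (char*)field_addr - heap;
    card_table[offset >> CARD_SHIFT] = 0;             // 0 == dirty in this sketch
  }
}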
   597 //------------------------------LoadPLockedNode---------------------------------
   598 // Load-locked a pointer from memory (either object or array).
   599 // On Sparc & Intel this is implemented as a normal pointer load.
   600 // On PowerPC and friends it's a real load-locked.
   601 class LoadPLockedNode : public LoadPNode {
   602 public:
   603   LoadPLockedNode( Node *c, Node *mem, Node *adr )
   604     : LoadPNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) {}
   605   virtual int Opcode() const;
   606   virtual int store_Opcode() const { return Op_StorePConditional; }
   607   virtual bool depends_only_on_test() const { return true; }
   608 };
   610 //------------------------------LoadLLockedNode---------------------------------
    611 // Load-locked a long from memory.
   612 // On Sparc & Intel this is implemented as a normal long load.
   613 class LoadLLockedNode : public LoadLNode {
   614 public:
   615   LoadLLockedNode( Node *c, Node *mem, Node *adr )
   616     : LoadLNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeLong::LONG) {}
   617   virtual int Opcode() const;
   618   virtual int store_Opcode() const { return Op_StoreLConditional; }
   619 };
   621 //------------------------------SCMemProjNode---------------------------------------
    622 // This class defines a projection of the memory state of a store conditional node.
   623 // These nodes return a value, but also update memory.
   624 class SCMemProjNode : public ProjNode {
   625 public:
   626   enum {SCMEMPROJCON = (uint)-2};
   627   SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
   628   virtual int Opcode() const;
   629   virtual bool      is_CFG() const  { return false; }
   630   virtual const Type *bottom_type() const {return Type::MEMORY;}
   631   virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
   632   virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
   633   virtual const Type *Value( PhaseTransform *phase ) const;
   634 #ifndef PRODUCT
   635   virtual void dump_spec(outputStream *st) const {};
   636 #endif
   637 };
   639 //------------------------------LoadStoreNode---------------------------
   640 // Note: is_Mem() method returns 'true' for this class.
   641 class LoadStoreNode : public Node {
   642 public:
   643   enum {
   644     ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
   645   };
   646   LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex);
   647   virtual bool depends_only_on_test() const { return false; }
   648   virtual const Type *bottom_type() const { return TypeInt::BOOL; }
   649   virtual uint ideal_reg() const { return Op_RegI; }
   650   virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
   651 };
   653 //------------------------------StorePConditionalNode---------------------------
   654 // Conditionally store pointer to memory, if no change since prior
   655 // load-locked.  Sets flags for success or failure of the store.
   656 class StorePConditionalNode : public LoadStoreNode {
   657 public:
   658   StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
   659   virtual int Opcode() const;
   660   // Produces flags
   661   virtual uint ideal_reg() const { return Op_RegFlags; }
   662 };
   664 //------------------------------StoreIConditionalNode---------------------------
   665 // Conditionally store int to memory, if no change since prior
   666 // load-locked.  Sets flags for success or failure of the store.
   667 class StoreIConditionalNode : public LoadStoreNode {
   668 public:
   669   StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreNode(c, mem, adr, val, ii) { }
   670   virtual int Opcode() const;
   671   // Produces flags
   672   virtual uint ideal_reg() const { return Op_RegFlags; }
   673 };
   675 //------------------------------StoreLConditionalNode---------------------------
   676 // Conditionally store long to memory, if no change since prior
   677 // load-locked.  Sets flags for success or failure of the store.
   678 class StoreLConditionalNode : public LoadStoreNode {
   679 public:
   680   StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
   681   virtual int Opcode() const;
   682   // Produces flags
   683   virtual uint ideal_reg() const { return Op_RegFlags; }
   684 };
   687 //------------------------------CompareAndSwapLNode---------------------------
   688 class CompareAndSwapLNode : public LoadStoreNode {
   689 public:
   690   CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
   691   virtual int Opcode() const;
   692 };
   695 //------------------------------CompareAndSwapINode---------------------------
   696 class CompareAndSwapINode : public LoadStoreNode {
   697 public:
   698   CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
   699   virtual int Opcode() const;
   700 };
   703 //------------------------------CompareAndSwapPNode---------------------------
   704 class CompareAndSwapPNode : public LoadStoreNode {
   705 public:
   706   CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
   707   virtual int Opcode() const;
   708 };
   710 //------------------------------CompareAndSwapNNode---------------------------
   711 class CompareAndSwapNNode : public LoadStoreNode {
   712 public:
   713   CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
   714   virtual int Opcode() const;
   715 };
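// The CompareAndSwap{I,L,P,N} nodes model an atomic compare-and-swap whose
// boolean result (TypeInt::BOOL) feeds a branch, while the memory update is
// exposed through an SCMemProjNode.  As a portable analogy only (not what C2
// emits), the same read-modify-write looks like this with std::atomic:
#include <atomic>

namespace cas_sketch {
  // Returns true and installs 'new_val' if the cell still holds 'expected';
  // otherwise leaves the cell unchanged and returns false.
  inline bool compare_and_swap(std::atomic<long>& cell, long expected, long new_val) {
    return cell.compare_exchange_strong(expected, new_val);
  }
}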
   717 //------------------------------ClearArray-------------------------------------
   718 class ClearArrayNode: public Node {
   719 public:
   720   ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
   721     : Node(ctrl,arymem,word_cnt,base) {
   722     init_class_id(Class_ClearArray);
   723   }
   724   virtual int         Opcode() const;
   725   virtual const Type *bottom_type() const { return Type::MEMORY; }
   726   // ClearArray modifies array elements, and so affects only the
   727   // array memory addressed by the bottom_type of its base address.
   728   virtual const class TypePtr *adr_type() const;
   729   virtual Node *Identity( PhaseTransform *phase );
   730   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   731   virtual uint match_edge(uint idx) const;
   733   // Clear the given area of an object or array.
   734   // The start offset must always be aligned mod BytesPerInt.
   735   // The end offset must always be aligned mod BytesPerLong.
   736   // Return the new memory.
   737   static Node* clear_memory(Node* control, Node* mem, Node* dest,
   738                             intptr_t start_offset,
   739                             intptr_t end_offset,
   740                             PhaseGVN* phase);
   741   static Node* clear_memory(Node* control, Node* mem, Node* dest,
   742                             intptr_t start_offset,
   743                             Node* end_offset,
   744                             PhaseGVN* phase);
   745   static Node* clear_memory(Node* control, Node* mem, Node* dest,
   746                             Node* start_offset,
   747                             Node* end_offset,
   748                             PhaseGVN* phase);
   749   // Return allocation input memory edge if it is different instance
   750   // or itself if it is the one we are looking for.
   751   static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
   752 };
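// clear_memory() above zeroes a region whose start offset is int-aligned and
// whose end offset is long-aligned.  A standalone sketch of that zeroing
// discipline follows, assuming 4-byte ints and 8-byte longs; it illustrates
// the alignment contract only and is not the node's actual expansion.
#include <cassert>
#include <cstdint>

namespace clear_sketch {
  inline void clear_memory(char* base, intptr_t start_offset, intptr_t end_offset) {
    assert(start_offset % 4 == 0 && end_offset % 8 == 0 && start_offset <= end_offset);
    intptr_t cur = start_offset;
    if (cur % 8 != 0 && cur < end_offset) {   // one int store reaches long alignment
      *(int32_t*)(base + cur) = 0;
      cur += 4;
    }
    for (; cur < end_offset; cur += 8) {      // zero the rest a long at a time
      *(int64_t*)(base + cur) = 0;
    }
  }
}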
   754 //------------------------------StrComp-------------------------------------
   755 class StrCompNode: public Node {
   756 public:
   757   StrCompNode(Node* control, Node* char_array_mem,
   758               Node* s1, Node* c1,
   759               Node* s2, Node* c2): Node(control, char_array_mem,
   760                                         s1, c1,
   761                                         s2, c2) {};
   762   virtual int Opcode() const;
   763   virtual bool depends_only_on_test() const { return false; }
   764   virtual const Type* bottom_type() const { return TypeInt::INT; }
   765   virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
   766   virtual uint match_edge(uint idx) const;
   767   virtual uint ideal_reg() const { return Op_RegI; }
   768   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   769 };
   771 //------------------------------StrEquals-------------------------------------
   772 class StrEqualsNode: public Node {
   773 public:
   774   StrEqualsNode(Node* control, Node* char_array_mem,
   775                 Node* s1, Node* s2, Node* c): Node(control, char_array_mem,
   776                                                    s1, s2, c) {};
   777   virtual int Opcode() const;
   778   virtual bool depends_only_on_test() const { return false; }
   779   virtual const Type* bottom_type() const { return TypeInt::BOOL; }
   780   virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
   781   virtual uint match_edge(uint idx) const;
   782   virtual uint ideal_reg() const { return Op_RegI; }
   783   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   784 };
   786 //------------------------------StrIndexOf-------------------------------------
   787 class StrIndexOfNode: public Node {
   788 public:
   789   StrIndexOfNode(Node* control, Node* char_array_mem,
   790                  Node* s1, Node* c1,
   791                  Node* s2, Node* c2): Node(control, char_array_mem,
   792                                            s1, c1,
   793                                            s2, c2) {};
   794   virtual int Opcode() const;
   795   virtual bool depends_only_on_test() const { return false; }
   796   virtual const Type* bottom_type() const { return TypeInt::INT; }
   797   virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
   798   virtual uint match_edge(uint idx) const;
   799   virtual uint ideal_reg() const { return Op_RegI; }
   800   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   801 };
   803 //------------------------------AryEq---------------------------------------
   804 class AryEqNode: public Node {
   805 public:
   806   AryEqNode(Node* control, Node* char_array_mem,
   807             Node* s1, Node* s2): Node(control, char_array_mem, s1, s2) {};
   808   virtual int Opcode() const;
   809   virtual bool depends_only_on_test() const { return false; }
   810   virtual const Type* bottom_type() const { return TypeInt::BOOL; }
   811   virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
   812   virtual uint match_edge(uint idx) const;
   813   virtual uint ideal_reg() const { return Op_RegI; }
   814   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   815 };
   817 //------------------------------MemBar-----------------------------------------
   818 // There are different flavors of Memory Barriers to match the Java Memory
    819 // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
   820 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
   821 // volatile-load.  Monitor-exit and volatile-store act as Release: no
   822 // preceding ref can be moved to after them.  We insert a MemBar-Release
   823 // before a FastUnlock or volatile-store.  All volatiles need to be
    824 // serialized, so we follow all volatile-stores with a MemBar-Volatile to
    825 // separate them from any following volatile-load.
   826 class MemBarNode: public MultiNode {
   827   virtual uint hash() const ;                  // { return NO_HASH; }
   828   virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
   830   virtual uint size_of() const { return sizeof(*this); }
   831   // Memory type this node is serializing.  Usually either rawptr or bottom.
   832   const TypePtr* _adr_type;
   834 public:
   835   enum {
   836     Precedent = TypeFunc::Parms  // optional edge to force precedence
   837   };
   838   MemBarNode(Compile* C, int alias_idx, Node* precedent);
   839   virtual int Opcode() const = 0;
   840   virtual const class TypePtr *adr_type() const { return _adr_type; }
   841   virtual const Type *Value( PhaseTransform *phase ) const;
   842   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   843   virtual uint match_edge(uint idx) const { return 0; }
   844   virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
   845   virtual Node *match( const ProjNode *proj, const Matcher *m );
   846   // Factory method.  Builds a wide or narrow membar.
   847   // Optional 'precedent' becomes an extra edge if not null.
   848   static MemBarNode* make(Compile* C, int opcode,
   849                           int alias_idx = Compile::AliasIdxBot,
   850                           Node* precedent = NULL);
   851 };
   853 // "Acquire" - no following ref can move before (but earlier refs can
   854 // follow, like an early Load stalled in cache).  Requires multi-cpu
   855 // visibility.  Inserted after a volatile load or FastLock.
   856 class MemBarAcquireNode: public MemBarNode {
   857 public:
   858   MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
   859     : MemBarNode(C, alias_idx, precedent) {}
   860   virtual int Opcode() const;
   861 };
   863 // "Release" - no earlier ref can move after (but later refs can move
   864 // up, like a speculative pipelined cache-hitting Load).  Requires
   865 // multi-cpu visibility.  Inserted before a volatile store or FastUnLock.
   866 class MemBarReleaseNode: public MemBarNode {
   867 public:
   868   MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
   869     : MemBarNode(C, alias_idx, precedent) {}
   870   virtual int Opcode() const;
   871 };
   873 // Ordering between a volatile store and a following volatile load.
   874 // Requires multi-CPU visibility?
   875 class MemBarVolatileNode: public MemBarNode {
   876 public:
   877   MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
   878     : MemBarNode(C, alias_idx, precedent) {}
   879   virtual int Opcode() const;
   880 };
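// The MemBar comment block above describes where acquire, release, and
// volatile barriers sit around Java volatile accesses.  The fragment below is
// an analogy in portable C++ (std::atomic fences), not the code C2 emits; the
// payload/vflag names are assumptions of this sketch.
#include <atomic>

namespace membar_sketch {
  static int              payload = 0;
  static std::atomic<int> vflag(0);

  inline void writer() {
    payload = 42;
    std::atomic_thread_fence(std::memory_order_release);  // MemBar-Release before the volatile store
    vflag.store(1, std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_seq_cst);   // MemBar-Volatile separates it from later volatile loads
  }

  inline int reader() {
    int observed = vflag.load(std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_acquire);   // MemBar-Acquire after the volatile load
    return observed ? payload : -1;
  }
}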
   882 // Ordering within the same CPU.  Used to order unsafe memory references
   883 // inside the compiler when we lack alias info.  Not needed "outside" the
   884 // compiler because the CPU does all the ordering for us.
   885 class MemBarCPUOrderNode: public MemBarNode {
   886 public:
   887   MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
   888     : MemBarNode(C, alias_idx, precedent) {}
   889   virtual int Opcode() const;
   890   virtual uint ideal_reg() const { return 0; } // not matched in the AD file
   891 };
   893 // Isolation of object setup after an AllocateNode and before next safepoint.
   894 // (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
   895 class InitializeNode: public MemBarNode {
   896   friend class AllocateNode;
   898   bool _is_complete;
   900 public:
   901   enum {
   902     Control    = TypeFunc::Control,
   903     Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
   904     RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
   905     RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
   906   };
   908   InitializeNode(Compile* C, int adr_type, Node* rawoop);
   909   virtual int Opcode() const;
   910   virtual uint size_of() const { return sizeof(*this); }
   911   virtual uint ideal_reg() const { return 0; } // not matched in the AD file
   912   virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress
   914   // Manage incoming memory edges via a MergeMem on in(Memory):
   915   Node* memory(uint alias_idx);
   917   // The raw memory edge coming directly from the Allocation.
   918   // The contents of this memory are *always* all-zero-bits.
   919   Node* zero_memory() { return memory(Compile::AliasIdxRaw); }
   921   // Return the corresponding allocation for this initialization (or null if none).
   922   // (Note: Both InitializeNode::allocation and AllocateNode::initialization
   923   // are defined in graphKit.cpp, which sets up the bidirectional relation.)
   924   AllocateNode* allocation();
   926   // Anything other than zeroing in this init?
   927   bool is_non_zero();
    929   // An InitializeNode must be completed before macro expansion is done.
    930   // Completion requires that the AllocateNode be followed by
    931   // initialization of the new memory to zero, then by any initializers.
   932   bool is_complete() { return _is_complete; }
   934   // Mark complete.  (Must not yet be complete.)
   935   void set_complete(PhaseGVN* phase);
   937 #ifdef ASSERT
   938   // ensure all non-degenerate stores are ordered and non-overlapping
   939   bool stores_are_sane(PhaseTransform* phase);
   940 #endif //ASSERT
   942   // See if this store can be captured; return offset where it initializes.
   943   // Return 0 if the store cannot be moved (any sort of problem).
   944   intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase);
   946   // Capture another store; reformat it to write my internal raw memory.
   947   // Return the captured copy, else NULL if there is some sort of problem.
   948   Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase);
   950   // Find captured store which corresponds to the range [start..start+size).
   951   // Return my own memory projection (meaning the initial zero bits)
   952   // if there is no such store.  Return NULL if there is a problem.
   953   Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);
   955   // Called when the associated AllocateNode is expanded into CFG.
   956   Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
   957                         intptr_t header_size, Node* size_in_bytes,
   958                         PhaseGVN* phase);
   960  private:
   961   void remove_extra_zeroes();
   963   // Find out where a captured store should be placed (or already is placed).
   964   int captured_store_insertion_point(intptr_t start, int size_in_bytes,
   965                                      PhaseTransform* phase);
   967   static intptr_t get_store_offset(Node* st, PhaseTransform* phase);
   969   Node* make_raw_address(intptr_t offset, PhaseTransform* phase);
   971   bool detect_init_independence(Node* n, bool st_is_pinned, int& count);
   973   void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
   974                                PhaseGVN* phase);
   976   intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
   977 };
   979 //------------------------------MergeMem---------------------------------------
   980 // (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
   981 class MergeMemNode: public Node {
   982   virtual uint hash() const ;                  // { return NO_HASH; }
   983   virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
   984   friend class MergeMemStream;
   985   MergeMemNode(Node* def);  // clients use MergeMemNode::make
   987 public:
   988   // If the input is a whole memory state, clone it with all its slices intact.
   989   // Otherwise, make a new memory state with just that base memory input.
   990   // In either case, the result is a newly created MergeMem.
   991   static MergeMemNode* make(Compile* C, Node* base_memory);
   993   virtual int Opcode() const;
   994   virtual Node *Identity( PhaseTransform *phase );
   995   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   996   virtual uint ideal_reg() const { return NotAMachineReg; }
   997   virtual uint match_edge(uint idx) const { return 0; }
   998   virtual const RegMask &out_RegMask() const;
   999   virtual const Type *bottom_type() const { return Type::MEMORY; }
  1000   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  1001   // sparse accessors
  1002   // Fetch the previously stored "set_memory_at", or else the base memory.
  1003   // (Caller should clone it if it is a phi-nest.)
  1004   Node* memory_at(uint alias_idx) const;
  1005   // set the memory, regardless of its previous value
  1006   void set_memory_at(uint alias_idx, Node* n);
  1007   // the "base" is the memory that provides the non-finite support
  1008   Node* base_memory() const       { return in(Compile::AliasIdxBot); }
  1009   // warning: setting the base can implicitly set any of the other slices too
  1010   void set_base_memory(Node* def);
  1011   // sentinel value which denotes a copy of the base memory:
  1012   Node*   empty_memory() const    { return in(Compile::AliasIdxTop); }
  1013   static Node* make_empty_memory(); // where the sentinel comes from
  1014   bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  1015   // hook for the iterator, to perform any necessary setup
  1016   void iteration_setup(const MergeMemNode* other = NULL);
  1017   // push sentinels until I am at least as long as the other (semantic no-op)
  1018   void grow_to_match(const MergeMemNode* other);
  1019   bool verify_sparse() const PRODUCT_RETURN0;
  1020 #ifndef PRODUCT
  1021   virtual void dump_spec(outputStream *st) const;
  1022 #endif
  1023 };
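// MergeMemNode keeps at most one memory input per alias class and lets every
// slice that is not set explicitly default to the base memory.  A minimal
// standalone sketch of that sparse representation follows; the class and
// member names are hypothetical, not MergeMemNode's real layout.
#include <cstddef>
#include <vector>

namespace mergemem_sketch {
  struct MemState;                       // stand-in for a memory-producing Node*

  class MergeSketch {
    MemState*              _base;        // supplies every slice not stored below
    std::vector<MemState*> _slices;      // indexed by alias index; NULL == "use base"
  public:
    explicit MergeSketch(MemState* base) : _base(base) {}

    MemState* memory_at(size_t alias_idx) const {
      return (alias_idx < _slices.size() && _slices[alias_idx] != NULL)
                 ? _slices[alias_idx] : _base;
    }
    void set_memory_at(size_t alias_idx, MemState* n) {
      if (_slices.size() <= alias_idx) _slices.resize(alias_idx + 1, (MemState*)NULL);
      _slices[alias_idx] = n;
    }
    // As warned above for set_base_memory: this implicitly changes every
    // slice that is still defaulting to the base.
    void set_base_memory(MemState* n) { _base = n; }
  };
}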
  1025 class MergeMemStream : public StackObj {
  1026  private:
  1027   MergeMemNode*       _mm;
  1028   const MergeMemNode* _mm2;  // optional second guy, contributes non-empty iterations
  1029   Node*               _mm_base;  // loop-invariant base memory of _mm
  1030   int                 _idx;
  1031   int                 _cnt;
  1032   Node*               _mem;
  1033   Node*               _mem2;
  1034   int                 _cnt2;
  1036   void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
  1037     // subsume_node will break sparseness at times, whenever a memory slice
  1038     // folds down to a copy of the base ("fat") memory.  In such a case,
  1039     // the raw edge will update to base, although it should be top.
  1040     // This iterator will recognize either top or base_memory as an
  1041     // "empty" slice.  See is_empty, is_empty2, and next below.
  1042     //
  1043     // The sparseness property is repaired in MergeMemNode::Ideal.
  1044     // As long as access to a MergeMem goes through this iterator
  1045     // or the memory_at accessor, flaws in the sparseness will
  1046     // never be observed.
  1047     //
  1048     // Also, iteration_setup repairs sparseness.
  1049     assert(mm->verify_sparse(), "please, no dups of base");
  1050     assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");
  1052     _mm  = mm;
  1053     _mm_base = mm->base_memory();
  1054     _mm2 = mm2;
  1055     _cnt = mm->req();
  1056     _idx = Compile::AliasIdxBot-1; // start at the base memory
  1057     _mem = NULL;
   1058     _mem2 = NULL;
   1059   }
  1061 #ifdef ASSERT
  1062   Node* check_memory() const {
  1063     if (at_base_memory())
  1064       return _mm->base_memory();
  1065     else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
  1066       return _mm->memory_at(_idx);
  1067     else
   1068       return _mm_base;
   1069   }
  1070   Node* check_memory2() const {
   1071     return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
   1072   }
  1073 #endif
  1075   static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  1076   void assert_synch() const {
  1077     assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
  1078            "no side-effects except through the stream");
  1081  public:
  1083   // expected usages:
  1084   // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
  1085   // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }
  1087   // iterate over one merge
  1088   MergeMemStream(MergeMemNode* mm) {
  1089     mm->iteration_setup();
  1090     init(mm);
   1091     debug_only(_cnt2 = 999);
   1092   }
  1093   // iterate in parallel over two merges
  1094   // only iterates through non-empty elements of mm2
  1095   MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
  1096     assert(mm2, "second argument must be a MergeMem also");
  1097     ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
  1098     mm->iteration_setup(mm2);
  1099     init(mm, mm2);
  1100     _cnt2 = mm2->req();
  1102 #ifdef ASSERT
  1103   ~MergeMemStream() {
   1104     assert_synch();
   1105   }
  1106 #endif
  1108   MergeMemNode* all_memory() const {
   1109     return _mm;
   1110   }
  1111   Node* base_memory() const {
  1112     assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
   1113     return _mm_base;
   1114   }
  1115   const MergeMemNode* all_memory2() const {
  1116     assert(_mm2 != NULL, "");
   1117     return _mm2;
   1118   }
  1119   bool at_base_memory() const {
   1120     return _idx == Compile::AliasIdxBot;
   1121   }
  1122   int alias_idx() const {
  1123     assert(_mem, "must call next 1st");
   1124     return _idx;
   1125   }
  1127   const TypePtr* adr_type() const {
   1128     return Compile::current()->get_adr_type(alias_idx());
   1129   }
  1131   const TypePtr* adr_type(Compile* C) const {
   1132     return C->get_adr_type(alias_idx());
   1133   }
  1134   bool is_empty() const {
  1135     assert(_mem, "must call next 1st");
  1136     assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
   1137     return _mem->is_top();
   1138   }
  1139   bool is_empty2() const {
  1140     assert(_mem2, "must call next 1st");
  1141     assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
   1142     return _mem2->is_top();
   1143   }
  1144   Node* memory() const {
  1145     assert(!is_empty(), "must not be empty");
  1146     assert_synch();
   1147     return _mem;
   1148   }
  1149   // get the current memory, regardless of empty or non-empty status
  1150   Node* force_memory() const {
  1151     assert(!is_empty() || !at_base_memory(), "");
  1152     // Use _mm_base to defend against updates to _mem->base_memory().
  1153     Node *mem = _mem->is_top() ? _mm_base : _mem;
  1154     assert(mem == check_memory(), "");
   1155     return mem;
   1156   }
  1157   Node* memory2() const {
  1158     assert(_mem2 == check_memory2(), "");
   1159     return _mem2;
   1160   }
  1161   void set_memory(Node* mem) {
  1162     if (at_base_memory()) {
  1163       // Note that this does not change the invariant _mm_base.
  1164       _mm->set_base_memory(mem);
  1165     } else {
   1166       _mm->set_memory_at(_idx, mem);
   1167     }
  1168     _mem = mem;
   1169     assert_synch();
   1170   }
  1172   // Recover from a side effect to the MergeMemNode.
  1173   void set_memory() {
   1174     _mem = _mm->in(_idx);
   1175   }
  1177   bool next()  { return next(false); }
  1178   bool next2() { return next(true); }
  1180   bool next_non_empty()  { return next_non_empty(false); }
  1181   bool next_non_empty2() { return next_non_empty(true); }
  1182   // next_non_empty2 can yield states where is_empty() is true
  1184  private:
  1185   // find the next item, which might be empty
  1186   bool next(bool have_mm2) {
  1187     assert((_mm2 != NULL) == have_mm2, "use other next");
  1188     assert_synch();
  1189     if (++_idx < _cnt) {
  1190       // Note:  This iterator allows _mm to be non-sparse.
  1191       // It behaves the same whether _mem is top or base_memory.
  1192       _mem = _mm->in(_idx);
  1193       if (have_mm2)
  1194         _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
   1195       return true;
   1196     }
   1197     return false;
   1198   }
  1200   // find the next non-empty item
  1201   bool next_non_empty(bool have_mm2) {
  1202     while (next(have_mm2)) {
  1203       if (!is_empty()) {
  1204         // make sure _mem2 is filled in sensibly
  1205         if (have_mm2 && _mem2->is_top())  _mem2 = _mm2->base_memory();
  1206         return true;
  1207       } else if (have_mm2 && !is_empty2()) {
   1208         return true;   // is_empty() == true
   1209       }
   1210     }
   1211     return false;
   1212   }
  1213 };
  1215 //------------------------------Prefetch---------------------------------------
  1217 // Non-faulting prefetch load.  Prefetch for many reads.
  1218 class PrefetchReadNode : public Node {
  1219 public:
  1220   PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  1221   virtual int Opcode() const;
  1222   virtual uint ideal_reg() const { return NotAMachineReg; }
  1223   virtual uint match_edge(uint idx) const { return idx==2; }
  1224   virtual const Type *bottom_type() const { return Type::ABIO; }
  1225 };
  1227 // Non-faulting prefetch load.  Prefetch for many reads & many writes.
  1228 class PrefetchWriteNode : public Node {
  1229 public:
  1230   PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  1231   virtual int Opcode() const;
  1232   virtual uint ideal_reg() const { return NotAMachineReg; }
  1233   virtual uint match_edge(uint idx) const { return idx==2; }
  1234   virtual const Type *bottom_type() const { return Type::ABIO; }
  1235 };
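// PrefetchReadNode and PrefetchWriteNode are non-faulting cache hints.  A
// minimal sketch of the idea using the GCC/Clang __builtin_prefetch intrinsic
// follows; the intrinsic choice and the look-ahead distance are assumptions
// of this sketch, not what the matcher turns these nodes into.
namespace prefetch_sketch {
  inline long sum_with_prefetch(const long* a, int n) {
    long s = 0;
    for (int i = 0; i < n; i++) {
      // Second argument: 0 = prefetch for read, 1 = prefetch for write,
      // loosely mirroring PrefetchReadNode vs. PrefetchWriteNode.
      if (i + 8 < n) __builtin_prefetch(&a[i + 8], 0, 3);
      s += a[i];
    }
    return s;
  }
}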
