src/share/vm/opto/memnode.hpp

author       twisti
date         Fri, 27 Feb 2009 13:27:09 -0800
changeset    1040:98cb887364d3
parent       993:3b5ac9e7e6ea
child        1059:337400e7a5dd
permissions  -rw-r--r--

6810672: Comment typos
Summary: I have collected some typos I have found while looking at the code.
Reviewed-by: kvn, never

     1 /*
     2  * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 // Portions of code courtesy of Clifford Click
    27 class MultiNode;
    28 class PhaseCCP;
    29 class PhaseTransform;
    31 //------------------------------MemNode----------------------------------------
    32 // Load or Store, possibly throwing a NULL pointer exception
    33 class MemNode : public Node {
    34 protected:
    35 #ifdef ASSERT
    36   const TypePtr* _adr_type;     // What kind of memory is being addressed?
    37 #endif
    38   virtual uint size_of() const; // Size is bigger (ASSERT only)
    39 public:
    40   enum { Control,               // When is it safe to do this load?
    41          Memory,                // Chunk of memory is being loaded from
    42          Address,               // Actually address, derived from base
    43          ValueIn,               // Value to store
    44          OopStore               // Preceding oop store, only in StoreCM
    45   };
    46 protected:
    47   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    48     : Node(c0,c1,c2   ) {
    49     init_class_id(Class_Mem);
    50     debug_only(_adr_type=at; adr_type();)
    51   }
    52   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    53     : Node(c0,c1,c2,c3) {
    54     init_class_id(Class_Mem);
    55     debug_only(_adr_type=at; adr_type();)
    56   }
    57   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    58     : Node(c0,c1,c2,c3,c4) {
    59     init_class_id(Class_Mem);
    60     debug_only(_adr_type=at; adr_type();)
    61   }
    63 public:
    64   // Helpers for the optimizer.  Documented in memnode.cpp.
    65   static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
    66                                       Node* p2, AllocateNode* a2,
    67                                       PhaseTransform* phase);
    68   static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);
    70   static Node *optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
    71   static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
    72   // This one should probably be a phase-specific function:
    73   static bool all_controls_dominate(Node* dom, Node* sub);
    75   // Find any cast-away of null-ness and keep its control.
    76   static  Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
    77   virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
    79   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
    81   // Shared code for Ideal methods:
    82   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.
    84   // Helper function for adr_type() implementations.
    85   static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);
    87   // Raw access function, to allow copying of adr_type efficiently in
    88   // product builds and retain the debug info for debug builds.
    89   const TypePtr *raw_adr_type() const {
    90 #ifdef ASSERT
    91     return _adr_type;
    92 #else
    93     return 0;
    94 #endif
    95   }
    97   // Map a load or store opcode to its corresponding store opcode.
    98   // (Return -1 if unknown.)
    99   virtual int store_Opcode() const { return -1; }
   101   // What is the type of the value in memory?  (T_VOID means "unspecified".)
   102   virtual BasicType memory_type() const = 0;
   103   virtual int memory_size() const {
   104 #ifdef ASSERT
   105     return type2aelembytes(memory_type(), true);
   106 #else
   107     return type2aelembytes(memory_type());
   108 #endif
   109   }
   111   // Search through memory states which precede this node (load or store).
   112   // Look for an exact match for the address, with no intervening
   113   // aliased stores.
   114   Node* find_previous_store(PhaseTransform* phase);
   116   // Can this node (load or store) accurately see a stored value in
   117   // the given memory state?  (The state may or may not be in(Memory).)
   118   Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;
   120 #ifndef PRODUCT
   121   static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
   122   virtual void dump_spec(outputStream *st) const;
   123 #endif
   124 };
   126 //------------------------------LoadNode---------------------------------------
   127 // Load value; requires Memory and Address
   128 class LoadNode : public MemNode {
   129 protected:
   130   virtual uint cmp( const Node &n ) const;
   131   virtual uint size_of() const; // Size is bigger
   132   const Type* const _type;      // What kind of value is loaded?
   133 public:
   135   LoadNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt )
   136     : MemNode(c,mem,adr,at), _type(rt) {
   137     init_class_id(Class_Load);
   138   }
   140   // Polymorphic factory method:
   141   static Node* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
   142                      const TypePtr* at, const Type *rt, BasicType bt );
   144   virtual uint hash()   const;  // Check the type
   146   // Handle algebraic identities here.  If we have an identity, return the Node
   147   // we are equivalent to.  We look for Load of a Store.
   148   virtual Node *Identity( PhaseTransform *phase );
   150   // If the load is from Field memory and the pointer is non-null, we can
   151   // zero out the control input.
   152   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   154   // Split instance field load through Phi.
   155   Node* split_through_phi(PhaseGVN *phase);
   157   // Recover original value from boxed values
   158   Node *eliminate_autobox(PhaseGVN *phase);
   160   // Compute a new Type for this node.  Basically we just do the pre-check,
   161   // then call the virtual add() to set the type.
   162   virtual const Type *Value( PhaseTransform *phase ) const;
   164   // Common methods for LoadKlass and LoadNKlass nodes.
   165   const Type *klass_value_common( PhaseTransform *phase ) const;
   166   Node *klass_identity_common( PhaseTransform *phase );
   168   virtual uint ideal_reg() const;
   169   virtual const Type *bottom_type() const;
   170   // Following method is copied from TypeNode:
   171   void set_type(const Type* t) {
   172     assert(t != NULL, "sanity");
   173     debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
   174     *(const Type**)&_type = t;   // cast away const-ness
   175     // If this node is in the hash table, make sure it doesn't need a rehash.
   176     assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
   177   }
   178   const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
   180   // Do not match memory edge
   181   virtual uint match_edge(uint idx) const;
   183   // Map a load opcode to its corresponding store opcode.
   184   virtual int store_Opcode() const = 0;
   186   // Check if the load's memory input is a Phi node with the same control.
   187   bool is_instance_field_load_with_local_phi(Node* ctrl);
   189 #ifndef PRODUCT
   190   virtual void dump_spec(outputStream *st) const;
   191 #endif
   192 protected:
   193   const Type* load_array_final_field(const TypeKlassPtr *tkls,
   194                                      ciKlass* klass) const;
   195 };
   197 //------------------------------LoadBNode--------------------------------------
   198 // Load a byte (8 bits signed) from memory
   199 class LoadBNode : public LoadNode {
   200 public:
   201   LoadBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE )
   202     : LoadNode(c,mem,adr,at,ti) {}
   203   virtual int Opcode() const;
   204   virtual uint ideal_reg() const { return Op_RegI; }
   205   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   206   virtual int store_Opcode() const { return Op_StoreB; }
   207   virtual BasicType memory_type() const { return T_BYTE; }
   208 };
   210 //------------------------------LoadUSNode-------------------------------------
   211 // Load an unsigned short/char (16 bits unsigned) from memory
   212 class LoadUSNode : public LoadNode {
   213 public:
   214   LoadUSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
   215     : LoadNode(c,mem,adr,at,ti) {}
   216   virtual int Opcode() const;
   217   virtual uint ideal_reg() const { return Op_RegI; }
   218   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   219   virtual int store_Opcode() const { return Op_StoreC; }
   220   virtual BasicType memory_type() const { return T_CHAR; }
   221 };
   223 //------------------------------LoadINode--------------------------------------
   224 // Load an integer from memory
   225 class LoadINode : public LoadNode {
   226 public:
   227   LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT )
   228     : LoadNode(c,mem,adr,at,ti) {}
   229   virtual int Opcode() const;
   230   virtual uint ideal_reg() const { return Op_RegI; }
   231   virtual int store_Opcode() const { return Op_StoreI; }
   232   virtual BasicType memory_type() const { return T_INT; }
   233 };
   235 //------------------------------LoadRangeNode----------------------------------
   236 // Load an array length from the array
   237 class LoadRangeNode : public LoadINode {
   238 public:
   239   LoadRangeNode( Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS )
   240     : LoadINode(c,mem,adr,TypeAryPtr::RANGE,ti) {}
   241   virtual int Opcode() const;
   242   virtual const Type *Value( PhaseTransform *phase ) const;
   243   virtual Node *Identity( PhaseTransform *phase );
   244   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   245 };
   247 //------------------------------LoadLNode--------------------------------------
   248 // Load a long from memory
   249 class LoadLNode : public LoadNode {
   250   virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
   251   virtual uint cmp( const Node &n ) const {
   252     return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
   253       && LoadNode::cmp(n);
   254   }
   255   virtual uint size_of() const { return sizeof(*this); }
   256   const bool _require_atomic_access;  // is piecewise load forbidden?
   258 public:
   259   LoadLNode( Node *c, Node *mem, Node *adr, const TypePtr* at,
   260              const TypeLong *tl = TypeLong::LONG,
   261              bool require_atomic_access = false )
   262     : LoadNode(c,mem,adr,at,tl)
   263     , _require_atomic_access(require_atomic_access)
   264   {}
   265   virtual int Opcode() const;
   266   virtual uint ideal_reg() const { return Op_RegL; }
   267   virtual int store_Opcode() const { return Op_StoreL; }
   268   virtual BasicType memory_type() const { return T_LONG; }
   269   bool require_atomic_access() { return _require_atomic_access; }
   270   static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt);
   271 #ifndef PRODUCT
   272   virtual void dump_spec(outputStream *st) const {
   273     LoadNode::dump_spec(st);
   274     if (_require_atomic_access)  st->print(" Atomic!");
   275   }
   276 #endif
   277 };
   279 //------------------------------LoadL_unalignedNode----------------------------
   280 // Load a long from unaligned memory
   281 class LoadL_unalignedNode : public LoadLNode {
   282 public:
   283   LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
   284     : LoadLNode(c,mem,adr,at) {}
   285   virtual int Opcode() const;
   286 };
   288 //------------------------------LoadFNode--------------------------------------
   289 // Load a float (32 bits) from memory
   290 class LoadFNode : public LoadNode {
   291 public:
   292   LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT )
   293     : LoadNode(c,mem,adr,at,t) {}
   294   virtual int Opcode() const;
   295   virtual uint ideal_reg() const { return Op_RegF; }
   296   virtual int store_Opcode() const { return Op_StoreF; }
   297   virtual BasicType memory_type() const { return T_FLOAT; }
   298 };
   300 //------------------------------LoadDNode--------------------------------------
   301 // Load a double (64 bits) from memory
   302 class LoadDNode : public LoadNode {
   303 public:
   304   LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE )
   305     : LoadNode(c,mem,adr,at,t) {}
   306   virtual int Opcode() const;
   307   virtual uint ideal_reg() const { return Op_RegD; }
   308   virtual int store_Opcode() const { return Op_StoreD; }
   309   virtual BasicType memory_type() const { return T_DOUBLE; }
   310 };
   312 //------------------------------LoadD_unalignedNode----------------------------
   313 // Load a double from unaligned memory
   314 class LoadD_unalignedNode : public LoadDNode {
   315 public:
   316   LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
   317     : LoadDNode(c,mem,adr,at) {}
   318   virtual int Opcode() const;
   319 };
   321 //------------------------------LoadPNode--------------------------------------
   322 // Load a pointer from memory (either object or array)
   323 class LoadPNode : public LoadNode {
   324 public:
   325   LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t )
   326     : LoadNode(c,mem,adr,at,t) {}
   327   virtual int Opcode() const;
   328   virtual uint ideal_reg() const { return Op_RegP; }
   329   virtual int store_Opcode() const { return Op_StoreP; }
   330   virtual BasicType memory_type() const { return T_ADDRESS; }
   331   // depends_only_on_test is almost always true, and needs to be almost always
   332   // true to enable key hoisting & commoning optimizations.  However, for the
   333   // special case of RawPtr loads from TLS top & end, the control edge carries
   334   // the dependence preventing hoisting past a Safepoint instead of the memory
   335   // edge.  (An unfortunate consequence of having Safepoints not set Raw
   336   // Memory; itself an unfortunate consequence of having Nodes which produce
   337   // results (new raw memory state) inside of loops preventing all manner of
   338   // other optimizations).  Basically, it's ugly but so is the alternative.
   339   // See comment in macro.cpp, around line 125 expand_allocate_common().
   340   virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
   341 };
   344 //------------------------------LoadNNode--------------------------------------
   345 // Load a narrow oop from memory (either object or array)
   346 class LoadNNode : public LoadNode {
   347 public:
   348   LoadNNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t )
   349     : LoadNode(c,mem,adr,at,t) {}
   350   virtual int Opcode() const;
   351   virtual uint ideal_reg() const { return Op_RegN; }
   352   virtual int store_Opcode() const { return Op_StoreN; }
   353   virtual BasicType memory_type() const { return T_NARROWOOP; }
   354   // depends_only_on_test is almost always true, and needs to be almost always
   355   // true to enable key hoisting & commoning optimizations.  However, for the
   356   // special case of RawPtr loads from TLS top & end, the control edge carries
   357   // the dependence preventing hoisting past a Safepoint instead of the memory
   358   // edge.  (An unfortunate consequence of having Safepoints not set Raw
   359   // Memory; itself an unfortunate consequence of having Nodes which produce
   360   // results (new raw memory state) inside of loops preventing all manner of
   361   // other optimizations).  Basically, it's ugly but so is the alternative.
   362   // See comment in macro.cpp, around line 125 expand_allocate_common().
   363   virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
   364 };
   366 //------------------------------LoadKlassNode----------------------------------
   367 // Load a Klass from an object
   368 class LoadKlassNode : public LoadPNode {
   369 public:
   370   LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk )
   371     : LoadPNode(c,mem,adr,at,tk) {}
   372   virtual int Opcode() const;
   373   virtual const Type *Value( PhaseTransform *phase ) const;
   374   virtual Node *Identity( PhaseTransform *phase );
   375   virtual bool depends_only_on_test() const { return true; }
   377   // Polymorphic factory method:
   378   static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
   379                      const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
   380 };
   382 //------------------------------LoadNKlassNode---------------------------------
   383 // Load a narrow Klass from an object.
   384 class LoadNKlassNode : public LoadNNode {
   385 public:
   386   LoadNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowOop *tk )
   387     : LoadNNode(c,mem,adr,at,tk) {}
   388   virtual int Opcode() const;
   389   virtual uint ideal_reg() const { return Op_RegN; }
   390   virtual int store_Opcode() const { return Op_StoreN; }
   391   virtual BasicType memory_type() const { return T_NARROWOOP; }
   393   virtual const Type *Value( PhaseTransform *phase ) const;
   394   virtual Node *Identity( PhaseTransform *phase );
   395   virtual bool depends_only_on_test() const { return true; }
   396 };
   399 //------------------------------LoadSNode--------------------------------------
   400 // Load a short (16 bits signed) from memory
   401 class LoadSNode : public LoadNode {
   402 public:
   403   LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
   404     : LoadNode(c,mem,adr,at,ti) {}
   405   virtual int Opcode() const;
   406   virtual uint ideal_reg() const { return Op_RegI; }
   407   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   408   virtual int store_Opcode() const { return Op_StoreC; }
   409   virtual BasicType memory_type() const { return T_SHORT; }
   410 };
   412 //------------------------------StoreNode--------------------------------------
   413 // Store value; requires Memory, Address and Value
   414 class StoreNode : public MemNode {
   415 protected:
   416   virtual uint cmp( const Node &n ) const;
   417   virtual bool depends_only_on_test() const { return false; }
   419   Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
   420   Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);
   422 public:
   423   StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val )
   424     : MemNode(c,mem,adr,at,val) {
   425     init_class_id(Class_Store);
   426   }
   427   StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store )
   428     : MemNode(c,mem,adr,at,val,oop_store) {
   429     init_class_id(Class_Store);
   430   }
   432   // Polymorphic factory method:
   433   static StoreNode* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
   434                           const TypePtr* at, Node *val, BasicType bt );
   436   virtual uint hash() const;    // Check the type
   438   // If the store is to Field memory and the pointer is non-null, we can
   439   // zero out the control input.
   440   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   442   // Compute a new Type for this node.  Basically we just do the pre-check,
   443   // then call the virtual add() to set the type.
   444   virtual const Type *Value( PhaseTransform *phase ) const;
   446   // Check for identity function on memory (Load then Store at same address)
   447   virtual Node *Identity( PhaseTransform *phase );
   449   // Do not match memory edge
   450   virtual uint match_edge(uint idx) const;
   452   virtual const Type *bottom_type() const;  // returns Type::MEMORY
   454   // Map a store opcode to its corresponding own opcode, trivially.
   455   virtual int store_Opcode() const { return Opcode(); }
   457   // Have all possible loads of the stored value been optimized away?
   458   bool value_never_loaded(PhaseTransform *phase) const;
   459 };
   461 //------------------------------StoreBNode-------------------------------------
   462 // Store byte to memory
   463 class StoreBNode : public StoreNode {
   464 public:
   465   StoreBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   466   virtual int Opcode() const;
   467   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   468   virtual BasicType memory_type() const { return T_BYTE; }
   469 };
   471 //------------------------------StoreCNode-------------------------------------
   472 // Store char/short to memory
   473 class StoreCNode : public StoreNode {
   474 public:
   475   StoreCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   476   virtual int Opcode() const;
   477   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   478   virtual BasicType memory_type() const { return T_CHAR; }
   479 };
   481 //------------------------------StoreINode-------------------------------------
   482 // Store int to memory
   483 class StoreINode : public StoreNode {
   484 public:
   485   StoreINode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   486   virtual int Opcode() const;
   487   virtual BasicType memory_type() const { return T_INT; }
   488 };
   490 //------------------------------StoreLNode-------------------------------------
   491 // Store long to memory
   492 class StoreLNode : public StoreNode {
   493   virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
   494   virtual uint cmp( const Node &n ) const {
   495     return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
   496       && StoreNode::cmp(n);
   497   }
   498   virtual uint size_of() const { return sizeof(*this); }
   499   const bool _require_atomic_access;  // is piecewise store forbidden?
   501 public:
   502   StoreLNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
   503               bool require_atomic_access = false )
   504     : StoreNode(c,mem,adr,at,val)
   505     , _require_atomic_access(require_atomic_access)
   506   {}
   507   virtual int Opcode() const;
   508   virtual BasicType memory_type() const { return T_LONG; }
   509   bool require_atomic_access() { return _require_atomic_access; }
   510   static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val);
   511 #ifndef PRODUCT
   512   virtual void dump_spec(outputStream *st) const {
   513     StoreNode::dump_spec(st);
   514     if (_require_atomic_access)  st->print(" Atomic!");
   515   }
   516 #endif
   517 };
   519 //------------------------------StoreFNode-------------------------------------
   520 // Store float to memory
   521 class StoreFNode : public StoreNode {
   522 public:
   523   StoreFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   524   virtual int Opcode() const;
   525   virtual BasicType memory_type() const { return T_FLOAT; }
   526 };
   528 //------------------------------StoreDNode-------------------------------------
   529 // Store double to memory
   530 class StoreDNode : public StoreNode {
   531 public:
   532   StoreDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   533   virtual int Opcode() const;
   534   virtual BasicType memory_type() const { return T_DOUBLE; }
   535 };
   537 //------------------------------StorePNode-------------------------------------
   538 // Store pointer to memory
   539 class StorePNode : public StoreNode {
   540 public:
   541   StorePNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   542   virtual int Opcode() const;
   543   virtual BasicType memory_type() const { return T_ADDRESS; }
   544 };
   546 //------------------------------StoreNNode-------------------------------------
   547 // Store narrow oop to memory
   548 class StoreNNode : public StoreNode {
   549 public:
   550   StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   551   virtual int Opcode() const;
   552   virtual BasicType memory_type() const { return T_NARROWOOP; }
   553 };
   555 //------------------------------StoreCMNode-----------------------------------
   556 // Store card-mark byte to memory for CM
   557 // The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
   558 // Preceding equivalent StoreCMs may be eliminated.
   559 class StoreCMNode : public StoreNode {
   560 public:
   561   StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store ) : StoreNode(c,mem,adr,at,val,oop_store) {}
   562   virtual int Opcode() const;
   563   virtual Node *Identity( PhaseTransform *phase );
   564   virtual const Type *Value( PhaseTransform *phase ) const;
   565   virtual BasicType memory_type() const { return T_VOID; } // unspecific
   566 };
   568 //------------------------------LoadPLockedNode---------------------------------
   569 // Load-locked a pointer from memory (either object or array).
   570 // On Sparc & Intel this is implemented as a normal pointer load.
   571 // On PowerPC and friends it's a real load-locked.
   572 class LoadPLockedNode : public LoadPNode {
   573 public:
   574   LoadPLockedNode( Node *c, Node *mem, Node *adr )
   575     : LoadPNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) {}
   576   virtual int Opcode() const;
   577   virtual int store_Opcode() const { return Op_StorePConditional; }
   578   virtual bool depends_only_on_test() const { return true; }
   579 };
   581 //------------------------------LoadLLockedNode---------------------------------
   582 // Load-locked a long from memory.
   583 // On Sparc & Intel this is implemented as a normal long load.
   584 class LoadLLockedNode : public LoadLNode {
   585 public:
   586   LoadLLockedNode( Node *c, Node *mem, Node *adr )
   587     : LoadLNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeLong::LONG) {}
   588   virtual int Opcode() const;
   589   virtual int store_Opcode() const { return Op_StoreLConditional; }
   590 };
   592 //------------------------------SCMemProjNode---------------------------------------
   593 // This class defines a projection of the memory state of a store conditional node.
   594 // These nodes return a value, but also update memory.
   595 class SCMemProjNode : public ProjNode {
   596 public:
   597   enum {SCMEMPROJCON = (uint)-2};
   598   SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
   599   virtual int Opcode() const;
   600   virtual bool      is_CFG() const  { return false; }
   601   virtual const Type *bottom_type() const {return Type::MEMORY;}
   602   virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
   603   virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
   604   virtual const Type *Value( PhaseTransform *phase ) const;
   605 #ifndef PRODUCT
   606   virtual void dump_spec(outputStream *st) const {};
   607 #endif
   608 };
   610 //------------------------------LoadStoreNode---------------------------
   611 // Note: is_Mem() method returns 'true' for this class.
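       // Base class for the conditional-store and compare-and-swap nodes below.
       // The extra ExpectedIn edge carries the value being compared against
       // (the prior load-locked result, or the expected old value for a CAS).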
   612 class LoadStoreNode : public Node {
   613 public:
   614   enum {
   615     ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
   616   };
   617   LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex);
   618   virtual bool depends_only_on_test() const { return false; }
   619   virtual const Type *bottom_type() const { return TypeInt::BOOL; }
   620   virtual uint ideal_reg() const { return Op_RegI; }
   621   virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
   622 };
   624 //------------------------------StorePConditionalNode---------------------------
   625 // Conditionally store pointer to memory, if no change since prior
   626 // load-locked.  Sets flags for success or failure of the store.
   627 class StorePConditionalNode : public LoadStoreNode {
   628 public:
   629   StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
   630   virtual int Opcode() const;
   631   // Produces flags
   632   virtual uint ideal_reg() const { return Op_RegFlags; }
   633 };
   635 //------------------------------StoreIConditionalNode---------------------------
   636 // Conditionally store int to memory, if no change since prior
   637 // load-locked.  Sets flags for success or failure of the store.
   638 class StoreIConditionalNode : public LoadStoreNode {
   639 public:
   640   StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreNode(c, mem, adr, val, ii) { }
   641   virtual int Opcode() const;
   642   // Produces flags
   643   virtual uint ideal_reg() const { return Op_RegFlags; }
   644 };
   646 //------------------------------StoreLConditionalNode---------------------------
   647 // Conditionally store long to memory, if no change since prior
   648 // load-locked.  Sets flags for success or failure of the store.
   649 class StoreLConditionalNode : public LoadStoreNode {
   650 public:
   651   StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
   652   virtual int Opcode() const;
   653   // Produces flags
   654   virtual uint ideal_reg() const { return Op_RegFlags; }
   655 };
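       // The CompareAndSwap{L,I,P,N} nodes below atomically compare the value in
       // memory at 'adr' against the expected value 'ex' and, if they match,
       // store 'val'.  Like their LoadStoreNode base, they produce TypeInt::BOOL
       // indicating whether the exchange succeeded.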
   658 //------------------------------CompareAndSwapLNode---------------------------
   659 class CompareAndSwapLNode : public LoadStoreNode {
   660 public:
   661   CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
   662   virtual int Opcode() const;
   663 };
   666 //------------------------------CompareAndSwapINode---------------------------
   667 class CompareAndSwapINode : public LoadStoreNode {
   668 public:
   669   CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
   670   virtual int Opcode() const;
   671 };
   674 //------------------------------CompareAndSwapPNode---------------------------
   675 class CompareAndSwapPNode : public LoadStoreNode {
   676 public:
   677   CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
   678   virtual int Opcode() const;
   679 };
   681 //------------------------------CompareAndSwapNNode---------------------------
   682 class CompareAndSwapNNode : public LoadStoreNode {
   683 public:
   684   CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
   685   virtual int Opcode() const;
   686 };
   688 //------------------------------ClearArray-------------------------------------
   689 class ClearArrayNode: public Node {
   690 public:
   691   ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base ) : Node(ctrl,arymem,word_cnt,base) {}
   692   virtual int         Opcode() const;
   693   virtual const Type *bottom_type() const { return Type::MEMORY; }
   694   // ClearArray modifies array elements, and so affects only the
   695   // array memory addressed by the bottom_type of its base address.
   696   virtual const class TypePtr *adr_type() const;
   697   virtual Node *Identity( PhaseTransform *phase );
   698   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   699   virtual uint match_edge(uint idx) const;
   701   // Clear the given area of an object or array.
   702   // The start offset must always be aligned mod BytesPerInt.
   703   // The end offset must always be aligned mod BytesPerLong.
   704   // Return the new memory.
   705   static Node* clear_memory(Node* control, Node* mem, Node* dest,
   706                             intptr_t start_offset,
   707                             intptr_t end_offset,
   708                             PhaseGVN* phase);
   709   static Node* clear_memory(Node* control, Node* mem, Node* dest,
   710                             intptr_t start_offset,
   711                             Node* end_offset,
   712                             PhaseGVN* phase);
   713   static Node* clear_memory(Node* control, Node* mem, Node* dest,
   714                             Node* start_offset,
   715                             Node* end_offset,
   716                             PhaseGVN* phase);
   717 };
   719 //------------------------------StrComp-------------------------------------
   720 class StrCompNode: public Node {
   721 public:
   722   StrCompNode(Node *control,
   723               Node* char_array_mem,
   724               Node* value_mem,
   725               Node* count_mem,
   726               Node* offset_mem,
   727               Node* s1, Node* s2): Node(control,
   728                                         char_array_mem,
   729                                         value_mem,
   730                                         count_mem,
   731                                         offset_mem,
   732                                         s1, s2) {};
   733   virtual int Opcode() const;
   734   virtual bool depends_only_on_test() const { return false; }
   735   virtual const Type* bottom_type() const { return TypeInt::INT; }
   736   // a StrCompNode (conservatively) aliases with everything:
   737   virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
   738   virtual uint match_edge(uint idx) const;
   739   virtual uint ideal_reg() const { return Op_RegI; }
   740   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   741 };
   743 //------------------------------AryEq---------------------------------------
   744 class AryEqNode: public Node {
   745 public:
   746   AryEqNode(Node *control, Node* s1, Node* s2): Node(control, s1, s2) {};
   747   virtual int Opcode() const;
   748   virtual bool depends_only_on_test() const { return false; }
   749   virtual const Type* bottom_type() const { return TypeInt::BOOL; }
   750   virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
   751   virtual uint ideal_reg() const { return Op_RegI; }
   752   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   753 };
   755 //------------------------------MemBar-----------------------------------------
   756 // There are different flavors of Memory Barriers to match the Java Memory
   757 // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
   758 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
   759 // volatile-load.  Monitor-exit and volatile-store act as Release: no
   760 // preceding ref can be moved to after them.  We insert a MemBar-Release
   761 // before a FastUnlock or volatile-store.  All volatiles need to be
   762 // serialized, so we follow all volatile-stores with a MemBar-Volatile to
   763 // separate them from any following volatile-load.
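       // For example, following the rules above, a volatile load is followed by a
       // MemBarAcquire, while a volatile store is preceded by a MemBarRelease and
       // followed by a MemBarVolatile:
       //   volatile load:   LoadX;  MemBarAcquire
       //   volatile store:  MemBarRelease;  StoreX;  MemBarVolatile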
   764 class MemBarNode: public MultiNode {
   765   virtual uint hash() const ;                  // { return NO_HASH; }
   766   virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
   768   virtual uint size_of() const { return sizeof(*this); }
   769   // Memory type this node is serializing.  Usually either rawptr or bottom.
   770   const TypePtr* _adr_type;
   772 public:
   773   enum {
   774     Precedent = TypeFunc::Parms  // optional edge to force precedence
   775   };
   776   MemBarNode(Compile* C, int alias_idx, Node* precedent);
   777   virtual int Opcode() const = 0;
   778   virtual const class TypePtr *adr_type() const { return _adr_type; }
   779   virtual const Type *Value( PhaseTransform *phase ) const;
   780   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   781   virtual uint match_edge(uint idx) const { return 0; }
   782   virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
   783   virtual Node *match( const ProjNode *proj, const Matcher *m );
   784   // Factory method.  Builds a wide or narrow membar.
   785   // Optional 'precedent' becomes an extra edge if not null.
   786   static MemBarNode* make(Compile* C, int opcode,
   787                           int alias_idx = Compile::AliasIdxBot,
   788                           Node* precedent = NULL);
   789 };
   791 // "Acquire" - no following ref can move before (but earlier refs can
   792 // follow, like an early Load stalled in cache).  Requires multi-cpu
   793 // visibility.  Inserted after a volatile load or FastLock.
   794 class MemBarAcquireNode: public MemBarNode {
   795 public:
   796   MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
   797     : MemBarNode(C, alias_idx, precedent) {}
   798   virtual int Opcode() const;
   799 };
   801 // "Release" - no earlier ref can move after (but later refs can move
   802 // up, like a speculative pipelined cache-hitting Load).  Requires
   803 // multi-cpu visibility.  Inserted before a volatile store or FastUnlock.
   804 class MemBarReleaseNode: public MemBarNode {
   805 public:
   806   MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
   807     : MemBarNode(C, alias_idx, precedent) {}
   808   virtual int Opcode() const;
   809 };
   811 // Ordering between a volatile store and a following volatile load.
   812 // Requires multi-CPU visibility?
   813 class MemBarVolatileNode: public MemBarNode {
   814 public:
   815   MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
   816     : MemBarNode(C, alias_idx, precedent) {}
   817   virtual int Opcode() const;
   818 };
   820 // Ordering within the same CPU.  Used to order unsafe memory references
   821 // inside the compiler when we lack alias info.  Not needed "outside" the
   822 // compiler because the CPU does all the ordering for us.
   823 class MemBarCPUOrderNode: public MemBarNode {
   824 public:
   825   MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
   826     : MemBarNode(C, alias_idx, precedent) {}
   827   virtual int Opcode() const;
   828   virtual uint ideal_reg() const { return 0; } // not matched in the AD file
   829 };
   831 // Isolation of object setup after an AllocateNode and before next safepoint.
   832 // (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
   833 class InitializeNode: public MemBarNode {
   834   friend class AllocateNode;
   836   bool _is_complete;
   838 public:
   839   enum {
   840     Control    = TypeFunc::Control,
   841     Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
   842     RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
   843     RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
   844   };
   846   InitializeNode(Compile* C, int adr_type, Node* rawoop);
   847   virtual int Opcode() const;
   848   virtual uint size_of() const { return sizeof(*this); }
   849   virtual uint ideal_reg() const { return 0; } // not matched in the AD file
   850   virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress
   852   // Manage incoming memory edges via a MergeMem on in(Memory):
   853   Node* memory(uint alias_idx);
   855   // The raw memory edge coming directly from the Allocation.
   856   // The contents of this memory are *always* all-zero-bits.
   857   Node* zero_memory() { return memory(Compile::AliasIdxRaw); }
   859   // Return the corresponding allocation for this initialization (or null if none).
   860   // (Note: Both InitializeNode::allocation and AllocateNode::initialization
   861   // are defined in graphKit.cpp, which sets up the bidirectional relation.)
   862   AllocateNode* allocation();
   864   // Anything other than zeroing in this init?
   865   bool is_non_zero();
   867   // An InitializeNode must be completed before macro expansion is done.
   868   // Completion requires that the AllocateNode be followed by
   869   // initialization of the new memory to zero, then by any initializers.
   870   bool is_complete() { return _is_complete; }
   872   // Mark complete.  (Must not yet be complete.)
   873   void set_complete(PhaseGVN* phase);
   875 #ifdef ASSERT
   876   // ensure all non-degenerate stores are ordered and non-overlapping
   877   bool stores_are_sane(PhaseTransform* phase);
   878 #endif //ASSERT
   880   // See if this store can be captured; return offset where it initializes.
   881   // Return 0 if the store cannot be moved (any sort of problem).
   882   intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase);
   884   // Capture another store; reformat it to write my internal raw memory.
   885   // Return the captured copy, else NULL if there is some sort of problem.
   886   Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase);
   888   // Find captured store which corresponds to the range [start..start+size).
   889   // Return my own memory projection (meaning the initial zero bits)
   890   // if there is no such store.  Return NULL if there is a problem.
   891   Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);
   893   // Called when the associated AllocateNode is expanded into CFG.
   894   Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
   895                         intptr_t header_size, Node* size_in_bytes,
   896                         PhaseGVN* phase);
   898  private:
   899   void remove_extra_zeroes();
   901   // Find out where a captured store should be placed (or already is placed).
   902   int captured_store_insertion_point(intptr_t start, int size_in_bytes,
   903                                      PhaseTransform* phase);
   905   static intptr_t get_store_offset(Node* st, PhaseTransform* phase);
   907   Node* make_raw_address(intptr_t offset, PhaseTransform* phase);
   909   bool detect_init_independence(Node* n, bool st_is_pinned, int& count);
   911   void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
   912                                PhaseGVN* phase);
   914   intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
   915 };
   917 //------------------------------MergeMem---------------------------------------
   918 // (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
   919 class MergeMemNode: public Node {
   920   virtual uint hash() const ;                  // { return NO_HASH; }
   921   virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
   922   friend class MergeMemStream;
   923   MergeMemNode(Node* def);  // clients use MergeMemNode::make
   925 public:
   926   // If the input is a whole memory state, clone it with all its slices intact.
   927   // Otherwise, make a new memory state with just that base memory input.
   928   // In either case, the result is a newly created MergeMem.
   929   static MergeMemNode* make(Compile* C, Node* base_memory);
   931   virtual int Opcode() const;
   932   virtual Node *Identity( PhaseTransform *phase );
   933   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   934   virtual uint ideal_reg() const { return NotAMachineReg; }
   935   virtual uint match_edge(uint idx) const { return 0; }
   936   virtual const RegMask &out_RegMask() const;
   937   virtual const Type *bottom_type() const { return Type::MEMORY; }
   938   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
   939   // sparse accessors
   940   // Fetch the previously stored "set_memory_at", or else the base memory.
   941   // (Caller should clone it if it is a phi-nest.)
   942   Node* memory_at(uint alias_idx) const;
   943   // set the memory, regardless of its previous value
   944   void set_memory_at(uint alias_idx, Node* n);
   945   // the "base" is the memory that provides the non-finite support
   946   Node* base_memory() const       { return in(Compile::AliasIdxBot); }
   947   // warning: setting the base can implicitly set any of the other slices too
   948   void set_base_memory(Node* def);
   949   // sentinel value which denotes a copy of the base memory:
   950   Node*   empty_memory() const    { return in(Compile::AliasIdxTop); }
   951   static Node* make_empty_memory(); // where the sentinel comes from
   952   bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
   953   // hook for the iterator, to perform any necessary setup
   954   void iteration_setup(const MergeMemNode* other = NULL);
   955   // push sentinels until I am at least as long as the other (semantic no-op)
   956   void grow_to_match(const MergeMemNode* other);
   957   bool verify_sparse() const PRODUCT_RETURN0;
   958 #ifndef PRODUCT
   959   virtual void dump_spec(outputStream *st) const;
   960 #endif
   961 };
   963 class MergeMemStream : public StackObj {
   964  private:
   965   MergeMemNode*       _mm;
   966   const MergeMemNode* _mm2;  // optional second guy, contributes non-empty iterations
   967   Node*               _mm_base;  // loop-invariant base memory of _mm
   968   int                 _idx;
   969   int                 _cnt;
   970   Node*               _mem;
   971   Node*               _mem2;
   972   int                 _cnt2;
   974   void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
   975     // subsume_node will break sparseness at times, whenever a memory slice
   976     // folds down to a copy of the base ("fat") memory.  In such a case,
   977     // the raw edge will update to base, although it should be top.
   978     // This iterator will recognize either top or base_memory as an
   979     // "empty" slice.  See is_empty, is_empty2, and next below.
   980     //
   981     // The sparseness property is repaired in MergeMemNode::Ideal.
   982     // As long as access to a MergeMem goes through this iterator
   983     // or the memory_at accessor, flaws in the sparseness will
   984     // never be observed.
   985     //
   986     // Also, iteration_setup repairs sparseness.
   987     assert(mm->verify_sparse(), "please, no dups of base");
   988     assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");
   990     _mm  = mm;
   991     _mm_base = mm->base_memory();
   992     _mm2 = mm2;
   993     _cnt = mm->req();
   994     _idx = Compile::AliasIdxBot-1; // start at the base memory
   995     _mem = NULL;
   996     _mem2 = NULL;
   997   }
   999 #ifdef ASSERT
  1000   Node* check_memory() const {
  1001     if (at_base_memory())
  1002       return _mm->base_memory();
  1003     else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
  1004       return _mm->memory_at(_idx);
  1005     else
  1006       return _mm_base;
  1007   }
  1008   Node* check_memory2() const {
  1009     return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  1010   }
  1011 #endif
  1013   static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  1014   void assert_synch() const {
  1015     assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
  1016            "no side-effects except through the stream");
  1017   }
  1019  public:
  1021   // expected usages:
  1022   // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
  1023   // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }
  1025   // iterate over one merge
  1026   MergeMemStream(MergeMemNode* mm) {
  1027     mm->iteration_setup();
  1028     init(mm);
  1029     debug_only(_cnt2 = 999);
  1030   }
  1031   // iterate in parallel over two merges
  1032   // only iterates through non-empty elements of mm2
  1033   MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
  1034     assert(mm2, "second argument must be a MergeMem also");
  1035     ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
  1036     mm->iteration_setup(mm2);
  1037     init(mm, mm2);
  1038     _cnt2 = mm2->req();
  1039   }
  1040 #ifdef ASSERT
  1041   ~MergeMemStream() {
  1042     assert_synch();
  1043   }
  1044 #endif
  1046   MergeMemNode* all_memory() const {
  1047     return _mm;
  1048   }
  1049   Node* base_memory() const {
  1050     assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
  1051     return _mm_base;
  1052   }
  1053   const MergeMemNode* all_memory2() const {
  1054     assert(_mm2 != NULL, "");
  1055     return _mm2;
  1056   }
  1057   bool at_base_memory() const {
  1058     return _idx == Compile::AliasIdxBot;
  1059   }
  1060   int alias_idx() const {
  1061     assert(_mem, "must call next 1st");
  1062     return _idx;
  1063   }
  1065   const TypePtr* adr_type() const {
  1066     return Compile::current()->get_adr_type(alias_idx());
  1067   }
  1069   const TypePtr* adr_type(Compile* C) const {
  1070     return C->get_adr_type(alias_idx());
  1071   }
  1072   bool is_empty() const {
  1073     assert(_mem, "must call next 1st");
  1074     assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
  1075     return _mem->is_top();
  1076   }
  1077   bool is_empty2() const {
  1078     assert(_mem2, "must call next 1st");
  1079     assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
  1080     return _mem2->is_top();
  1081   }
  1082   Node* memory() const {
  1083     assert(!is_empty(), "must not be empty");
  1084     assert_synch();
  1085     return _mem;
  1086   }
  1087   // get the current memory, regardless of empty or non-empty status
  1088   Node* force_memory() const {
  1089     assert(!is_empty() || !at_base_memory(), "");
  1090     // Use _mm_base to defend against updates to _mem->base_memory().
  1091     Node *mem = _mem->is_top() ? _mm_base : _mem;
  1092     assert(mem == check_memory(), "");
  1093     return mem;
  1094   }
  1095   Node* memory2() const {
  1096     assert(_mem2 == check_memory2(), "");
  1097     return _mem2;
  1098   }
  1099   void set_memory(Node* mem) {
  1100     if (at_base_memory()) {
  1101       // Note that this does not change the invariant _mm_base.
  1102       _mm->set_base_memory(mem);
  1103     } else {
  1104       _mm->set_memory_at(_idx, mem);
  1105     }
  1106     _mem = mem;
  1107     assert_synch();
  1108   }
  1110   // Recover from a side effect to the MergeMemNode.
  1111   void set_memory() {
  1112     _mem = _mm->in(_idx);
  1113   }
  1115   bool next()  { return next(false); }
  1116   bool next2() { return next(true); }
  1118   bool next_non_empty()  { return next_non_empty(false); }
  1119   bool next_non_empty2() { return next_non_empty(true); }
  1120   // next_non_empty2 can yield states where is_empty() is true
  1122  private:
  1123   // find the next item, which might be empty
  1124   bool next(bool have_mm2) {
  1125     assert((_mm2 != NULL) == have_mm2, "use other next");
  1126     assert_synch();
  1127     if (++_idx < _cnt) {
  1128       // Note:  This iterator allows _mm to be non-sparse.
  1129       // It behaves the same whether _mem is top or base_memory.
  1130       _mem = _mm->in(_idx);
  1131       if (have_mm2)
  1132         _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
  1133       return true;
  1134     }
  1135     return false;
  1136   }
  1138   // find the next non-empty item
  1139   bool next_non_empty(bool have_mm2) {
  1140     while (next(have_mm2)) {
  1141       if (!is_empty()) {
  1142         // make sure _mem2 is filled in sensibly
  1143         if (have_mm2 && _mem2->is_top())  _mem2 = _mm2->base_memory();
  1144         return true;
  1145       } else if (have_mm2 && !is_empty2()) {
  1146         return true;   // is_empty() == true
  1147       }
  1148     }
  1149     return false;
  1150   }
  1151 };
  1153 //------------------------------Prefetch---------------------------------------
  1155 // Non-faulting prefetch load.  Prefetch for many reads.
  1156 class PrefetchReadNode : public Node {
  1157 public:
  1158   PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  1159   virtual int Opcode() const;
  1160   virtual uint ideal_reg() const { return NotAMachineReg; }
  1161   virtual uint match_edge(uint idx) const { return idx==2; }
  1162   virtual const Type *bottom_type() const { return Type::ABIO; }
  1163 };
  1165 // Non-faulting prefetch load.  Prefetch for many reads & many writes.
  1166 class PrefetchWriteNode : public Node {
  1167 public:
  1168   PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  1169   virtual int Opcode() const;
  1170   virtual uint ideal_reg() const { return NotAMachineReg; }
  1171   virtual uint match_edge(uint idx) const { return idx==2; }
  1172   virtual const Type *bottom_type() const { return Type::ABIO; }
  1173 };
