src/share/vm/opto/memnode.hpp

author:      zgu
date:        Thu, 28 Jun 2012 17:03:16 -0400
changeset:   3900:d2a62e0f25eb
parent:      3846:8b0a4867acf0
child:       4106:7eca5de9e0b6
permissions: -rw-r--r--

6995781: Native Memory Tracking (Phase 1)
7151532: DCmd for hotspot native memory tracking
Summary: Implementation of native memory tracking phase 1, which tracks VM native memory usage, and related DCmd
Reviewed-by: acorn, coleenp, fparain

/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_MEMNODE_HPP
#define SHARE_VM_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
protected:
#ifdef ASSERT
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
#endif
  virtual uint size_of() const; // Size is bigger (ASSERT only)
public:
  enum { Control,               // When is it safe to do this load?
         Memory,                // Chunk of memory is being loaded from
         Address,               // Actually address, derived from base
         ValueIn,               // Value to store
         OopStore               // Preceding oop store, only in StoreCM
  };
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2   ) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  // Find any cast-away of null-ness and keep its control.
  static  Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};
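
// Example (editor's note, a minimal sketch): the enum above names the fixed
// input slots of every MemNode, so clients index inputs symbolically.
// Assuming some MemNode* `n` is at hand:
//
//   Node* ctl = n->in(MemNode::Control);   // safety dependence (may be NULL)
//   Node* mem = n->in(MemNode::Memory);    // memory state being consumed
//   Node* adr = n->in(MemNode::Address);   // address being accessed
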
//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;      // What kind of value is loaded?
public:

  LoadNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt )
    : MemNode(c,mem,adr,at), _type(rt) {
    init_class_id(Class_Load);
  }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                     const TypePtr* at, const Type *rt, BasicType bt );

  virtual uint hash()   const;  // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node *Identity( PhaseTransform *phase );

  // If the load is from Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type *klass_value_common( PhaseTransform *phase ) const;
  Node *klass_identity_common( PhaseTransform *phase );

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;
};
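
// Example (editor's note, a minimal sketch of the factory above): creating an
// int load via LoadNode::make and registering it with GVN. `gvn`, `ctl`,
// `mem`, `adr`, and the alias type `at` are assumed to already exist:
//
//   Node* ld = LoadNode::make(gvn, ctl, mem, adr, at, TypeInt::INT, T_INT);
//   ld = gvn.transform(ld);   // value-number and possibly fold the node
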
//------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits, signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits, unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti = TypeInt::UBYTE )
    : LoadNode(c, mem, adr, at, ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16 bits, unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16 bits, signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadUI2LNode-----------------------------------
// Load an unsigned integer into long from memory
class LoadUI2LNode : public LoadNode {
public:
  LoadUI2LNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeLong* t = TypeLong::UINT)
    : LoadNode(c, mem, adr, at, t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode( Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS )
    : LoadINode(c,mem,adr,TypeAryPtr::RANGE,ti) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode( Node *c, Node *mem, Node *adr, const TypePtr* at,
             const TypeLong *tl = TypeLong::LONG,
             bool require_atomic_access = false )
    : LoadNode(c,mem,adr,at,tl)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};
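
// Example (editor's note, a hedged sketch): on platforms where a plain 64-bit
// load may be split into two 32-bit loads, clients can request an indivisible
// load via the factory above. `C`, `ctl`, `mem`, `adr`, and `adr_type` are
// assumed to already exist:
//
//   LoadLNode* ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type,
//                                          TypeLong::LONG);
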
//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadLNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
public:
  LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadDNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, the control edge carries
  // the dependence preventing hoisting past a Safepoint instead of the memory
  // edge.  (An unfortunate consequence of having Safepoints not set Raw
  // Memory; itself an unfortunate consequence of having Nodes which produce
  // results (new raw memory state) inside of loops preventing all manner of
  // other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, the control edge carries
  // the dependence preventing hoisting past a Safepoint instead of the memory
  // edge.  (An unfortunate consequence of having Safepoints not set Raw
  // Memory; itself an unfortunate consequence of having Nodes which produce
  // results (new raw memory state) inside of loops preventing all manner of
  // other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
public:
  LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk )
    : LoadPNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
                     const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
};
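
// Example (editor's note, a minimal sketch): loading an object's klass
// pointer via the factory above. `gvn`, an immutable memory state
// `immutable_mem`, and an address `adr` of the klass field are assumed:
//
//   Node* k = LoadKlassNode::make(gvn, immutable_mem, adr, TypeInstPtr::KLASS);
//   k = gvn.transform(k);
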
//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowOop *tk )
    : LoadNNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }

  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }
};


//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);

public:
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val )
    : MemNode(c,mem,adr,at,val) {
    init_class_id(Class_Store);
  }
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store )
    : MemNode(c,mem,adr,at,val,oop_store) {
    init_class_id(Class_Store);
  }

  // Polymorphic factory method:
  static StoreNode* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                          const TypePtr* at, Node *val, BasicType bt );

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node *Identity( PhaseTransform *phase );

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // Have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;
};
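
// Example (editor's note, a minimal sketch of the factory above): creating an
// int store and registering it with GVN; the transformed store then becomes
// the new memory state along this alias slice. `gvn`, `ctl`, `mem`, `adr`,
// `at`, and a value node `val` are assumed:
//
//   StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, at, val, T_INT);
//   Node* new_mem = gvn.transform(st);
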
//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
              bool require_atomic_access = false )
    : StoreNode(c,mem,adr,at,val)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
public:
  StoreDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
 private:
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual uint cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;   // The alias_idx of OopStore

public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c,mem,adr,at,val,oop_store),
    _oop_alias_idx(oop_alias_idx) {
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode( Node *c, Node *mem, Node *adr )
    : LoadPNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool      is_CFG() const  { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type *Value( PhaseTransform *phase ) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex);
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type *bottom_type() const { return TypeInt::BOOL; }
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
};
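
// Editor's note (a hedged sketch, not code from this header): a LoadStoreNode
// such as a compare-and-swap produces the boolean result typed TypeInt::BOOL
// above, while its updated memory state is recovered by hanging an
// SCMemProjNode (declared earlier) off the load-store node and merging that
// projection back into the memory graph. Illustrative wiring, in pseudocode:
//
//   cas  = CompareAndSwapPNode(ctl, mem, adr, newval, oldval)  // pseudocode
//   proj = SCMemProjNode(cas)   // memory view of the CAS; pseudocode
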
//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};


//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public LoadStoreNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public LoadStoreNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public LoadStoreNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public LoadStoreNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
    : Node(ctrl,arymem,word_cnt,base) {
    init_class_id(Class_ClearArray);
  }
  virtual int         Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};
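
// Example (editor's note, a minimal sketch of the first overload above):
// zeroing a freshly allocated object's body between two constant byte
// offsets. `ctl`, `mem`, `rawptr` (the raw base), and `gvn` are assumed; the
// offset names are hypothetical and must obey the alignment rules above:
//
//   Node* zmem = ClearArrayNode::clear_memory(ctl, mem, rawptr,
//                                             header_size_in_bytes, // mod BytesPerInt
//                                             end_offset_in_bytes,  // mod BytesPerLong
//                                             &gvn);
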
//------------------------------StrIntrinsic-------------------------------
// Base class for Ideal nodes used in String intrinsic code.
class StrIntrinsicNode: public Node {
public:
  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* c1, Node* s2, Node* c2):
    Node(control, char_array_mem, s1, c1, s2, c2) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2, Node* c):
    Node(control, char_array_mem, s1, s2, c) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2):
    Node(control, char_array_mem, s1, s2) {
  }

  virtual bool depends_only_on_test() const { return false; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
};

//------------------------------StrComp-------------------------------------
class StrCompNode: public StrIntrinsicNode {
public:
  StrCompNode(Node* control, Node* char_array_mem,
              Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------StrEquals-------------------------------------
class StrEqualsNode: public StrIntrinsicNode {
public:
  StrEqualsNode(Node* control, Node* char_array_mem,
                Node* s1, Node* s2, Node* c):
    StrIntrinsicNode(control, char_array_mem, s1, s2, c) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};

//------------------------------StrIndexOf-------------------------------------
class StrIndexOfNode: public StrIntrinsicNode {
public:
  StrIndexOfNode(Node* control, Node* char_array_mem,
              Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------AryEq---------------------------------------
class AryEqNode: public StrIntrinsicNode {
public:
  AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2):
    StrIntrinsicNode(control, char_array_mem, s1, s2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow each volatile-store with a MemBar-Volatile to
// separate it from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);
};
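
// Example (editor's note, a minimal sketch of the factory above): building a
// wide acquire barrier, or a volatile barrier narrowed to one alias slice
// with a precedence edge. `C`, `alias_idx`, and the store `st` are assumed;
// the Op_* opcode constants are generated for the subclasses below:
//
//   MemBarNode* mb  = MemBarNode::make(C, Op_MemBarAcquire);
//   MemBarNode* mb2 = MemBarNode::make(C, Op_MemBarVolatile, alias_idx, st);
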
// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnlock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility.
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // initialization of the new memory to zero, then to any initializers.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);

 private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, bool st_is_pinned, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};
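
// Example (editor's note, a minimal sketch of the store-capture protocol
// declared above): a store into freshly allocated memory can be folded into
// the object's initialization. `init`, `st`, and `phase` are assumed:
//
//   intptr_t off = init->can_capture_store(st, phase);
//   if (off != 0) {
//     Node* captured = init->capture_store(st, off, phase);
//     // `captured` (if non-NULL) now writes init's internal raw memory;
//     // the original store `st` can then be replaced by its memory input.
//   }
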
//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Compile* C, Node* base_memory);

  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const       { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node*   empty_memory() const    { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
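
// Example (editor's note, a minimal sketch of the factory and sparse
// accessors above): reading one alias slice of a memory state and publishing
// an updated slice. `C`, `mem`, `gvn`, `alias_idx`, and the updated store
// `new_st` are assumed:
//
//   MergeMemNode* mms = MergeMemNode::make(C, mem);
//   Node* slice = mms->memory_at(alias_idx);   // input state for this slice
//   mms->set_memory_at(alias_idx, new_st);     // install the updated slice
//   Node* new_mem = gvn.transform(mms);
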
class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;  // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  // for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); mms.next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top())  _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;   // is_empty() == true
      }
    }
    return false;
  }
};
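
// Example (editor's note, a minimal sketch expanding the "expected usages"
// comment above): walking every non-empty slice of a MergeMem and rewiring
// each slice through a transformation `fix` (a hypothetical helper):
//
//   for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) {
//     Node* slice = mms.memory();
//     mms.set_memory(fix(slice, mms.alias_idx()));
//   }
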
//------------------------------Prefetch---------------------------------------

// Non-faulting prefetch load.  Prefetch for many reads.
class PrefetchReadNode : public Node {
public:
  PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Non-faulting prefetch load.  Prefetch for many reads & many writes.
class PrefetchWriteNode : public Node {
public:
  PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Allocation prefetch which may fault; the TLAB size has to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_VM_OPTO_MEMNODE_HPP
