src/share/vm/opto/memnode.hpp

author:      goetz
date:        Mon, 06 Jan 2014 11:02:21 +0100
changeset:   6500:4345c6a92f35
parent:      6489:50fdb38839eb
child:       6503:a9becfeecd1b
permissions: -rw-r--r--

8031188: Fix for 8029015: PPC64 (part 216): opto: trap based null and range checks
Summary: Swap the Projs in the block list so that the new block is added behind the proper node.
Reviewed-by: kvn

/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_MEMNODE_HPP
#define SHARE_VM_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
protected:
#ifdef ASSERT
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
#endif
  virtual uint size_of() const; // Size is bigger (ASSERT only)
public:
  enum { Control,               // When is it safe to do this load?
         Memory,                // Chunk of memory is being loaded from
         Address,               // Actually address, derived from base
         ValueIn,               // Value to store
         OopStore               // Preceding oop store, only in StoreCM
  };
  typedef enum { unordered = 0,
                 acquire,       // Load has to acquire or be succeeded by MemBarAcquire.
                 release        // Store has to release or be preceded by MemBarRelease.
  } MemOrd;
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2   ) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  // Find any cast-away of null-ness and keep its control.
  static Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC, IA64) we distinguish
  // loads that may be freely reordered from those that must carry acquire
  // semantics to adhere to the Java specification.  The required behaviour
  // is stored in this field.
  const MemOrd _mo;

protected:
  virtual uint cmp(const Node &n) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;      // What kind of value is loaded?
public:

  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo)
    : MemNode(c,mem,adr,at), _type(rt), _mo(mo) {
    init_class_id(Class_Load);
  }
  inline bool is_unordered() const { return !is_acquire(); }
  inline bool is_acquire() const {
    assert(_mo == unordered || _mo == acquire, "unexpected");
    return _mo == acquire;
  }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                    const TypePtr* at, const Type *rt, BasicType bt, MemOrd mo);
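
  // A usage sketch (the locals are illustrative, not part of this file):
  // a client that wants an int load on a possibly weakly ordered platform
  // goes through this factory rather than a raw `new LoadINode(...)`, e.g.
  //
  //   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type,
  //                             TypeInt::INT, T_INT, MemNode::unordered);
  //
  // passing MemNode::acquire instead when the load must not be reordered
  // with subsequent accesses (e.g., a volatile read).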

  virtual uint hash()   const;  // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node *Identity( PhaseTransform *phase );

  // If the load is from Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type *klass_value_common( PhaseTransform *phase ) const;
  Node *klass_identity_common( PhaseTransform *phase );

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
  // which produce results (new raw memory state) inside of loops preventing all
  // manner of other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }

};

//------------------------------LoadBNode--------------------------------------
// Load a byte (8bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
    : LoadNode(c, mem, adr, at, ti, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo)
    : LoadNode(c, mem, adr, at, ti, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
    : LoadNode(c, mem, adr, at, ti, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
    : LoadNode(c, mem, adr, at, ti, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
    : LoadNode(c, mem, adr, at, ti, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo)
    : LoadNode(c, mem, adr, at, t, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo)
    : LoadNode(c, mem, adr, at, t, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo)
    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo)
    : LoadNode(c, mem, adr, at, t, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo)
    : LoadNode(c, mem, adr, at, t, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
public:
  LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
    : LoadPNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
                     const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
    : LoadNNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }
};


//------------------------------StoreNode--------------------------------------
// Store value; requires Store, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC, IA64) we distinguish
  // stores that may be freely reordered from those that must carry release
  // semantics to adhere to the Java specification.  The required behaviour
  // is stored in this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual uint cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
  }

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                         const TypePtr* at, Node *val, BasicType bt, MemOrd mo);
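
  // A usage sketch (illustrative locals, not a call site in this file):
  // a client storing an oop field can let release_if_reference() choose
  // the ordering, e.g.
  //
  //   MemNode::MemOrd mo = StoreNode::release_if_reference(T_OBJECT);
  //   Node* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val,
  //                              T_OBJECT, mo);
  //
  // so that on weakly ordered hardware the reference is published only
  // after the stores initializing the object it points to.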

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node *Identity( PhaseTransform *phase );

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWKLASS; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
 private:
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual uint cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;   // The alias_idx of OopStore

public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c, mem, adr, at, val, oop_store, MemNode::release),
    _oop_alias_idx(oop_alias_idx) {
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode(Node *c, Node *mem, Node *adr, MemOrd mo)
    : LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, mo) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool      is_CFG() const  { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type *Value( PhaseTransform *phase ) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address

  bool result_not_used() const;
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreConditionalNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreConditionalNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreConditionalNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};


//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public LoadStoreConditionalNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};


//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
    : Node(ctrl,arymem,word_cnt,base) {
    init_class_id(Class_ClearArray);
  }
  virtual int         Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
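
  // A small worked example of the alignment rules above (offsets are
  // hypothetical): zeroing bytes [16, 32) of a fresh allocation is legal
  // because the start offset 16 is a multiple of BytesPerInt (4) and the
  // end offset 32 is a multiple of BytesPerLong (8):
  //
  //   mem = ClearArrayNode::clear_memory(ctl, mem, dest, 16, 32, phase);
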
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};

//------------------------------StrIntrinsic-------------------------------
// Base class for Ideal nodes used in String intrinsic code.
class StrIntrinsicNode: public Node {
public:
  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* c1, Node* s2, Node* c2):
    Node(control, char_array_mem, s1, c1, s2, c2) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2, Node* c):
    Node(control, char_array_mem, s1, s2, c) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2):
    Node(control, char_array_mem, s1, s2) {
  }

  virtual bool depends_only_on_test() const { return false; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
};

//------------------------------StrComp-------------------------------------
class StrCompNode: public StrIntrinsicNode {
public:
  StrCompNode(Node* control, Node* char_array_mem,
              Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------StrEquals-------------------------------------
class StrEqualsNode: public StrIntrinsicNode {
public:
  StrEqualsNode(Node* control, Node* char_array_mem,
                Node* s1, Node* s2, Node* c):
    StrIntrinsicNode(control, char_array_mem, s1, s2, c) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};

//------------------------------StrIndexOf-------------------------------------
class StrIndexOfNode: public StrIntrinsicNode {
public:
  StrIndexOfNode(Node* control, Node* char_array_mem,
              Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------AryEq---------------------------------------
class AryEqNode: public StrIntrinsicNode {
public:
  AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2):
    StrIntrinsicNode(control, char_array_mem, s1, s2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};


//------------------------------EncodeISOArray--------------------------------
// encode char[] to byte[] in ISO_8859_1
class EncodeISOArrayNode: public Node {
public:
  EncodeISOArrayNode(Node *control, Node* arymem, Node* s1, Node* s2, Node* c): Node(control, arymem, s1, s2, c) {};
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::INT; }
  virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);
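
  // A usage sketch (illustrative, not a call site in this file): a wide
  // acquire barrier after a volatile load could be built as
  //
  //   MemBarNode* mb = MemBarNode::make(C, Op_MemBarAcquire);
  //
  // relying on the default alias_idx (Compile::AliasIdxBot) for a barrier
  // over all of memory, and on a NULL precedent edge.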
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted independent of any load, as required
// for intrinsic sun.misc.Unsafe.loadFence().
class LoadFenceNode: public MemBarNode {
public:
  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted independent of any store, as required
// for intrinsic sun.misc.Unsafe.storeFence().
class StoreFenceNode: public MemBarNode {
public:
  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnLock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  1122   // Completion requires that the AllocateNode must be followed by
  1123   // initialization of the new memory to zero, then to any initializers.
  1124   bool is_complete() { return _is_complete != Incomplete; }
  1125   bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }
  1127   // Mark complete.  (Must not yet be complete.)
  1128   void set_complete(PhaseGVN* phase);
  1129   void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }
  1131   bool does_not_escape() { return _does_not_escape; }
  1132   void set_does_not_escape() { _does_not_escape = true; }
  1134 #ifdef ASSERT
  1135   // ensure all non-degenerate stores are ordered and non-overlapping
  1136   bool stores_are_sane(PhaseTransform* phase);
  1137 #endif //ASSERT
  1139   // See if this store can be captured; return offset where it initializes.
  1140   // Return 0 if the store cannot be moved (any sort of problem).
  1141   intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape);
  1143   // Capture another store; reformat it to write my internal raw memory.
  1144   // Return the captured copy, else NULL if there is some sort of problem.
  1145   Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape);
  1147   // Find captured store which corresponds to the range [start..start+size).
  1148   // Return my own memory projection (meaning the initial zero bits)
  1149   // if there is no such store.  Return NULL if there is a problem.
  1150   Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);
  1152   // Called when the associated AllocateNode is expanded into CFG.
  1153   Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
  1154                         intptr_t header_size, Node* size_in_bytes,
  1155                         PhaseGVN* phase);
  1157  private:
  1158   void remove_extra_zeroes();
  1160   // Find out where a captured store should be placed (or already is placed).
  1161   int captured_store_insertion_point(intptr_t start, int size_in_bytes,
  1162                                      PhaseTransform* phase);
  1164   static intptr_t get_store_offset(Node* st, PhaseTransform* phase);
  1166   Node* make_raw_address(intptr_t offset, PhaseTransform* phase);
  1168   bool detect_init_independence(Node* n, int& count);
  1170   void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
  1171                                PhaseGVN* phase);
  1173   intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
  1174 };
//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Compile* C, Node* base_memory);

  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const       { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node*   empty_memory() const    { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
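  // A minimal sketch of the sparse-accessor protocol (hypothetical caller
  // code; 'C' is the current Compile and 'alias_idx' is some alias class):
  //
  //   MergeMemNode* mm = MergeMemNode::make(C, mem);  // split 'mem' into slices
  //   Node* slice = mm->memory_at(alias_idx);         // read one slice (or base)
  //   mm->set_memory_at(alias_idx, new_slice);        // overwrite just that slice
  //   // every untouched alias index continues to track base_memory()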
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;  // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }
#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif
  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:
  // expected usages:
  // for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); mms.next_non_empty2(); ) { ... }
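  //
  // A fuller (hypothetical) sketch of the two-stream form, adopting the
  // slices of a second memory state into the first one:
  //
  //   for (MergeMemStream mms(mm, mem2->as_MergeMem()); mms.next_non_empty2(); ) {
  //     Node* old_slice = mms.force_memory();  // current slice of mm (maybe base)
  //     Node* new_slice = mms.memory2();       // corresponding slice of mem2
  //     if (old_slice != new_slice)
  //       mms.set_memory(new_slice);           // overwrite just this slice of mm
  //   }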
  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif
  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true
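  //
  // Because of that caveat, a parallel loop that reads the first stream
  // should use force_memory() rather than memory(); a hypothetical sketch:
  //
  //   for (MergeMemStream mms(mm, mm2); mms.next_non_empty2(); ) {
  //     Node* cur = mms.force_memory();  // safe even when is_empty() is true
  //     Node* in2 = mms.memory2();       // never empty here, by construction
  //     ...
  //   }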
 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }
  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top())  _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;   // is_empty() == true
      }
    }
    return false;
  }
};
//------------------------------Prefetch---------------------------------------

// Non-faulting prefetch load.  Prefetch for many reads.
class PrefetchReadNode : public Node {
public:
  PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};
// Non-faulting prefetch load.  Prefetch for many reads & many writes.
class PrefetchWriteNode : public Node {
public:
  PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};
// Allocation prefetch which may fault; the TLAB size has to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};
#endif // SHARE_VM_OPTO_MEMNODE_HPP
