src/share/vm/opto/memnode.hpp

author       kvn
date         Thu, 20 Mar 2008 15:11:44 -0700
changeset    509:2a9af0b9cb1c
parent       499:b8f5ba577b02
child        520:f3b3fe64f59f
child        548:ba764ed4b6f2
permissions  -rw-r--r--

6674600: (Escape Analysis) Optimize memory graph for instance's fields
Summary: EA gives an opportunity to do more aggressive memory optimizations.
Reviewed-by: never, jrose

     1 /*
     2  * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 // Portions of code courtesy of Clifford Click
    27 class MultiNode;
    28 class PhaseCCP;
    29 class PhaseTransform;
    31 //------------------------------MemNode----------------------------------------
    32 // Load or Store, possibly throwing a NULL pointer exception
    33 class MemNode : public Node {
    34 protected:
    35 #ifdef ASSERT
    36   const TypePtr* _adr_type;     // What kind of memory is being addressed?
    37 #endif
    38   virtual uint size_of() const; // Size is bigger (ASSERT only)
    39 public:
    40   enum { Control,               // When is it safe to do this load?
    41          Memory,                // Chunk of memory is being loaded from
     42          Address,               // Actual address, derived from base
    43          ValueIn,               // Value to store
     44          OopStore               // Preceding oop store, only in StoreCM
    45   };
    46 protected:
    47   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    48     : Node(c0,c1,c2   ) {
    49     init_class_id(Class_Mem);
    50     debug_only(_adr_type=at; adr_type();)
    51   }
    52   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    53     : Node(c0,c1,c2,c3) {
    54     init_class_id(Class_Mem);
    55     debug_only(_adr_type=at; adr_type();)
    56   }
    57   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    58     : Node(c0,c1,c2,c3,c4) {
    59     init_class_id(Class_Mem);
    60     debug_only(_adr_type=at; adr_type();)
    61   }
    63 public:
    64   // Helpers for the optimizer.  Documented in memnode.cpp.
    65   static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
    66                                       Node* p2, AllocateNode* a2,
    67                                       PhaseTransform* phase);
    68   static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);
    70   static Node *optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
    71   static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
    72   // This one should probably be a phase-specific function:
    73   static bool detect_dominating_control(Node* dom, Node* sub);
     75   // Is this Node a MemNode or some descendant?  Default is YES.
    76   virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
    78   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
    80   // Shared code for Ideal methods:
    81   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.
    83   // Helper function for adr_type() implementations.
    84   static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);
    86   // Raw access function, to allow copying of adr_type efficiently in
    87   // product builds and retain the debug info for debug builds.
    88   const TypePtr *raw_adr_type() const {
    89 #ifdef ASSERT
    90     return _adr_type;
    91 #else
    92     return 0;
    93 #endif
    94   }
    96   // Map a load or store opcode to its corresponding store opcode.
    97   // (Return -1 if unknown.)
    98   virtual int store_Opcode() const { return -1; }
    100   // What is the type of the value in memory?  (T_VOID means "unspecified".)
   101   virtual BasicType memory_type() const = 0;
   102   virtual int memory_size() const {
   103 #ifdef ASSERT
   104     return type2aelembytes(memory_type(), true);
   105 #else
   106     return type2aelembytes(memory_type());
   107 #endif
   108   }
   110   // Search through memory states which precede this node (load or store).
   111   // Look for an exact match for the address, with no intervening
   112   // aliased stores.
   113   Node* find_previous_store(PhaseTransform* phase);
   115   // Can this node (load or store) accurately see a stored value in
   116   // the given memory state?  (The state may or may not be in(Memory).)
   117   Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;
   119 #ifndef PRODUCT
   120   static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
   121   virtual void dump_spec(outputStream *st) const;
   122 #endif
   123 };
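// Illustrative sketch (not part of the original header): the input enum above
// fixes the edge layout shared by loads and stores, so caller code can read a
// store's edges positionally, e.g. for an assumed StoreINode* st:
//   Node* ctl = st->in(MemNode::Control);   // condition guarding the access
//   Node* mem = st->in(MemNode::Memory);    // memory state being updated
//   Node* adr = st->in(MemNode::Address);   // address being written
//   Node* val = st->in(MemNode::ValueIn);   // value being stored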
   125 //------------------------------LoadNode---------------------------------------
   126 // Load value; requires Memory and Address
   127 class LoadNode : public MemNode {
   128 protected:
   129   virtual uint cmp( const Node &n ) const;
   130   virtual uint size_of() const; // Size is bigger
   131   const Type* const _type;      // What kind of value is loaded?
   132 public:
   134   LoadNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt )
   135     : MemNode(c,mem,adr,at), _type(rt) {
   136     init_class_id(Class_Load);
   137   }
   139   // Polymorphic factory method:
   140   static LoadNode* make( Compile *C, Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, BasicType bt );
   142   virtual uint hash()   const;  // Check the type
   144   // Handle algebraic identities here.  If we have an identity, return the Node
   145   // we are equivalent to.  We look for Load of a Store.
   146   virtual Node *Identity( PhaseTransform *phase );
   148   // If the load is from Field memory and the pointer is non-null, we can
   149   // zero out the control input.
   150   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   152   // Recover original value from boxed values
   153   Node *eliminate_autobox(PhaseGVN *phase);
   155   // Compute a new Type for this node.  Basically we just do the pre-check,
   156   // then call the virtual add() to set the type.
   157   virtual const Type *Value( PhaseTransform *phase ) const;
   159   virtual uint ideal_reg() const;
   160   virtual const Type *bottom_type() const;
   161   // Following method is copied from TypeNode:
   162   void set_type(const Type* t) {
   163     assert(t != NULL, "sanity");
   164     debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
   165     *(const Type**)&_type = t;   // cast away const-ness
   166     // If this node is in the hash table, make sure it doesn't need a rehash.
   167     assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
   168   }
   169   const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
   171   // Do not match memory edge
   172   virtual uint match_edge(uint idx) const;
   174   // Map a load opcode to its corresponding store opcode.
   175   virtual int store_Opcode() const = 0;
   177   // Check if the load's memory input is a Phi node with the same control.
   178   bool is_instance_field_load_with_local_phi(Node* ctrl);
   180 #ifndef PRODUCT
   181   virtual void dump_spec(outputStream *st) const;
   182 #endif
   183 protected:
   184   const Type* load_array_final_field(const TypeKlassPtr *tkls,
   185                                      ciKlass* klass) const;
   186 };
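// Illustrative sketch (assumed caller code, not from this file): the
// polymorphic factory above selects the concrete LoadXNode from the BasicType,
// so an int load could be created roughly as
//   Node* ld = LoadNode::make(C, ctl, mem, adr, adr_type, TypeInt::INT, T_INT);
// with C, ctl, mem, adr and adr_type supplied by the calling parser/optimizer.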
   188 //------------------------------LoadBNode--------------------------------------
    189 // Load a byte (8 bits, signed) from memory
   190 class LoadBNode : public LoadNode {
   191 public:
   192   LoadBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE )
   193     : LoadNode(c,mem,adr,at,ti) {}
   194   virtual int Opcode() const;
   195   virtual uint ideal_reg() const { return Op_RegI; }
   196   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   197   virtual int store_Opcode() const { return Op_StoreB; }
   198   virtual BasicType memory_type() const { return T_BYTE; }
   199 };
   201 //------------------------------LoadCNode--------------------------------------
    202 // Load a char (16 bits, unsigned) from memory
   203 class LoadCNode : public LoadNode {
   204 public:
   205   LoadCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
   206     : LoadNode(c,mem,adr,at,ti) {}
   207   virtual int Opcode() const;
   208   virtual uint ideal_reg() const { return Op_RegI; }
   209   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   210   virtual int store_Opcode() const { return Op_StoreC; }
   211   virtual BasicType memory_type() const { return T_CHAR; }
   212 };
   214 //------------------------------LoadINode--------------------------------------
   215 // Load an integer from memory
   216 class LoadINode : public LoadNode {
   217 public:
   218   LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT )
   219     : LoadNode(c,mem,adr,at,ti) {}
   220   virtual int Opcode() const;
   221   virtual uint ideal_reg() const { return Op_RegI; }
   222   virtual int store_Opcode() const { return Op_StoreI; }
   223   virtual BasicType memory_type() const { return T_INT; }
   224 };
   226 //------------------------------LoadRangeNode----------------------------------
   227 // Load an array length from the array
   228 class LoadRangeNode : public LoadINode {
   229 public:
   230   LoadRangeNode( Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS )
   231     : LoadINode(c,mem,adr,TypeAryPtr::RANGE,ti) {}
   232   virtual int Opcode() const;
   233   virtual const Type *Value( PhaseTransform *phase ) const;
   234   virtual Node *Identity( PhaseTransform *phase );
   235 };
   237 //------------------------------LoadLNode--------------------------------------
   238 // Load a long from memory
   239 class LoadLNode : public LoadNode {
   240   virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
   241   virtual uint cmp( const Node &n ) const {
   242     return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
   243       && LoadNode::cmp(n);
   244   }
   245   virtual uint size_of() const { return sizeof(*this); }
   246   const bool _require_atomic_access;  // is piecewise load forbidden?
   248 public:
   249   LoadLNode( Node *c, Node *mem, Node *adr, const TypePtr* at,
   250              const TypeLong *tl = TypeLong::LONG,
   251              bool require_atomic_access = false )
   252     : LoadNode(c,mem,adr,at,tl)
   253     , _require_atomic_access(require_atomic_access)
   254   {}
   255   virtual int Opcode() const;
   256   virtual uint ideal_reg() const { return Op_RegL; }
   257   virtual int store_Opcode() const { return Op_StoreL; }
   258   virtual BasicType memory_type() const { return T_LONG; }
   259   bool require_atomic_access() { return _require_atomic_access; }
   260   static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt);
   261 #ifndef PRODUCT
   262   virtual void dump_spec(outputStream *st) const {
   263     LoadNode::dump_spec(st);
   264     if (_require_atomic_access)  st->print(" Atomic!");
   265   }
   266 #endif
   267 };
   269 //------------------------------LoadL_unalignedNode----------------------------
   270 // Load a long from unaligned memory
   271 class LoadL_unalignedNode : public LoadLNode {
   272 public:
   273   LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
   274     : LoadLNode(c,mem,adr,at) {}
   275   virtual int Opcode() const;
   276 };
   278 //------------------------------LoadFNode--------------------------------------
    279 // Load a float (32 bits) from memory
   280 class LoadFNode : public LoadNode {
   281 public:
   282   LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT )
   283     : LoadNode(c,mem,adr,at,t) {}
   284   virtual int Opcode() const;
   285   virtual uint ideal_reg() const { return Op_RegF; }
   286   virtual int store_Opcode() const { return Op_StoreF; }
   287   virtual BasicType memory_type() const { return T_FLOAT; }
   288 };
   290 //------------------------------LoadDNode--------------------------------------
   291 // Load a double (64 bits) from memory
   292 class LoadDNode : public LoadNode {
   293 public:
   294   LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE )
   295     : LoadNode(c,mem,adr,at,t) {}
   296   virtual int Opcode() const;
   297   virtual uint ideal_reg() const { return Op_RegD; }
   298   virtual int store_Opcode() const { return Op_StoreD; }
   299   virtual BasicType memory_type() const { return T_DOUBLE; }
   300 };
   302 //------------------------------LoadD_unalignedNode----------------------------
   303 // Load a double from unaligned memory
   304 class LoadD_unalignedNode : public LoadDNode {
   305 public:
   306   LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
   307     : LoadDNode(c,mem,adr,at) {}
   308   virtual int Opcode() const;
   309 };
   311 //------------------------------LoadPNode--------------------------------------
   312 // Load a pointer from memory (either object or array)
   313 class LoadPNode : public LoadNode {
   314 public:
   315   LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t )
   316     : LoadNode(c,mem,adr,at,t) {}
   317   virtual int Opcode() const;
   318   virtual uint ideal_reg() const { return Op_RegP; }
   319   virtual int store_Opcode() const { return Op_StoreP; }
   320   virtual BasicType memory_type() const { return T_ADDRESS; }
   321   // depends_only_on_test is almost always true, and needs to be almost always
   322   // true to enable key hoisting & commoning optimizations.  However, for the
   323   // special case of RawPtr loads from TLS top & end, the control edge carries
   324   // the dependence preventing hoisting past a Safepoint instead of the memory
   325   // edge.  (An unfortunate consequence of having Safepoints not set Raw
   326   // Memory; itself an unfortunate consequence of having Nodes which produce
   327   // results (new raw memory state) inside of loops preventing all manner of
   328   // other optimizations).  Basically, it's ugly but so is the alternative.
   329   // See comment in macro.cpp, around line 125 expand_allocate_common().
   330   virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
   331 };
   333 //------------------------------LoadKlassNode----------------------------------
   334 // Load a Klass from an object
   335 class LoadKlassNode : public LoadPNode {
   336 public:
   337   LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk = TypeKlassPtr::OBJECT )
   338     : LoadPNode(c,mem,adr,at,tk) {}
   339   virtual int Opcode() const;
   340   virtual const Type *Value( PhaseTransform *phase ) const;
   341   virtual Node *Identity( PhaseTransform *phase );
   342   virtual bool depends_only_on_test() const { return true; }
   343 };
   345 //------------------------------LoadSNode--------------------------------------
    346 // Load a short (16 bits, signed) from memory
   347 class LoadSNode : public LoadNode {
   348 public:
   349   LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
   350     : LoadNode(c,mem,adr,at,ti) {}
   351   virtual int Opcode() const;
   352   virtual uint ideal_reg() const { return Op_RegI; }
   353   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   354   virtual int store_Opcode() const { return Op_StoreC; }
   355   virtual BasicType memory_type() const { return T_SHORT; }
   356 };
   358 //------------------------------StoreNode--------------------------------------
    359 // Store value; requires Memory, Address and Value
   360 class StoreNode : public MemNode {
   361 protected:
   362   virtual uint cmp( const Node &n ) const;
   363   virtual bool depends_only_on_test() const { return false; }
   365   Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
   366   Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);
   368 public:
   369   StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val )
   370     : MemNode(c,mem,adr,at,val) {
   371     init_class_id(Class_Store);
   372   }
   373   StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store )
   374     : MemNode(c,mem,adr,at,val,oop_store) {
   375     init_class_id(Class_Store);
   376   }
   378   // Polymorphic factory method:
   379   static StoreNode* make( Compile *C, Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, BasicType bt );
   381   virtual uint hash() const;    // Check the type
   383   // If the store is to Field memory and the pointer is non-null, we can
   384   // zero out the control input.
   385   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   387   // Compute a new Type for this node.  Basically we just do the pre-check,
   388   // then call the virtual add() to set the type.
   389   virtual const Type *Value( PhaseTransform *phase ) const;
   391   // Check for identity function on memory (Load then Store at same address)
   392   virtual Node *Identity( PhaseTransform *phase );
   394   // Do not match memory edge
   395   virtual uint match_edge(uint idx) const;
   397   virtual const Type *bottom_type() const;  // returns Type::MEMORY
   399   // Map a store opcode to its corresponding own opcode, trivially.
   400   virtual int store_Opcode() const { return Opcode(); }
   402   // have all possible loads of the value stored been optimized away?
   403   bool value_never_loaded(PhaseTransform *phase) const;
   404 };
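// Illustrative sketch (assumed caller code): StoreNode::make mirrors
// LoadNode::make, so an int store might be built roughly as
//   Node* st = StoreNode::make(C, ctl, mem, adr, adr_type, val, T_INT);
// where val is the value to store and the other edges follow the MemNode layout.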
   406 //------------------------------StoreBNode-------------------------------------
   407 // Store byte to memory
   408 class StoreBNode : public StoreNode {
   409 public:
   410   StoreBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   411   virtual int Opcode() const;
   412   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   413   virtual BasicType memory_type() const { return T_BYTE; }
   414 };
   416 //------------------------------StoreCNode-------------------------------------
   417 // Store char/short to memory
   418 class StoreCNode : public StoreNode {
   419 public:
   420   StoreCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   421   virtual int Opcode() const;
   422   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   423   virtual BasicType memory_type() const { return T_CHAR; }
   424 };
   426 //------------------------------StoreINode-------------------------------------
   427 // Store int to memory
   428 class StoreINode : public StoreNode {
   429 public:
   430   StoreINode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   431   virtual int Opcode() const;
   432   virtual BasicType memory_type() const { return T_INT; }
   433 };
   435 //------------------------------StoreLNode-------------------------------------
   436 // Store long to memory
   437 class StoreLNode : public StoreNode {
   438   virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
   439   virtual uint cmp( const Node &n ) const {
   440     return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
   441       && StoreNode::cmp(n);
   442   }
   443   virtual uint size_of() const { return sizeof(*this); }
   444   const bool _require_atomic_access;  // is piecewise store forbidden?
   446 public:
   447   StoreLNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
   448               bool require_atomic_access = false )
   449     : StoreNode(c,mem,adr,at,val)
   450     , _require_atomic_access(require_atomic_access)
   451   {}
   452   virtual int Opcode() const;
   453   virtual BasicType memory_type() const { return T_LONG; }
   454   bool require_atomic_access() { return _require_atomic_access; }
   455   static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val);
   456 #ifndef PRODUCT
   457   virtual void dump_spec(outputStream *st) const {
   458     StoreNode::dump_spec(st);
   459     if (_require_atomic_access)  st->print(" Atomic!");
   460   }
   461 #endif
   462 };
   464 //------------------------------StoreFNode-------------------------------------
   465 // Store float to memory
   466 class StoreFNode : public StoreNode {
   467 public:
   468   StoreFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   469   virtual int Opcode() const;
   470   virtual BasicType memory_type() const { return T_FLOAT; }
   471 };
   473 //------------------------------StoreDNode-------------------------------------
   474 // Store double to memory
   475 class StoreDNode : public StoreNode {
   476 public:
   477   StoreDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   478   virtual int Opcode() const;
   479   virtual BasicType memory_type() const { return T_DOUBLE; }
   480 };
   482 //------------------------------StorePNode-------------------------------------
   483 // Store pointer to memory
   484 class StorePNode : public StoreNode {
   485 public:
   486   StorePNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
   487   virtual int Opcode() const;
   488   virtual BasicType memory_type() const { return T_ADDRESS; }
   489 };
   491 //------------------------------StoreCMNode-----------------------------------
   492 // Store card-mark byte to memory for CM
   493 // The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
    494 // Preceding equivalent StoreCMs may be eliminated.
   495 class StoreCMNode : public StoreNode {
   496 public:
   497   StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store ) : StoreNode(c,mem,adr,at,val,oop_store) {}
   498   virtual int Opcode() const;
   499   virtual Node *Identity( PhaseTransform *phase );
   500   virtual const Type *Value( PhaseTransform *phase ) const;
   501   virtual BasicType memory_type() const { return T_VOID; } // unspecific
   502 };
   504 //------------------------------LoadPLockedNode---------------------------------
   505 // Load-locked a pointer from memory (either object or array).
   506 // On Sparc & Intel this is implemented as a normal pointer load.
   507 // On PowerPC and friends it's a real load-locked.
   508 class LoadPLockedNode : public LoadPNode {
   509 public:
   510   LoadPLockedNode( Node *c, Node *mem, Node *adr )
   511     : LoadPNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) {}
   512   virtual int Opcode() const;
   513   virtual int store_Opcode() const { return Op_StorePConditional; }
   514   virtual bool depends_only_on_test() const { return true; }
   515 };
   517 //------------------------------LoadLLockedNode---------------------------------
    518 // Load-locked a long from memory.
   519 // On Sparc & Intel this is implemented as a normal long load.
   520 class LoadLLockedNode : public LoadLNode {
   521 public:
   522   LoadLLockedNode( Node *c, Node *mem, Node *adr )
   523     : LoadLNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeLong::LONG) {}
   524   virtual int Opcode() const;
   525   virtual int store_Opcode() const { return Op_StoreLConditional; }
   526 };
   528 //------------------------------SCMemProjNode---------------------------------------
    529 // This class defines a projection of the memory state of a store conditional node.
   530 // These nodes return a value, but also update memory.
   531 class SCMemProjNode : public ProjNode {
   532 public:
   533   enum {SCMEMPROJCON = (uint)-2};
   534   SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
   535   virtual int Opcode() const;
   536   virtual bool      is_CFG() const  { return false; }
   537   virtual const Type *bottom_type() const {return Type::MEMORY;}
   538   virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
   539   virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
   540   virtual const Type *Value( PhaseTransform *phase ) const;
   541 #ifndef PRODUCT
   542   virtual void dump_spec(outputStream *st) const {};
   543 #endif
   544 };
   546 //------------------------------LoadStoreNode---------------------------
   547 class LoadStoreNode : public Node {
   548 public:
   549   enum {
   550     ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
   551   };
   552   LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex);
   553   virtual bool depends_only_on_test() const { return false; }
   554   virtual const Type *bottom_type() const { return TypeInt::BOOL; }
   555   virtual uint ideal_reg() const { return Op_RegI; }
   556   virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
   557 };
   559 //------------------------------StorePConditionalNode---------------------------
   560 // Conditionally store pointer to memory, if no change since prior
   561 // load-locked.  Sets flags for success or failure of the store.
   562 class StorePConditionalNode : public LoadStoreNode {
   563 public:
   564   StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
   565   virtual int Opcode() const;
   566   // Produces flags
   567   virtual uint ideal_reg() const { return Op_RegFlags; }
   568 };
   570 //------------------------------StoreLConditionalNode---------------------------
   571 // Conditionally store long to memory, if no change since prior
   572 // load-locked.  Sets flags for success or failure of the store.
   573 class StoreLConditionalNode : public LoadStoreNode {
   574 public:
   575   StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
   576   virtual int Opcode() const;
   577 };
   580 //------------------------------CompareAndSwapLNode---------------------------
   581 class CompareAndSwapLNode : public LoadStoreNode {
   582 public:
   583   CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
   584   virtual int Opcode() const;
   585 };
   588 //------------------------------CompareAndSwapINode---------------------------
   589 class CompareAndSwapINode : public LoadStoreNode {
   590 public:
   591   CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
   592   virtual int Opcode() const;
   593 };
   596 //------------------------------CompareAndSwapPNode---------------------------
   597 class CompareAndSwapPNode : public LoadStoreNode {
   598 public:
   599   CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
   600   virtual int Opcode() const;
   601 };
   603 //------------------------------ClearArray-------------------------------------
   604 class ClearArrayNode: public Node {
   605 public:
   606   ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base ) : Node(ctrl,arymem,word_cnt,base) {}
   607   virtual int         Opcode() const;
   608   virtual const Type *bottom_type() const { return Type::MEMORY; }
   609   // ClearArray modifies array elements, and so affects only the
   610   // array memory addressed by the bottom_type of its base address.
   611   virtual const class TypePtr *adr_type() const;
   612   virtual Node *Identity( PhaseTransform *phase );
   613   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   614   virtual uint match_edge(uint idx) const;
   616   // Clear the given area of an object or array.
   617   // The start offset must always be aligned mod BytesPerInt.
   618   // The end offset must always be aligned mod BytesPerLong.
   619   // Return the new memory.
   620   static Node* clear_memory(Node* control, Node* mem, Node* dest,
   621                             intptr_t start_offset,
   622                             intptr_t end_offset,
   623                             PhaseGVN* phase);
   624   static Node* clear_memory(Node* control, Node* mem, Node* dest,
   625                             intptr_t start_offset,
   626                             Node* end_offset,
   627                             PhaseGVN* phase);
   628   static Node* clear_memory(Node* control, Node* mem, Node* dest,
   629                             Node* start_offset,
   630                             Node* end_offset,
   631                             PhaseGVN* phase);
   632 };
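// Illustrative sketch (assumed caller code): the clear_memory overloads above
// return a new memory state with the given range of a fresh object zeroed, e.g.
//   Node* zmem = ClearArrayNode::clear_memory(ctl, rawmem, rawptr,
//                                             header_size, size_in_bytes, &gvn);
// using the (intptr_t start_offset, Node* end_offset) overload; names are assumed.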
   634 //------------------------------StrComp-------------------------------------
   635 class StrCompNode: public Node {
   636 public:
   637   StrCompNode(Node *control,
   638               Node* char_array_mem,
   639               Node* value_mem,
   640               Node* count_mem,
   641               Node* offset_mem,
   642               Node* s1, Node* s2): Node(control,
   643                                         char_array_mem,
   644                                         value_mem,
   645                                         count_mem,
   646                                         offset_mem,
   647                                         s1, s2) {};
   648   virtual int Opcode() const;
   649   virtual bool depends_only_on_test() const { return false; }
   650   virtual const Type* bottom_type() const { return TypeInt::INT; }
   651   // a StrCompNode (conservatively) aliases with everything:
   652   virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
   653   virtual uint match_edge(uint idx) const;
   654   virtual uint ideal_reg() const { return Op_RegI; }
   655   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   656 };
   658 //------------------------------MemBar-----------------------------------------
   659 // There are different flavors of Memory Barriers to match the Java Memory
    660 // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
    661 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
    662 // volatile-load.  Monitor-exit and volatile-store act as Release: no
    663 // preceding ref can be moved to after them.  We insert a MemBar-Release
    664 // before a FastUnlock or volatile-store.  All volatiles need to be
    665 // serialized, so we follow all volatile-stores with a MemBar-Volatile to
    666 // separate them from any following volatile-load.
   667 class MemBarNode: public MultiNode {
   668   virtual uint hash() const ;                  // { return NO_HASH; }
   669   virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
   671   virtual uint size_of() const { return sizeof(*this); }
   672   // Memory type this node is serializing.  Usually either rawptr or bottom.
   673   const TypePtr* _adr_type;
   675 public:
   676   enum {
   677     Precedent = TypeFunc::Parms  // optional edge to force precedence
   678   };
   679   MemBarNode(Compile* C, int alias_idx, Node* precedent);
   680   virtual int Opcode() const = 0;
   681   virtual const class TypePtr *adr_type() const { return _adr_type; }
   682   virtual const Type *Value( PhaseTransform *phase ) const;
   683   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   684   virtual uint match_edge(uint idx) const { return 0; }
   685   virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
   686   virtual Node *match( const ProjNode *proj, const Matcher *m );
   687   // Factory method.  Builds a wide or narrow membar.
   688   // Optional 'precedent' becomes an extra edge if not null.
   689   static MemBarNode* make(Compile* C, int opcode,
   690                           int alias_idx = Compile::AliasIdxBot,
   691                           Node* precedent = NULL);
   692 };
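// Illustrative sketch (assumed caller code): concrete barriers are normally
// created through the factory above, e.g.
//   MemBarNode* mb = MemBarNode::make(C, Op_MemBarAcquire);
// which defaults to a wide (AliasIdxBot) barrier with no precedent edge.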
   694 // "Acquire" - no following ref can move before (but earlier refs can
   695 // follow, like an early Load stalled in cache).  Requires multi-cpu
   696 // visibility.  Inserted after a volatile load or FastLock.
   697 class MemBarAcquireNode: public MemBarNode {
   698 public:
   699   MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
   700     : MemBarNode(C, alias_idx, precedent) {}
   701   virtual int Opcode() const;
   702 };
   704 // "Release" - no earlier ref can move after (but later refs can move
   705 // up, like a speculative pipelined cache-hitting Load).  Requires
    706 // multi-cpu visibility.  Inserted before a volatile store or FastUnlock.
   707 class MemBarReleaseNode: public MemBarNode {
   708 public:
   709   MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
   710     : MemBarNode(C, alias_idx, precedent) {}
   711   virtual int Opcode() const;
   712 };
   714 // Ordering between a volatile store and a following volatile load.
   715 // Requires multi-CPU visibility?
   716 class MemBarVolatileNode: public MemBarNode {
   717 public:
   718   MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
   719     : MemBarNode(C, alias_idx, precedent) {}
   720   virtual int Opcode() const;
   721 };
   723 // Ordering within the same CPU.  Used to order unsafe memory references
   724 // inside the compiler when we lack alias info.  Not needed "outside" the
   725 // compiler because the CPU does all the ordering for us.
   726 class MemBarCPUOrderNode: public MemBarNode {
   727 public:
   728   MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
   729     : MemBarNode(C, alias_idx, precedent) {}
   730   virtual int Opcode() const;
   731   virtual uint ideal_reg() const { return 0; } // not matched in the AD file
   732 };
   734 // Isolation of object setup after an AllocateNode and before next safepoint.
   735 // (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
   736 class InitializeNode: public MemBarNode {
   737   friend class AllocateNode;
   739   bool _is_complete;
   741 public:
   742   enum {
   743     Control    = TypeFunc::Control,
   744     Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
   745     RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
   746     RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
   747   };
   749   InitializeNode(Compile* C, int adr_type, Node* rawoop);
   750   virtual int Opcode() const;
   751   virtual uint size_of() const { return sizeof(*this); }
   752   virtual uint ideal_reg() const { return 0; } // not matched in the AD file
   753   virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress
   755   // Manage incoming memory edges via a MergeMem on in(Memory):
   756   Node* memory(uint alias_idx);
   758   // The raw memory edge coming directly from the Allocation.
   759   // The contents of this memory are *always* all-zero-bits.
   760   Node* zero_memory() { return memory(Compile::AliasIdxRaw); }
   762   // Return the corresponding allocation for this initialization (or null if none).
   763   // (Note: Both InitializeNode::allocation and AllocateNode::initialization
   764   // are defined in graphKit.cpp, which sets up the bidirectional relation.)
   765   AllocateNode* allocation();
   767   // Anything other than zeroing in this init?
   768   bool is_non_zero();
    770   // An InitializeNode must be completed before macro expansion is done.
    771   // Completion requires that the AllocateNode be followed by
    772   // initialization of the new memory to zero, then by any initializers.
   773   bool is_complete() { return _is_complete; }
   775   // Mark complete.  (Must not yet be complete.)
   776   void set_complete(PhaseGVN* phase);
   778 #ifdef ASSERT
   779   // ensure all non-degenerate stores are ordered and non-overlapping
   780   bool stores_are_sane(PhaseTransform* phase);
   781 #endif //ASSERT
   783   // See if this store can be captured; return offset where it initializes.
   784   // Return 0 if the store cannot be moved (any sort of problem).
   785   intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase);
   787   // Capture another store; reformat it to write my internal raw memory.
   788   // Return the captured copy, else NULL if there is some sort of problem.
   789   Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase);
   791   // Find captured store which corresponds to the range [start..start+size).
   792   // Return my own memory projection (meaning the initial zero bits)
   793   // if there is no such store.  Return NULL if there is a problem.
   794   Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);
   796   // Called when the associated AllocateNode is expanded into CFG.
   797   Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
   798                         intptr_t header_size, Node* size_in_bytes,
   799                         PhaseGVN* phase);
   801  private:
   802   void remove_extra_zeroes();
   804   // Find out where a captured store should be placed (or already is placed).
   805   int captured_store_insertion_point(intptr_t start, int size_in_bytes,
   806                                      PhaseTransform* phase);
   808   static intptr_t get_store_offset(Node* st, PhaseTransform* phase);
   810   Node* make_raw_address(intptr_t offset, PhaseTransform* phase);
   812   bool detect_init_independence(Node* n, bool st_is_pinned, int& count);
   814   void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
   815                                PhaseGVN* phase);
   817   intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
   818 };
   820 //------------------------------MergeMem---------------------------------------
   821 // (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
   822 class MergeMemNode: public Node {
   823   virtual uint hash() const ;                  // { return NO_HASH; }
   824   virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
   825   friend class MergeMemStream;
   826   MergeMemNode(Node* def);  // clients use MergeMemNode::make
   828 public:
   829   // If the input is a whole memory state, clone it with all its slices intact.
   830   // Otherwise, make a new memory state with just that base memory input.
   831   // In either case, the result is a newly created MergeMem.
   832   static MergeMemNode* make(Compile* C, Node* base_memory);
   834   virtual int Opcode() const;
   835   virtual Node *Identity( PhaseTransform *phase );
   836   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   837   virtual uint ideal_reg() const { return NotAMachineReg; }
   838   virtual uint match_edge(uint idx) const { return 0; }
   839   virtual const RegMask &out_RegMask() const;
   840   virtual const Type *bottom_type() const { return Type::MEMORY; }
   841   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
   842   // sparse accessors
   843   // Fetch the previously stored "set_memory_at", or else the base memory.
   844   // (Caller should clone it if it is a phi-nest.)
   845   Node* memory_at(uint alias_idx) const;
   846   // set the memory, regardless of its previous value
   847   void set_memory_at(uint alias_idx, Node* n);
   848   // the "base" is the memory that provides the non-finite support
   849   Node* base_memory() const       { return in(Compile::AliasIdxBot); }
   850   // warning: setting the base can implicitly set any of the other slices too
   851   void set_base_memory(Node* def);
   852   // sentinel value which denotes a copy of the base memory:
   853   Node*   empty_memory() const    { return in(Compile::AliasIdxTop); }
   854   static Node* make_empty_memory(); // where the sentinel comes from
   855   bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
   856   // hook for the iterator, to perform any necessary setup
   857   void iteration_setup(const MergeMemNode* other = NULL);
   858   // push sentinels until I am at least as long as the other (semantic no-op)
   859   void grow_to_match(const MergeMemNode* other);
   860   bool verify_sparse() const PRODUCT_RETURN0;
   861 #ifndef PRODUCT
   862   virtual void dump_spec(outputStream *st) const;
   863 #endif
   864 };
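// Illustrative sketch (assumed caller code): a MergeMem is addressed by alias
// index through the sparse accessors above, e.g.
//   MergeMemNode* mm = MergeMemNode::make(C, all_mem);
//   Node* slice = mm->memory_at(alias_idx);    // read one memory slice
//   mm->set_memory_at(alias_idx, new_state);   // replace just that slice
// with every untouched slice still backed by base_memory().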
   866 class MergeMemStream : public StackObj {
   867  private:
   868   MergeMemNode*       _mm;
   869   const MergeMemNode* _mm2;  // optional second guy, contributes non-empty iterations
   870   Node*               _mm_base;  // loop-invariant base memory of _mm
   871   int                 _idx;
   872   int                 _cnt;
   873   Node*               _mem;
   874   Node*               _mem2;
   875   int                 _cnt2;
   877   void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
   878     // subsume_node will break sparseness at times, whenever a memory slice
   879     // folds down to a copy of the base ("fat") memory.  In such a case,
   880     // the raw edge will update to base, although it should be top.
   881     // This iterator will recognize either top or base_memory as an
   882     // "empty" slice.  See is_empty, is_empty2, and next below.
   883     //
   884     // The sparseness property is repaired in MergeMemNode::Ideal.
   885     // As long as access to a MergeMem goes through this iterator
   886     // or the memory_at accessor, flaws in the sparseness will
   887     // never be observed.
   888     //
   889     // Also, iteration_setup repairs sparseness.
   890     assert(mm->verify_sparse(), "please, no dups of base");
   891     assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");
   893     _mm  = mm;
   894     _mm_base = mm->base_memory();
   895     _mm2 = mm2;
   896     _cnt = mm->req();
   897     _idx = Compile::AliasIdxBot-1; // start at the base memory
   898     _mem = NULL;
   899     _mem2 = NULL;
   900   }
   902 #ifdef ASSERT
   903   Node* check_memory() const {
   904     if (at_base_memory())
   905       return _mm->base_memory();
   906     else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
   907       return _mm->memory_at(_idx);
   908     else
   909       return _mm_base;
   910   }
   911   Node* check_memory2() const {
   912     return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
   913   }
   914 #endif
   916   static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
   917   void assert_synch() const {
   918     assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
   919            "no side-effects except through the stream");
   920   }
   922  public:
   924   // expected usages:
   925   // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
   926   // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }
   928   // iterate over one merge
   929   MergeMemStream(MergeMemNode* mm) {
   930     mm->iteration_setup();
   931     init(mm);
   932     debug_only(_cnt2 = 999);
   933   }
   934   // iterate in parallel over two merges
   935   // only iterates through non-empty elements of mm2
   936   MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
   937     assert(mm2, "second argument must be a MergeMem also");
   938     ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
   939     mm->iteration_setup(mm2);
   940     init(mm, mm2);
   941     _cnt2 = mm2->req();
   942   }
   943 #ifdef ASSERT
   944   ~MergeMemStream() {
   945     assert_synch();
   946   }
   947 #endif
   949   MergeMemNode* all_memory() const {
   950     return _mm;
   951   }
   952   Node* base_memory() const {
   953     assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
   954     return _mm_base;
   955   }
   956   const MergeMemNode* all_memory2() const {
   957     assert(_mm2 != NULL, "");
   958     return _mm2;
   959   }
   960   bool at_base_memory() const {
   961     return _idx == Compile::AliasIdxBot;
   962   }
   963   int alias_idx() const {
   964     assert(_mem, "must call next 1st");
   965     return _idx;
   966   }
   968   const TypePtr* adr_type() const {
   969     return Compile::current()->get_adr_type(alias_idx());
   970   }
   972   const TypePtr* adr_type(Compile* C) const {
   973     return C->get_adr_type(alias_idx());
   974   }
   975   bool is_empty() const {
   976     assert(_mem, "must call next 1st");
   977     assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
   978     return _mem->is_top();
   979   }
   980   bool is_empty2() const {
   981     assert(_mem2, "must call next 1st");
   982     assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
   983     return _mem2->is_top();
   984   }
   985   Node* memory() const {
   986     assert(!is_empty(), "must not be empty");
   987     assert_synch();
   988     return _mem;
   989   }
   990   // get the current memory, regardless of empty or non-empty status
   991   Node* force_memory() const {
   992     assert(!is_empty() || !at_base_memory(), "");
   993     // Use _mm_base to defend against updates to _mem->base_memory().
   994     Node *mem = _mem->is_top() ? _mm_base : _mem;
   995     assert(mem == check_memory(), "");
   996     return mem;
   997   }
   998   Node* memory2() const {
   999     assert(_mem2 == check_memory2(), "");
   1000     return _mem2;
   1001   }
   1002   void set_memory(Node* mem) {
  1003     if (at_base_memory()) {
  1004       // Note that this does not change the invariant _mm_base.
  1005       _mm->set_base_memory(mem);
  1006     } else {
   1007       _mm->set_memory_at(_idx, mem);
   1008     }
   1009     _mem = mem;
   1010     assert_synch();
   1011   }
  1013   // Recover from a side effect to the MergeMemNode.
   1014   void set_memory() {
   1015     _mem = _mm->in(_idx);
   1016   }
  1018   bool next()  { return next(false); }
  1019   bool next2() { return next(true); }
  1021   bool next_non_empty()  { return next_non_empty(false); }
  1022   bool next_non_empty2() { return next_non_empty(true); }
  1023   // next_non_empty2 can yield states where is_empty() is true
  1025  private:
  1026   // find the next item, which might be empty
  1027   bool next(bool have_mm2) {
  1028     assert((_mm2 != NULL) == have_mm2, "use other next");
  1029     assert_synch();
  1030     if (++_idx < _cnt) {
  1031       // Note:  This iterator allows _mm to be non-sparse.
  1032       // It behaves the same whether _mem is top or base_memory.
  1033       _mem = _mm->in(_idx);
  1034       if (have_mm2)
  1035         _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
   1036       return true;
   1037     }
   1038     return false;
   1039   }
  1041   // find the next non-empty item
  1042   bool next_non_empty(bool have_mm2) {
  1043     while (next(have_mm2)) {
  1044       if (!is_empty()) {
  1045         // make sure _mem2 is filled in sensibly
  1046         if (have_mm2 && _mem2->is_top())  _mem2 = _mm2->base_memory();
  1047         return true;
  1048       } else if (have_mm2 && !is_empty2()) {
   1049         return true;   // is_empty() == true
   1050       }
   1051     }
   1052     return false;
   1053   }
   1054 };
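// Illustrative sketch (based on the "expected usages" comment above): walking a
// MergeMem and rewriting each non-empty slice through the stream's accessors:
//   for (MergeMemStream mms(mem->is_MergeMem()); mms.next_non_empty(); ) {
//     Node* old_slice = mms.memory();
//     mms.set_memory(improve_slice(old_slice));  // improve_slice is hypothetical
//   }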
  1056 //------------------------------Prefetch---------------------------------------
  1058 // Non-faulting prefetch load.  Prefetch for many reads.
  1059 class PrefetchReadNode : public Node {
  1060 public:
  1061   PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  1062   virtual int Opcode() const;
  1063   virtual uint ideal_reg() const { return NotAMachineReg; }
  1064   virtual uint match_edge(uint idx) const { return idx==2; }
  1065   virtual const Type *bottom_type() const { return Type::ABIO; }
  1066 };
  1068 // Non-faulting prefetch load.  Prefetch for many reads & many writes.
  1069 class PrefetchWriteNode : public Node {
  1070 public:
  1071   PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  1072   virtual int Opcode() const;
  1073   virtual uint ideal_reg() const { return NotAMachineReg; }
  1074   virtual uint match_edge(uint idx) const { return idx==2; }
  1075   virtual const Type *bottom_type() const { return Type::ABIO; }
  1076 };
