src/share/vm/opto/node.cpp

author:      roland
date:        Tue, 09 Oct 2012 10:11:38 +0200
changeset:   4159:8e47bac5643a
parent:      4115:e626685e9f6c
child:       4315:2aff40cb4703
permissions: -rw-r--r--

7054512: Compress class pointers after perm gen removal
Summary: support of compressed class pointers in the compilers.
Reviewed-by: kvn, twisti

     1 /*
     2  * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "libadt/vectset.hpp"
    27 #include "memory/allocation.inline.hpp"
    28 #include "opto/cfgnode.hpp"
    29 #include "opto/connode.hpp"
    30 #include "opto/machnode.hpp"
    31 #include "opto/matcher.hpp"
    32 #include "opto/node.hpp"
    33 #include "opto/opcodes.hpp"
    34 #include "opto/regmask.hpp"
    35 #include "opto/type.hpp"
    36 #include "utilities/copy.hpp"
    38 class RegMask;
    39 // #include "phase.hpp"
    40 class PhaseTransform;
    41 class PhaseGVN;
    43 // Arena we are currently building Nodes in
    44 const uint Node::NotAMachineReg = 0xffff0000;
    46 #ifndef PRODUCT
    47 extern int nodes_created;
    48 #endif
    50 #ifdef ASSERT
    52 //-------------------------- construct_node------------------------------------
    53 // Set a breakpoint here to identify where a particular node index is built.
    54 void Node::verify_construction() {
    55   _debug_orig = NULL;
    56   int old_debug_idx = Compile::debug_idx();
    57   int new_debug_idx = old_debug_idx+1;
    58   if (new_debug_idx > 0) {
    59     // Arrange that the lowest five decimal digits of _debug_idx
    60     // will repeat those of _idx.  In case this is somehow pathological,
    61     // we continue to assign negative numbers (!) consecutively.
    62     const int mod = 100000;
    63     int bump = (int)(_idx - new_debug_idx) % mod;
    64     if (bump < 0)  bump += mod;
    65     assert(bump >= 0 && bump < mod, "");
    66     new_debug_idx += bump;
    67   }
    68   Compile::set_debug_idx(new_debug_idx);
    69   set_debug_idx( new_debug_idx );
    70   assert(Compile::current()->unique() < (uint)MaxNodeLimit, "Node limit exceeded");
    71   if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
    72     tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
    73     BREAKPOINT;
    74   }
    75 #if OPTO_DU_ITERATOR_ASSERT
    76   _last_del = NULL;
    77   _del_tick = 0;
    78 #endif
    79   _hash_lock = 0;
    80 }
    83 // #ifdef ASSERT ...
    85 #if OPTO_DU_ITERATOR_ASSERT
    86 void DUIterator_Common::sample(const Node* node) {
    87   _vdui     = VerifyDUIterators;
    88   _node     = node;
    89   _outcnt   = node->_outcnt;
    90   _del_tick = node->_del_tick;
    91   _last     = NULL;
    92 }
    94 void DUIterator_Common::verify(const Node* node, bool at_end_ok) {
    95   assert(_node     == node, "consistent iterator source");
    96   assert(_del_tick == node->_del_tick, "no unexpected deletions allowed");
    97 }
    99 void DUIterator_Common::verify_resync() {
   100   // Ensure that the loop body has just deleted the last guy produced.
   101   const Node* node = _node;
   102   // Ensure that at least one copy of the last-seen edge was deleted.
   103   // Note:  It is OK to delete multiple copies of the last-seen edge.
   104   // Unfortunately, we have no way to verify that all the deletions delete
   105   // that same edge.  On this point we must use the Honor System.
   106   assert(node->_del_tick >= _del_tick+1, "must have deleted an edge");
   107   assert(node->_last_del == _last, "must have deleted the edge just produced");
   108   // We liked this deletion, so accept the resulting outcnt and tick.
   109   _outcnt   = node->_outcnt;
   110   _del_tick = node->_del_tick;
   111 }
   113 void DUIterator_Common::reset(const DUIterator_Common& that) {
   114   if (this == &that)  return;  // ignore assignment to self
   115   if (!_vdui) {
   116     // We need to initialize everything, overwriting garbage values.
   117     _last = that._last;
   118     _vdui = that._vdui;
   119   }
   120   // Note:  It is legal (though odd) for an iterator over some node x
   121   // to be reassigned to iterate over another node y.  Some doubly-nested
   122   // progress loops depend on being able to do this.
   123   const Node* node = that._node;
   124   // Re-initialize everything, except _last.
   125   _node     = node;
   126   _outcnt   = node->_outcnt;
   127   _del_tick = node->_del_tick;
   128 }
   130 void DUIterator::sample(const Node* node) {
   131   DUIterator_Common::sample(node);      // Initialize the assertion data.
   132   _refresh_tick = 0;                    // No refreshes have happened, as yet.
   133 }
   135 void DUIterator::verify(const Node* node, bool at_end_ok) {
   136   DUIterator_Common::verify(node, at_end_ok);
   137   assert(_idx      <  node->_outcnt + (uint)at_end_ok, "idx in range");
   138 }
   140 void DUIterator::verify_increment() {
   141   if (_refresh_tick & 1) {
   142     // We have refreshed the index during this loop.
   143     // Fix up _idx to meet asserts.
   144     if (_idx > _outcnt)  _idx = _outcnt;
   145   }
   146   verify(_node, true);
   147 }
   149 void DUIterator::verify_resync() {
   150   // Note:  We do not assert on _outcnt, because insertions are OK here.
   151   DUIterator_Common::verify_resync();
   152   // Make sure we are still in sync, possibly with no more out-edges:
   153   verify(_node, true);
   154 }
   156 void DUIterator::reset(const DUIterator& that) {
   157   if (this == &that)  return;  // self assignment is always a no-op
   158   assert(that._refresh_tick == 0, "assign only the result of Node::outs()");
   159   assert(that._idx          == 0, "assign only the result of Node::outs()");
   160   assert(_idx               == that._idx, "already assigned _idx");
   161   if (!_vdui) {
   162     // We need to initialize everything, overwriting garbage values.
   163     sample(that._node);
   164   } else {
   165     DUIterator_Common::reset(that);
   166     if (_refresh_tick & 1) {
   167       _refresh_tick++;                  // Clear the "was refreshed" flag.
   168     }
   169     assert(_refresh_tick < 2*100000, "DU iteration must converge quickly");
   170   }
   171 }
   173 void DUIterator::refresh() {
   174   DUIterator_Common::sample(_node);     // Re-fetch assertion data.
   175   _refresh_tick |= 1;                   // Set the "was refreshed" flag.
   176 }
   178 void DUIterator::verify_finish() {
   179   // If the loop has killed the node, do not require it to re-run.
   180   if (_node->_outcnt == 0)  _refresh_tick &= ~1;
   181   // If this assert triggers, it means that a loop used refresh_out_pos
   182   // to re-synch an iteration index, but the loop did not correctly
   183   // re-run itself, using a "while (progress)" construct.
   184   // This iterator enforces the rule that you must keep trying the loop
   185   // until it "runs clean" without any need for refreshing.
   186   assert(!(_refresh_tick & 1), "the loop must run once with no refreshing");
   187 }
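// In sketch form, this is the loop shape the assertions above enforce.  The
// iterator idiom follows the usage documented in node.hpp; rewrite_one_use()
// is a hypothetical helper standing in for whatever graph edit the loop does.
//
//   bool progress = true;
//   while (progress) {                  // must re-run until a clean pass
//     progress = false;
//     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
//       Node* use = n->fast_out(i);
//       if (rewrite_one_use(use, n)) {  // may delete out-edges of 'n'
//         progress = true;
//         break;                        // restart the scan after any change
//       }
//     }
//   }
//
// verify_finish() then checks that the last pass completed without needing a
// refresh, i.e. the loop really did converge.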
   190 void DUIterator_Fast::verify(const Node* node, bool at_end_ok) {
   191   DUIterator_Common::verify(node, at_end_ok);
   192   Node** out    = node->_out;
   193   uint   cnt    = node->_outcnt;
   194   assert(cnt == _outcnt, "no insertions allowed");
   195   assert(_outp >= out && _outp <= out + cnt - !at_end_ok, "outp in range");
   196   // This last check is carefully designed to work for NO_OUT_ARRAY.
   197 }
   199 void DUIterator_Fast::verify_limit() {
   200   const Node* node = _node;
   201   verify(node, true);
   202   assert(_outp == node->_out + node->_outcnt, "limit still correct");
   203 }
   205 void DUIterator_Fast::verify_resync() {
   206   const Node* node = _node;
   207   if (_outp == node->_out + _outcnt) {
   208     // Note that the limit imax, not the pointer i, gets updated with the
   209     // exact count of deletions.  (For the pointer it's always "--i".)
   210     assert(node->_outcnt+node->_del_tick == _outcnt+_del_tick, "no insertions allowed with deletion(s)");
   211     // This is a limit pointer, with a name like "imax".
   212     // Fudge the _last field so that the common assert will be happy.
   213     _last = (Node*) node->_last_del;
   214     DUIterator_Common::verify_resync();
   215   } else {
   216     assert(node->_outcnt < _outcnt, "no insertions allowed with deletion(s)");
   217     // A normal internal pointer.
   218     DUIterator_Common::verify_resync();
   219     // Make sure we are still in sync, possibly with no more out-edges:
   220     verify(node, true);
   221   }
   222 }
   224 void DUIterator_Fast::verify_relimit(uint n) {
   225   const Node* node = _node;
   226   assert((int)n > 0, "use imax -= n only with a positive count");
   227   // This must be a limit pointer, with a name like "imax".
   228   assert(_outp == node->_out + node->_outcnt, "apply -= only to a limit (imax)");
   229   // The reported number of deletions must match what the node saw.
   230   assert(node->_del_tick == _del_tick + n, "must have deleted n edges");
   231   // Fudge the _last field so that the common assert will be happy.
   232   _last = (Node*) node->_last_del;
   233   DUIterator_Common::verify_resync();
   234 }
   236 void DUIterator_Fast::reset(const DUIterator_Fast& that) {
   237   assert(_outp              == that._outp, "already assigned _outp");
   238   DUIterator_Common::reset(that);
   239 }
   241 void DUIterator_Last::verify(const Node* node, bool at_end_ok) {
   242   // at_end_ok means the _outp is allowed to underflow by 1
   243   _outp += at_end_ok;
   244   DUIterator_Fast::verify(node, at_end_ok);  // check _del_tick, etc.
   245   _outp -= at_end_ok;
   246   assert(_outp == (node->_out + node->_outcnt) - 1, "pointer must point to end of nodes");
   247 }
   249 void DUIterator_Last::verify_limit() {
   250   // Do not require the limit address to be resynched.
   251   //verify(node, true);
   252   assert(_outp == _node->_out, "limit still correct");
   253 }
   255 void DUIterator_Last::verify_step(uint num_edges) {
   256   assert((int)num_edges > 0, "need non-zero edge count for loop progress");
   257   _outcnt   -= num_edges;
   258   _del_tick += num_edges;
   259   // Make sure we are still in sync, possibly with no more out-edges:
   260   const Node* node = _node;
   261   verify(node, true);
   262   assert(node->_last_del == _last, "must have deleted the edge just produced");
   263 }
   265 #endif //OPTO_DU_ITERATOR_ASSERT
   268 #endif //ASSERT
   271 // This constant used to initialize _out may be any non-null value.
   272 // The value NULL is reserved for the top node only.
   273 #define NO_OUT_ARRAY ((Node**)-1)
   275 // This funny expression handshakes with Node::operator new
   276 // to pull Compile::current out of the new node's _out field,
   277 // and then calls a subroutine which manages most field
   278 // initializations.  The only one which is tricky is the
   279 // _idx field, which is const, and so must be initialized
   280 // by a return value, not an assignment.
   281 //
   282 // (Aren't you thankful that Java finals don't require so many tricks?)
   283 #define IDX_INIT(req) this->Init((req), (Compile*) this->_out)
   284 #ifdef _MSC_VER // the IDX_INIT hack falls foul of warning C4355
   285 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
   286 #endif
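// In outline, the handshake works like this (a paraphrase of the operator new
// declared in node.hpp for this era, not the verbatim code):
//
//   void* Node::operator new(size_t x, Compile* C) {
//     Node* n = (Node*)C->node_arena()->Amalloc_D(x);
//     n->_out = (Node**)C;    // stash the Compile* where Init() can find it
//     return (void*)n;
//   }
//
// A constructor's "_idx(IDX_INIT(req))" then calls Init(req, C), which pulls
// the Compile* back out of _out, allocates the _in array, and returns the
// value used to initialize the const _idx field.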
   288 // Out-of-line code from node constructors.
   289 // Executed only when extra debug info. is being passed around.
   290 static void init_node_notes(Compile* C, int idx, Node_Notes* nn) {
   291   C->set_node_notes_at(idx, nn);
   292 }
   294 // Shared initialization code.
   295 inline int Node::Init(int req, Compile* C) {
   296   assert(Compile::current() == C, "must use operator new(Compile*)");
   297   int idx = C->next_unique();
   299   // Allocate memory for the necessary number of edges.
   300   if (req > 0) {
   301     // Allocate space for _in array to have double alignment.
   302     _in = (Node **) ((char *) (C->node_arena()->Amalloc_D(req * sizeof(void*))));
   303 #ifdef ASSERT
   304     _in[req-1] = this; // magic cookie for assertion check
   305 #endif
   306   }
   307   // If there are default notes floating around, capture them:
   308   Node_Notes* nn = C->default_node_notes();
   309   if (nn != NULL)  init_node_notes(C, idx, nn);
   311   // Note:  At this point, C is dead,
   312   // and we begin to initialize the new Node.
   314   _cnt = _max = req;
   315   _outcnt = _outmax = 0;
   316   _class_id = Class_Node;
   317   _flags = 0;
   318   _out = NO_OUT_ARRAY;
   319   return idx;
   320 }
   322 //------------------------------Node-------------------------------------------
   323 // Create a Node, with a given number of required edges.
   324 Node::Node(uint req)
   325   : _idx(IDX_INIT(req))
   326 {
   327   assert( req < (uint)(MaxNodeLimit - NodeLimitFudgeFactor), "Input limit exceeded" );
   328   debug_only( verify_construction() );
   329   NOT_PRODUCT(nodes_created++);
   330   if (req == 0) {
   331     assert( _in == (Node**)this, "Must not pass arg count to 'new'" );
   332     _in = NULL;
   333   } else {
   334     assert( _in[req-1] == this, "Must pass arg count to 'new'" );
   335     Node** to = _in;
   336     for(uint i = 0; i < req; i++) {
   337       to[i] = NULL;
   338     }
   339   }
   340 }
   342 //------------------------------Node-------------------------------------------
   343 Node::Node(Node *n0)
   344   : _idx(IDX_INIT(1))
   345 {
   346   debug_only( verify_construction() );
   347   NOT_PRODUCT(nodes_created++);
   348   // Assert we allocated space for input array already
   349   assert( _in[0] == this, "Must pass arg count to 'new'" );
   350   assert( is_not_dead(n0), "can not use dead node");
   351   _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
   352 }
   354 //------------------------------Node-------------------------------------------
   355 Node::Node(Node *n0, Node *n1)
   356   : _idx(IDX_INIT(2))
   357 {
   358   debug_only( verify_construction() );
   359   NOT_PRODUCT(nodes_created++);
   360   // Assert we allocated space for input array already
   361   assert( _in[1] == this, "Must pass arg count to 'new'" );
   362   assert( is_not_dead(n0), "can not use dead node");
   363   assert( is_not_dead(n1), "can not use dead node");
   364   _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
   365   _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
   366 }
   368 //------------------------------Node-------------------------------------------
   369 Node::Node(Node *n0, Node *n1, Node *n2)
   370   : _idx(IDX_INIT(3))
   371 {
   372   debug_only( verify_construction() );
   373   NOT_PRODUCT(nodes_created++);
   374   // Assert we allocated space for input array already
   375   assert( _in[2] == this, "Must pass arg count to 'new'" );
   376   assert( is_not_dead(n0), "can not use dead node");
   377   assert( is_not_dead(n1), "can not use dead node");
   378   assert( is_not_dead(n2), "can not use dead node");
   379   _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
   380   _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
   381   _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
   382 }
   384 //------------------------------Node-------------------------------------------
   385 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
   386   : _idx(IDX_INIT(4))
   387 {
   388   debug_only( verify_construction() );
   389   NOT_PRODUCT(nodes_created++);
   390   // Assert we allocated space for input array already
   391   assert( _in[3] == this, "Must pass arg count to 'new'" );
   392   assert( is_not_dead(n0), "can not use dead node");
   393   assert( is_not_dead(n1), "can not use dead node");
   394   assert( is_not_dead(n2), "can not use dead node");
   395   assert( is_not_dead(n3), "can not use dead node");
   396   _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
   397   _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
   398   _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
   399   _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
   400 }
   402 //------------------------------Node-------------------------------------------
   403 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
   404   : _idx(IDX_INIT(5))
   405 {
   406   debug_only( verify_construction() );
   407   NOT_PRODUCT(nodes_created++);
   408   // Assert we allocated space for input array already
   409   assert( _in[4] == this, "Must pass arg count to 'new'" );
   410   assert( is_not_dead(n0), "can not use dead node");
   411   assert( is_not_dead(n1), "can not use dead node");
   412   assert( is_not_dead(n2), "can not use dead node");
   413   assert( is_not_dead(n3), "can not use dead node");
   414   assert( is_not_dead(n4), "can not use dead node");
   415   _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
   416   _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
   417   _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
   418   _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
   419   _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
   420 }
   422 //------------------------------Node-------------------------------------------
   423 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
   424                      Node *n4, Node *n5)
   425   : _idx(IDX_INIT(6))
   426 {
   427   debug_only( verify_construction() );
   428   NOT_PRODUCT(nodes_created++);
   429   // Assert we allocated space for input array already
   430   assert( _in[5] == this, "Must pass arg count to 'new'" );
   431   assert( is_not_dead(n0), "can not use dead node");
   432   assert( is_not_dead(n1), "can not use dead node");
   433   assert( is_not_dead(n2), "can not use dead node");
   434   assert( is_not_dead(n3), "can not use dead node");
   435   assert( is_not_dead(n4), "can not use dead node");
   436   assert( is_not_dead(n5), "can not use dead node");
   437   _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
   438   _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
   439   _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
   440   _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
   441   _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
   442   _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
   443 }
   445 //------------------------------Node-------------------------------------------
   446 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
   447                      Node *n4, Node *n5, Node *n6)
   448   : _idx(IDX_INIT(7))
   449 {
   450   debug_only( verify_construction() );
   451   NOT_PRODUCT(nodes_created++);
   452   // Assert we allocated space for input array already
   453   assert( _in[6] == this, "Must pass arg count to 'new'" );
   454   assert( is_not_dead(n0), "can not use dead node");
   455   assert( is_not_dead(n1), "can not use dead node");
   456   assert( is_not_dead(n2), "can not use dead node");
   457   assert( is_not_dead(n3), "can not use dead node");
   458   assert( is_not_dead(n4), "can not use dead node");
   459   assert( is_not_dead(n5), "can not use dead node");
   460   assert( is_not_dead(n6), "can not use dead node");
   461   _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
   462   _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
   463   _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
   464   _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
   465   _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
   466   _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
   467   _in[6] = n6; if (n6 != NULL) n6->add_out((Node *)this);
   468 }
   471 //------------------------------clone------------------------------------------
   472 // Clone a Node.
   473 Node *Node::clone() const {
   474   Compile *compile = Compile::current();
   475   uint s = size_of();           // Size of inherited Node
   476   Node *n = (Node*)compile->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*));
   477   Copy::conjoint_words_to_lower((HeapWord*)this, (HeapWord*)n, s);
   478   // Set the new input pointer array
   479   n->_in = (Node**)(((char*)n)+s);
   480   // Cannot share the old output pointer array, so kill it
   481   n->_out = NO_OUT_ARRAY;
   482   // And reset the counters to 0
   483   n->_outcnt = 0;
   484   n->_outmax = 0;
   485   // Unlock this guy, since he is not in any hash table.
   486   debug_only(n->_hash_lock = 0);
   487   // Walk the old node's input list to duplicate its edges
   488   uint i;
   489   for( i = 0; i < len(); i++ ) {
   490     Node *x = in(i);
   491     n->_in[i] = x;
   492     if (x != NULL) x->add_out(n);
   493   }
   494   if (is_macro())
   495     compile->add_macro_node(n);
   497   n->set_idx(compile->next_unique()); // Get new unique index as well
   498   debug_only( n->verify_construction() );
   499   NOT_PRODUCT(nodes_created++);
   500   // Do not patch over the debug_idx of a clone, because it makes it
   501   // impossible to break on the clone's moment of creation.
   502   //debug_only( n->set_debug_idx( debug_idx() ) );
   504   compile->copy_node_notes_to(n, (Node*) this);
   506   // MachNode clone
   507   uint nopnds;
   508   if (this->is_Mach() && (nopnds = this->as_Mach()->num_opnds()) > 0) {
   509     MachNode *mach  = n->as_Mach();
   510     MachNode *mthis = this->as_Mach();
   511     // Get address of _opnd_array.
   512     // It should be the same offset since it is the clone of this node.
   513     MachOper **from = mthis->_opnds;
   514     MachOper **to = (MachOper **)((size_t)(&mach->_opnds) +
   515                     pointer_delta((const void*)from,
   516                                   (const void*)(&mthis->_opnds), 1));
   517     mach->_opnds = to;
   518     for ( uint i = 0; i < nopnds; ++i ) {
   519       to[i] = from[i]->clone(compile);
   520     }
   521   }
   522   // cloning CallNode may need to clone JVMState
   523   if (n->is_Call()) {
   524     CallNode *call = n->as_Call();
   525     call->clone_jvms();
   526   }
   527   return n;                     // Return the clone
   528 }
   530 //---------------------------setup_is_top--------------------------------------
   531 // Call this when changing the top node, to reassert the invariants
   532 // required by Node::is_top.  See Compile::set_cached_top_node.
   533 void Node::setup_is_top() {
   534   if (this == (Node*)Compile::current()->top()) {
   535     // This node has just become top.  Kill its out array.
   536     _outcnt = _outmax = 0;
   537     _out = NULL;                           // marker value for top
   538     assert(is_top(), "must be top");
   539   } else {
   540     if (_out == NULL)  _out = NO_OUT_ARRAY;
   541     assert(!is_top(), "must not be top");
   542   }
   543 }
   546 //------------------------------~Node------------------------------------------
   547 // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
   548 extern int reclaim_idx ;
   549 extern int reclaim_in  ;
   550 extern int reclaim_node;
   551 void Node::destruct() {
   552   // Eagerly reclaim unique Node numberings
   553   Compile* compile = Compile::current();
   554   if ((uint)_idx+1 == compile->unique()) {
   555     compile->set_unique(compile->unique()-1);
   556 #ifdef ASSERT
   557     reclaim_idx++;
   558 #endif
   559   }
   560   // Clear debug info:
   561   Node_Notes* nn = compile->node_notes_at(_idx);
   562   if (nn != NULL)  nn->clear();
   563   // Walk the input array, freeing the corresponding output edges
   564   _cnt = _max;  // forget req/prec distinction
   565   uint i;
   566   for( i = 0; i < _max; i++ ) {
   567     set_req(i, NULL);
   568     //assert(def->out(def->outcnt()-1) == (Node *)this,"bad def-use hacking in reclaim");
   569   }
   570   assert(outcnt() == 0, "deleting a node must not leave a dangling use");
   571   // See if the input array was allocated just prior to the object
   572   int edge_size = _max*sizeof(void*);
   573   int out_edge_size = _outmax*sizeof(void*);
   574   char *edge_end = ((char*)_in) + edge_size;
   575   char *out_array = (char*)(_out == NO_OUT_ARRAY? NULL: _out);
   576   char *out_edge_end = out_array + out_edge_size;
   577   int node_size = size_of();
   579   // Free the output edge array
   580   if (out_edge_size > 0) {
   581 #ifdef ASSERT
   582     if( out_edge_end == compile->node_arena()->hwm() )
   583       reclaim_in  += out_edge_size;  // count reclaimed out edges with in edges
   584 #endif
   585     compile->node_arena()->Afree(out_array, out_edge_size);
   586   }
   588   // Free the input edge array and the node itself
   589   if( edge_end == (char*)this ) {
   590 #ifdef ASSERT
   591     if( edge_end+node_size == compile->node_arena()->hwm() ) {
   592       reclaim_in  += edge_size;
   593       reclaim_node+= node_size;
   594     }
   595 #else
   596     // It was; free the input array and object all in one hit
   597     compile->node_arena()->Afree(_in,edge_size+node_size);
   598 #endif
   599   } else {
   601     // Free just the input array
   602 #ifdef ASSERT
   603     if( edge_end == compile->node_arena()->hwm() )
   604       reclaim_in  += edge_size;
   605 #endif
   606     compile->node_arena()->Afree(_in,edge_size);
   608     // Free just the object
   609 #ifdef ASSERT
   610     if( ((char*)this) + node_size == compile->node_arena()->hwm() )
   611       reclaim_node+= node_size;
   612 #else
   613     compile->node_arena()->Afree(this,node_size);
   614 #endif
   615   }
   616   if (is_macro()) {
   617     compile->remove_macro_node(this);
   618   }
   619 #ifdef ASSERT
   620   // We will not actually delete the storage, but we'll make the node unusable.
   621   *(address*)this = badAddress;  // smash the C++ vtbl, probably
   622   _in = _out = (Node**) badAddress;
   623   _max = _cnt = _outmax = _outcnt = 0;
   624 #endif
   625 }
   627 //------------------------------grow-------------------------------------------
   628 // Grow the input array, making space for more edges
   629 void Node::grow( uint len ) {
   630   Arena* arena = Compile::current()->node_arena();
   631   uint new_max = _max;
   632   if( new_max == 0 ) {
   633     _max = 4;
   634     _in = (Node**)arena->Amalloc(4*sizeof(Node*));
   635     Node** to = _in;
   636     to[0] = NULL;
   637     to[1] = NULL;
   638     to[2] = NULL;
   639     to[3] = NULL;
   640     return;
   641   }
   642   while( new_max <= len ) new_max <<= 1; // Find next power-of-2
   643   // Trimming to limit allows a uint8 to handle up to 255 edges.
   644   // Previously I was using only powers-of-2 which peaked at 128 edges.
   645   //if( new_max >= limit ) new_max = limit-1;
   646   _in = (Node**)arena->Arealloc(_in, _max*sizeof(Node*), new_max*sizeof(Node*));
   647   Copy::zero_to_bytes(&_in[_max], (new_max-_max)*sizeof(Node*)); // NULL all new space
   648   _max = new_max;               // Record new max length
   649   // This assertion makes sure that Node::_max is wide enough to
   650   // represent the numerical value of new_max.
   651   assert(_max == new_max && _max > len, "int width of _max is too small");
   652 }
   654 //-----------------------------out_grow----------------------------------------
   655 // Grow the output array, making space for more edges
   656 void Node::out_grow( uint len ) {
   657   assert(!is_top(), "cannot grow a top node's out array");
   658   Arena* arena = Compile::current()->node_arena();
   659   uint new_max = _outmax;
   660   if( new_max == 0 ) {
   661     _outmax = 4;
   662     _out = (Node **)arena->Amalloc(4*sizeof(Node*));
   663     return;
   664   }
   665   while( new_max <= len ) new_max <<= 1; // Find next power-of-2
   666   // Trimming to limit allows a uint8 to handle up to 255 edges.
   667   // Previously I was using only powers-of-2 which peaked at 128 edges.
   668   //if( new_max >= limit ) new_max = limit-1;
   669   assert(_out != NULL && _out != NO_OUT_ARRAY, "out must have sensible value");
   670   _out = (Node**)arena->Arealloc(_out,_outmax*sizeof(Node*),new_max*sizeof(Node*));
   671   //Copy::zero_to_bytes(&_out[_outmax], (new_max-_outmax)*sizeof(Node*)); // NULL all new space
   672   _outmax = new_max;               // Record new max length
   673   // This assertion makes sure that Node::_outmax is wide enough to
   674   // represent the numerical value of new_max.
   675   assert(_outmax == new_max && _outmax > len, "int width of _outmax is too small");
   676 }
   678 #ifdef ASSERT
   679 //------------------------------is_dead----------------------------------------
   680 bool Node::is_dead() const {
   681   // Mach and pinch point nodes may look like they are dead.
   682   if( is_top() || is_Mach() || (Opcode() == Op_Node && _outcnt > 0) )
   683     return false;
   684   for( uint i = 0; i < _max; i++ )
   685     if( _in[i] != NULL )
   686       return false;
   687   dump();
   688   return true;
   689 }
   690 #endif
   692 //------------------------------add_req----------------------------------------
   693 // Add a new required input at the end
   694 void Node::add_req( Node *n ) {
   695   assert( is_not_dead(n), "can not use dead node");
   697   // Look to see if I can move precedence down one without reallocating
   698   if( (_cnt >= _max) || (in(_max-1) != NULL) )
   699     grow( _max+1 );
   701   // Find a precedence edge to move
   702   if( in(_cnt) != NULL ) {       // Next precedence edge is busy?
   703     uint i;
   704     for( i=_cnt; i<_max; i++ )
   705       if( in(i) == NULL )       // Find the NULL at end of prec edge list
   706         break;                  // There must be one, since we grew the array
   707     _in[i] = in(_cnt);          // Move prec over, making space for req edge
   708   }
   709   _in[_cnt++] = n;            // Stuff over old prec edge
   710   if (n != NULL) n->add_out((Node *)this);
   711 }
   713 //---------------------------add_req_batch-------------------------------------
   714 // Add 'm' copies of a new required input at the end
   715 void Node::add_req_batch( Node *n, uint m ) {
   716   assert( is_not_dead(n), "can not use dead node");
   717   // check various edge cases
   718   if ((int)m <= 1) {
   719     assert((int)m >= 0, "oob");
   720     if (m != 0)  add_req(n);
   721     return;
   722   }
   724   // Look to see if I can move precedence down one without reallocating
   725   if( (_cnt+m) > _max || _in[_max-m] )
   726     grow( _max+m );
   728   // Find a precedence edge to move
   729   if( _in[_cnt] != NULL ) {     // Next precedence edge is busy?
   730     uint i;
   731     for( i=_cnt; i<_max; i++ )
   732       if( _in[i] == NULL )      // Find the NULL at end of prec edge list
   733         break;                  // There must be one, since we grew the array
   734     // Slide all the precs over by m positions (assume #prec << m).
   735     Copy::conjoint_words_to_higher((HeapWord*)&_in[_cnt], (HeapWord*)&_in[_cnt+m], ((i-_cnt)*sizeof(Node*)));
   736   }
   738   // Stuff over the old prec edges
   739   for(uint i=0; i<m; i++ ) {
   740     _in[_cnt++] = n;
   741   }
   743   // Insert multiple out edges on the node.
   744   if (n != NULL && !n->is_top()) {
   745     for(uint i=0; i<m; i++ ) {
   746       n->add_out((Node *)this);
   747     }
   748   }
   749 }
   751 //------------------------------del_req----------------------------------------
   752 // Delete the required edge and compact the edge array
   753 void Node::del_req( uint idx ) {
   754   assert( idx < _cnt, "oob");
   755   assert( !VerifyHashTableKeys || _hash_lock == 0,
   756           "remove node from hash table before modifying it");
   757   // First remove corresponding def-use edge
   758   Node *n = in(idx);
   759   if (n != NULL) n->del_out((Node *)this);
   760   _in[idx] = in(--_cnt);  // Compact the array
   761   _in[_cnt] = NULL;       // NULL out emptied slot
   762 }
   764 //------------------------------ins_req----------------------------------------
   765 // Insert a new required input at position 'idx'
   766 void Node::ins_req( uint idx, Node *n ) {
   767   assert( is_not_dead(n), "can not use dead node");
   768   add_req(NULL);                // Make space
   769   assert( idx < _max, "Must have allocated enough space");
   770   // Slide over
   771   if(_cnt-idx-1 > 0) {
   772     Copy::conjoint_words_to_higher((HeapWord*)&_in[idx], (HeapWord*)&_in[idx+1], ((_cnt-idx-1)*sizeof(Node*)));
   773   }
   774   _in[idx] = n;                            // Stuff over old required edge
   775   if (n != NULL) n->add_out((Node *)this); // Add reciprocal def-use edge
   776 }
   778 //-----------------------------find_edge---------------------------------------
   779 int Node::find_edge(Node* n) {
   780   for (uint i = 0; i < len(); i++) {
   781     if (_in[i] == n)  return i;
   782   }
   783   return -1;
   784 }
   786 //----------------------------replace_edge-------------------------------------
   787 int Node::replace_edge(Node* old, Node* neww) {
   788   if (old == neww)  return 0;  // nothing to do
   789   uint nrep = 0;
   790   for (uint i = 0; i < len(); i++) {
   791     if (in(i) == old) {
   792       if (i < req())
   793         set_req(i, neww);
   794       else
   795         set_prec(i, neww);
   796       nrep++;
   797     }
   798   }
   799   return nrep;
   800 }
   802 //-------------------------disconnect_inputs-----------------------------------
   803 // NULL out all inputs to eliminate incoming Def-Use edges.
   804 // Return the number of edges between 'n' and 'this'
   805 int Node::disconnect_inputs(Node *n) {
   806   int edges_to_n = 0;
   808   uint cnt = req();
   809   for( uint i = 0; i < cnt; ++i ) {
   810     if( in(i) == 0 ) continue;
   811     if( in(i) == n ) ++edges_to_n;
   812     set_req(i, NULL);
   813   }
   814   // Remove precedence edges if any exist
   815   // Note: Safepoints may have precedence edges, even during parsing
   816   if( (req() != len()) && (in(req()) != NULL) ) {
   817     uint max = len();
   818     for( uint i = 0; i < max; ++i ) {
   819       if( in(i) == 0 ) continue;
   820       if( in(i) == n ) ++edges_to_n;
   821       set_prec(i, NULL);
   822     }
   823   }
   825   // Node::destruct requires all out edges be deleted first
   826   // debug_only(destruct();)   // no reuse benefit expected
   827   return edges_to_n;
   828 }
   830 //-----------------------------uncast---------------------------------------
   831 // %%% Temporary, until we sort out CheckCastPP vs. CastPP.
   832 // Strip away casting.  (It is depth-limited.)
   833 Node* Node::uncast() const {
   834   // Should be inline:
   835   //return is_ConstraintCast() ? uncast_helper(this) : (Node*) this;
   836   if (is_ConstraintCast() || is_CheckCastPP())
   837     return uncast_helper(this);
   838   else
   839     return (Node*) this;
   840 }
   842 //---------------------------uncast_helper-------------------------------------
   843 Node* Node::uncast_helper(const Node* p) {
   844 #ifdef ASSERT
   845   uint depth_count = 0;
   846   const Node* orig_p = p;
   847 #endif
   849   while (true) {
   850 #ifdef ASSERT
   851     if (depth_count >= K) {
   852       orig_p->dump(4);
   853       if (p != orig_p)
   854         p->dump(1);
   855     }
   856     assert(depth_count++ < K, "infinite loop in Node::uncast_helper");
   857 #endif
   858     if (p == NULL || p->req() != 2) {
   859       break;
   860     } else if (p->is_ConstraintCast()) {
   861       p = p->in(1);
   862     } else if (p->is_CheckCastPP()) {
   863       p = p->in(1);
   864     } else {
   865       break;
   866     }
   867   }
   868   return (Node*) p;
   869 }
   871 //------------------------------add_prec---------------------------------------
   872 // Add a new precedence input.  Precedence inputs are unordered, with
   873 // duplicates removed and NULLs packed down at the end.
   874 void Node::add_prec( Node *n ) {
   875   assert( is_not_dead(n), "can not use dead node");
   877   // Check for NULL at end
   878   if( _cnt >= _max || in(_max-1) )
   879     grow( _max+1 );
   881   // Find a precedence edge to move
   882   uint i = _cnt;
   883   while( in(i) != NULL ) i++;
   884   _in[i] = n;                                // Stuff prec edge over NULL
   885   if ( n != NULL) n->add_out((Node *)this);  // Add mirror edge
   886 }
   888 //------------------------------rm_prec----------------------------------------
   889 // Remove a precedence input.  Precedence inputs are unordered, with
   890 // duplicates removed and NULLs packed down at the end.
   891 void Node::rm_prec( uint j ) {
   893   // Find end of precedence list to pack NULLs
   894   uint i;
   895   for( i=j; i<_max; i++ )
   896     if( !_in[i] )               // Find the NULL at end of prec edge list
   897       break;
   898   if (_in[j] != NULL) _in[j]->del_out((Node *)this);
   899   _in[j] = _in[--i];            // Move last element over removed guy
   900   _in[i] = NULL;                // NULL out last element
   901 }
   903 //------------------------------size_of----------------------------------------
   904 uint Node::size_of() const { return sizeof(*this); }
   906 //------------------------------ideal_reg--------------------------------------
   907 uint Node::ideal_reg() const { return 0; }
   909 //------------------------------jvms-------------------------------------------
   910 JVMState* Node::jvms() const { return NULL; }
   912 #ifdef ASSERT
   913 //------------------------------jvms-------------------------------------------
   914 bool Node::verify_jvms(const JVMState* using_jvms) const {
   915   for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
   916     if (jvms == using_jvms)  return true;
   917   }
   918   return false;
   919 }
   921 //------------------------------init_NodeProperty------------------------------
   922 void Node::init_NodeProperty() {
   923   assert(_max_classes <= max_jushort, "too many NodeProperty classes");
   924   assert(_max_flags <= max_jushort, "too many NodeProperty flags");
   925 }
   926 #endif
   928 //------------------------------format-----------------------------------------
   929 // Print as assembly
   930 void Node::format( PhaseRegAlloc *, outputStream *st ) const {}
   931 //------------------------------emit-------------------------------------------
   932 // Emit bytes starting at parameter 'ptr'.
   933 void Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {}
   934 //------------------------------size-------------------------------------------
   935 // Size of instruction in bytes
   936 uint Node::size(PhaseRegAlloc *ra_) const { return 0; }
   938 //------------------------------CFG Construction-------------------------------
   939 // Nodes that end basic blocks, e.g. IfTrue/IfFalse, JumpProjNode, Root,
   940 // Goto and Return.
   941 const Node *Node::is_block_proj() const { return 0; }
   943 // Minimum guaranteed type
   944 const Type *Node::bottom_type() const { return Type::BOTTOM; }
   947 //------------------------------raise_bottom_type------------------------------
   948 // Get the worst-case Type output for this Node.
   949 void Node::raise_bottom_type(const Type* new_type) {
   950   if (is_Type()) {
   951     TypeNode *n = this->as_Type();
   952     if (VerifyAliases) {
   953       assert(new_type->higher_equal(n->type()), "new type must refine old type");
   954     }
   955     n->set_type(new_type);
   956   } else if (is_Load()) {
   957     LoadNode *n = this->as_Load();
   958     if (VerifyAliases) {
   959       assert(new_type->higher_equal(n->type()), "new type must refine old type");
   960     }
   961     n->set_type(new_type);
   962   }
   963 }
   965 //------------------------------Identity---------------------------------------
   966 // Return a node that the given node is equivalent to.
   967 Node *Node::Identity( PhaseTransform * ) {
   968   return this;                  // Default to no identities
   969 }
   971 //------------------------------Value------------------------------------------
   972 // Compute a new Type for a node using the Type of the inputs.
   973 const Type *Node::Value( PhaseTransform * ) const {
   974   return bottom_type();         // Default to worst-case Type
   975 }
   977 //------------------------------Ideal------------------------------------------
   978 //
   979 // 'Idealize' the graph rooted at this Node.
   980 //
   981 // In order to be efficient and flexible there are some subtle invariants
   982 // these Ideal calls need to hold.  Running with '+VerifyIterativeGVN' checks
   983 // these invariants, although it's too slow to have on by default.  If you are
   984 // hacking an Ideal call, be sure to test with +VerifyIterativeGVN!
   985 //
   986 // The Ideal call almost arbitrarily reshapes the graph rooted at the 'this'
   987 // pointer.  If ANY change is made, it must return the root of the reshaped
   988 // graph - even if the root is the same Node.  Example: swapping the inputs
   989 // to an AddINode gives the same answer and same root, but you still have to
   990 // return the 'this' pointer instead of NULL.
   991 //
   992 // You cannot return an OLD Node, except for the 'this' pointer.  Use the
   993 // Identity call to return an old Node; basically if Identity can find
   994 // another Node, have the Ideal call make no change and return NULL.
   995 // Example: AddINode::Ideal must check for add of zero; in this case it
   996 // returns NULL instead of doing any graph reshaping.
   997 //
   998 // You cannot modify any old Nodes except for the 'this' pointer.  Due to
   999 // sharing there may be other users of the old Nodes relying on their current
  1000 // semantics.  Modifying them will break the other users.
  1001 // Example: when reshaping "(X+3)+4" into "X+7" you must leave the Node for
  1002 // "X+3" unchanged in case it is shared.
  1003 //
  1004 // If you modify the 'this' pointer's inputs, you should use
  1005 // 'set_req'.  If you are making a new Node (either as the new root or
  1006 // some new internal piece) you may use 'init_req' to set the initial
  1007 // value.  You can make a new Node with either 'new' or 'clone'.  In
  1008 // either case, def-use info is correctly maintained.
  1009 //
  1010 // Example: reshape "(X+3)+4" into "X+7":
  1011 //    set_req(1, in(1)->in(1));
  1012 //    set_req(2, phase->intcon(7));
  1013 //    return this;
  1014 // Example: reshape "X*4" into "X<<2"
  1015 //    return new (C) LShiftINode(in(1), phase->intcon(2));
  1016 //
  1017 // You must call 'phase->transform(X)' on any new Nodes X you make, except
  1018 // for the returned root node.  Example: reshape "X*31" with "(X<<5)-X".
  1019 //    Node *shift=phase->transform(new(C)LShiftINode(in(1),phase->intcon(5)));
  1020 //    return new (C) AddINode(shift, in(1));
  1021 //
  1022 // When making a Node for a constant use 'phase->makecon' or 'phase->intcon'.
  1023 // These forms are faster than 'phase->transform(new (C) ConNode())' and Do
  1024 // The Right Thing with def-use info.
  1025 //
  1026 // You cannot bury the 'this' Node inside of a graph reshape.  If the reshaped
  1027 // graph uses the 'this' Node it must be the root.  If you want a Node with
  1028 // the same Opcode as the 'this' pointer use 'clone'.
  1029 //
  1030 Node *Node::Ideal(PhaseGVN *phase, bool can_reshape) {
  1031   return NULL;                  // Default to being Ideal already
  1032 }
  1034 // Some nodes have specific Ideal subgraph transformations only if they are
  1035 // unique users of specific nodes. Such nodes should be put on IGVN worklist
  1036 // for the transformations to happen.
  1037 bool Node::has_special_unique_user() const {
  1038   assert(outcnt() == 1, "match only for unique out");
  1039   Node* n = unique_out();
  1040   int op  = Opcode();
  1041   if( this->is_Store() ) {
  1042     // Condition for back-to-back stores folding.
  1043     return n->Opcode() == op && n->in(MemNode::Memory) == this;
  1044   } else if( op == Op_AddL ) {
  1045     // Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
  1046     return n->Opcode() == Op_ConvL2I && n->in(1) == this;
  1047   } else if( op == Op_SubI || op == Op_SubL ) {
  1048     // Condition for subI(x,subI(y,z)) ==> subI(addI(x,z),y)
  1049     return n->Opcode() == op && n->in(2) == this;
  1050   }
  1051   return false;
  1052 };
  1054 //--------------------------find_exact_control---------------------------------
  1055 // Skip Proj and CatchProj node chains. Check for Null and Top.
  1056 Node* Node::find_exact_control(Node* ctrl) {
  1057   if (ctrl == NULL && this->is_Region())
  1058     ctrl = this->as_Region()->is_copy();
  1060   if (ctrl != NULL && ctrl->is_CatchProj()) {
  1061     if (ctrl->as_CatchProj()->_con == CatchProjNode::fall_through_index)
  1062       ctrl = ctrl->in(0);
  1063     if (ctrl != NULL && !ctrl->is_top())
  1064       ctrl = ctrl->in(0);
  1065   }
  1067   if (ctrl != NULL && ctrl->is_Proj())
  1068     ctrl = ctrl->in(0);
  1070   return ctrl;
  1071 }
  1073 //--------------------------dominates------------------------------------------
  1074 // Helper function for MemNode::all_controls_dominate().
  1075 // Check if 'this' control node dominates or is equal to 'sub' control node.
  1076 // We already know that if any path back to Root or Start reaches 'this',
  1077 // then all paths do, so this is a simple search for one example,
  1078 // not an exhaustive search for a counterexample.
  1079 bool Node::dominates(Node* sub, Node_List &nlist) {
  1080   assert(this->is_CFG(), "expecting control");
  1081   assert(sub != NULL && sub->is_CFG(), "expecting control");
  1083   // detect dead cycle without regions
  1084   int iterations_without_region_limit = DominatorSearchLimit;
  1086   Node* orig_sub = sub;
  1087   Node* dom      = this;
  1088   bool  met_dom  = false;
  1089   nlist.clear();
  1091   // Walk 'sub' backward up the chain to 'dom', watching for regions.
  1092   // After seeing 'dom', continue up to Root or Start.
  1093   // If we hit a region (backward split point), it may be a loop head.
  1094   // Keep going through one of the region's inputs.  If we reach the
  1095   // same region again, go through a different input.  Eventually we
  1096   // will either exit through the loop head, or give up.
  1097   // (If we get confused, break out and return a conservative 'false'.)
  1098   while (sub != NULL) {
  1099     if (sub->is_top())  break; // Conservative answer for dead code.
  1100     if (sub == dom) {
  1101       if (nlist.size() == 0) {
  1102         // No Region nodes except loops were visited before and the EntryControl
  1103         // path was taken for loops: it did not walk in a cycle.
  1104         return true;
  1105       } else if (met_dom) {
  1106         break;          // already met before: walk in a cycle
  1107       } else {
  1108         // Region nodes were visited. Continue walk up to Start or Root
  1109         // to make sure that it did not walk in a cycle.
  1110         met_dom = true; // first time meet
  1111         iterations_without_region_limit = DominatorSearchLimit; // Reset
  1112       }
  1113     }
  1114     if (sub->is_Start() || sub->is_Root()) {
  1115       // Success if we met 'dom' along a path to Start or Root.
  1116       // We assume there are no alternative paths that avoid 'dom'.
  1117       // (This assumption is up to the caller to ensure!)
  1118       return met_dom;
  1119     }
  1120     Node* up = sub->in(0);
  1121     // Normalize simple pass-through regions and projections:
  1122     up = sub->find_exact_control(up);
  1123     // If sub == up, we found a self-loop.  Try to push past it.
  1124     if (sub == up && sub->is_Loop()) {
  1125       // Take loop entry path on the way up to 'dom'.
  1126       up = sub->in(1); // in(LoopNode::EntryControl);
  1127     } else if (sub == up && sub->is_Region() && sub->req() != 3) {
  1128       // Always take in(1) path on the way up to 'dom' for clone regions
  1129       // (with only one input) or regions which merge > 2 paths
  1130       // (usually used to merge fast/slow paths).
  1131       up = sub->in(1);
  1132     } else if (sub == up && sub->is_Region()) {
  1133       // Try both paths for Regions with 2 input paths (it may be a loop head).
  1134       // It could give conservative 'false' answer without information
  1135       // which region's input is the entry path.
  1136       iterations_without_region_limit = DominatorSearchLimit; // Reset
  1138       bool region_was_visited_before = false;
  1139       // Was this Region node visited before?
  1140       // If so, we have reached it because we accidentally took a
  1141       // loop-back edge from 'sub' back into the body of the loop,
  1142       // and worked our way up again to the loop header 'sub'.
  1143       // So, take the first unexplored path on the way up to 'dom'.
  1144       for (int j = nlist.size() - 1; j >= 0; j--) {
  1145         intptr_t ni = (intptr_t)nlist.at(j);
  1146         Node* visited = (Node*)(ni & ~1);
  1147         bool  visited_twice_already = ((ni & 1) != 0);
  1148         if (visited == sub) {
  1149           if (visited_twice_already) {
  1150             // Visited 2 paths, but still stuck in loop body.  Give up.
  1151             return false;
  1152           }
  1153           // The Region node was visited before only once.
  1154           // (We will repush with the low bit set, below.)
  1155           nlist.remove(j);
  1156           // We will find a new edge and re-insert.
  1157           region_was_visited_before = true;
  1158           break;
  1159         }
  1160       }
  1162       // Find an incoming edge which has not been seen yet; walk through it.
  1163       assert(up == sub, "");
  1164       uint skip = region_was_visited_before ? 1 : 0;
  1165       for (uint i = 1; i < sub->req(); i++) {
  1166         Node* in = sub->in(i);
  1167         if (in != NULL && !in->is_top() && in != sub) {
  1168           if (skip == 0) {
  1169             up = in;
  1170             break;
  1171           }
  1172           --skip;               // skip this nontrivial input
  1173         }
  1174       }
  1176       // Set 0 bit to indicate that both paths were taken.
  1177       nlist.push((Node*)((intptr_t)sub + (region_was_visited_before ? 1 : 0)));
  1178     }
  1180     if (up == sub) {
  1181       break;    // some kind of tight cycle
  1182     }
  1183     if (up == orig_sub && met_dom) {
  1184       // returned back after visiting 'dom'
  1185       break;    // some kind of cycle
  1186     }
  1187     if (--iterations_without_region_limit < 0) {
  1188       break;    // dead cycle
  1189     }
  1190     sub = up;
  1191   }
  1193   // Did not meet Root or Start node in pred. chain.
  1194   // Conservative answer for dead code.
  1195   return false;
  1196 }
  1198 //------------------------------remove_dead_region-----------------------------
  1199 // This control node is dead.  Follow the subgraph below it making everything
  1200 // using it dead as well.  This will happen normally via the usual IterGVN
  1201 // worklist but this call is more efficient.  Do not update use-def info
  1202 // inside the dead region, just at the borders.
  1203 static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
  1204   // Con's are a popular node to re-hit in the hash table again.
  1205   if( dead->is_Con() ) return;
  1207   // Can't put ResourceMark here since igvn->_worklist uses the same arena
  1208   // for verify pass with +VerifyOpto and we add/remove elements in it here.
  1209   Node_List  nstack(Thread::current()->resource_area());
  1211   Node *top = igvn->C->top();
  1212   nstack.push(dead);
  1214   while (nstack.size() > 0) {
  1215     dead = nstack.pop();
  1216     if (dead->outcnt() > 0) {
  1217       // Keep dead node on stack until all uses are processed.
  1218       nstack.push(dead);
  1219       // For all Users of the Dead...    ;-)
  1220       for (DUIterator_Last kmin, k = dead->last_outs(kmin); k >= kmin; ) {
  1221         Node* use = dead->last_out(k);
  1222         igvn->hash_delete(use);       // Yank from hash table prior to mod
  1223         if (use->in(0) == dead) {     // Found another dead node
  1224           assert (!use->is_Con(), "Control for Con node should be Root node.");
  1225           use->set_req(0, top);       // Cut dead edge to prevent processing
  1226           nstack.push(use);           // the dead node again.
  1227         } else {                      // Else found a not-dead user
  1228           for (uint j = 1; j < use->req(); j++) {
  1229             if (use->in(j) == dead) { // Turn all dead inputs into TOP
  1230               use->set_req(j, top);
  1231             }
  1232           }
  1233           igvn->_worklist.push(use);
  1234         }
  1235         // Refresh the iterator, since any number of kills might have happened.
  1236         k = dead->last_outs(kmin);
  1237       }
  1238     } else { // (dead->outcnt() == 0)
  1239       // Done with outputs.
  1240       igvn->hash_delete(dead);
  1241       igvn->_worklist.remove(dead);
  1242       igvn->set_type(dead, Type::TOP);
  1243       if (dead->is_macro()) {
  1244         igvn->C->remove_macro_node(dead);
  1245       }
  1246       // Kill all inputs to the dead guy
  1247       for (uint i=0; i < dead->req(); i++) {
  1248         Node *n = dead->in(i);      // Get input to dead guy
  1249         if (n != NULL && !n->is_top()) { // Input is valid?
  1250           dead->set_req(i, top);    // Smash input away
  1251           if (n->outcnt() == 0) {   // Input also goes dead?
  1252             if (!n->is_Con())
  1253               nstack.push(n);       // Clear it out as well
  1254           } else if (n->outcnt() == 1 &&
  1255                      n->has_special_unique_user()) {
  1256             igvn->add_users_to_worklist( n );
  1257           } else if (n->outcnt() <= 2 && n->is_Store()) {
  1258             // Push store's uses on worklist to enable folding optimization for
  1259             // store/store and store/load to the same address.
  1260             // The restriction (outcnt() <= 2) is the same as in set_req_X()
  1261             // and remove_globally_dead_node().
  1262             igvn->add_users_to_worklist( n );
  1263           }
  1264         }
  1265       }
  1266     } // (dead->outcnt() == 0)
  1267   }   // while (nstack.size() > 0) for outputs
  1268   return;
  1269 }
  1271 //------------------------------remove_dead_region-----------------------------
  1272 bool Node::remove_dead_region(PhaseGVN *phase, bool can_reshape) {
  1273   Node *n = in(0);
  1274   if( !n ) return false;
  1275   // Lost control into this guy?  I.e., it became unreachable?
  1276   // Aggressively kill all unreachable code.
  1277   if (can_reshape && n->is_top()) {
  1278     kill_dead_code(this, phase->is_IterGVN());
  1279     return false; // Node is dead.
  1280   }
  1282   if( n->is_Region() && n->as_Region()->is_copy() ) {
  1283     Node *m = n->nonnull_req();
  1284     set_req(0, m);
  1285     return true;
  1286   }
  1287   return false;
  1288 }
  1290 //------------------------------Ideal_DU_postCCP-------------------------------
  1291 // Idealize graph, using DU info.  Must clone result into new-space
  1292 Node *Node::Ideal_DU_postCCP( PhaseCCP * ) {
  1293   return NULL;                 // Default to no change
  1294 }
  1296 //------------------------------hash-------------------------------------------
  1297 // Hash function over Nodes.
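       // The sum folds in the raw input pointers, then adds the input count and
       // the opcode, so the GVN value-numbering hash table can group nodes with
       // identical opcode and inputs; a NULL input simply contributes zero.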
  1298 uint Node::hash() const {
  1299   uint sum = 0;
  1300   for( uint i=0; i<_cnt; i++ )  // Add in all inputs
  1301     sum = (sum<<1)-(uintptr_t)in(i);        // Ignore embedded NULLs
  1302   return (sum>>2) + _cnt + Opcode();
  1303 }
  1305 //------------------------------cmp--------------------------------------------
  1306 // Compare special parts of simple Nodes
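       // The base class carries no node-specific state, so once opcode and inputs
       // match there is nothing left to distinguish: the non-zero return means
       // "equal".  Subclasses with extra data (e.g. TypeNode below) override this.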
  1307 uint Node::cmp( const Node &n ) const {
  1308   return 1;                     // Must be same
  1309 }
  1311 //------------------------------rematerialize-----------------------------------
  1312 // Should we clone rather than spill this instruction?
  1313 bool Node::rematerialize() const {
  1314   if ( is_Mach() )
  1315     return this->as_Mach()->rematerialize();
  1316   else
  1317     return (_flags & Flag_rematerialize) != 0;
  1318 }
  1320 //------------------------------needs_anti_dependence_check---------------------
  1321 // Nodes which use memory without consuming it, hence need antidependences.
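       // The typical case is a Load: it reads memory through in(1) but produces
       // no new memory state, so scheduling must not move it below a Store that
       // could overwrite the location it reads -- that ordering constraint is the
       // anti-dependence checked for here.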
  1322 bool Node::needs_anti_dependence_check() const {
  1323   if( req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0 )
  1324     return false;
  1325   else
  1326     return in(1)->bottom_type()->has_memory();
  1327 }
  1330 // Get an integer constant from a ConNode (or CastIINode).
  1331 // Return a default value if there is no apparent constant here.
  1332 const TypeInt* Node::find_int_type() const {
  1333   if (this->is_Type()) {
  1334     return this->as_Type()->type()->isa_int();
  1335   } else if (this->is_Con()) {
  1336     assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
  1337     return this->bottom_type()->isa_int();
  1338   }
  1339   return NULL;
  1340 }
  1342 // Get a pointer constant from a ConstNode.
  1343 // Returns the constant if it is a pointer ConstNode
  1344 intptr_t Node::get_ptr() const {
  1345   assert( Opcode() == Op_ConP, "" );
  1346   return ((ConPNode*)this)->type()->is_ptr()->get_con();
  1347 }
  1349 // Get a narrow oop constant from a ConNNode.
  1350 intptr_t Node::get_narrowcon() const {
  1351   assert( Opcode() == Op_ConN, "" );
  1352   return ((ConNNode*)this)->type()->is_narrowoop()->get_con();
  1353 }
  1355 // Get a long constant from a ConNode.
  1356 // Return a default value if there is no apparent constant here.
  1357 const TypeLong* Node::find_long_type() const {
  1358   if (this->is_Type()) {
  1359     return this->as_Type()->type()->isa_long();
  1360   } else if (this->is_Con()) {
  1361     assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
  1362     return this->bottom_type()->isa_long();
  1363   }
  1364   return NULL;
  1365 }
  1367 // Get a double constant from a ConstNode.
  1368 // Returns the constant if it is a double ConstNode
  1369 jdouble Node::getd() const {
  1370   assert( Opcode() == Op_ConD, "" );
  1371   return ((ConDNode*)this)->type()->is_double_constant()->getd();
  1372 }
  1374 // Get a float constant from a ConstNode.
  1375 // Returns the constant if it is a float ConstNode
  1376 jfloat Node::getf() const {
  1377   assert( Opcode() == Op_ConF, "" );
  1378   return ((ConFNode*)this)->type()->is_float_constant()->getf();
  1379 }
  1381 #ifndef PRODUCT
  1383 //----------------------------NotANode----------------------------------------
  1384 // Used in debugging code to avoid walking across dead or uninitialized edges.
  1385 static inline bool NotANode(const Node* n) {
  1386   if (n == NULL)                   return true;
  1387   if (((intptr_t)n & 1) != 0)      return true;  // uninitialized, etc.
  1388   if (*(address*)n == badAddress)  return true;  // kill by Node::destruct
  1389   return false;
  1390 }
  1393 //------------------------------find------------------------------------------
  1394 // Find a neighbor of this Node with the given _idx
  1395 // If idx is negative, find its absolute value, following both _in and _out.
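       // With a non-negative idx only the def (input) edges are followed; in
       // debug builds a node also matches if its _debug_idx equals the target.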
  1396 static void find_recur(Compile* C,  Node* &result, Node *n, int idx, bool only_ctrl,
  1397                         VectorSet* old_space, VectorSet* new_space ) {
  1398   int node_idx = (idx >= 0) ? idx : -idx;
  1399   if (NotANode(n))  return;  // Gracefully handle NULL, -1, 0xabababab, etc.
  1400   // Contained in new_space or old_space?   Check old_arena first since it's mostly empty.
  1401   VectorSet *v = C->old_arena()->contains(n) ? old_space : new_space;
  1402   if( v->test(n->_idx) ) return;
  1403   if( (int)n->_idx == node_idx
  1404       debug_only(|| n->debug_idx() == node_idx) ) {
  1405     if (result != NULL)
  1406       tty->print("find: " INTPTR_FORMAT " and " INTPTR_FORMAT " both have idx==%d\n",
  1407                  (uintptr_t)result, (uintptr_t)n, node_idx);
  1408     result = n;
  1409   }
  1410   v->set(n->_idx);
  1411   for( uint i=0; i<n->len(); i++ ) {
  1412     if( only_ctrl && !(n->is_Region()) && (n->Opcode() != Op_Root) && (i != TypeFunc::Control) ) continue;
  1413     find_recur(C, result, n->in(i), idx, only_ctrl, old_space, new_space );
  1414   }
  1415   // Search along forward edges also:
  1416   if (idx < 0 && !only_ctrl) {
  1417     for( uint j=0; j<n->outcnt(); j++ ) {
  1418       find_recur(C, result, n->raw_out(j), idx, only_ctrl, old_space, new_space );
  1419     }
  1420   }
  1421 #ifdef ASSERT
  1422   // Search along debug_orig edges last, checking for cycles
  1423   Node* orig = n->debug_orig();
  1424   if (orig != NULL) {
  1425     do {
  1426       if (NotANode(orig))  break;
  1427       find_recur(C, result, orig, idx, only_ctrl, old_space, new_space );
  1428       orig = orig->debug_orig();
  1429     } while (orig != NULL && orig != n->debug_orig());
  1430   }
  1431 #endif //ASSERT
  1432 }
  1434 // call this from debugger:
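       // Illustrative (hypothetical) session from a debugger such as gdb:
       //   (gdb) call find_node(n, 42)    -- look near node n for _idx == 42
       //   (gdb) call find_node(n, -42)   -- negative idx also follows out-edges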
  1435 Node* find_node(Node* n, int idx) {
  1436   return n->find(idx);
  1437 }
  1439 //------------------------------find-------------------------------------------
  1440 Node* Node::find(int idx) const {
  1441   ResourceArea *area = Thread::current()->resource_area();
  1442   VectorSet old_space(area), new_space(area);
  1443   Node* result = NULL;
  1444   find_recur(Compile::current(), result, (Node*) this, idx, false, &old_space, &new_space );
  1445   return result;
  1446 }
  1448 //------------------------------find_ctrl--------------------------------------
  1449 // Find an ancestor to this node in the control history with given _idx
  1450 Node* Node::find_ctrl(int idx) const {
  1451   ResourceArea *area = Thread::current()->resource_area();
  1452   VectorSet old_space(area), new_space(area);
  1453   Node* result = NULL;
  1454   find_recur(Compile::current(), result, (Node*) this, idx, true, &old_space, &new_space );
  1455   return result;
  1456 }
  1457 #endif
  1461 #ifndef PRODUCT
  1462 int Node::_in_dump_cnt = 0;
  1464 // -----------------------------Name-------------------------------------------
  1465 extern const char *NodeClassNames[];
  1466 const char *Node::Name() const { return NodeClassNames[Opcode()]; }
  1468 static bool is_disconnected(const Node* n) {
  1469   for (uint i = 0; i < n->req(); i++) {
  1470     if (n->in(i) != NULL)  return false;
  1471   }
  1472   return true;
  1473 }
  1475 #ifdef ASSERT
  1476 static void dump_orig(Node* orig) {
  1477   Compile* C = Compile::current();
  1478   if (NotANode(orig))  orig = NULL;
  1479   if (orig != NULL && !C->node_arena()->contains(orig))  orig = NULL;
  1480   if (orig == NULL)  return;
  1481   tty->print(" !orig=");
  1482   Node* fast = orig->debug_orig(); // tortoise & hare algorithm to detect loops
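       // 'fast' advances two debug_orig hops for every single hop of 'orig';
       // on a cyclic chain the two eventually coincide, and the loop prints
       // "..." and stops instead of iterating forever.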
  1483   if (NotANode(fast))  fast = NULL;
  1484   while (orig != NULL) {
  1485     bool discon = is_disconnected(orig);  // if discon, print [123] else 123
  1486     if (discon)  tty->print("[");
  1487     if (!Compile::current()->node_arena()->contains(orig))
  1488       tty->print("o");
  1489     tty->print("%d", orig->_idx);
  1490     if (discon)  tty->print("]");
  1491     orig = orig->debug_orig();
  1492     if (NotANode(orig))  orig = NULL;
  1493     if (orig != NULL && !C->node_arena()->contains(orig))  orig = NULL;
  1494     if (orig != NULL)  tty->print(",");
  1495     if (fast != NULL) {
  1496       // Step fast twice for each single step of orig:
  1497       fast = fast->debug_orig();
  1498       if (NotANode(fast))  fast = NULL;
  1499       if (fast != NULL && fast != orig) {
  1500         fast = fast->debug_orig();
  1501         if (NotANode(fast))  fast = NULL;
  1502       }
  1503       if (fast == orig) {
  1504         tty->print("...");
  1505         break;
  1506       }
  1507     }
  1508   }
  1509 }
  1511 void Node::set_debug_orig(Node* orig) {
  1512   _debug_orig = orig;
  1513   if (BreakAtNode == 0)  return;
  1514   if (NotANode(orig))  orig = NULL;
  1515   int trip = 10;
  1516   while (orig != NULL) {
  1517     if (orig->debug_idx() == BreakAtNode || (int)orig->_idx == BreakAtNode) {
  1518       tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d orig._idx=%d orig._debug_idx=%d",
  1519                     this->_idx, this->debug_idx(), orig->_idx, orig->debug_idx());
  1520       BREAKPOINT;
  1521     }
  1522     orig = orig->debug_orig();
  1523     if (NotANode(orig))  orig = NULL;
  1524     if (trip-- <= 0)  break;
  1525   }
  1526 }
  1527 #endif //ASSERT
  1529 //------------------------------dump------------------------------------------
  1530 // Dump a Node
  1531 void Node::dump() const {
  1532   Compile* C = Compile::current();
  1533   bool is_new = C->node_arena()->contains(this);
  1534   _in_dump_cnt++;
  1535   tty->print("%c%d\t%s\t=== ",
  1536              is_new ? ' ' : 'o', _idx, Name());
  1538   // Dump the required and precedence inputs
  1539   dump_req();
  1540   dump_prec();
  1541   // Dump the outputs
  1542   dump_out();
  1544   if (is_disconnected(this)) {
  1545 #ifdef ASSERT
  1546     tty->print("  [%d]",debug_idx());
  1547     dump_orig(debug_orig());
  1548 #endif
  1549     tty->cr();
  1550     _in_dump_cnt--;
  1551     return;                     // don't process dead nodes
  1552   }
  1554   // Dump node-specific info
  1555   dump_spec(tty);
  1556 #ifdef ASSERT
  1557   // Dump the non-reset _debug_idx
  1558   if( Verbose && WizardMode ) {
  1559     tty->print("  [%d]",debug_idx());
  1560   }
  1561 #endif
  1563   const Type *t = bottom_type();
  1565   if (t != NULL && (t->isa_instptr() || t->isa_klassptr())) {
  1566     const TypeInstPtr  *toop = t->isa_instptr();
  1567     const TypeKlassPtr *tkls = t->isa_klassptr();
  1568     ciKlass*           klass = toop ? toop->klass() : (tkls ? tkls->klass() : NULL );
  1569     if( klass && klass->is_loaded() && klass->is_interface() ) {
  1570       tty->print("  Interface:");
  1571     } else if( toop ) {
  1572       tty->print("  Oop:");
  1573     } else if( tkls ) {
  1574       tty->print("  Klass:");
  1575     }
  1576     t->dump();
  1577   } else if( t == Type::MEMORY ) {
  1578     tty->print("  Memory:");
  1579     MemNode::dump_adr_type(this, adr_type(), tty);
  1580   } else if( Verbose || WizardMode ) {
  1581     tty->print("  Type:");
  1582     if( t ) {
  1583       t->dump();
  1584     } else {
  1585       tty->print("no type");
  1586     }
  1587   } else if (t->isa_vect() && this->is_MachSpillCopy()) {
  1588     // Dump MachSpillcopy vector type.
  1589     t->dump();
  1590   }
  1591   if (is_new) {
  1592     debug_only(dump_orig(debug_orig()));
  1593     Node_Notes* nn = C->node_notes_at(_idx);
  1594     if (nn != NULL && !nn->is_clear()) {
  1595       if (nn->jvms() != NULL) {
  1596         tty->print(" !jvms:");
  1597         nn->jvms()->dump_spec(tty);
  1598       }
  1599     }
  1600   }
  1601   tty->cr();
  1602   _in_dump_cnt--;
  1603 }
  1605 //------------------------------dump_req--------------------------------------
  1606 void Node::dump_req() const {
  1607   // Dump the required input edges
  1608   for (uint i = 0; i < req(); i++) {    // For all required inputs
  1609     Node* d = in(i);
  1610     if (d == NULL) {
  1611       tty->print("_ ");
  1612     } else if (NotANode(d)) {
  1613       tty->print("NotANode ");  // uninitialized, sentinel, garbage, etc.
  1614     } else {
  1615       tty->print("%c%d ", Compile::current()->node_arena()->contains(d) ? ' ' : 'o', d->_idx);
  1616     }
  1617   }
  1618 }
  1621 //------------------------------dump_prec-------------------------------------
  1622 void Node::dump_prec() const {
  1623   // Dump the precedence edges
  1624   int any_prec = 0;
  1625   for (uint i = req(); i < len(); i++) {       // For all precedence inputs
  1626     Node* p = in(i);
  1627     if (p != NULL) {
  1628       if( !any_prec++ ) tty->print(" |");
  1629       if (NotANode(p)) { tty->print("NotANode "); continue; }
  1630       tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
  1631     }
  1632   }
  1633 }
  1635 //------------------------------dump_out--------------------------------------
  1636 void Node::dump_out() const {
  1637   // Delimit the output edges
  1638   tty->print(" [[");
  1639   // Dump the output edges
  1640   for (uint i = 0; i < _outcnt; i++) {    // For all outputs
  1641     Node* u = _out[i];
  1642     if (u == NULL) {
  1643       tty->print("_ ");
  1644     } else if (NotANode(u)) {
  1645       tty->print("NotANode ");
  1646     } else {
  1647       tty->print("%c%d ", Compile::current()->node_arena()->contains(u) ? ' ' : 'o', u->_idx);
  1648     }
  1649   }
  1650   tty->print("]] ");
  1651 }
  1653 //------------------------------dump_nodes-------------------------------------
  1654 static void dump_nodes(const Node* start, int d, bool only_ctrl) {
  1655   Node* s = (Node*)start; // remove const
  1656   if (NotANode(s)) return;
  1658   uint depth = (uint)ABS(d);
  1659   int direction = d;
  1660   Compile* C = Compile::current();
  1661   GrowableArray <Node *> nstack(C->unique());
  1663   nstack.append(s);
  1664   int begin = 0;
  1665   int end = 0;
  1666   for(uint i = 0; i < depth; i++) {
  1667     end = nstack.length();
  1668     for(int j = begin; j < end; j++) {
  1669       Node* tp  = nstack.at(j);
  1670       uint limit = direction > 0 ? tp->len() : tp->outcnt();
  1671       for(uint k = 0; k < limit; k++) {
  1672         Node* n = direction > 0 ? tp->in(k) : tp->raw_out(k);
  1674         if (NotANode(n))  continue;
  1675         // do not recurse through top or the root (would reach unrelated stuff)
  1676         if (n->is_Root() || n->is_top())  continue;
  1677         if (only_ctrl && !n->is_CFG()) continue;
  1679         bool on_stack = nstack.contains(n);
  1680         if (!on_stack) {
  1681           nstack.append(n);
  1682         }
  1683       }
  1684     }
  1685     begin = end;
  1686   }
  1687   end = nstack.length();
  1688   if (direction > 0) {
  1689     for(int j = end-1; j >= 0; j--) {
  1690       nstack.at(j)->dump();
  1691     }
  1692   } else {
  1693     for(int j = 0; j < end; j++) {
  1694       nstack.at(j)->dump();
  1695     }
  1696   }
  1697 }
  1699 //------------------------------dump-------------------------------------------
  1700 void Node::dump(int d) const {
  1701   dump_nodes(this, d, false);
  1702 }
  1704 //------------------------------dump_ctrl--------------------------------------
  1705 // Dump a Node's control history to depth
  1706 void Node::dump_ctrl(int d) const {
  1707   dump_nodes(this, d, true);
  1708 }
  1710 // VERIFICATION CODE
  1711 // For each input edge to a node (ie - for each Use-Def edge), verify that
  1712 // there is a corresponding Def-Use edge.
  1713 //------------------------------verify_edges-----------------------------------
  1714 void Node::verify_edges(Unique_Node_List &visited) {
  1715   uint i, j, idx;
  1716   int  cnt;
  1717   Node *n;
  1719   // Recursive termination test
  1720   if (visited.member(this))  return;
  1721   visited.push(this);
  1723   // Walk over all input edges, checking for correspondence
  1724   for( i = 0; i < len(); i++ ) {
  1725     n = in(i);
  1726     if (n != NULL && !n->is_top()) {
  1727       // Count instances of (Node *)this
  1728       cnt = 0;
  1729       for (idx = 0; idx < n->_outcnt; idx++ ) {
  1730         if (n->_out[idx] == (Node *)this)  cnt++;
  1731       }
  1732       assert( cnt > 0,"Failed to find Def-Use edge." );
  1733       // Check for duplicate edges
  1734       // walk the input array downcounting the input edges to n
  1735       for( j = 0; j < len(); j++ ) {
  1736         if( in(j) == n ) cnt--;
  1737       }
  1738       assert( cnt == 0,"Mismatched edge count.");
  1739     } else if (n == NULL) {
  1740       assert(i >= req() || i == 0 || is_Region() || is_Phi(), "only regions or phis have null data edges");
  1741     } else {
  1742       assert(n->is_top(), "sanity");
  1743       // Nothing to check.
  1744     }
  1745   }
  1746   // Recursive walk over all input edges
  1747   for( i = 0; i < len(); i++ ) {
  1748     n = in(i);
  1749     if( n != NULL )
  1750       in(i)->verify_edges(visited);
  1751   }
  1752 }
  1754 //------------------------------verify_recur-----------------------------------
  1755 static const Node *unique_top = NULL;
  1757 void Node::verify_recur(const Node *n, int verify_depth,
  1758                         VectorSet &old_space, VectorSet &new_space) {
  1759   if ( verify_depth == 0 )  return;
  1760   if (verify_depth > 0)  --verify_depth;
  1762   Compile* C = Compile::current();
  1764   // Contained in new_space or old_space?
  1765   VectorSet *v = C->node_arena()->contains(n) ? &new_space : &old_space;
  1766   // Check for visited in the proper space.  Numberings are not unique
  1767   // across spaces so we need a separate VectorSet for each space.
  1768   if( v->test_set(n->_idx) ) return;
  1770   if (n->is_Con() && n->bottom_type() == Type::TOP) {
  1771     if (C->cached_top_node() == NULL)
  1772       C->set_cached_top_node((Node*)n);
  1773     assert(C->cached_top_node() == n, "TOP node must be unique");
  1774   }
  1776   for( uint i = 0; i < n->len(); i++ ) {
  1777     Node *x = n->in(i);
  1778     if (!x || x->is_top()) continue;
  1780     // Verify my input has a def-use edge to me
  1781     if (true /*VerifyDefUse*/) {
  1782       // Count use-def edges from n to x
  1783       int cnt = 0;
  1784       for( uint j = 0; j < n->len(); j++ )
  1785         if( n->in(j) == x )
  1786           cnt++;
  1787       // Count def-use edges from x to n
  1788       uint max = x->_outcnt;
  1789       for( uint k = 0; k < max; k++ )
  1790         if (x->_out[k] == n)
  1791           cnt--;
  1792       assert( cnt == 0, "mismatched def-use edge counts" );
  1793     }
  1795     verify_recur(x, verify_depth, old_space, new_space);
  1796   }
  1798 }
  1800 //------------------------------verify-----------------------------------------
  1801 // Check Def-Use info for my subgraph
  1802 void Node::verify() const {
  1803   Compile* C = Compile::current();
  1804   Node* old_top = C->cached_top_node();
  1805   ResourceMark rm;
  1806   ResourceArea *area = Thread::current()->resource_area();
  1807   VectorSet old_space(area), new_space(area);
  1808   verify_recur(this, -1, old_space, new_space);
  1809   C->set_cached_top_node(old_top);
  1810 }
  1811 #endif
  1814 //------------------------------walk-------------------------------------------
  1815 // Graph walk, with both pre-order and post-order functions
  1816 void Node::walk(NFunc pre, NFunc post, void *env) {
  1817   VectorSet visited(Thread::current()->resource_area()); // Setup for local walk
  1818   walk_(pre, post, env, visited);
  1819 }
  1821 void Node::walk_(NFunc pre, NFunc post, void *env, VectorSet &visited) {
  1822   if( visited.test_set(_idx) ) return;
  1823   pre(*this,env);               // Call the pre-order walk function
  1824   for( uint i=0; i<_max; i++ )
  1825     if( in(i) )                 // Input exists and is not walked?
  1826       in(i)->walk_(pre,post,env,visited); // Walk it with pre & post functions
  1827   post(*this,env);              // Call the post-order walk function
  1828 }
  1830 void Node::nop(Node &, void*) {}
  1832 //------------------------------Registers--------------------------------------
  1833 // Do we Match on this edge index or not?  Generally false for Control
  1834 // and true for everything else.  Weird for calls & returns.
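       // The default returns the index itself: edge 0 (control) yields 0 (false)
       // and every other edge yields non-zero (true); nodes whose operands do not
       // follow this rule (calls, returns, etc., as noted above) override it.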
  1835 uint Node::match_edge(uint idx) const {
  1836   return idx;                   // True for other than index 0 (control)
  1837 }
  1839 // Register classes are defined for specific machines
  1840 const RegMask &Node::out_RegMask() const {
  1841   ShouldNotCallThis();
  1842   return *(new RegMask());
  1843 }
  1845 const RegMask &Node::in_RegMask(uint) const {
  1846   ShouldNotCallThis();
  1847   return *(new RegMask());
  1848 }
  1850 //=============================================================================
  1851 //-----------------------------------------------------------------------------
  1852 void Node_Array::reset( Arena *new_arena ) {
  1853   _a->Afree(_nodes,_max*sizeof(Node*));
  1854   _max   = 0;
  1855   _nodes = NULL;
  1856   _a     = new_arena;
  1857 }
  1859 //------------------------------clear------------------------------------------
  1860 // Clear all entries in _nodes to NULL but keep storage
  1861 void Node_Array::clear() {
  1862   Copy::zero_to_bytes( _nodes, _max*sizeof(Node*) );
  1863 }
  1865 //-----------------------------------------------------------------------------
  1866 void Node_Array::grow( uint i ) {
  1867   if( !_max ) {
  1868     _max = 1;
  1869     _nodes = (Node**)_a->Amalloc( _max * sizeof(Node*) );
  1870     _nodes[0] = NULL;
  1871   }
  1872   uint old = _max;
  1873   while( i >= _max ) _max <<= 1;        // Double to fit
  1874   _nodes = (Node**)_a->Arealloc( _nodes, old*sizeof(Node*),_max*sizeof(Node*));
  1875   Copy::zero_to_bytes( &_nodes[old], (_max-old)*sizeof(Node*) );
  1876 }
  1878 //-----------------------------------------------------------------------------
  1879 void Node_Array::insert( uint i, Node *n ) {
  1880   if( _nodes[_max-1] ) grow(_max);      // Get more space if full
  1881   Copy::conjoint_words_to_higher((HeapWord*)&_nodes[i], (HeapWord*)&_nodes[i+1], ((_max-i-1)*sizeof(Node*)));
  1882   _nodes[i] = n;
  1883 }
  1885 //-----------------------------------------------------------------------------
  1886 void Node_Array::remove( uint i ) {
  1887   Copy::conjoint_words_to_lower((HeapWord*)&_nodes[i+1], (HeapWord*)&_nodes[i], ((_max-i-1)*sizeof(Node*)));
  1888   _nodes[_max-1] = NULL;
  1889 }
  1891 //-----------------------------------------------------------------------------
  1892 void Node_Array::sort( C_sort_func_t func) {
  1893   qsort( _nodes, _max, sizeof( Node* ), func );
  1894 }
  1896 //-----------------------------------------------------------------------------
  1897 void Node_Array::dump() const {
  1898 #ifndef PRODUCT
  1899   for( uint i = 0; i < _max; i++ ) {
  1900     Node *nn = _nodes[i];
  1901     if( nn != NULL ) {
  1902       tty->print("%5d--> ",i); nn->dump();
  1903     }
  1904   }
  1905 #endif
  1906 }
  1908 //--------------------------is_iteratively_computed------------------------------
  1909 // Operation appears to be iteratively computed (such as an induction variable)
  1910 // It is possible for this operation to return false for a loop-varying
  1911 // value, if it appears (by local graph inspection) to be computed by a simple conditional.
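       // Example: a loop induction variable -- the add that increments it has the
       // loop Phi as an input, while that same Phi has the add as its back-edge
       // input; this mutual-input pattern is what the nested loops below detect.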
  1912 bool Node::is_iteratively_computed() {
  1913   if (ideal_reg()) { // does operation have a result register?
  1914     for (uint i = 1; i < req(); i++) {
  1915       Node* n = in(i);
  1916       if (n != NULL && n->is_Phi()) {
  1917         for (uint j = 1; j < n->req(); j++) {
  1918           if (n->in(j) == this) {
  1919             return true;
  1920           }
  1921         }
  1922       }
  1923     }
  1924   }
  1925   return false;
  1926 }
  1928 //--------------------------find_similar------------------------------
  1929 // Return a node with opcode "opc" and same inputs as "this" if one can
  1930 // be found; Otherwise return NULL;
  1931 Node* Node::find_similar(int opc) {
  1932   if (req() >= 2) {
  1933     Node* def = in(1);
  1934     if (def && def->outcnt() >= 2) {
  1935       for (DUIterator_Fast dmax, i = def->fast_outs(dmax); i < dmax; i++) {
  1936         Node* use = def->fast_out(i);
  1937         if (use->Opcode() == opc &&
  1938             use->req() == req()) {
  1939           uint j;
  1940           for (j = 0; j < use->req(); j++) {
  1941             if (use->in(j) != in(j)) {
  1942               break;
  1943             }
  1944           }
  1945           if (j == use->req()) {
  1946             return use;
  1947           }
  1948         }
  1949       }
  1950     }
  1951   }
  1952   return NULL;
  1953 }
  1956 //--------------------------unique_ctrl_out------------------------------
  1957 // Return the unique control out if only one. Null if none or more than one.
  1958 Node* Node::unique_ctrl_out() {
  1959   Node* found = NULL;
  1960   for (uint i = 0; i < outcnt(); i++) {
  1961     Node* use = raw_out(i);
  1962     if (use->is_CFG() && use != this) {
  1963       if (found != NULL) return NULL;
  1964       found = use;
  1965     }
  1966   }
  1967   return found;
  1968 }
  1970 //=============================================================================
  1971 //------------------------------yank-------------------------------------------
  1972 // Find and remove
  1973 void Node_List::yank( Node *n ) {
  1974   uint i;
  1975   for( i = 0; i < _cnt; i++ )
  1976     if( _nodes[i] == n )
  1977       break;
  1979   if( i < _cnt )
  1980     _nodes[i] = _nodes[--_cnt];
  1981 }
  1983 //------------------------------dump-------------------------------------------
  1984 void Node_List::dump() const {
  1985 #ifndef PRODUCT
  1986   for( uint i = 0; i < _cnt; i++ )
  1987     if( _nodes[i] ) {
  1988       tty->print("%5d--> ",i);
  1989       _nodes[i]->dump();
  1990     }
  1991 #endif
  1992 }
  1994 //=============================================================================
  1995 //------------------------------remove-----------------------------------------
  1996 void Unique_Node_List::remove( Node *n ) {
  1997   if( _in_worklist[n->_idx] ) {
  1998     for( uint i = 0; i < size(); i++ )
  1999       if( _nodes[i] == n ) {
  2000         map(i,Node_List::pop());
  2001         _in_worklist >>= n->_idx;
  2002         return;
  2003       }
  2004     ShouldNotReachHere();
  2005   }
  2006 }
  2008 //-----------------------remove_useless_nodes----------------------------------
  2009 // Remove useless nodes from worklist
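       // Removal swaps the last worklist entry into the vacated slot via
       // map(i, Node_List::pop()), so each removal is O(1) but the relative
       // order of the surviving entries is not preserved.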
  2010 void Unique_Node_List::remove_useless_nodes(VectorSet &useful) {
  2012   for( uint i = 0; i < size(); ++i ) {
  2013     Node *n = at(i);
  2014     assert( n != NULL, "Did not expect null entries in worklist");
  2015     if( ! useful.test(n->_idx) ) {
  2016       _in_worklist >>= n->_idx;
  2017       map(i,Node_List::pop());
  2018       // Node *replacement = Node_List::pop();
  2019       // if( i != size() ) { // Check if removing last entry
  2020       //   _nodes[i] = replacement;
  2021       // }
  2022       --i;  // Visit popped node
  2023       // If it was last entry, loop terminates since size() was also reduced
  2024     }
  2025   }
  2026 }
  2028 //=============================================================================
  2029 void Node_Stack::grow() {
  2030   size_t old_top = pointer_delta(_inode_top,_inodes,sizeof(INode)); // save _top
  2031   size_t old_max = pointer_delta(_inode_max,_inodes,sizeof(INode));
  2032   size_t max = old_max << 1;             // max * 2
  2033   _inodes = REALLOC_ARENA_ARRAY(_a, INode, _inodes, old_max, max);
  2034   _inode_max = _inodes + max;
  2035   _inode_top = _inodes + old_top;        // restore _top
  2036 }
  2038 // Node_Stack is used to map nodes.
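       // find() is a plain linear scan over the (index, node) pairs currently on
       // the stack, so its cost grows with the stack size.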
  2039 Node* Node_Stack::find(uint idx) const {
  2040   uint sz = size();
  2041   for (uint i=0; i < sz; i++) {
  2042     if (idx == index_at(i) )
  2043       return node_at(i);
  2044   }
  2045   return NULL;
  2046 }
  2048 //=============================================================================
  2049 uint TypeNode::size_of() const { return sizeof(*this); }
  2050 #ifndef PRODUCT
  2051 void TypeNode::dump_spec(outputStream *st) const {
  2052   if( !Verbose && !WizardMode ) {
  2053     // standard dump does this in Verbose and WizardMode
  2054     st->print(" #"); _type->dump_on(st);
  2055   }
  2056 }
  2057 #endif
  2058 uint TypeNode::hash() const {
  2059   return Node::hash() + _type->hash();
  2060 }
  2061 uint TypeNode::cmp( const Node &n ) const
  2062 { return !Type::cmp( _type, ((TypeNode&)n)._type ); }
  2063 const Type *TypeNode::bottom_type() const { return _type; }
  2064 const Type *TypeNode::Value( PhaseTransform * ) const { return _type; }
  2066 //------------------------------ideal_reg--------------------------------------
  2067 uint TypeNode::ideal_reg() const {
  2068   return _type->ideal_reg();
  2069 }
