src/share/vm/opto/escape.cpp

author       kvn
date         Mon, 14 Nov 2011 18:38:03 -0800
changeset    3309   8c57262447d3
parent       3254   59e515ee9354
child        3311   1bd45abaa507
permissions  -rw-r--r--

7105605: Use EA info to optimize pointers compare
Summary: optimize pointers compare using EA information.
Reviewed-by: never, twisti
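
What the change does, in brief: once the connection graph is built, a pointer comparison (CmpP/CmpN) can sometimes be folded to a constant, because two pointers whose points-to sets are disjoint and contain only non-escaping allocations can never be equal. (The constructor below initializes _pcmp_eq and _pcmp_neq, the placeholders this changeset adds for the two foldable outcomes.) A minimal standalone sketch of that decision, using hypothetical names rather than the HotSpot API:

#include <set>
#include <algorithm>
#include <iterator>

// Hypothetical summary of what EA knows about one pointer: the set of
// allocation ids it may refer to, and whether all of them are non-escaping
// (if any escapes, unknown aliases may exist and nothing can be folded).
struct PointsToSummary {
  std::set<int> alloc_ids;
  bool all_non_escaping;
};

// Returns true when p1 == p2 can be folded to false: both points-to sets
// are fully known (non-escaping allocations only) and share no allocation.
bool provably_not_equal(const PointsToSummary& p1, const PointsToSummary& p2) {
  if (!p1.all_non_escaping || !p2.all_non_escaping)
    return false;
  std::set<int> common;
  std::set_intersection(p1.alloc_ids.begin(), p1.alloc_ids.end(),
                        p2.alloc_ids.begin(), p2.alloc_ids.end(),
                        std::inserter(common, common.begin()));
  return common.empty();
}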

     1 /*
     2  * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "ci/bcEscapeAnalyzer.hpp"
    27 #include "libadt/vectset.hpp"
    28 #include "memory/allocation.hpp"
    29 #include "opto/c2compiler.hpp"
    30 #include "opto/callnode.hpp"
    31 #include "opto/cfgnode.hpp"
    32 #include "opto/compile.hpp"
    33 #include "opto/escape.hpp"
    34 #include "opto/phaseX.hpp"
    35 #include "opto/rootnode.hpp"
    37 void PointsToNode::add_edge(uint targIdx, PointsToNode::EdgeType et) {
    38   uint v = (targIdx << EdgeShift) + ((uint) et);
    39   if (_edges == NULL) {
    40      Arena *a = Compile::current()->comp_arena();
    41     _edges = new(a) GrowableArray<uint>(a, INITIAL_EDGE_COUNT, 0, 0);
    42   }
    43   _edges->append_if_missing(v);
    44 }
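
For reference, each edge is stored as a single unsigned value that packs the target node index with the edge type. A standalone sketch of the scheme, assuming EdgeShift == 2 with a two-bit mask, which is enough for the four EdgeType values listed in edge_type_suffix below:

static unsigned pack_edge(unsigned targ_idx, unsigned et) { return (targ_idx << 2) | et; }
static unsigned packed_target(unsigned v) { return v >> 2; }   // cf. edge_target(i)
static unsigned packed_type(unsigned v)   { return v & 3u; }   // cf. edge_type(i)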
    46 void PointsToNode::remove_edge(uint targIdx, PointsToNode::EdgeType et) {
    47   uint v = (targIdx << EdgeShift) + ((uint) et);
    49   _edges->remove(v);
    50 }
    52 #ifndef PRODUCT
    53 static const char *node_type_names[] = {
    54   "UnknownType",
    55   "JavaObject",
    56   "LocalVar",
    57   "Field"
    58 };
    60 static const char *esc_names[] = {
    61   "UnknownEscape",
    62   "NoEscape",
    63   "ArgEscape",
    64   "GlobalEscape"
    65 };
    67 static const char *edge_type_suffix[] = {
    68  "?", // UnknownEdge
    69  "P", // PointsToEdge
    70  "D", // DeferredEdge
    71  "F"  // FieldEdge
    72 };
    74 void PointsToNode::dump(bool print_state) const {
    75   NodeType nt = node_type();
    76   tty->print("%s ", node_type_names[(int) nt]);
    77   if (print_state) {
    78     EscapeState es = escape_state();
    79     tty->print("%s %s ", esc_names[(int) es], _scalar_replaceable ? "":"NSR");
    80   }
    81   tty->print("[[");
    82   for (uint i = 0; i < edge_count(); i++) {
    83     tty->print(" %d%s", edge_target(i), edge_type_suffix[(int) edge_type(i)]);
    84   }
    85   tty->print("]]  ");
    86   if (_node == NULL)
    87     tty->print_cr("<null>");
    88   else
    89     _node->dump();
    90 }
    91 #endif
    93 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
    94   _nodes(C->comp_arena(), C->unique(), C->unique(), PointsToNode()),
    95   _processed(C->comp_arena()),
    96   pt_ptset(C->comp_arena()),
    97   pt_visited(C->comp_arena()),
    98   pt_worklist(C->comp_arena(), 4, 0, 0),
    99   _collecting(true),
   100   _progress(false),
   101   _compile(C),
   102   _igvn(igvn),
   103   _node_map(C->comp_arena()) {
   105   _phantom_object = C->top()->_idx;
   106   add_node(C->top(), PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
   108   // Add ConP(#NULL) and ConN(#NULL) nodes.
   109   Node* oop_null = igvn->zerocon(T_OBJECT);
   110   _oop_null = oop_null->_idx;
   111   assert(_oop_null < nodes_size(), "should be created already");
   112   add_node(oop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);
   114   if (UseCompressedOops) {
   115     Node* noop_null = igvn->zerocon(T_NARROWOOP);
   116     _noop_null = noop_null->_idx;
   117     assert(_noop_null < nodes_size(), "should be created already");
   118     add_node(noop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);
   119   } else {
   120     _noop_null = _oop_null; // Should be initialized
   121   }
   122   _pcmp_neq = NULL; // Should be initialized
   123   _pcmp_eq  = NULL;
   124 }
   126 void ConnectionGraph::add_pointsto_edge(uint from_i, uint to_i) {
   127   PointsToNode *f = ptnode_adr(from_i);
   128   PointsToNode *t = ptnode_adr(to_i);
   130   assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
   131   assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of PointsTo edge");
   132   assert(t->node_type() == PointsToNode::JavaObject, "invalid destination of PointsTo edge");
   133   add_edge(f, to_i, PointsToNode::PointsToEdge);
   134 }
   136 void ConnectionGraph::add_deferred_edge(uint from_i, uint to_i) {
   137   PointsToNode *f = ptnode_adr(from_i);
   138   PointsToNode *t = ptnode_adr(to_i);
   140   assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
   141   assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of Deferred edge");
   142   assert(t->node_type() == PointsToNode::LocalVar || t->node_type() == PointsToNode::Field, "invalid destination of Deferred edge");
   143   // Don't add a self-referential edge; this can occur during removal of
   144   // deferred edges.
   145   if (from_i != to_i)
   146     add_edge(f, to_i, PointsToNode::DeferredEdge);
   147 }
   149 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
   150   const Type *adr_type = phase->type(adr);
   151   if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
   152       adr->in(AddPNode::Address)->is_Proj() &&
   153       adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
   154     // We are computing a raw address for a store captured by an Initialize
   155     // node; compute an appropriate address type. AddP cases #3 and #5 (see below).
   156     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
   157     assert(offs != Type::OffsetBot ||
   158            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
   159            "offset must be a constant or it is the initialization of an array");
   160     return offs;
   161   }
   162   const TypePtr *t_ptr = adr_type->isa_ptr();
   163   assert(t_ptr != NULL, "must be a pointer type");
   164   return t_ptr->offset();
   165 }
   167 void ConnectionGraph::add_field_edge(uint from_i, uint to_i, int offset) {
   168   PointsToNode *f = ptnode_adr(from_i);
   169   PointsToNode *t = ptnode_adr(to_i);
   171   assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
   172   assert(f->node_type() == PointsToNode::JavaObject, "invalid source of Field edge");
   173   assert(t->node_type() == PointsToNode::Field, "invalid destination of Field edge");
   174   assert (t->offset() == -1 || t->offset() == offset, "conflicting field offsets");
   175   t->set_offset(offset);
   177   add_edge(f, to_i, PointsToNode::FieldEdge);
   178 }
   180 void ConnectionGraph::set_escape_state(uint ni, PointsToNode::EscapeState es) {
   181   // Don't change non-escaping state of NULL pointer.
   182   if (ni == _noop_null || ni == _oop_null)
   183     return;
   184   PointsToNode *npt = ptnode_adr(ni);
   185   PointsToNode::EscapeState old_es = npt->escape_state();
   186   if (es > old_es)
   187     npt->set_escape_state(es);
   188 }
   190 void ConnectionGraph::add_node(Node *n, PointsToNode::NodeType nt,
   191                                PointsToNode::EscapeState es, bool done) {
   192   PointsToNode* ptadr = ptnode_adr(n->_idx);
   193   ptadr->_node = n;
   194   ptadr->set_node_type(nt);
   196   // inline set_escape_state(idx, es);
   197   PointsToNode::EscapeState old_es = ptadr->escape_state();
   198   if (es > old_es)
   199     ptadr->set_escape_state(es);
   201   if (done)
   202     _processed.set(n->_idx);
   203 }
   205 PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n) {
   206   uint idx = n->_idx;
   207   PointsToNode::EscapeState es;
   209   // If we are still collecting or there were no non-escaping allocations,
   210   // we don't know the answer yet.
   211   if (_collecting)
   212     return PointsToNode::UnknownEscape;
   214   // if the node was created after the escape computation, return
   215   // UnknownEscape
   216   if (idx >= nodes_size())
   217     return PointsToNode::UnknownEscape;
   219   es = ptnode_adr(idx)->escape_state();
   221   // if we have already computed a value, return it
   222   if (es != PointsToNode::UnknownEscape &&
   223       ptnode_adr(idx)->node_type() == PointsToNode::JavaObject)
   224     return es;
   226   // PointsTo() calls n->uncast() which can return a new ideal node.
   227   if (n->uncast()->_idx >= nodes_size())
   228     return PointsToNode::UnknownEscape;
   230   PointsToNode::EscapeState orig_es = es;
   232   // compute max escape state of anything this node could point to
   233   for(VectorSetI i(PointsTo(n)); i.test() && es != PointsToNode::GlobalEscape; ++i) {
   234     uint pt = i.elem;
   235     PointsToNode::EscapeState pes = ptnode_adr(pt)->escape_state();
   236     if (pes > es)
   237       es = pes;
   238   }
   239   if (orig_es != es) {
   240     // cache the computed escape state
   241     assert(es > orig_es, "should have computed an escape state");
   242     set_escape_state(idx, es);
   243   } // orig_es could be PointsToNode::UnknownEscape
   244   return es;
   245 }
   247 VectorSet* ConnectionGraph::PointsTo(Node * n) {
   248   pt_ptset.Reset();
   249   pt_visited.Reset();
   250   pt_worklist.clear();
   252 #ifdef ASSERT
   253   Node *orig_n = n;
   254 #endif
   256   n = n->uncast();
   257   PointsToNode* npt = ptnode_adr(n->_idx);
   259   // If we have a JavaObject, return just that object
   260   if (npt->node_type() == PointsToNode::JavaObject) {
   261     pt_ptset.set(n->_idx);
   262     return &pt_ptset;
   263   }
   264 #ifdef ASSERT
   265   if (npt->_node == NULL) {
   266     if (orig_n != n)
   267       orig_n->dump();
   268     n->dump();
   269     assert(npt->_node != NULL, "unregistered node");
   270   }
   271 #endif
   272   pt_worklist.push(n->_idx);
   273   while(pt_worklist.length() > 0) {
   274     int ni = pt_worklist.pop();
   275     if (pt_visited.test_set(ni))
   276       continue;
   278     PointsToNode* pn = ptnode_adr(ni);
   279     // ensure that all inputs of a Phi have been processed
   280     assert(!_collecting || !pn->_node->is_Phi() || _processed.test(ni),"");
   282     int edges_processed = 0;
   283     uint e_cnt = pn->edge_count();
   284     for (uint e = 0; e < e_cnt; e++) {
   285       uint etgt = pn->edge_target(e);
   286       PointsToNode::EdgeType et = pn->edge_type(e);
   287       if (et == PointsToNode::PointsToEdge) {
   288         pt_ptset.set(etgt);
   289         edges_processed++;
   290       } else if (et == PointsToNode::DeferredEdge) {
   291         pt_worklist.push(etgt);
   292         edges_processed++;
   293       } else {
   294         assert(false,"neither PointsToEdge nor DeferredEdge");
   295       }
   296     }
   297     if (edges_processed == 0) {
   298       // no deferred or pointsto edges found.  Assume the value was set
   299       // outside this method.  Add the phantom object to the pointsto set.
   300       pt_ptset.set(_phantom_object);
   301     }
   302   }
   303   return &pt_ptset;
   304 }
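
The loop above is a plain worklist closure: PointsTo edges contribute object ids to the result set, and Deferred edges are followed transitively. The same pattern as a self-contained sketch (plain ints standing in for the HotSpot node indices):

#include <set>
#include <utility>
#include <vector>

enum EdgeKind { PointsToE, DeferredE };

// adjacency list: for each node, its (kind, target) edges
typedef std::vector<std::vector<std::pair<EdgeKind, int> > > Graph;

// Follow Deferred edges transitively, collecting the targets of PointsTo
// edges -- the closure PointsTo() computes with pt_worklist/pt_visited.
std::set<int> points_to_closure(const Graph& g, int start) {
  std::set<int> result, visited;
  std::vector<int> worklist(1, start);
  while (!worklist.empty()) {
    int n = worklist.back(); worklist.pop_back();
    if (!visited.insert(n).second) continue;      // already processed
    for (size_t i = 0; i < g[n].size(); i++) {
      if (g[n][i].first == PointsToE) result.insert(g[n][i].second);
      else                            worklist.push_back(g[n][i].second);
    }
  }
  return result;
}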
   306 void ConnectionGraph::remove_deferred(uint ni, GrowableArray<uint>* deferred_edges, VectorSet* visited) {
   307   // This method is most expensive during ConnectionGraph construction.
   308   // Reuse a VectorSet and an additional growable array for deferred edges.
   309   deferred_edges->clear();
   310   visited->Reset();
   312   visited->set(ni);
   313   PointsToNode *ptn = ptnode_adr(ni);
   314   if (ptn->edge_count() == 0) {
   315     // No deferred or pointsto edges found.  Assume the value was set
   316     // outside this method.  Add edge to phantom object.
   317     add_pointsto_edge(ni, _phantom_object);
   318   }
   320   // Mark current edges as visited and move deferred edges to separate array.
   321   for (uint i = 0; i < ptn->edge_count(); ) {
   322     uint t = ptn->edge_target(i);
   323 #ifdef ASSERT
   324     assert(!visited->test_set(t), "expecting no duplications");
   325 #else
   326     visited->set(t);
   327 #endif
   328     if (ptn->edge_type(i) == PointsToNode::DeferredEdge) {
   329       ptn->remove_edge(t, PointsToNode::DeferredEdge);
   330       deferred_edges->append(t);
   331     } else {
   332       i++;
   333     }
   334   }
   335   for (int next = 0; next < deferred_edges->length(); ++next) {
   336     uint t = deferred_edges->at(next);
   337     PointsToNode *ptt = ptnode_adr(t);
   338     uint e_cnt = ptt->edge_count();
   339     if (e_cnt == 0) {
   340       // No deferred or pointsto edges found.  Assume the value was set
   341       // outside this method.  Add edge to phantom object.
   342       add_pointsto_edge(t, _phantom_object);
   343       add_pointsto_edge(ni, _phantom_object);
   344     }
   345     for (uint e = 0; e < e_cnt; e++) {
   346       uint etgt = ptt->edge_target(e);
   347       if (visited->test_set(etgt))
   348         continue;
   350       PointsToNode::EdgeType et = ptt->edge_type(e);
   351       if (et == PointsToNode::PointsToEdge) {
   352         add_pointsto_edge(ni, etgt);
   353         if(etgt == _phantom_object) {
   354           // Special case - field set outside (globally escaping).
   355           set_escape_state(ni, PointsToNode::GlobalEscape);
   356         }
   357       } else if (et == PointsToNode::DeferredEdge) {
   358         deferred_edges->append(etgt);
   359       } else {
   360         assert(false,"invalid connection graph");
   361       }
   362     }
   363   }
   364 }
   367 //  Add an edge to the node given by "to_i" from any field of adr_i whose offset
   368 //  matches "offset".  A deferred edge is added if to_i is a LocalVar, and
   369 //  a pointsto edge is added if it is a JavaObject.
   371 void ConnectionGraph::add_edge_from_fields(uint adr_i, uint to_i, int offs) {
   372   PointsToNode* an = ptnode_adr(adr_i);
   373   PointsToNode* to = ptnode_adr(to_i);
   374   bool deferred = (to->node_type() == PointsToNode::LocalVar);
   376   for (uint fe = 0; fe < an->edge_count(); fe++) {
   377     assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
   378     int fi = an->edge_target(fe);
   379     PointsToNode* pf = ptnode_adr(fi);
   380     int po = pf->offset();
   381     if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
   382       if (deferred)
   383         add_deferred_edge(fi, to_i);
   384       else
   385         add_pointsto_edge(fi, to_i);
   386     }
   387   }
   388 }
   390 // Add a deferred edge from the node given by "from_i" to any field of adr_i
   391 // whose offset matches "offset".
   392 void ConnectionGraph::add_deferred_edge_to_fields(uint from_i, uint adr_i, int offs) {
   393   PointsToNode* an = ptnode_adr(adr_i);
   394   bool is_alloc = an->_node->is_Allocate();
   395   for (uint fe = 0; fe < an->edge_count(); fe++) {
   396     assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
   397     int fi = an->edge_target(fe);
   398     PointsToNode* pf = ptnode_adr(fi);
   399     int offset = pf->offset();
   400     if (!is_alloc) {
   401       // Assume the field was set outside this method if it is not an Allocation.
   402       add_pointsto_edge(fi, _phantom_object);
   403     }
   404     if (offset == offs || offset == Type::OffsetBot || offs == Type::OffsetBot) {
   405       add_deferred_edge(from_i, fi);
   406     }
   407   }
   408   // Some field references (AddP) may still be missing
   409   // until Connection Graph construction is complete.
   410   // For example, loads from RAW pointers with offset 0
   411   // which don't have an AddP.
   412   // A reference to phantom_object will be added if
   413   // a field reference is still missing after completing
   414   // Connection Graph (see remove_deferred()).
   415 }
   417 // Helper functions
   419 static Node* get_addp_base(Node *addp) {
   420   assert(addp->is_AddP(), "must be AddP");
   421   //
   422   // AddP cases for Base and Address inputs:
   423   // case #1. Direct object's field reference:
   424   //     Allocate
   425   //       |
   426   //     Proj #5 ( oop result )
   427   //       |
   428   //     CheckCastPP (cast to instance type)
   429   //      | |
   430   //     AddP  ( base == address )
   431   //
   432   // case #2. Indirect object's field reference:
   433   //      Phi
   434   //       |
   435   //     CastPP (cast to instance type)
   436   //      | |
   437   //     AddP  ( base == address )
   438   //
   439   // case #3. Raw object's field reference for Initialize node:
   440   //      Allocate
   441   //        |
   442   //      Proj #5 ( oop result )
   443   //  top   |
   444   //     \  |
   445   //     AddP  ( base == top )
   446   //
   447   // case #4. Array's element reference:
   448   //   {CheckCastPP | CastPP}
   449   //     |  | |
   450   //     |  AddP ( array's element offset )
   451   //     |  |
   452   //     AddP ( array's offset )
   453   //
   454   // case #5. Raw object's field reference for arraycopy stub call:
   455   //          The inline_native_clone() case when the arraycopy stub is called
   456   //          after the allocation before Initialize and CheckCastPP nodes.
   457   //      Allocate
   458   //        |
   459   //      Proj #5 ( oop result )
   460   //       | |
   461   //       AddP  ( base == address )
   462   //
   463   // case #6. Constant Pool, ThreadLocal, CastX2P or
   464   //          Raw object's field reference:
   465   //      {ConP, ThreadLocal, CastX2P, raw Load}
   466   //  top   |
   467   //     \  |
   468   //     AddP  ( base == top )
   469   //
   470   // case #7. Klass's field reference.
   471   //      LoadKlass
   472   //       | |
   473   //       AddP  ( base == address )
   474   //
   475   // case #8. narrow Klass's field reference.
   476   //      LoadNKlass
   477   //       |
   478   //      DecodeN
   479   //       | |
   480   //       AddP  ( base == address )
   481   //
   482   Node *base = addp->in(AddPNode::Base)->uncast();
   483   if (base->is_top()) { // AddP cases #3 and #6.
   484     base = addp->in(AddPNode::Address)->uncast();
   485     while (base->is_AddP()) {
   486       // Case #6 (unsafe access) may have several chained AddP nodes.
   487       assert(base->in(AddPNode::Base)->is_top(), "expected unsafe access address only");
   488       base = base->in(AddPNode::Address)->uncast();
   489     }
   490     assert(base->Opcode() == Op_ConP || base->Opcode() == Op_ThreadLocal ||
   491            base->Opcode() == Op_CastX2P || base->is_DecodeN() ||
   492            (base->is_Mem() && base->bottom_type() == TypeRawPtr::NOTNULL) ||
   493            (base->is_Proj() && base->in(0)->is_Allocate()), "sanity");
   494   }
   495   return base;
   496 }
   498 static Node* find_second_addp(Node* addp, Node* n) {
   499   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
   501   Node* addp2 = addp->raw_out(0);
   502   if (addp->outcnt() == 1 && addp2->is_AddP() &&
   503       addp2->in(AddPNode::Base) == n &&
   504       addp2->in(AddPNode::Address) == addp) {
   506     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
   507     //
   508     // Find the array's offset to push it on the worklist first and
   509     // as a result process the array's element offset first (pushed second)
   510     // to avoid a CastPP for the array's offset.
   511     // Otherwise the inserted CastPP (LocalVar) will point to what
   512     // the AddP (Field) points to. That would be wrong since
   513     // the algorithm expects the CastPP to have the same points-to set
   514     // as the AddP's base CheckCastPP (LocalVar).
   515     //
   516     //    ArrayAllocation
   517     //     |
   518     //    CheckCastPP
   519     //     |
   520     //    memProj (from ArrayAllocation CheckCastPP)
   521     //     |  ||
   522     //     |  ||   Int (element index)
   523     //     |  ||    |   ConI (log(element size))
   524     //     |  ||    |   /
   525     //     |  ||   LShift
   526     //     |  ||  /
   527     //     |  AddP (array's element offset)
   528     //     |  |
   529     //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
   530     //     | / /
   531     //     AddP (array's offset)
   532     //      |
   533     //     Load/Store (memory operation on array's element)
   534     //
   535     return addp2;
   536   }
   537   return NULL;
   538 }
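
The two chained AddP nodes in the diagram split one address computation: base plus scaled element index, plus the constant array header offset. The arithmetic as a standalone sketch (header constant per the #12/#24 note above):

#include <stdint.h>

// Address produced by the AddP pair in the diagram:
// first the scaled element index, then the array header offset.
intptr_t array_element_address(intptr_t base, intptr_t index, int log_elem_size,
                               intptr_t header /* 12 on 32-bit, 24 on 64-bit */) {
  intptr_t elem = base + (index << log_elem_size);  // AddP (array's element offset)
  return elem + header;                             // AddP (array's offset)
}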
   540 //
   541 // Adjust the type and inputs of an AddP which computes the
   542 // address of a field of an instance
   543 //
   544 bool ConnectionGraph::split_AddP(Node *addp, Node *base,  PhaseGVN  *igvn) {
   545   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
   546   assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
   547   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
   548   if (t == NULL) {
   549     // We are computing a raw address for a store captured by an Initialize
   550     // compute an appropriate address type (cases #3 and #5).
   551     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
   552     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
   553     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
   554     assert(offs != Type::OffsetBot, "offset must be a constant");
   555     t = base_t->add_offset(offs)->is_oopptr();
   556   }
   557   int inst_id =  base_t->instance_id();
   558   assert(!t->is_known_instance() || t->instance_id() == inst_id,
   559                              "old type must be non-instance or match new type");
   561   // The type 't' could be a subclass of 'base_t'.
   562   // As a result t->offset() could be larger than base_t's size, which will
   563   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
   564   // constructor verifies correctness of the offset.
   565   //
   566   // This can happen on a subclass branch (from the type profiling
   567   // inlining) which was not eliminated during parsing since the exactness
   568   // of the allocation type was not propagated to the subclass type check.
   569   //
   570   // Or the type 't' may not be related to 'base_t' at all.
   571   // This can happen when the CHA type differs from the MDO type on a dead path
   572   // (for example, from an instanceof check) which is not collapsed during parsing.
   573   //
   574   // Do nothing for such AddP node and don't process its users since
   575   // this code branch will go away.
   576   //
   577   if (!t->is_known_instance() &&
   578       !base_t->klass()->is_subtype_of(t->klass())) {
   579      return false; // bail out
   580   }
   582   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
   583   // Do NOT remove the next line: ensure a new alias index is allocated
   584   // for the instance type. Note: C++ will not remove it since the call
   585   // has side effects.
   586   int alias_idx = _compile->get_alias_index(tinst);
   587   igvn->set_type(addp, tinst);
   588   // record the allocation in the node map
   589   assert(ptnode_adr(addp->_idx)->_node != NULL, "should be registered");
   590   set_map(addp->_idx, get_map(base->_idx));
   592   // Set addp's Base and Address to 'base'.
   593   Node *abase = addp->in(AddPNode::Base);
   594   Node *adr   = addp->in(AddPNode::Address);
   595   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
   596       adr->in(0)->_idx == (uint)inst_id) {
   597     // Skip AddP cases #3 and #5.
   598   } else {
   599     assert(!abase->is_top(), "sanity"); // AddP case #3
   600     if (abase != base) {
   601       igvn->hash_delete(addp);
   602       addp->set_req(AddPNode::Base, base);
   603       if (abase == adr) {
   604         addp->set_req(AddPNode::Address, base);
   605       } else {
   606         // AddP case #4 (adr is array's element offset AddP node)
   607 #ifdef ASSERT
   608         const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
   609         assert(adr->is_AddP() && atype != NULL &&
   610                atype->instance_id() == inst_id, "array's element offset should be processed first");
   611 #endif
   612       }
   613       igvn->hash_insert(addp);
   614     }
   615   }
   616   // Put on IGVN worklist since at least addp's type was changed above.
   617   record_for_optimizer(addp);
   618   return true;
   619 }
   621 //
   622 // Create a new version of orig_phi if necessary. Returns either the newly
   623 // created phi or an existing phi.  Sets new_created to indicate whether a new
   624 // phi was created.  Cache the last newly created phi in the node map.
   625 //
   626 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn, bool &new_created) {
   627   Compile *C = _compile;
   628   new_created = false;
   629   int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
   630   // nothing to do if orig_phi is bottom memory or matches alias_idx
   631   if (phi_alias_idx == alias_idx) {
   632     return orig_phi;
   633   }
   634   // Have we recently created a Phi for this alias index?
   635   PhiNode *result = get_map_phi(orig_phi->_idx);
   636   if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
   637     return result;
   638   }
   639   // Previous check may fail when the same wide memory Phi was split into Phis
   640   // for different memory slices. Search all Phis for this region.
   641   if (result != NULL) {
   642     Node* region = orig_phi->in(0);
   643     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
   644       Node* phi = region->fast_out(i);
   645       if (phi->is_Phi() &&
   646           C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
   647         assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
   648         return phi->as_Phi();
   649       }
   650     }
   651   }
   652   if ((int)C->unique() + 2*NodeLimitFudgeFactor > MaxNodeLimit) {
   653     if (C->do_escape_analysis() == true && !C->failing()) {
   654       // Retry compilation without escape analysis.
   655       // If this is the first failure, the sentinel string will "stick"
   656       // to the Compile object, and the C2Compiler will see it and retry.
   657       C->record_failure(C2Compiler::retry_no_escape_analysis());
   658     }
   659     return NULL;
   660   }
   661   orig_phi_worklist.append_if_missing(orig_phi);
   662   const TypePtr *atype = C->get_adr_type(alias_idx);
   663   result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
   664   C->copy_node_notes_to(result, orig_phi);
   665   igvn->set_type(result, result->bottom_type());
   666   record_for_optimizer(result);
   668   debug_only(Node* pn = ptnode_adr(orig_phi->_idx)->_node;)
   669   assert(pn == NULL || pn == orig_phi, "wrong node");
   670   set_map(orig_phi->_idx, result);
   671   ptnode_adr(orig_phi->_idx)->_node = orig_phi;
   673   new_created = true;
   674   return result;
   675 }
   677 //
   678 // Return a new version of Memory Phi "orig_phi" with the inputs having the
   679 // specified alias index.
   680 //
   681 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn) {
   683   assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
   684   Compile *C = _compile;
   685   bool new_phi_created;
   686   PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, igvn, new_phi_created);
   687   if (!new_phi_created) {
   688     return result;
   689   }
   691   GrowableArray<PhiNode *>  phi_list;
   692   GrowableArray<uint>  cur_input;
   694   PhiNode *phi = orig_phi;
   695   uint idx = 1;
   696   bool finished = false;
   697   while(!finished) {
   698     while (idx < phi->req()) {
   699       Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, igvn);
   700       if (mem != NULL && mem->is_Phi()) {
   701         PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, igvn, new_phi_created);
   702         if (new_phi_created) {
   703           // found a phi for which we created a new split; push the current
   704           // one on the worklist and begin processing the new one
   705           phi_list.push(phi);
   706           cur_input.push(idx);
   707           phi = mem->as_Phi();
   708           result = newphi;
   709           idx = 1;
   710           continue;
   711         } else {
   712           mem = newphi;
   713         }
   714       }
   715       if (C->failing()) {
   716         return NULL;
   717       }
   718       result->set_req(idx++, mem);
   719     }
   719     }
   720 #ifdef ASSERT
   721     // verify that the new Phi has an input for each input of the original
   722     assert( phi->req() == result->req(), "must have same number of inputs.");
   723     assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
   724 #endif
   725     // Check if all new phi's inputs have specified alias index.
   726     // Otherwise use old phi.
   727     for (uint i = 1; i < phi->req(); i++) {
   728       Node* in = result->in(i);
   729       assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");
   730     }
   731     // we have finished processing a Phi, see if there are any more to do
   732     finished = (phi_list.length() == 0 );
   733     if (!finished) {
   734       phi = phi_list.pop();
   735       idx = cur_input.pop();
   736       PhiNode *prev_result = get_map_phi(phi->_idx);
   737       prev_result->set_req(idx++, result);
   738       result = prev_result;
   739     }
   740   }
   741   return result;
   742 }
   745 //
   746 // The next methods are derived from methods in MemNode.
   747 //
   748 static Node *step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
   749   Node *mem = mmem;
   750   // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
   751   // means an array I have not precisely typed yet.  Do not do any
   752   // alias stuff with it any time soon.
   753   if( toop->base() != Type::AnyPtr &&
   754       !(toop->klass() != NULL &&
   755         toop->klass()->is_java_lang_Object() &&
   756         toop->offset() == Type::OffsetBot) ) {
   757     mem = mmem->memory_at(alias_idx);
   758     // Update input if it is progress over what we have now
   759   }
   760   return mem;
   761 }
   763 //
   764 // Move memory users to their memory slices.
   765 //
   766 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *igvn) {
   767   Compile* C = _compile;
   769   const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
   770   assert(tp != NULL, "ptr type");
   771   int alias_idx = C->get_alias_index(tp);
   772   int general_idx = C->get_general_index(alias_idx);
   774   // Move users first
   775   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
   776     Node* use = n->fast_out(i);
   777     if (use->is_MergeMem()) {
   778       MergeMemNode* mmem = use->as_MergeMem();
   779       assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
   780       if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
   781         continue; // Nothing to do
   782       }
   783       // Replace previous general reference to mem node.
   784       uint orig_uniq = C->unique();
   785       Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
   786       assert(orig_uniq == C->unique(), "no new nodes");
   787       mmem->set_memory_at(general_idx, m);
   788       --imax;
   789       --i;
   790     } else if (use->is_MemBar()) {
   791       assert(!use->is_Initialize(), "initializing stores should not be moved");
   792       if (use->req() > MemBarNode::Precedent &&
   793           use->in(MemBarNode::Precedent) == n) {
   794         // Don't move related membars.
   795         record_for_optimizer(use);
   796         continue;
   797       }
   798       tp = use->as_MemBar()->adr_type()->isa_ptr();
   799       if ((tp != NULL && C->get_alias_index(tp) == alias_idx) ||
   800           alias_idx == general_idx) {
   801         continue; // Nothing to do
   802       }
   803       // Move to general memory slice.
   804       uint orig_uniq = C->unique();
   805       Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
   806       assert(orig_uniq == C->unique(), "no new nodes");
   807       igvn->hash_delete(use);
   808       imax -= use->replace_edge(n, m);
   809       igvn->hash_insert(use);
   810       record_for_optimizer(use);
   811       --i;
   812 #ifdef ASSERT
   813     } else if (use->is_Mem()) {
   814       if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
   815         // Don't move related cardmark.
   816         continue;
   817       }
   818       // Memory nodes should have new memory input.
   819       tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
   820       assert(tp != NULL, "ptr type");
   821       int idx = C->get_alias_index(tp);
   822       assert(get_map(use->_idx) != NULL || idx == alias_idx,
   823              "Following memory nodes should have new memory input or be on the same memory slice");
   824     } else if (use->is_Phi()) {
   825       // Phi nodes should be split and moved already.
   826       tp = use->as_Phi()->adr_type()->isa_ptr();
   827       assert(tp != NULL, "ptr type");
   828       int idx = C->get_alias_index(tp);
   829       assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
   830     } else {
   831       use->dump();
   832       assert(false, "should not be here");
   833 #endif
   834     }
   835   }
   836 }
   838 //
   839 // Search memory chain of "mem" to find a MemNode whose address
   840 // is the specified alias index.
   841 //
   842 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *phase) {
   843   if (orig_mem == NULL)
   844     return orig_mem;
   845   Compile* C = phase->C;
   846   const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
   847   bool is_instance = (toop != NULL) && toop->is_known_instance();
   848   Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
   849   Node *prev = NULL;
   850   Node *result = orig_mem;
   851   while (prev != result) {
   852     prev = result;
   853     if (result == start_mem)
   854       break;  // hit one of our sentinels
   855     if (result->is_Mem()) {
   856       const Type *at = phase->type(result->in(MemNode::Address));
   857       if (at == Type::TOP)
   858         break; // Dead
   859       assert (at->isa_ptr() != NULL, "pointer type required.");
   860       int idx = C->get_alias_index(at->is_ptr());
   861       if (idx == alias_idx)
   862         break; // Found
   863       if (!is_instance && (at->isa_oopptr() == NULL ||
   864                            !at->is_oopptr()->is_known_instance())) {
   865         break; // Do not skip store to general memory slice.
   866       }
   867       result = result->in(MemNode::Memory);
   868     }
   869     if (!is_instance)
   870       continue;  // don't search further for non-instance types
   871     // skip over a call which does not affect this memory slice
   872     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
   873       Node *proj_in = result->in(0);
   874       if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
   875         break;  // hit one of our sentinels
   876       } else if (proj_in->is_Call()) {
   877         CallNode *call = proj_in->as_Call();
   878         if (!call->may_modify(toop, phase)) {
   879           result = call->in(TypeFunc::Memory);
   880         }
   881       } else if (proj_in->is_Initialize()) {
   882         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
   883         // Stop if this is the initialization for the object instance
   884         // which contains this memory slice, otherwise skip over it.
   885         if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) {
   886           result = proj_in->in(TypeFunc::Memory);
   887         }
   888       } else if (proj_in->is_MemBar()) {
   889         result = proj_in->in(TypeFunc::Memory);
   890       }
   891     } else if (result->is_MergeMem()) {
   892       MergeMemNode *mmem = result->as_MergeMem();
   893       result = step_through_mergemem(mmem, alias_idx, toop);
   894       if (result == mmem->base_memory()) {
   895         // Didn't find instance memory, search through general slice recursively.
   896         result = mmem->memory_at(C->get_general_index(alias_idx));
   897         result = find_inst_mem(result, alias_idx, orig_phis, phase);
   898         if (C->failing()) {
   899           return NULL;
   900         }
   901         mmem->set_memory_at(alias_idx, result);
   902       }
   903     } else if (result->is_Phi() &&
   904                C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
   905       Node *un = result->as_Phi()->unique_input(phase);
   906       if (un != NULL) {
   907         orig_phis.append_if_missing(result->as_Phi());
   908         result = un;
   909       } else {
   910         break;
   911       }
   912     } else if (result->is_ClearArray()) {
   913       if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), phase)) {
   914         // Cannot bypass initialization of the instance
   915         // we are looking for.
   916         break;
   917       }
   918       // Otherwise skip it (the call updated 'result' value).
   919     } else if (result->Opcode() == Op_SCMemProj) {
   920       assert(result->in(0)->is_LoadStore(), "sanity");
   921       const Type *at = phase->type(result->in(0)->in(MemNode::Address));
   922       if (at != Type::TOP) {
   923         assert (at->isa_ptr() != NULL, "pointer type required.");
   924         int idx = C->get_alias_index(at->is_ptr());
   925         assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node accesses its field");
   926         break;
   927       }
   928       result = result->in(0)->in(MemNode::Memory);
   929     }
   930   }
   931   if (result->is_Phi()) {
   932     PhiNode *mphi = result->as_Phi();
   933     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
   934     const TypePtr *t = mphi->adr_type();
   935     if (!is_instance) {
   936       // Push all non-instance Phis on the orig_phis worklist to update inputs
   937       // during Phase 4 if needed.
   938       orig_phis.append_if_missing(mphi);
   939     } else if (C->get_alias_index(t) != alias_idx) {
   940       // Create a new Phi with the specified alias index type.
   941       result = split_memory_phi(mphi, alias_idx, orig_phis, phase);
   942     }
   943   }
   944   // The result is either a MemNode, a PhiNode or an InitializeNode.
   945   return result;
   946 }
   948 //
   949 //  Convert the types of unescaped objects to instance types where possible,
   950 //  propagate the new type information through the graph, and update memory
   951 //  edges and MergeMem inputs to reflect the new type.
   952 //
   953 //  We start with allocations (and calls which may be allocations) on alloc_worklist.
   954 //  The processing is done in 4 phases:
   955 //
   956 //  Phase 1:  Process possible allocations from alloc_worklist.  Create instance
   957 //            types for the CheckCastPP for allocations where possible.
   958 //            Propagate the new types through users as follows:
   959 //               casts and Phi:  push users on alloc_worklist
   960 //               AddP:  cast Base and Address inputs to the instance type
   961 //                      push any AddP users on alloc_worklist and push any memnode
   962 //                      users onto memnode_worklist.
   963 //  Phase 2:  Process MemNode's from memnode_worklist. Compute the new address
   964 //            type and search the Memory chain for a store with the appropriate
   965 //            address type.  If a Phi is found, create a new version with
   966 //            the appropriate memory slices from each of the Phi inputs.
   967 //            For stores, process the users as follows:
   968 //               MemNode:  push on memnode_worklist
   969 //               MergeMem: push on mergemem_worklist
   970 //  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory
   971 //            slice, moving the first node encountered of each instance type to
   972 //            the input corresponding to its alias index (the appropriate
   973 //            memory slice).
   974 //  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
   975 //
   976 // In the following example, the CheckCastPP nodes are the cast of allocation
   977 // results and the allocation of node 29 is unescaped and eligible to be an
   978 // instance type.
   979 //
   980 // We start with:
   981 //
   982 //     7 Parm #memory
   983 //    10  ConI  "12"
   984 //    19  CheckCastPP   "Foo"
   985 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
   986 //    29  CheckCastPP   "Foo"
   987 //    30  AddP  _ 29 29 10  Foo+12  alias_index=4
   988 //
   989 //    40  StoreP  25   7  20   ... alias_index=4
   990 //    50  StoreP  35  40  30   ... alias_index=4
   991 //    60  StoreP  45  50  20   ... alias_index=4
   992 //    70  LoadP    _  60  30   ... alias_index=4
   993 //    80  Phi     75  50  60   Memory alias_index=4
   994 //    90  LoadP    _  80  30   ... alias_index=4
   995 //   100  LoadP    _  80  20   ... alias_index=4
   996 //
   997 //
   998 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
   999 // and creating a new alias index for node 30.  This gives:
  1000 //
  1001 //     7 Parm #memory
  1002 //    10  ConI  "12"
  1003 //    19  CheckCastPP   "Foo"
  1004 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
  1005 //    29  CheckCastPP   "Foo"  iid=24
  1006 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
  1007 //
  1008 //    40  StoreP  25   7  20   ... alias_index=4
  1009 //    50  StoreP  35  40  30   ... alias_index=6
  1010 //    60  StoreP  45  50  20   ... alias_index=4
  1011 //    70  LoadP    _  60  30   ... alias_index=6
  1012 //    80  Phi     75  50  60   Memory alias_index=4
  1013 //    90  LoadP    _  80  30   ... alias_index=6
  1014 //   100  LoadP    _  80  20   ... alias_index=4
  1015 //
  1016 // In phase 2, new memory inputs are computed for the loads and stores,
  1017 // and a new version of the phi is created.  In phase 4, the inputs to
  1018 // node 80 are updated and then the memory nodes are updated with the
  1019 // values computed in phase 2.  This results in:
  1020 //
  1021 //     7 Parm #memory
  1022 //    10  ConI  "12"
  1023 //    19  CheckCastPP   "Foo"
  1024 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
  1025 //    29  CheckCastPP   "Foo"  iid=24
  1026 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
  1027 //
  1028 //    40  StoreP  25  7   20   ... alias_index=4
  1029 //    50  StoreP  35  7   30   ... alias_index=6
  1030 //    60  StoreP  45  40  20   ... alias_index=4
  1031 //    70  LoadP    _  50  30   ... alias_index=6
  1032 //    80  Phi     75  40  60   Memory alias_index=4
  1033 //   120  Phi     75  50  50   Memory alias_index=6
  1034 //    90  LoadP    _ 120  30   ... alias_index=6
  1035 //   100  LoadP    _  80  20   ... alias_index=4
  1036 //
  1037 void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist) {
  1038   GrowableArray<Node *>  memnode_worklist;
  1039   GrowableArray<PhiNode *>  orig_phis;
  1041   PhaseIterGVN  *igvn = _igvn;
  1042   uint new_index_start = (uint) _compile->num_alias_types();
  1043   Arena* arena = Thread::current()->resource_area();
  1044   VectorSet visited(arena);
  1047   //  Phase 1:  Process possible allocations from alloc_worklist.
  1048   //  Create instance types for the CheckCastPP for allocations where possible.
  1049   //
  1050   // (Note: don't forget to change the order of the second AddP node on
  1051   //  the alloc_worklist if the order of the worklist processing is changed,
  1052   //  see the comment in find_second_addp().)
  1053   //
  1054   while (alloc_worklist.length() != 0) {
  1055     Node *n = alloc_worklist.pop();
  1056     uint ni = n->_idx;
  1057     const TypeOopPtr* tinst = NULL;
  1058     if (n->is_Call()) {
  1059       CallNode *alloc = n->as_Call();
  1060       // copy escape information to call node
  1061       PointsToNode* ptn = ptnode_adr(alloc->_idx);
  1062       PointsToNode::EscapeState es = escape_state(alloc);
  1063       // We have an allocation or call which returns a Java object,
  1064       // see if it is unescaped.
  1065       if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable())
  1066         continue;
  1068       // Find CheckCastPP for the allocate or for the return value of a call
  1069       n = alloc->result_cast();
  1070       if (n == NULL) {            // No uses except Initialize node
  1071         if (alloc->is_Allocate()) {
  1072           // Set the scalar_replaceable flag for allocation
  1073           // so it could be eliminated if it has no uses.
  1074           alloc->as_Allocate()->_is_scalar_replaceable = true;
  1075         }
  1076         continue;
  1077       }
  1078       if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
  1079         assert(!alloc->is_Allocate(), "allocation should have unique type");
  1080         continue;
  1081       }
  1083       // The inline code for Object.clone() casts the allocation result to
  1084       // java.lang.Object and then to the actual type of the allocated
  1085       // object. Detect this case and use the second cast.
  1086       // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when
  1087       // the allocation result is cast to java.lang.Object and then
  1088       // to the actual Array type.
  1089       if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
  1090           && (alloc->is_AllocateArray() ||
  1091               igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {
  1092         Node *cast2 = NULL;
  1093         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1094           Node *use = n->fast_out(i);
  1095           if (use->is_CheckCastPP()) {
  1096             cast2 = use;
  1097             break;
  1098           }
  1099         }
  1100         if (cast2 != NULL) {
  1101           n = cast2;
  1102         } else {
  1103           // Non-scalar replaceable if the allocation type is unknown statically
  1104           // (reflection allocation), the object can't be restored during
  1105           // deoptimization without precise type.
  1106           continue;
  1107         }
  1108       }
  1109       if (alloc->is_Allocate()) {
  1110         // Set the scalar_replaceable flag for allocation
  1111         // so it could be eliminated.
  1112         alloc->as_Allocate()->_is_scalar_replaceable = true;
  1113       }
  1114       set_escape_state(n->_idx, es); // CheckCastPP escape state
  1115       // in order for an object to be scalar-replaceable, it must be:
  1116       //   - a direct allocation (not a call returning an object)
  1117       //   - non-escaping
  1118       //   - eligible to be a unique type
  1119       //   - not determined to be ineligible by escape analysis
  1120       assert(ptnode_adr(alloc->_idx)->_node != NULL &&
  1121              ptnode_adr(n->_idx)->_node != NULL, "should be registered");
  1122       set_map(alloc->_idx, n);
  1123       set_map(n->_idx, alloc);
  1124       const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
  1125       if (t == NULL)
  1126         continue;  // not a TypeOopPtr
  1127       tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni);
  1128       igvn->hash_delete(n);
  1129       igvn->set_type(n,  tinst);
  1130       n->raise_bottom_type(tinst);
  1131       igvn->hash_insert(n);
  1132       record_for_optimizer(n);
  1133       if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
  1135         // First, put on the worklist all Field edges from Connection Graph
  1136         // which is more accurate than putting immediate users from the Ideal Graph.
  1137         for (uint e = 0; e < ptn->edge_count(); e++) {
  1138           Node *use = ptnode_adr(ptn->edge_target(e))->_node;
  1139           assert(ptn->edge_type(e) == PointsToNode::FieldEdge && use->is_AddP(),
  1140                  "only AddP nodes are Field edges in CG");
  1141           if (use->outcnt() > 0) { // Don't process dead nodes
  1142             Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
  1143             if (addp2 != NULL) {
  1144               assert(alloc->is_AllocateArray(),"array allocation was expected");
  1145               alloc_worklist.append_if_missing(addp2);
  1146             }
  1147             alloc_worklist.append_if_missing(use);
  1148           }
  1149         }
  1151         // An allocation may have an Initialize which has raw stores. Scan
  1152         // the users of the raw allocation result and push AddP users
  1153         // on alloc_worklist.
  1154         Node *raw_result = alloc->proj_out(TypeFunc::Parms);
  1155         assert (raw_result != NULL, "must have an allocation result");
  1156         for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
  1157           Node *use = raw_result->fast_out(i);
  1158           if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
  1159             Node* addp2 = find_second_addp(use, raw_result);
  1160             if (addp2 != NULL) {
  1161               assert(alloc->is_AllocateArray(),"array allocation was expected");
  1162               alloc_worklist.append_if_missing(addp2);
  1163             }
  1164             alloc_worklist.append_if_missing(use);
  1165           } else if (use->is_MemBar()) {
  1166             memnode_worklist.append_if_missing(use);
  1167           }
  1168         }
  1169       }
  1170     } else if (n->is_AddP()) {
  1171       VectorSet* ptset = PointsTo(get_addp_base(n));
  1172       assert(ptset->Size() == 1, "AddP address is unique");
  1173       uint elem = ptset->getelem(); // Allocation node's index
  1174       if (elem == _phantom_object) {
  1175         assert(false, "escaped allocation");
  1176         continue; // Assume the value was set outside this method.
  1177       }
  1178       Node *base = get_map(elem);  // CheckCastPP node
  1179       if (!split_AddP(n, base, igvn)) continue; // wrong type from dead path
  1180       tinst = igvn->type(base)->isa_oopptr();
  1181     } else if (n->is_Phi() ||
  1182                n->is_CheckCastPP() ||
  1183                n->is_EncodeP() ||
  1184                n->is_DecodeN() ||
  1185                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
  1186       if (visited.test_set(n->_idx)) {
  1187         assert(n->is_Phi(), "loops only through Phi's");
  1188         continue;  // already processed
  1189       }
  1190       VectorSet* ptset = PointsTo(n);
  1191       if (ptset->Size() == 1) {
  1192         uint elem = ptset->getelem(); // Allocation node's index
  1193         if (elem == _phantom_object) {
  1194           assert(false, "escaped allocation");
  1195           continue; // Assume the value was set outside this method.
  1196         }
  1197         Node *val = get_map(elem);   // CheckCastPP node
  1198         TypeNode *tn = n->as_Type();
  1199         tinst = igvn->type(val)->isa_oopptr();
  1200         assert(tinst != NULL && tinst->is_known_instance() &&
  1201                (uint)tinst->instance_id() == elem , "instance type expected.");
  1203         const Type *tn_type = igvn->type(tn);
  1204         const TypeOopPtr *tn_t;
  1205         if (tn_type->isa_narrowoop()) {
  1206           tn_t = tn_type->make_ptr()->isa_oopptr();
  1207         } else {
  1208           tn_t = tn_type->isa_oopptr();
  1209         }
  1211         if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
  1212           if (tn_type->isa_narrowoop()) {
  1213             tn_type = tinst->make_narrowoop();
  1214           } else {
  1215             tn_type = tinst;
  1216           }
  1217           igvn->hash_delete(tn);
  1218           igvn->set_type(tn, tn_type);
  1219           tn->set_type(tn_type);
  1220           igvn->hash_insert(tn);
  1221           record_for_optimizer(n);
  1222         } else {
  1223           assert(tn_type == TypePtr::NULL_PTR ||
  1224                  tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()),
  1225                  "unexpected type");
  1226           continue; // Skip dead path with different type
  1227         }
  1228       }
  1229     } else {
  1230       debug_only(n->dump();)
  1231       assert(false, "EA: unexpected node");
  1232       continue;
  1233     }
  1234     // push allocation's users on appropriate worklist
  1235     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1236       Node *use = n->fast_out(i);
  1237       if(use->is_Mem() && use->in(MemNode::Address) == n) {
  1238         // Load/store to instance's field
  1239         memnode_worklist.append_if_missing(use);
  1240       } else if (use->is_MemBar()) {
  1241         memnode_worklist.append_if_missing(use);
  1242       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
  1243         Node* addp2 = find_second_addp(use, n);
  1244         if (addp2 != NULL) {
  1245           alloc_worklist.append_if_missing(addp2);
  1246         }
  1247         alloc_worklist.append_if_missing(use);
  1248       } else if (use->is_Phi() ||
  1249                  use->is_CheckCastPP() ||
  1250                  use->is_EncodeP() ||
  1251                  use->is_DecodeN() ||
  1252                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
  1253         alloc_worklist.append_if_missing(use);
  1254 #ifdef ASSERT
  1255       } else if (use->is_Mem()) {
  1256         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
  1257       } else if (use->is_MergeMem()) {
  1258         assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
  1259       } else if (use->is_SafePoint()) {
  1260         // Look for MergeMem nodes for calls which reference unique allocation
  1261         // (through CheckCastPP nodes) even for debug info.
  1262         Node* m = use->in(TypeFunc::Memory);
  1263         if (m->is_MergeMem()) {
  1264           assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
  1265         }
  1266       } else {
  1267         uint op = use->Opcode();
  1268         if (!(op == Op_CmpP || op == Op_Conv2B ||
  1269               op == Op_CastP2X || op == Op_StoreCM ||
  1270               op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
  1271               op == Op_StrEquals || op == Op_StrIndexOf)) {
  1272           n->dump();
  1273           use->dump();
  1274           assert(false, "EA: missing allocation reference path");
  1276 #endif
  1281   // New alias types were created in split_AddP().
  1282   uint new_index_end = (uint) _compile->num_alias_types();
1284   //  Phase 2:  Process MemNodes from memnode_worklist. Compute new address types and
1285   //            compute new values for Memory inputs (the Memory inputs are not
1286   //            actually updated until Phase 4).
  1287   if (memnode_worklist.length() == 0)
  1288     return;  // nothing to do
  1290   while (memnode_worklist.length() != 0) {
  1291     Node *n = memnode_worklist.pop();
  1292     if (visited.test_set(n->_idx))
  1293       continue;
  1294     if (n->is_Phi() || n->is_ClearArray()) {
  1295       // we don't need to do anything, but the users must be pushed
  1296     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
  1297       // we don't need to do anything, but the users must be pushed
  1298       n = n->as_MemBar()->proj_out(TypeFunc::Memory);
  1299       if (n == NULL)
  1300         continue;
  1301     } else {
  1302       assert(n->is_Mem(), "memory node required.");
  1303       Node *addr = n->in(MemNode::Address);
  1304       const Type *addr_t = igvn->type(addr);
  1305       if (addr_t == Type::TOP)
  1306         continue;
  1307       assert (addr_t->isa_ptr() != NULL, "pointer type required.");
  1308       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
  1309       assert ((uint)alias_idx < new_index_end, "wrong alias index");
  1310       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis, igvn);
  1311       if (_compile->failing()) {
  1312         return;
  1314       if (mem != n->in(MemNode::Memory)) {
1315         // We delay the memory edge update since we need the old one in
1316         // the MergeMem code below when instance memory slices are separated.
  1317         debug_only(Node* pn = ptnode_adr(n->_idx)->_node;)
  1318         assert(pn == NULL || pn == n, "wrong node");
  1319         set_map(n->_idx, mem);
  1320         ptnode_adr(n->_idx)->_node = n;
  1322       if (n->is_Load()) {
  1323         continue;  // don't push users
  1324       } else if (n->is_LoadStore()) {
  1325         // get the memory projection
  1326         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1327           Node *use = n->fast_out(i);
  1328           if (use->Opcode() == Op_SCMemProj) {
  1329             n = use;
  1330             break;
  1333         assert(n->Opcode() == Op_SCMemProj, "memory projection required");
  1336     // push user on appropriate worklist
  1337     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1338       Node *use = n->fast_out(i);
  1339       if (use->is_Phi() || use->is_ClearArray()) {
  1340         memnode_worklist.append_if_missing(use);
  1341       } else if(use->is_Mem() && use->in(MemNode::Memory) == n) {
  1342         if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
  1343           continue;
  1344         memnode_worklist.append_if_missing(use);
  1345       } else if (use->is_MemBar()) {
  1346         memnode_worklist.append_if_missing(use);
  1347 #ifdef ASSERT
  1348       } else if(use->is_Mem()) {
  1349         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
  1350       } else if (use->is_MergeMem()) {
  1351         assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
  1352       } else {
  1353         uint op = use->Opcode();
  1354         if (!(op == Op_StoreCM ||
  1355               (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
  1356                strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
  1357               op == Op_AryEq || op == Op_StrComp ||
  1358               op == Op_StrEquals || op == Op_StrIndexOf)) {
  1359           n->dump();
  1360           use->dump();
  1361           assert(false, "EA: missing memory path");
  1363 #endif
  1368   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
  1369   //            Walk each memory slice moving the first node encountered of each
1370   //            instance type to the input corresponding to its alias index.
  1371   uint length = _mergemem_worklist.length();
  1372   for( uint next = 0; next < length; ++next ) {
  1373     MergeMemNode* nmm = _mergemem_worklist.at(next);
  1374     assert(!visited.test_set(nmm->_idx), "should not be visited before");
  1375     // Note: we don't want to use MergeMemStream here because we only want to
  1376     // scan inputs which exist at the start, not ones we add during processing.
1377     // Note 2: MergeMem may already contain instance memory slices added
1378     // during the find_inst_mem() call when memory nodes were processed above.
  1379     igvn->hash_delete(nmm);
  1380     uint nslices = nmm->req();
  1381     for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
  1382       Node* mem = nmm->in(i);
  1383       Node* cur = NULL;
  1384       if (mem == NULL || mem->is_top())
  1385         continue;
  1386       // First, update mergemem by moving memory nodes to corresponding slices
  1387       // if their type became more precise since this mergemem was created.
  1388       while (mem->is_Mem()) {
  1389         const Type *at = igvn->type(mem->in(MemNode::Address));
  1390         if (at != Type::TOP) {
  1391           assert (at->isa_ptr() != NULL, "pointer type required.");
  1392           uint idx = (uint)_compile->get_alias_index(at->is_ptr());
  1393           if (idx == i) {
  1394             if (cur == NULL)
  1395               cur = mem;
  1396           } else {
  1397             if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
  1398               nmm->set_memory_at(idx, mem);
  1402         mem = mem->in(MemNode::Memory);
  1404       nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
1405       // Find any instance of the current type if we haven't already
1406       // encountered a memory slice of the instance along the memory chain.
  1407       for (uint ni = new_index_start; ni < new_index_end; ni++) {
  1408         if((uint)_compile->get_general_index(ni) == i) {
  1409           Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
  1410           if (nmm->is_empty_memory(m)) {
  1411             Node* result = find_inst_mem(mem, ni, orig_phis, igvn);
  1412             if (_compile->failing()) {
  1413               return;
  1415             nmm->set_memory_at(ni, result);
1420     // Find the rest of the instance values.
  1421     for (uint ni = new_index_start; ni < new_index_end; ni++) {
  1422       const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
  1423       Node* result = step_through_mergemem(nmm, ni, tinst);
  1424       if (result == nmm->base_memory()) {
  1425         // Didn't find instance memory, search through general slice recursively.
  1426         result = nmm->memory_at(_compile->get_general_index(ni));
  1427         result = find_inst_mem(result, ni, orig_phis, igvn);
  1428         if (_compile->failing()) {
  1429           return;
  1431         nmm->set_memory_at(ni, result);
  1434     igvn->hash_insert(nmm);
  1435     record_for_optimizer(nmm);
  1438   //  Phase 4:  Update the inputs of non-instance memory Phis and
  1439   //            the Memory input of memnodes
  1440   // First update the inputs of any non-instance Phi's from
  1441   // which we split out an instance Phi.  Note we don't have
1442   // to recursively process Phi's encountered on the input memory
1443   // chains as is done in split_memory_phi() since they will
  1444   // also be processed here.
  1445   for (int j = 0; j < orig_phis.length(); j++) {
  1446     PhiNode *phi = orig_phis.at(j);
  1447     int alias_idx = _compile->get_alias_index(phi->adr_type());
  1448     igvn->hash_delete(phi);
  1449     for (uint i = 1; i < phi->req(); i++) {
  1450       Node *mem = phi->in(i);
  1451       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis, igvn);
  1452       if (_compile->failing()) {
  1453         return;
  1455       if (mem != new_mem) {
  1456         phi->set_req(i, new_mem);
  1459     igvn->hash_insert(phi);
  1460     record_for_optimizer(phi);
  1463   // Update the memory inputs of MemNodes with the value we computed
1464   // in Phase 2 and move stores' memory users to the corresponding memory slices.
  1466   // Disable memory split verification code until the fix for 6984348.
  1467   // Currently it produces false negative results since it does not cover all cases.
  1468 #if 0 // ifdef ASSERT
  1469   visited.Reset();
  1470   Node_Stack old_mems(arena, _compile->unique() >> 2);
  1471 #endif
  1472   for (uint i = 0; i < nodes_size(); i++) {
  1473     Node *nmem = get_map(i);
  1474     if (nmem != NULL) {
  1475       Node *n = ptnode_adr(i)->_node;
  1476       assert(n != NULL, "sanity");
  1477       if (n->is_Mem()) {
  1478 #if 0 // ifdef ASSERT
  1479         Node* old_mem = n->in(MemNode::Memory);
  1480         if (!visited.test_set(old_mem->_idx)) {
  1481           old_mems.push(old_mem, old_mem->outcnt());
  1483 #endif
  1484         assert(n->in(MemNode::Memory) != nmem, "sanity");
  1485         if (!n->is_Load()) {
  1486           // Move memory users of a store first.
  1487           move_inst_mem(n, orig_phis, igvn);
  1489         // Now update memory input
  1490         igvn->hash_delete(n);
  1491         n->set_req(MemNode::Memory, nmem);
  1492         igvn->hash_insert(n);
  1493         record_for_optimizer(n);
  1494       } else {
  1495         assert(n->is_Allocate() || n->is_CheckCastPP() ||
  1496                n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
  1500 #if 0 // ifdef ASSERT
  1501   // Verify that memory was split correctly
  1502   while (old_mems.is_nonempty()) {
  1503     Node* old_mem = old_mems.node();
  1504     uint  old_cnt = old_mems.index();
  1505     old_mems.pop();
  1506     assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
  1508 #endif
  1511 bool ConnectionGraph::has_candidates(Compile *C) {
  1512   // EA brings benefits only when the code has allocations and/or locks which
  1513   // are represented by ideal Macro nodes.
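  //
  // For example (a hypothetical sketch, not from this file, assuming a
  // simple Point class with an int field x): doit() below allocates a
  // Point which never escapes, so it produces both kinds of candidates:
  //
  //    int doit() {
  //      Point p = new Point();        // Allocate macro node
  //      synchronized (p) { p.x = 1; } // Lock/Unlock macro nodes
  //      return p.x;
  //    }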
  1514   int cnt = C->macro_count();
  1515   for( int i=0; i < cnt; i++ ) {
  1516     Node *n = C->macro_node(i);
  1517     if ( n->is_Allocate() )
  1518       return true;
  1519     if( n->is_Lock() ) {
  1520       Node* obj = n->as_Lock()->obj_node()->uncast();
  1521       if( !(obj->is_Parm() || obj->is_Con()) )
  1522         return true;
  1525   return false;
  1528 void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  1529   // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
  1530   // to create space for them in ConnectionGraph::_nodes[].
  1531   Node* oop_null = igvn->zerocon(T_OBJECT);
  1532   Node* noop_null = igvn->zerocon(T_NARROWOOP);
  1534   ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
  1535   // Perform escape analysis
  1536   if (congraph->compute_escape()) {
1537     // There are non-escaping objects.
  1538     C->set_congraph(congraph);
  1541   // Cleanup.
  1542   if (oop_null->outcnt() == 0)
  1543     igvn->hash_delete(oop_null);
  1544   if (noop_null->outcnt() == 0)
  1545     igvn->hash_delete(noop_null);
  1548 bool ConnectionGraph::compute_escape() {
  1549   Compile* C = _compile;
  1551   // 1. Populate Connection Graph (CG) with Ideal nodes.
  1553   Unique_Node_List worklist_init;
  1554   worklist_init.map(C->unique(), NULL);  // preallocate space
  1556   // Initialize worklist
  1557   if (C->root() != NULL) {
  1558     worklist_init.push(C->root());
  1561   GrowableArray<Node*> alloc_worklist;
  1562   GrowableArray<Node*> addp_worklist;
  1563   GrowableArray<Node*> ptr_cmp_worklist;
  1564   PhaseGVN* igvn = _igvn;
  1565   bool has_allocations = false;
  1567   // Push all useful nodes onto CG list and set their type.
  1568   for( uint next = 0; next < worklist_init.size(); ++next ) {
  1569     Node* n = worklist_init.at(next);
  1570     record_for_escape_analysis(n, igvn);
1571     // Only the results of allocations and Java static calls are checked
1572     // for escape status. See process_call_result() below.
1573     if (n->is_Allocate() || (n->is_CallStaticJava() &&
1574         ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject)) {
  1575       has_allocations = true;
  1576       if (n->is_Allocate())
  1577         alloc_worklist.append(n);
  1578     } else if(n->is_AddP()) {
  1579       // Collect address nodes. Use them during stage 3 below
  1580       // to build initial connection graph field edges.
  1581       addp_worklist.append(n);
  1582     } else if (n->is_MergeMem()) {
  1583       // Collect all MergeMem nodes to add memory slices for
  1584       // scalar replaceable objects in split_unique_types().
  1585       _mergemem_worklist.append(n->as_MergeMem());
  1586     } else if (OptimizePtrCompare && n->is_Cmp() &&
  1587                (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
1588       // Pointer compare nodes.
  1589       ptr_cmp_worklist.append(n);
  1591     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1592       Node* m = n->fast_out(i);   // Get user
  1593       worklist_init.push(m);
  1597   if (!has_allocations) {
  1598     _collecting = false;
  1599     return false; // Nothing to do.
  1602   // 2. First pass to create simple CG edges (doesn't require to walk CG).
  1603   uint delayed_size = _delayed_worklist.size();
  1604   for( uint next = 0; next < delayed_size; ++next ) {
  1605     Node* n = _delayed_worklist.at(next);
  1606     build_connection_graph(n, igvn);
1609   // 3. Pass to create initial field edges (JavaObject -F-> AddP)
1610   //    to reduce the number of iterations during stage 4 below.
  1611   uint addp_length = addp_worklist.length();
  1612   for( uint next = 0; next < addp_length; ++next ) {
  1613     Node* n = addp_worklist.at(next);
  1614     Node* base = get_addp_base(n);
  1615     if (base->is_Proj() && base->in(0)->is_Call())
  1616       base = base->in(0);
  1617     PointsToNode::NodeType nt = ptnode_adr(base->_idx)->node_type();
  1618     if (nt == PointsToNode::JavaObject) {
  1619       build_connection_graph(n, igvn);
  1623   GrowableArray<int> cg_worklist;
  1624   cg_worklist.append(_phantom_object);
  1625   GrowableArray<uint>  worklist;
1627   // 4. Build Connection Graph edges which require
1628   //    walking the connection graph.
  1629   _progress = false;
  1630   for (uint ni = 0; ni < nodes_size(); ni++) {
  1631     PointsToNode* ptn = ptnode_adr(ni);
  1632     Node *n = ptn->_node;
  1633     if (n != NULL) { // Call, AddP, LoadP, StoreP
  1634       build_connection_graph(n, igvn);
  1635       if (ptn->node_type() != PointsToNode::UnknownType)
  1636         cg_worklist.append(n->_idx); // Collect CG nodes
  1637       if (!_processed.test(n->_idx))
  1638         worklist.append(n->_idx); // Collect C/A/L/S nodes
1642   // After IGVN user nodes may have a smaller _idx than
1643   // their inputs, so they were processed first in the
1644   // previous loop. Because of that not all graph
1645   // edges were created. Walk over interesting
1646   // nodes again until no new edges are created.
  1647   //
1648   // Normally only 1-3 passes are needed to build the
1649   // Connection Graph, depending on graph complexity.
1650   // Eight passes were observed in jvm2008 compiler.compiler.
1651   // Set the limit to 20 to catch the situation when something
1652   // went wrong and recompile the method without EA.
  1654 #define CG_BUILD_ITER_LIMIT 20
  1656   uint length = worklist.length();
  1657   int iterations = 0;
  1658   while(_progress && (iterations++ < CG_BUILD_ITER_LIMIT)) {
  1659     _progress = false;
  1660     for( uint next = 0; next < length; ++next ) {
  1661       int ni = worklist.at(next);
  1662       PointsToNode* ptn = ptnode_adr(ni);
  1663       Node* n = ptn->_node;
  1664       assert(n != NULL, "should be known node");
  1665       build_connection_graph(n, igvn);
  1668   if (iterations >= CG_BUILD_ITER_LIMIT) {
  1669     assert(iterations < CG_BUILD_ITER_LIMIT,
  1670            err_msg("infinite EA connection graph build with %d nodes and worklist size %d",
  1671            nodes_size(), length));
  1672     // Possible infinite build_connection_graph loop,
  1673     // retry compilation without escape analysis.
  1674     C->record_failure(C2Compiler::retry_no_escape_analysis());
  1675     _collecting = false;
  1676     return false;
  1678 #undef CG_BUILD_ITER_LIMIT
  1680   Arena* arena = Thread::current()->resource_area();
  1681   VectorSet visited(arena);
1683   // 5. Find fields' initializing values for non-escaped allocations
  1684   uint alloc_length = alloc_worklist.length();
  1685   for (uint next = 0; next < alloc_length; ++next) {
  1686     Node* n = alloc_worklist.at(next);
  1687     if (ptnode_adr(n->_idx)->escape_state() == PointsToNode::NoEscape) {
  1688       find_init_values(n, &visited, igvn);
  1692   worklist.clear();
  1694   // 6. Remove deferred edges from the graph.
  1695   uint cg_length = cg_worklist.length();
  1696   for (uint next = 0; next < cg_length; ++next) {
  1697     int ni = cg_worklist.at(next);
  1698     PointsToNode* ptn = ptnode_adr(ni);
  1699     PointsToNode::NodeType nt = ptn->node_type();
  1700     if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
  1701       remove_deferred(ni, &worklist, &visited);
1705   // 7. Adjust escape state of non-escaping objects.
  1706   for (uint next = 0; next < addp_length; ++next) {
  1707     Node* n = addp_worklist.at(next);
  1708     adjust_escape_state(n);
  1711   // 8. Propagate escape states.
  1712   worklist.clear();
  1714   // mark all nodes reachable from GlobalEscape nodes
  1715   (void)propagate_escape_state(&cg_worklist, &worklist, PointsToNode::GlobalEscape);
  1717   // mark all nodes reachable from ArgEscape nodes
  1718   bool has_non_escaping_obj = propagate_escape_state(&cg_worklist, &worklist, PointsToNode::ArgEscape);
  1720   // push all NoEscape nodes on the worklist
  1721   for( uint next = 0; next < cg_length; ++next ) {
  1722     int nk = cg_worklist.at(next);
  1723     if (ptnode_adr(nk)->escape_state() == PointsToNode::NoEscape)
  1724       worklist.push(nk);
  1726   alloc_worklist.clear();
  1727   // mark all nodes reachable from NoEscape nodes
  1728   while(worklist.length() > 0) {
  1729     uint nk = worklist.pop();
  1730     PointsToNode* ptn = ptnode_adr(nk);
  1731     if (ptn->node_type() == PointsToNode::JavaObject &&
  1732         !(nk == _noop_null || nk == _oop_null))
  1733       has_non_escaping_obj = true; // Non Escape
  1734     Node* n = ptn->_node;
  1735     bool scalar_replaceable = ptn->scalar_replaceable();
  1736     if (n->is_Allocate() && scalar_replaceable) {
  1737       // Push scalar replaceable allocations on alloc_worklist
1738       // for processing in split_unique_types(). Note: the
1739       // following code may change the scalar_replaceable value.
  1740       alloc_worklist.append(n);
  1742     uint e_cnt = ptn->edge_count();
  1743     for (uint ei = 0; ei < e_cnt; ei++) {
  1744       uint npi = ptn->edge_target(ei);
  1745       PointsToNode *np = ptnode_adr(npi);
  1746       if (np->escape_state() < PointsToNode::NoEscape) {
  1747         set_escape_state(npi, PointsToNode::NoEscape);
  1748         if (!scalar_replaceable) {
  1749           np->set_scalar_replaceable(false);
  1751         worklist.push(npi);
  1752       } else if (np->scalar_replaceable() && !scalar_replaceable) {
  1753         // Propagate scalar_replaceable value.
  1754         np->set_scalar_replaceable(false);
  1755         worklist.push(npi);
  1760   _collecting = false;
  1761   assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build");
  1763   assert(ptnode_adr(_oop_null)->escape_state() == PointsToNode::NoEscape, "sanity");
  1764   if (UseCompressedOops) {
  1765     assert(ptnode_adr(_noop_null)->escape_state() == PointsToNode::NoEscape, "sanity");
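  // A minimal sketch of what the marking below enables (hypothetical
  // Java, not from this file): since obj never becomes visible to
  // another thread, the monitor operations are useless and the
  // Lock/Unlock nodes can be marked eliminated.
  //
  //    Object obj = new Object();      // non-globally-escaping
  //    synchronized (obj) { count++; } // lock can be elided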
  1768   if (EliminateLocks && has_non_escaping_obj) {
  1769     // Mark locks before changing ideal graph.
  1770     int cnt = C->macro_count();
  1771     for( int i=0; i < cnt; i++ ) {
  1772       Node *n = C->macro_node(i);
  1773       if (n->is_AbstractLock()) { // Lock and Unlock nodes
  1774         AbstractLockNode* alock = n->as_AbstractLock();
  1775         if (!alock->is_eliminated()) {
  1776           PointsToNode::EscapeState es = escape_state(alock->obj_node());
  1777           assert(es != PointsToNode::UnknownEscape, "should know");
  1778           if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
  1779             // Mark it eliminated
  1780             alock->set_eliminated();
  1787   if (OptimizePtrCompare && has_non_escaping_obj) {
  1788     // Add ConI(#CC_GT) and ConI(#CC_EQ).
  1789     _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
  1790     _pcmp_eq = igvn->makecon(TypeInt::CC_EQ);
1791     // Optimize object comparisons.
  1792     while (ptr_cmp_worklist.length() != 0) {
  1793       Node *n = ptr_cmp_worklist.pop();
  1794       Node *res = optimize_ptr_compare(n);
  1795       if (res != NULL) {
  1796 #ifndef PRODUCT
  1797         if (PrintOptimizePtrCompare) {
  1798           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
  1799           if (Verbose) {
  1800             n->dump(1);
  1803 #endif
  1804         _igvn->replace_node(n, res);
  1807     // cleanup
  1808     if (_pcmp_neq->outcnt() == 0)
  1809       igvn->hash_delete(_pcmp_neq);
  1810     if (_pcmp_eq->outcnt()  == 0)
  1811       igvn->hash_delete(_pcmp_eq);
  1814 #ifndef PRODUCT
  1815   if (PrintEscapeAnalysis) {
  1816     dump(); // Dump ConnectionGraph
  1818 #endif
  1820   bool has_scalar_replaceable_candidates = false;
  1821   alloc_length = alloc_worklist.length();
  1822   for (uint next = 0; next < alloc_length; ++next) {
  1823     Node* n = alloc_worklist.at(next);
  1824     PointsToNode* ptn = ptnode_adr(n->_idx);
  1825     assert(ptn->escape_state() == PointsToNode::NoEscape, "sanity");
  1826     if (ptn->scalar_replaceable()) {
  1827       has_scalar_replaceable_candidates = true;
  1828       break;
  1832   if ( has_scalar_replaceable_candidates &&
  1833        C->AliasLevel() >= 3 && EliminateAllocations ) {
  1835     // Now use the escape information to create unique types for
  1836     // scalar replaceable objects.
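    // A sketch of the intended effect (hypothetical Java, assuming a
    // Point class with int fields x and y): once the allocation below
    // gets its own unique type and alias class, macro expansion can
    // replace the object with plain scalars.
    //
    //    Point p = new Point(1, 2);   // NoEscape, scalar replaceable
    //    return p.x + p.y;            // becomes: return px + py;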
  1837     split_unique_types(alloc_worklist);
  1839     if (C->failing())  return false;
  1841     C->print_method("After Escape Analysis", 2);
  1843 #ifdef ASSERT
  1844   } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
  1845     tty->print("=== No allocations eliminated for ");
  1846     C->method()->print_short_name();
  1847     if(!EliminateAllocations) {
  1848       tty->print(" since EliminateAllocations is off ===");
  1849     } else if(!has_scalar_replaceable_candidates) {
  1850       tty->print(" since there are no scalar replaceable candidates ===");
  1851     } else if(C->AliasLevel() < 3) {
  1852       tty->print(" since AliasLevel < 3 ===");
  1854     tty->cr();
  1855 #endif
  1857   return has_non_escaping_obj;
1860 // Find fields' initializing values for allocations.
  1861 void ConnectionGraph::find_init_values(Node* alloc, VectorSet* visited, PhaseTransform* phase) {
  1862   assert(alloc->is_Allocate(), "Should be called for Allocate nodes only");
  1863   PointsToNode* pta = ptnode_adr(alloc->_idx);
  1864   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
  1865   InitializeNode* ini = alloc->as_Allocate()->initialization();
  1867   Compile* C = _compile;
  1868   visited->Reset();
1869   // Check if an oop field's initializing value is recorded and add
1870   // a corresponding NULL value if it is not recorded.
1871   // The Connection Graph does not record a default initialization by NULL
1872   // captured by the Initialize node.
  1873   //
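  // For example (a sketch, assuming a Point class with a reference
  // field next): the code below never stores to p.next, so no
  // initializing value is captured and a NULL value must be added here
  // for loads of p.next to see the default.
  //
  //    Point p = new Point();   // p.next is only default-initialized
  //    Point q = p.next;        // must see NULL
  //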
  1874   uint ae_cnt = pta->edge_count();
  1875   for (uint ei = 0; ei < ae_cnt; ei++) {
  1876     uint nidx = pta->edge_target(ei); // Field (AddP)
  1877     PointsToNode* ptn = ptnode_adr(nidx);
  1878     assert(ptn->_node->is_AddP(), "Should be AddP nodes only");
  1879     int offset = ptn->offset();
  1880     if (offset != Type::OffsetBot &&
  1881         offset != oopDesc::klass_offset_in_bytes() &&
  1882         !visited->test_set(offset)) {
  1884       // Check only oop fields.
  1885       const Type* adr_type = ptn->_node->as_AddP()->bottom_type();
  1886       BasicType basic_field_type = T_INT;
  1887       if (adr_type->isa_instptr()) {
  1888         ciField* field = C->alias_type(adr_type->isa_instptr())->field();
  1889         if (field != NULL) {
  1890           basic_field_type = field->layout_type();
  1891         } else {
1892           // Ignore non-field loads (for example, a klass load).
  1894       } else if (adr_type->isa_aryptr()) {
  1895         if (offset != arrayOopDesc::length_offset_in_bytes()) {
  1896           const Type* elemtype = adr_type->isa_aryptr()->elem();
  1897           basic_field_type = elemtype->array_element_basic_type();
  1898         } else {
  1899           // Ignore array length load
  1901 #ifdef ASSERT
  1902       } else {
1903         // Raw pointers are used for initializing stores, so skip this one
1904         // since it should already be recorded.
  1905         Node* base = get_addp_base(ptn->_node);
  1906         assert(adr_type->isa_rawptr() && base->is_Proj() &&
  1907                (base->in(0) == alloc),"unexpected pointer type");
  1908 #endif
  1910       if (basic_field_type == T_OBJECT ||
  1911           basic_field_type == T_NARROWOOP ||
  1912           basic_field_type == T_ARRAY) {
  1913         Node* value = NULL;
  1914         if (ini != NULL) {
  1915           BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
  1916           Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
  1917           if (store != NULL && store->is_Store()) {
  1918             value = store->in(MemNode::ValueIn);
  1919           } else if (ptn->edge_count() > 0) { // Are there oop stores?
  1920             // Check for a store which follows allocation without branches.
  1921             // For example, a volatile field store is not collected
1922             // by the Initialize node. TODO: it would be nice to use idom() here.
  1923             //
  1924             // Search all references to the same field which use different
  1925             // AddP nodes, for example, in the next case:
  1926             //
  1927             //    Point p[] = new Point[1];
  1928             //    if ( x ) { p[0] = new Point(); p[0].x = x; }
  1929             //    if ( p[0] != null ) { y = p[0].x; } // has CastPP
  1930             //
  1931             for (uint next = ei; (next < ae_cnt) && (value == NULL); next++) {
  1932               uint fpi = pta->edge_target(next); // Field (AddP)
  1933               PointsToNode *ptf = ptnode_adr(fpi);
  1934               if (ptf->offset() == offset) {
  1935                 Node* nf = ptf->_node;
  1936                 for (DUIterator_Fast imax, i = nf->fast_outs(imax); i < imax; i++) {
  1937                   store = nf->fast_out(i);
  1938                   if (store->is_Store() && store->in(0) != NULL) {
  1939                     Node* ctrl = store->in(0);
  1940                     while(!(ctrl == ini || ctrl == alloc || ctrl == NULL ||
  1941                             ctrl == C->root() || ctrl == C->top() || ctrl->is_Region() ||
  1942                             ctrl->is_IfTrue() || ctrl->is_IfFalse())) {
  1943                        ctrl = ctrl->in(0);
  1945                     if (ctrl == ini || ctrl == alloc) {
  1946                       value = store->in(MemNode::ValueIn);
  1947                       break;
  1955         if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
  1956           // A field's initializing value was not recorded. Add NULL.
  1957           uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
  1958           add_edge_from_fields(alloc->_idx, null_idx, offset);
  1965 // Adjust escape state after Connection Graph is built.
  1966 void ConnectionGraph::adjust_escape_state(Node* n) {
  1967   PointsToNode* ptn = ptnode_adr(n->_idx);
  1968   assert(n->is_AddP(), "Should be called for AddP nodes only");
  1969   // Search for objects which are not scalar replaceable
  1970   // and mark them to propagate the state to referenced objects.
  1971   //
  1973   int offset = ptn->offset();
  1974   Node* base = get_addp_base(n);
  1975   VectorSet* ptset = PointsTo(base);
  1976   int ptset_size = ptset->Size();
  1978   // An object is not scalar replaceable if the field which may point
1979   // to it has an unknown offset (an unknown element of an array of objects).
  1980   //
  1982   if (offset == Type::OffsetBot) {
  1983     uint e_cnt = ptn->edge_count();
  1984     for (uint ei = 0; ei < e_cnt; ei++) {
  1985       uint npi = ptn->edge_target(ei);
  1986       ptnode_adr(npi)->set_scalar_replaceable(false);
  1990   // Currently an object is not scalar replaceable if a LoadStore node
1991   // accesses its field since the field's value is unknown after it.
  1992   //
  1993   bool has_LoadStore = false;
  1994   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1995     Node *use = n->fast_out(i);
  1996     if (use->is_LoadStore()) {
  1997       has_LoadStore = true;
  1998       break;
  2001   // An object is not scalar replaceable if the address points
2002   // to an unknown field (an unknown element for arrays, offset is OffsetBot).
  2003   //
2004   // Or the address may point to more than one object. This may produce
2005   // a false positive result (marked not scalar replaceable)
2006   // since the flow-insensitive escape analysis can't separate
2007   // the case when stores overwrite the field's value from the case
2008   // when stores happen on different control branches.
  2009   //
  2010   // Note: it will disable scalar replacement in some cases:
  2011   //
  2012   //    Point p[] = new Point[1];
2013   //    p[0] = new Point(); // Will not be scalar replaced
  2014   //
2015   // but it saves us from incorrect optimizations in cases like the next one:
  2016   //
  2017   //    Point p[] = new Point[1];
2018   //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
  2019   //
2020   if (ptset_size > 1 || (ptset_size != 0 &&
2021       (has_LoadStore || offset == Type::OffsetBot))) {
  2022     for( VectorSetI j(ptset); j.test(); ++j ) {
  2023       ptnode_adr(j.elem)->set_scalar_replaceable(false);
  2028 // Propagate escape states to referenced nodes.
  2029 bool ConnectionGraph::propagate_escape_state(GrowableArray<int>* cg_worklist,
  2030                                              GrowableArray<uint>* worklist,
  2031                                              PointsToNode::EscapeState esc_state) {
  2032   bool has_java_obj = false;
  2034   // push all nodes with the same escape state on the worklist
  2035   uint cg_length = cg_worklist->length();
  2036   for (uint next = 0; next < cg_length; ++next) {
  2037     int nk = cg_worklist->at(next);
  2038     if (ptnode_adr(nk)->escape_state() == esc_state)
  2039       worklist->push(nk);
  2041   // mark all reachable nodes
  2042   while (worklist->length() > 0) {
  2043     PointsToNode* ptn = ptnode_adr(worklist->pop());
  2044     if (ptn->node_type() == PointsToNode::JavaObject) {
  2045       has_java_obj = true;
  2047     uint e_cnt = ptn->edge_count();
  2048     for (uint ei = 0; ei < e_cnt; ei++) {
  2049       uint npi = ptn->edge_target(ei);
  2050       PointsToNode *np = ptnode_adr(npi);
  2051       if (np->escape_state() < esc_state) {
  2052         set_escape_state(npi, esc_state);
  2053         worklist->push(npi);
2057   // Has non-escaping java objects.
  2058   return has_java_obj && (esc_state < PointsToNode::GlobalEscape);
2061 // Optimize object comparisons.
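// For example (a hypothetical sketch): if a and b are distinct
// non-escaping allocations, their points-to sets are disjoint
// singletons, so the compare can be folded at compile time:
//
//    Point a = new Point();
//    Point b = new Point();
//    if (a == b) { ... }   // folds to _pcmp_neq (never equal)
//    if (a == a) { ... }   // folds to _pcmp_eq  (always equal)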
  2062 Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
  2063   assert(OptimizePtrCompare, "sanity");
2064   // Clone the returned set since PointsTo() returns a pointer
2065   // to the same structure, ConnectionGraph.pt_ptset.
  2066   VectorSet ptset1 = *PointsTo(n->in(1));
  2067   VectorSet ptset2 = *PointsTo(n->in(2));
  2069   // Check simple cases first.
  2070   if (ptset1.Size() == 1) {
  2071     uint pt1 = ptset1.getelem();
  2072     PointsToNode* ptn1 = ptnode_adr(pt1);
  2073     if (ptn1->escape_state() == PointsToNode::NoEscape) {
  2074       if (ptset2.Size() == 1 && ptset2.getelem() == pt1) {
2075         // Comparing the same non-escaping object.
  2076         return _pcmp_eq;
  2078       Node* obj = ptn1->_node;
2079       // Comparing a non-escaping allocation.
  2080       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
  2081           !ptset2.test(pt1)) {
  2082         return _pcmp_neq; // This includes nullness check.
  2085   } else if (ptset2.Size() == 1) {
  2086     uint pt2 = ptset2.getelem();
  2087     PointsToNode* ptn2 = ptnode_adr(pt2);
  2088     if (ptn2->escape_state() == PointsToNode::NoEscape) {
  2089       Node* obj = ptn2->_node;
2090       // Comparing a non-escaping allocation.
  2091       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
  2092           !ptset1.test(pt2)) {
  2093         return _pcmp_neq; // This includes nullness check.
  2098   if (!ptset1.disjoint(ptset2)) {
  2099     return NULL; // Sets are not disjoint
  2102   // Sets are disjoint.
  2103   bool set1_has_unknown_ptr = ptset1.test(_phantom_object) != 0;
  2104   bool set2_has_unknown_ptr = ptset2.test(_phantom_object) != 0;
  2105   bool set1_has_null_ptr   = (ptset1.test(_oop_null) | ptset1.test(_noop_null)) != 0;
  2106   bool set2_has_null_ptr   = (ptset2.test(_oop_null) | ptset2.test(_noop_null)) != 0;
2108   if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
2109       (set2_has_unknown_ptr && set1_has_null_ptr)) {
  2110     // Check nullness of unknown object.
  2111     return NULL;
  2114   // Disjointness by itself is not sufficient since
  2115   // alias analysis is not complete for escaped objects.
  2116   // Disjoint sets are definitely unrelated only when
2117   // at least one set has only non-escaping objects.
  2118   if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
  2119     bool has_only_non_escaping_alloc = true;
  2120     for (VectorSetI i(&ptset1); i.test(); ++i) {
  2121       uint pt = i.elem;
  2122       PointsToNode* ptn = ptnode_adr(pt);
  2123       Node* obj = ptn->_node;
  2124       if (ptn->escape_state() != PointsToNode::NoEscape ||
  2125           !(obj->is_Allocate() || obj->is_CallStaticJava())) {
  2126         has_only_non_escaping_alloc = false;
  2127         break;
  2130     if (has_only_non_escaping_alloc) {
  2131       return _pcmp_neq;
  2134   if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
  2135     bool has_only_non_escaping_alloc = true;
  2136     for (VectorSetI i(&ptset2); i.test(); ++i) {
  2137       uint pt = i.elem;
  2138       PointsToNode* ptn = ptnode_adr(pt);
  2139       Node* obj = ptn->_node;
  2140       if (ptn->escape_state() != PointsToNode::NoEscape ||
  2141           !(obj->is_Allocate() || obj->is_CallStaticJava())) {
  2142         has_only_non_escaping_alloc = false;
  2143         break;
  2146     if (has_only_non_escaping_alloc) {
  2147       return _pcmp_neq;
  2150   return NULL;
  2153 void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {
  2155     switch (call->Opcode()) {
  2156 #ifdef ASSERT
  2157     case Op_Allocate:
  2158     case Op_AllocateArray:
  2159     case Op_Lock:
  2160     case Op_Unlock:
  2161       assert(false, "should be done already");
  2162       break;
  2163 #endif
  2164     case Op_CallLeaf:
  2165     case Op_CallLeafNoFP:
2167       // Stub calls: objects do not escape, but they are not scalar replaceable.
  2168       // Adjust escape state for outgoing arguments.
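      // For example (hypothetical): the arrays passed to an arraycopy
      // stub do not escape globally through the stub, but raw pointers
      // into them leave the compiled method, so ArgEscape is the
      // appropriate state:
      //
      //    System.arraycopy(src, 0, dst, 0, n); // src, dst -> ArgEscape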
  2169       const TypeTuple * d = call->tf()->domain();
  2170       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  2171         const Type* at = d->field_at(i);
  2172         Node *arg = call->in(i)->uncast();
  2173         const Type *aat = phase->type(arg);
  2174         if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr() &&
  2175             ptnode_adr(arg->_idx)->escape_state() < PointsToNode::ArgEscape) {
  2177           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2178                  aat->isa_ptr() != NULL, "expecting a Ptr");
  2179 #ifdef ASSERT
  2180           if (!(call->Opcode() == Op_CallLeafNoFP &&
  2181                 call->as_CallLeaf()->_name != NULL &&
  2182                 (strstr(call->as_CallLeaf()->_name, "arraycopy")  != 0) ||
  2183                 call->as_CallLeaf()->_name != NULL &&
  2184                 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre")  == 0 ||
  2185                  strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ))
  2186           ) {
  2187             call->dump();
  2188             assert(false, "EA: unexpected CallLeaf");
  2190 #endif
  2191           set_escape_state(arg->_idx, PointsToNode::ArgEscape);
  2192           if (arg->is_AddP()) {
  2193             //
  2194             // The inline_native_clone() case when the arraycopy stub is called
2195             // after the allocation and before the Initialize and CheckCastPP nodes.
  2196             //
  2197             // Set AddP's base (Allocate) as not scalar replaceable since
  2198             // pointer to the base (with offset) is passed as argument.
  2199             //
  2200             arg = get_addp_base(arg);
  2202           for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
  2203             uint pt = j.elem;
  2204             set_escape_state(pt, PointsToNode::ArgEscape);
  2208       break;
  2211     case Op_CallStaticJava:
  2212     // For a static call, we know exactly what method is being called.
2213     // Use the bytecode estimator to record the call's escape effects.
  2215       ciMethod *meth = call->as_CallJava()->method();
2216       BCEscapeAnalyzer *call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
  2217       // fall-through if not a Java method or no analyzer information
  2218       if (call_analyzer != NULL) {
  2219         const TypeTuple * d = call->tf()->domain();
  2220         bool copy_dependencies = false;
  2221         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  2222           const Type* at = d->field_at(i);
  2223           int k = i - TypeFunc::Parms;
  2224           Node *arg = call->in(i)->uncast();
  2226           if (at->isa_oopptr() != NULL &&
  2227               ptnode_adr(arg->_idx)->escape_state() < PointsToNode::GlobalEscape) {
  2229             bool global_escapes = false;
  2230             bool fields_escapes = false;
  2231             if (!call_analyzer->is_arg_stack(k)) {
2232               // The argument globally escapes; mark everything it could point to.
  2233               set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
  2234               global_escapes = true;
  2235             } else {
  2236               if (!call_analyzer->is_arg_local(k)) {
  2237                 // The argument itself doesn't escape, but any fields might
  2238                 fields_escapes = true;
  2240               set_escape_state(arg->_idx, PointsToNode::ArgEscape);
  2241               copy_dependencies = true;
  2244             for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
  2245               uint pt = j.elem;
  2246               if (global_escapes) {
2247                 // The argument globally escapes; mark everything it could point to.
  2248                 set_escape_state(pt, PointsToNode::GlobalEscape);
  2249               } else {
  2250                 if (fields_escapes) {
  2251                   // The argument itself doesn't escape, but any fields might
  2252                   add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
  2254                 set_escape_state(pt, PointsToNode::ArgEscape);
  2259         if (copy_dependencies)
  2260           call_analyzer->copy_dependencies(_compile->dependencies());
  2261         break;
  2265     default:
  2266     // Fall-through here if not a Java method or no analyzer information
  2267     // or some other type of call, assume the worst case: all arguments
  2268     // globally escape.
2270       // Adjust escape state for outgoing arguments.
  2271       const TypeTuple * d = call->tf()->domain();
  2272       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  2273         const Type* at = d->field_at(i);
  2274         if (at->isa_oopptr() != NULL) {
  2275           Node *arg = call->in(i)->uncast();
  2276           set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
  2277           for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
  2278             uint pt = j.elem;
  2279             set_escape_state(pt, PointsToNode::GlobalEscape);
  2286 void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *phase) {
  2287   CallNode   *call = resproj->in(0)->as_Call();
  2288   uint    call_idx = call->_idx;
  2289   uint resproj_idx = resproj->_idx;
  2291   switch (call->Opcode()) {
  2292     case Op_Allocate:
  2294       Node *k = call->in(AllocateNode::KlassNode);
  2295       const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
  2296       assert(kt != NULL, "TypeKlassPtr  required.");
  2297       ciKlass* cik = kt->klass();
  2299       PointsToNode::EscapeState es;
  2300       uint edge_to;
  2301       if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
  2302          !cik->is_instance_klass() || // StressReflectiveCode
  2303           cik->as_instance_klass()->has_finalizer()) {
  2304         es = PointsToNode::GlobalEscape;
  2305         edge_to = _phantom_object; // Could not be worse
  2306       } else {
  2307         es = PointsToNode::NoEscape;
  2308         edge_to = call_idx;
  2309         assert(ptnode_adr(call_idx)->scalar_replaceable(), "sanity");
  2311       set_escape_state(call_idx, es);
  2312       add_pointsto_edge(resproj_idx, edge_to);
  2313       _processed.set(resproj_idx);
  2314       break;
  2317     case Op_AllocateArray:
  2320       Node *k = call->in(AllocateNode::KlassNode);
  2321       const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
  2322       assert(kt != NULL, "TypeKlassPtr  required.");
  2323       ciKlass* cik = kt->klass();
  2325       PointsToNode::EscapeState es;
  2326       uint edge_to;
  2327       if (!cik->is_array_klass()) { // StressReflectiveCode
  2328         es = PointsToNode::GlobalEscape;
  2329         edge_to = _phantom_object;
  2330       } else {
  2331         es = PointsToNode::NoEscape;
  2332         edge_to = call_idx;
  2333         assert(ptnode_adr(call_idx)->scalar_replaceable(), "sanity");
  2334         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
  2335         if (length < 0 || length > EliminateAllocationArraySizeLimit) {
  2336           // Not scalar replaceable if the length is not constant or too big.
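          // For example (a sketch): "new int[2]" may still be scalar
          // replaced, while "new int[n]" with a non-constant n, or a
          // constant length above EliminateAllocationArraySizeLimit,
          // may not.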
  2337           ptnode_adr(call_idx)->set_scalar_replaceable(false);
  2340       set_escape_state(call_idx, es);
  2341       add_pointsto_edge(resproj_idx, edge_to);
  2342       _processed.set(resproj_idx);
  2343       break;
  2346     case Op_CallStaticJava:
  2347     // For a static call, we know exactly what method is being called.
2348     // Use the bytecode estimator to record whether the call's return value escapes.
  2350       bool done = true;
  2351       const TypeTuple *r = call->tf()->range();
  2352       const Type* ret_type = NULL;
  2354       if (r->cnt() > TypeFunc::Parms)
  2355         ret_type = r->field_at(TypeFunc::Parms);
  2357       // Note:  we use isa_ptr() instead of isa_oopptr()  here because the
  2358       //        _multianewarray functions return a TypeRawPtr.
  2359       if (ret_type == NULL || ret_type->isa_ptr() == NULL) {
  2360         _processed.set(resproj_idx);
  2361         break;  // doesn't return a pointer type
  2363       ciMethod *meth = call->as_CallJava()->method();
  2364       const TypeTuple * d = call->tf()->domain();
  2365       if (meth == NULL) {
  2366         // not a Java method, assume global escape
  2367         set_escape_state(call_idx, PointsToNode::GlobalEscape);
  2368         add_pointsto_edge(resproj_idx, _phantom_object);
  2369       } else {
  2370         BCEscapeAnalyzer *call_analyzer = meth->get_bcea();
  2371         bool copy_dependencies = false;
  2373         if (call_analyzer->is_return_allocated()) {
2374           // Returns a newly allocated unescaped object; simply
2375           // update dependency information.
2376           // Mark it as NoEscape so that objects referenced by
2377           // its fields will be marked as NoEscape at least.
  2378           set_escape_state(call_idx, PointsToNode::NoEscape);
  2379           ptnode_adr(call_idx)->set_scalar_replaceable(false);
  2380           add_pointsto_edge(resproj_idx, call_idx);
  2381           copy_dependencies = true;
  2382         } else if (call_analyzer->is_return_local()) {
  2383           // determine whether any arguments are returned
  2384           set_escape_state(call_idx, PointsToNode::ArgEscape);
  2385           bool ret_arg = false;
  2386           for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  2387             const Type* at = d->field_at(i);
  2389             if (at->isa_oopptr() != NULL) {
  2390               Node *arg = call->in(i)->uncast();
  2392               if (call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
  2393                 ret_arg = true;
  2394                 PointsToNode *arg_esp = ptnode_adr(arg->_idx);
  2395                 if (arg_esp->node_type() == PointsToNode::UnknownType)
  2396                   done = false;
  2397                 else if (arg_esp->node_type() == PointsToNode::JavaObject)
  2398                   add_pointsto_edge(resproj_idx, arg->_idx);
  2399                 else
  2400                   add_deferred_edge(resproj_idx, arg->_idx);
  2404           if (done && !ret_arg) {
  2405             // Returns unknown object.
  2406             set_escape_state(call_idx, PointsToNode::GlobalEscape);
  2407             add_pointsto_edge(resproj_idx, _phantom_object);
  2409           if (done) {
  2410             copy_dependencies = true;
  2412         } else {
  2413           set_escape_state(call_idx, PointsToNode::GlobalEscape);
  2414           add_pointsto_edge(resproj_idx, _phantom_object);
  2416         if (copy_dependencies)
  2417           call_analyzer->copy_dependencies(_compile->dependencies());
  2419       if (done)
  2420         _processed.set(resproj_idx);
  2421       break;
  2424     default:
  2425     // Some other type of call, assume the worst case that the
  2426     // returned value, if any, globally escapes.
  2428       const TypeTuple *r = call->tf()->range();
  2429       if (r->cnt() > TypeFunc::Parms) {
  2430         const Type* ret_type = r->field_at(TypeFunc::Parms);
  2432         // Note:  we use isa_ptr() instead of isa_oopptr()  here because the
  2433         //        _multianewarray functions return a TypeRawPtr.
  2434         if (ret_type->isa_ptr() != NULL) {
  2435           set_escape_state(call_idx, PointsToNode::GlobalEscape);
  2436           add_pointsto_edge(resproj_idx, _phantom_object);
  2439       _processed.set(resproj_idx);
  2444 // Populate Connection Graph with Ideal nodes and create simple
2445 // connection graph edges (no need to check the node_type of inputs
  2446 // or to call PointsTo() to walk the connection graph).
  2447 void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase) {
  2448   if (_processed.test(n->_idx))
  2449     return; // No need to redefine node's state.
  2451   if (n->is_Call()) {
  2452     // Arguments to allocation and locking don't escape.
  2453     if (n->is_Allocate()) {
  2454       add_node(n, PointsToNode::JavaObject, PointsToNode::UnknownEscape, true);
  2455       record_for_optimizer(n);
  2456     } else if (n->is_Lock() || n->is_Unlock()) {
  2457       // Put Lock and Unlock nodes on IGVN worklist to process them during
  2458       // the first IGVN optimization when escape information is still available.
  2459       record_for_optimizer(n);
  2460       _processed.set(n->_idx);
  2461     } else {
2462       // Don't mark as processed since the call's arguments have to be processed.
  2463       PointsToNode::NodeType nt = PointsToNode::UnknownType;
  2464       PointsToNode::EscapeState es = PointsToNode::UnknownEscape;
  2466       // Check if a call returns an object.
  2467       const TypeTuple *r = n->as_Call()->tf()->range();
  2468       if (r->cnt() > TypeFunc::Parms &&
  2469           r->field_at(TypeFunc::Parms)->isa_ptr() &&
  2470           n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
  2471         nt = PointsToNode::JavaObject;
  2472         if (!n->is_CallStaticJava()) {
2473           // Since the called method is statically unknown, assume
2474           // the worst case: the returned value globally escapes.
  2475           es = PointsToNode::GlobalEscape;
  2478       add_node(n, nt, es, false);
  2480     return;
  2483   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
2484   // ThreadLocal has a RawPtr type.
  2485   switch (n->Opcode()) {
  2486     case Op_AddP:
  2488       add_node(n, PointsToNode::Field, PointsToNode::UnknownEscape, false);
  2489       break;
  2491     case Op_CastX2P:
  2492     { // "Unsafe" memory access.
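      // A sketch of why (hypothetical): CastX2P turns a raw integral
      // value, e.g. one produced by sun.misc.Unsafe address arithmetic,
      // back into a pointer, so the analysis cannot tell which object,
      // if any, it refers to and must assume GlobalEscape.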
  2493       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
  2494       break;
  2496     case Op_CastPP:
  2497     case Op_CheckCastPP:
  2498     case Op_EncodeP:
  2499     case Op_DecodeN:
  2501       add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  2502       int ti = n->in(1)->_idx;
  2503       PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
  2504       if (nt == PointsToNode::UnknownType) {
  2505         _delayed_worklist.push(n); // Process it later.
  2506         break;
  2507       } else if (nt == PointsToNode::JavaObject) {
  2508         add_pointsto_edge(n->_idx, ti);
  2509       } else {
  2510         add_deferred_edge(n->_idx, ti);
  2512       _processed.set(n->_idx);
  2513       break;
  2515     case Op_ConP:
  2517       // assume all pointer constants globally escape except for null
  2518       PointsToNode::EscapeState es;
  2519       if (phase->type(n) == TypePtr::NULL_PTR)
  2520         es = PointsToNode::NoEscape;
  2521       else
  2522         es = PointsToNode::GlobalEscape;
  2524       add_node(n, PointsToNode::JavaObject, es, true);
  2525       break;
  2527     case Op_ConN:
  2529       // assume all narrow oop constants globally escape except for null
  2530       PointsToNode::EscapeState es;
  2531       if (phase->type(n) == TypeNarrowOop::NULL_PTR)
  2532         es = PointsToNode::NoEscape;
  2533       else
  2534         es = PointsToNode::GlobalEscape;
  2536       add_node(n, PointsToNode::JavaObject, es, true);
  2537       break;
  2539     case Op_CreateEx:
  2541       // assume that all exception objects globally escape
  2542       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
  2543       break;
  2545     case Op_LoadKlass:
  2546     case Op_LoadNKlass:
  2548       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
  2549       break;
  2551     case Op_LoadP:
  2552     case Op_LoadN:
  2554       const Type *t = phase->type(n);
  2555       if (t->make_ptr() == NULL) {
  2556         _processed.set(n->_idx);
  2557         return;
  2559       add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  2560       break;
  2562     case Op_Parm:
2564       _processed.set(n->_idx); // No need to redefine its state.
  2565       uint con = n->as_Proj()->_con;
  2566       if (con < TypeFunc::Parms)
  2567         return;
  2568       const Type *t = n->in(0)->as_Start()->_domain->field_at(con);
  2569       if (t->isa_ptr() == NULL)
  2570         return;
  2571       // We have to assume all input parameters globally escape
  2572       // (Note: passing 'false' since _processed is already set).
  2573       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, false);
  2574       break;
  2576     case Op_PartialSubtypeCheck:
  2577     { // Produces Null or notNull and is used in CmpP.
  2578       add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
  2579       break;
  2581     case Op_Phi:
  2583       const Type *t = n->as_Phi()->type();
  2584       if (t->make_ptr() == NULL) {
  2585         // nothing to do if not an oop or narrow oop
  2586         _processed.set(n->_idx);
  2587         return;
  2589       add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  2590       uint i;
  2591       for (i = 1; i < n->req() ; i++) {
  2592         Node* in = n->in(i);
  2593         if (in == NULL)
  2594           continue;  // ignore NULL
  2595         in = in->uncast();
  2596         if (in->is_top() || in == n)
2597           continue;  // ignore top or inputs which go back to this node
  2598         int ti = in->_idx;
  2599         PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
  2600         if (nt == PointsToNode::UnknownType) {
  2601           break;
  2602         } else if (nt == PointsToNode::JavaObject) {
  2603           add_pointsto_edge(n->_idx, ti);
  2604         } else {
  2605           add_deferred_edge(n->_idx, ti);
  2608       if (i >= n->req())
  2609         _processed.set(n->_idx);
  2610       else
  2611         _delayed_worklist.push(n);
  2612       break;
  2614     case Op_Proj:
  2616       // we are only interested in the oop result projection from a call
  2617       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
  2618         const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
  2619         assert(r->cnt() > TypeFunc::Parms, "sanity");
  2620         if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
  2621           add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  2622           int ti = n->in(0)->_idx;
  2623           // The call may not be registered yet (since not all its inputs are registered)
2624           // if this is the projection from the backbranch edge of a Phi.
  2625           if (ptnode_adr(ti)->node_type() != PointsToNode::UnknownType) {
  2626             process_call_result(n->as_Proj(), phase);
  2628           if (!_processed.test(n->_idx)) {
  2629             // The call's result may need to be processed later if the call
2630             // returns its argument and the argument is not processed yet.
  2631             _delayed_worklist.push(n);
  2633           break;
  2636       _processed.set(n->_idx);
  2637       break;
  2639     case Op_Return:
  2641       if( n->req() > TypeFunc::Parms &&
  2642           phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
  2643         // Treat Return value as LocalVar with GlobalEscape escape state.
  2644         add_node(n, PointsToNode::LocalVar, PointsToNode::GlobalEscape, false);
  2645         int ti = n->in(TypeFunc::Parms)->_idx;
  2646         PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
  2647         if (nt == PointsToNode::UnknownType) {
  2648           _delayed_worklist.push(n); // Process it later.
  2649           break;
  2650         } else if (nt == PointsToNode::JavaObject) {
  2651           add_pointsto_edge(n->_idx, ti);
  2652         } else {
  2653           add_deferred_edge(n->_idx, ti);
  2656       _processed.set(n->_idx);
  2657       break;
    case Op_StoreP:
    case Op_StoreN:
    {
      const Type *adr_type = phase->type(n->in(MemNode::Address));
      adr_type = adr_type->make_ptr();
      if (adr_type->isa_oopptr()) {
        add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
      } else {
        Node* adr = n->in(MemNode::Address);
        if (adr->is_AddP() && phase->type(adr) == TypeRawPtr::NOTNULL &&
            adr->in(AddPNode::Address)->is_Proj() &&
            adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
          add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
          // We are computing a raw address for a store captured by an
          // Initialize; compute an appropriate address type.
          int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
          assert(offs != Type::OffsetBot, "offset must be a constant");
        } else {
          _processed.set(n->_idx);
          return;
        }
      }
      break;
    }
    case Op_StorePConditional:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    {
      const Type *adr_type = phase->type(n->in(MemNode::Address));
      adr_type = adr_type->make_ptr();
      if (adr_type->isa_oopptr()) {
        add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
      } else {
        _processed.set(n->_idx);
        return;
      }
      break;
    }
    case Op_AryEq:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    {
      // char[] arrays passed to string intrinsics are not scalar replaceable.
      add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
      break;
    }
    case Op_ThreadLocal:
    {
      add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
      break;
    }
    default:
      ; // nothing to do
  }
  return;
}
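
// Second pass over a node: add the connection graph edges. Registration
// (the pass above) guarantees that every input of interest is already in
// the graph, so node types can be asserted rather than re-checked. Opcodes
// that were fully processed during registration should never reach this
// method and are guarded by assert(false, ...) below.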
void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
  uint n_idx = n->_idx;
  assert(ptnode_adr(n_idx)->_node != NULL, "node should be registered");

  // Don't set the processed bit for AddP, LoadP and StoreP nodes since
  // they may need more than one pass to process.
  // Also don't mark Call nodes as processed since their
  // arguments may need more than one pass to process.
  if (_processed.test(n_idx))
    return; // No need to redefine the node's state.

  if (n->is_Call()) {
    CallNode *call = n->as_Call();
    process_call_arguments(call, phase);
    return;
  }

  switch (n->Opcode()) {
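    // An AddP is the address of a field: its graph node stands for the
    // field itself, keyed by the (base object, offset) pair.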
    case Op_AddP:
    {
      Node *base = get_addp_base(n);
      int offset = address_offset(n, phase);
      // Create a field edge to this node from everything base could point to.
      for (VectorSetI i(PointsTo(base)); i.test(); ++i) {
        uint pt = i.elem;
        add_field_edge(pt, n_idx, offset);
      }
      break;
    }
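    // Several opcodes below are completely processed during the first pass
    // and are never put on a worklist, so hitting one of their
    // assert(false, ...) guards here means registration went wrong.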
    case Op_CastX2P:
    {
      assert(false, "Op_CastX2P");
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    {
      int ti = n->in(1)->_idx;
      assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "all nodes should be registered");
      if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
        add_pointsto_edge(n_idx, ti);
      } else {
        add_deferred_edge(n_idx, ti);
      }
      _processed.set(n_idx);
      break;
    }
    case Op_ConP:
    {
      assert(false, "Op_ConP");
      break;
    }
    case Op_ConN:
    {
      assert(false, "Op_ConN");
      break;
    }
    case Op_CreateEx:
    {
      assert(false, "Op_CreateEx");
      break;
    }
    case Op_LoadKlass:
    case Op_LoadNKlass:
    {
      assert(false, "Op_LoadKlass");
      break;
    }
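    // Loads of oop fields: the value loaded can be anything stored in the
    // matching field of any object the address base may point to, which is
    // exactly what a deferred edge to those fields expresses.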
    case Op_LoadP:
    case Op_LoadN:
    {
      const Type *t = phase->type(n);
#ifdef ASSERT
      if (t->make_ptr() == NULL)
        assert(false, "Op_LoadP");
#endif

      Node* adr = n->in(MemNode::Address)->uncast();
      Node* adr_base;
      if (adr->is_AddP()) {
        adr_base = get_addp_base(adr);
      } else {
        adr_base = adr;
      }

      // For everything "adr_base" could point to, create a deferred edge from
      // this node to each field with the same offset.
      int offset = address_offset(adr, phase);
      for (VectorSetI i(PointsTo(adr_base)); i.test(); ++i) {
        uint pt = i.elem;
        if (adr->is_AddP()) {
          // Add field edge if it is missing.
          add_field_edge(pt, adr->_idx, offset);
        }
        add_deferred_edge_to_fields(n_idx, pt, offset);
      }
      break;
    }
    case Op_Parm:
    {
      assert(false, "Op_Parm");
      break;
    }
    case Op_PartialSubtypeCheck:
    {
      assert(false, "Op_PartialSubtypeCheck");
      break;
    }
    case Op_Phi:
    {
#ifdef ASSERT
      const Type *t = n->as_Phi()->type();
      if (t->make_ptr() == NULL)
        assert(false, "Op_Phi");
#endif
      for (uint i = 1; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == NULL)
          continue;  // ignore NULL
        in = in->uncast();
        if (in->is_top() || in == n)
          continue;  // ignore top or inputs which go back to this node
        int ti = in->_idx;
        PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
        assert(nt != PointsToNode::UnknownType, "all nodes should be known");
        if (nt == PointsToNode::JavaObject) {
          add_pointsto_edge(n_idx, ti);
        } else {
          add_deferred_edge(n_idx, ti);
        }
      }
      _processed.set(n_idx);
      break;
    }
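    // Unlike the first pass, the call producing this result projection is
    // guaranteed to be registered by now, so the result is processed
    // immediately instead of going back on a worklist.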
    case Op_Proj:
    {
      // We are only interested in the oop result projection from a call.
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call()) {
        assert(ptnode_adr(n->in(0)->_idx)->node_type() != PointsToNode::UnknownType,
               "all nodes should be registered");
        const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
        assert(r->cnt() > TypeFunc::Parms, "sanity");
        if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
          process_call_result(n->as_Proj(), phase);
          assert(_processed.test(n_idx), "all call results should be processed");
          break;
        }
      }
      assert(false, "Op_Proj");
      break;
    }
    case Op_Return:
    {
#ifdef ASSERT
      if (n->req() <= TypeFunc::Parms ||
          !phase->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        assert(false, "Op_Return");
      }
#endif
      int ti = n->in(TypeFunc::Parms)->_idx;
      assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "node should be registered");
      if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
        add_pointsto_edge(n_idx, ti);
      } else {
        add_deferred_edge(n_idx, ti);
      }
      _processed.set(n_idx);
      break;
    }
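    // All tracked stores funnel through here in the second pass; conditional
    // stores and CAS operations are treated conservatively, as if the value
    // were always stored.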
    case Op_StoreP:
    case Op_StoreN:
    case Op_StorePConditional:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    {
      Node *adr = n->in(MemNode::Address);
      const Type *adr_type = phase->type(adr)->make_ptr();
#ifdef ASSERT
      if (!adr_type->isa_oopptr())
        assert(phase->type(adr) == TypeRawPtr::NOTNULL, "Op_StoreP");
#endif

      assert(adr->is_AddP(), "expecting an AddP");
      Node *adr_base = get_addp_base(adr);
      Node *val = n->in(MemNode::ValueIn)->uncast();
      int offset = address_offset(adr, phase);
      // For everything "adr_base" could point to, create a deferred edge
      // to "val" from each field with the same offset.
      for (VectorSetI i(PointsTo(adr_base)); i.test(); ++i) {
        uint pt = i.elem;
        // Add field edge if it is missing.
        add_field_edge(pt, adr->_idx, offset);
        add_edge_from_fields(pt, val->_idx, offset);
      }
      break;
    }
    case Op_AryEq:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    {
      // char[] arrays passed to string intrinsics do not escape but
      // they are not scalar replaceable. Adjust escape state for them.
      // Start from the in(2) edge since in(1) is the memory edge.
      for (uint i = 2; i < n->req(); i++) {
        Node* adr = n->in(i)->uncast();
        const Type *at = phase->type(adr);
        if (!adr->is_top() && at->isa_ptr()) {
          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
                 at->isa_ptr() != NULL, "expecting a Ptr");
          if (adr->is_AddP()) {
            adr = get_addp_base(adr);
          }
          // Mark as ArgEscape everything "adr" could point to.
          set_escape_state(adr->_idx, PointsToNode::ArgEscape);
        }
      }
      _processed.set(n_idx);
      break;
    }
    case Op_ThreadLocal:
    {
      assert(false, "Op_ThreadLocal");
      break;
    }
    default:
      // This method should be called only for EA specific nodes.
      ShouldNotReachHere();
  }
}

#ifndef PRODUCT
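// Dump the connection graph: each Allocate-rooted JavaObject that does not
// escape (or every one, with -XX:+Verbose), followed by the LocalVars whose
// single edge targets it and, in Verbose mode, by its field nodes.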
void ConnectionGraph::dump() {
  bool first = true;

  uint size = nodes_size();
  for (uint ni = 0; ni < size; ni++) {
    PointsToNode *ptn = ptnode_adr(ni);
    PointsToNode::NodeType ptn_type = ptn->node_type();

    if (ptn_type != PointsToNode::JavaObject || ptn->_node == NULL)
      continue;
    PointsToNode::EscapeState es = escape_state(ptn->_node);
    if (ptn->_node->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
      if (first) {
        tty->cr();
        tty->print("======== Connection graph for ");
        _compile->method()->print_short_name();
        tty->cr();
        first = false;
      }
      tty->print("%6d ", ni);
      ptn->dump();
      // Print all locals which reference this allocation.
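      // (The scan below starts at ni and lists only LocalVars whose single
      // edge targets this allocation.)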
      for (uint li = ni; li < size; li++) {
        PointsToNode *ptn_loc = ptnode_adr(li);
        PointsToNode::NodeType ptn_loc_type = ptn_loc->node_type();
        if (ptn_loc_type == PointsToNode::LocalVar && ptn_loc->_node != NULL &&
            ptn_loc->edge_count() == 1 && ptn_loc->edge_target(0) == ni) {
          ptnode_adr(li)->dump(false);
        }
      }
      if (Verbose) {
        // Print all fields which reference this allocation.
        for (uint i = 0; i < ptn->edge_count(); i++) {
          uint ei = ptn->edge_target(i);
          ptnode_adr(ei)->dump(false);
        }
      }
      tty->cr();
    }
  }
}
#endif
