src/share/vm/opto/escape.cpp

author:      xdono
date:        Wed, 02 Jul 2008 12:55:16 -0700
changeset:   631:d1605aabd0a1
parent:      603:7793bd37a336
child:       670:9c2ecc2ffb12
permissions: -rw-r--r--

6719955: Update copyright year
Summary: Update copyright year for files that have been modified in 2008
Reviewed-by: ohair, tbell

     1 /*
     2  * Copyright 2005-2008 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_escape.cpp.incl"
    28 uint PointsToNode::edge_target(uint e) const {
    29   assert(_edges != NULL && e < (uint)_edges->length(), "valid edge index");
    30   return (_edges->at(e) >> EdgeShift);
    31 }
    33 PointsToNode::EdgeType PointsToNode::edge_type(uint e) const {
    34   assert(_edges != NULL && e < (uint)_edges->length(), "valid edge index");
    35   return (EdgeType) (_edges->at(e) & EdgeMask);
    36 }
    38 void PointsToNode::add_edge(uint targIdx, PointsToNode::EdgeType et) {
    39   uint v = (targIdx << EdgeShift) + ((uint) et);
    40   if (_edges == NULL) {
    41     Arena *a = Compile::current()->comp_arena();
    42     _edges = new(a) GrowableArray<uint>(a, INITIAL_EDGE_COUNT, 0, 0);
    43   }
    44   _edges->append_if_missing(v);
    45 }
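// A worked example of the edge encoding (a sketch, assuming the
// EdgeShift == 2 / EdgeMask == 3 constants and PointsToEdge == 1 from
// PointsToNode's declaration in escape.hpp):
//
//   add_edge(5, PointsToEdge)  appends v = (5 << 2) | 1 = 21
//   edge_target(e)             recovers 21 >> 2 == 5
//   edge_type(e)               recovers (EdgeType)(21 & 3) == PointsToEdge
//
// Packing the target index and the two-bit edge type into one uint keeps
// each edge in a single GrowableArray slot, and append_if_missing()
// deduplicates repeated (target, type) pairs.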
    47 void PointsToNode::remove_edge(uint targIdx, PointsToNode::EdgeType et) {
    48   uint v = (targIdx << EdgeShift) + ((uint) et);
    50   _edges->remove(v);
    51 }
    53 #ifndef PRODUCT
    54 static const char *node_type_names[] = {
    55   "UnknownType",
    56   "JavaObject",
    57   "LocalVar",
    58   "Field"
    59 };
    61 static const char *esc_names[] = {
    62   "UnknownEscape",
    63   "NoEscape",
    64   "ArgEscape",
    65   "GlobalEscape"
    66 };
    68 static const char *edge_type_suffix[] = {
    69  "?", // UnknownEdge
    70  "P", // PointsToEdge
    71  "D", // DeferredEdge
    72  "F"  // FieldEdge
    73 };
    75 void PointsToNode::dump() const {
    76   NodeType nt = node_type();
    77   EscapeState es = escape_state();
    78   tty->print("%s %s %s [[", node_type_names[(int) nt], esc_names[(int) es], _scalar_replaceable ? "" : "NSR");
    79   for (uint i = 0; i < edge_count(); i++) {
    80     tty->print(" %d%s", edge_target(i), edge_type_suffix[(int) edge_type(i)]);
    81   }
    82   tty->print("]]  ");
    83   if (_node == NULL)
    84     tty->print_cr("<null>");
    85   else
    86     _node->dump();
    87 }
    88 #endif
    90 ConnectionGraph::ConnectionGraph(Compile * C) : _processed(C->comp_arena()), _node_map(C->comp_arena()) {
    91   _collecting = true;
    92   this->_compile = C;
    93   const PointsToNode &dummy = PointsToNode();
    94   int sz = C->unique();
    95   _nodes = new(C->comp_arena()) GrowableArray<PointsToNode>(C->comp_arena(), sz, sz, dummy);
    96   _phantom_object = C->top()->_idx;
    97   PointsToNode *phn = ptnode_adr(_phantom_object);
    98   phn->_node = C->top();
    99   phn->set_node_type(PointsToNode::JavaObject);
   100   phn->set_escape_state(PointsToNode::GlobalEscape);
   101 }
   103 void ConnectionGraph::add_pointsto_edge(uint from_i, uint to_i) {
   104   PointsToNode *f = ptnode_adr(from_i);
   105   PointsToNode *t = ptnode_adr(to_i);
   107   assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
   108   assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of PointsTo edge");
   109   assert(t->node_type() == PointsToNode::JavaObject, "invalid destination of PointsTo edge");
   110   f->add_edge(to_i, PointsToNode::PointsToEdge);
   111 }
   113 void ConnectionGraph::add_deferred_edge(uint from_i, uint to_i) {
   114   PointsToNode *f = ptnode_adr(from_i);
   115   PointsToNode *t = ptnode_adr(to_i);
   117   assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
   118   assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of Deferred edge");
   119   assert(t->node_type() == PointsToNode::LocalVar || t->node_type() == PointsToNode::Field, "invalid destination of Deferred edge");
   120   // don't add a self-referential edge, this can occur during removal of
   121   // deferred edges
   122   if (from_i != to_i)
   123     f->add_edge(to_i, PointsToNode::DeferredEdge);
   124 }
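// Deferred edges postpone copy propagation until remove_deferred() runs.
// As an illustration (hypothetical node numbers), the assignment
//
//   p = q;    // where q points to allocation O
//
// is modeled as LocalVar(p) -D-> LocalVar(q) -P-> JavaObject(O); after
// deferred edges are removed, p acquires the direct edge p -P-> O.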
   126 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
   127   const Type *adr_type = phase->type(adr);
   128   if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
   129       adr->in(AddPNode::Address)->is_Proj() &&
   130       adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
   131     // We are computing a raw address for a store captured by an Initialize
   132     // compute an appropriate address type. AddP cases #3 and #5 (see below).
   133     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
   134     assert(offs != Type::OffsetBot ||
   135            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
   136            "offset must be a constant or it is initialization of array");
   137     return offs;
   138   }
   139   const TypePtr *t_ptr = adr_type->isa_ptr();
   140   assert(t_ptr != NULL, "must be a pointer type");
   141   return t_ptr->offset();
   142 }
   144 void ConnectionGraph::add_field_edge(uint from_i, uint to_i, int offset) {
   145   PointsToNode *f = ptnode_adr(from_i);
   146   PointsToNode *t = ptnode_adr(to_i);
   148   assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
   149   assert(f->node_type() == PointsToNode::JavaObject, "invalid source of Field edge");
   150   assert(t->node_type() == PointsToNode::Field, "invalid destination of Field edge");
   151   assert (t->offset() == -1 || t->offset() == offset, "conflicting field offsets");
   152   t->set_offset(offset);
   154   f->add_edge(to_i, PointsToNode::FieldEdge);
   155 }
   157 void ConnectionGraph::set_escape_state(uint ni, PointsToNode::EscapeState es) {
   158   PointsToNode *npt = ptnode_adr(ni);
   159   PointsToNode::EscapeState old_es = npt->escape_state();
   160   if (es > old_es)
   161     npt->set_escape_state(es);
   162 }
   164 void ConnectionGraph::add_node(Node *n, PointsToNode::NodeType nt,
   165                                PointsToNode::EscapeState es, bool done) {
   166   PointsToNode* ptadr = ptnode_adr(n->_idx);
   167   ptadr->_node = n;
   168   ptadr->set_node_type(nt);
   170   // inline set_escape_state(idx, es);
   171   PointsToNode::EscapeState old_es = ptadr->escape_state();
   172   if (es > old_es)
   173     ptadr->set_escape_state(es);
   175   if (done)
   176     _processed.set(n->_idx);
   177 }
   179 PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n, PhaseTransform *phase) {
   180   uint idx = n->_idx;
   181   PointsToNode::EscapeState es;
   183   // If we are still collecting or there were no non-escaping allocations
   184   // we don't know the answer yet
   185   if (_collecting || !_has_allocations)
   186     return PointsToNode::UnknownEscape;
   188   // if the node was created after the escape computation, return
   189   // UnknownEscape
   190   if (idx >= (uint)_nodes->length())
   191     return PointsToNode::UnknownEscape;
   193   es = _nodes->at_grow(idx).escape_state();
   195   // if we have already computed a value, return it
   196   if (es != PointsToNode::UnknownEscape)
   197     return es;
   199   // compute max escape state of anything this node could point to
   200   VectorSet ptset(Thread::current()->resource_area());
   201   PointsTo(ptset, n, phase);
   202   for(VectorSetI i(&ptset); i.test() && es != PointsToNode::GlobalEscape; ++i) {
   203     uint pt = i.elem;
   204     PointsToNode::EscapeState pes = _nodes->adr_at(pt)->escape_state();
   205     if (pes > es)
   206       es = pes;
   207   }
   208   // cache the computed escape state
   209   assert(es != PointsToNode::UnknownEscape, "should have computed an escape state");
   210   _nodes->adr_at(idx)->set_escape_state(es);
   211   return es;
   212 }
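// The EscapeState values are ordered (UnknownEscape < NoEscape <
// ArgEscape < GlobalEscape), so the loop above simply keeps the maximum
// over the points-to set. A hypothetical example: if n may reference
// either allocation A (NoEscape) or allocation B (ArgEscape), the state
// computed and cached for n is ArgEscape.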
   214 void ConnectionGraph::PointsTo(VectorSet &ptset, Node * n, PhaseTransform *phase) {
   215   VectorSet visited(Thread::current()->resource_area());
   216   GrowableArray<uint>  worklist;
   218 #ifdef ASSERT
   219   Node *orig_n = n;
   220 #endif
   222   n = n->uncast();
   223   PointsToNode  npt = _nodes->at_grow(n->_idx);
   225   // If we have a JavaObject, return just that object
   226   if (npt.node_type() == PointsToNode::JavaObject) {
   227     ptset.set(n->_idx);
   228     return;
   229   }
   230 #ifdef ASSERT
   231   if (npt._node == NULL) {
   232     if (orig_n != n)
   233       orig_n->dump();
   234     n->dump();
   235     assert(npt._node != NULL, "unregistered node");
   236   }
   237 #endif
   238   worklist.push(n->_idx);
   239   while(worklist.length() > 0) {
   240     int ni = worklist.pop();
   241     PointsToNode pn = _nodes->at_grow(ni);
   242     if (!visited.test_set(ni)) {
   243       // ensure that all inputs of a Phi have been processed
   244       assert(!_collecting || !pn._node->is_Phi() || _processed.test(ni),"");
   246       int edges_processed = 0;
   247       for (uint e = 0; e < pn.edge_count(); e++) {
   248         uint etgt = pn.edge_target(e);
   249         PointsToNode::EdgeType et = pn.edge_type(e);
   250         if (et == PointsToNode::PointsToEdge) {
   251           ptset.set(etgt);
   252           edges_processed++;
   253         } else if (et == PointsToNode::DeferredEdge) {
   254           worklist.push(etgt);
   255           edges_processed++;
   256         } else {
   257           assert(false,"neither PointsToEdge nor DeferredEdge");
   258         }
   259       }
   260       if (edges_processed == 0) {
   261         // no deferred or pointsto edges found.  Assume the value was set
   262         // outside this method.  Add the phantom object to the pointsto set.
   263         ptset.set(_phantom_object);
   264       }
   265     }
   266   }
   267 }
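// A small traversal example (illustrative node numbers only). Given
//
//   LocalVar(10) -D-> LocalVar(11) -P-> JavaObject(12)
//
// PointsTo(ptset, node 10) pushes 10 on the worklist, follows the
// deferred edge to 11 and sets bit 12 from the points-to edge, so
// ptset == { 12 }. A LocalVar with no outgoing edges yields
// { _phantom_object } instead.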
   269 void ConnectionGraph::remove_deferred(uint ni, GrowableArray<uint>* deferred_edges, VectorSet* visited) {
   270   // This method is most expensive during ConnectionGraph construction.
   271   // Reuse the VectorSet and an additional growable array for deferred edges.
   272   deferred_edges->clear();
   273   visited->Clear();
   275   uint i = 0;
   276   PointsToNode *ptn = ptnode_adr(ni);
   278   // Mark current edges as visited and move deferred edges to separate array.
   279   while (i < ptn->edge_count()) {
   280     uint t = ptn->edge_target(i);
   281 #ifdef ASSERT
   282     assert(!visited->test_set(t), "expecting no duplications");
   283 #else
   284     visited->set(t);
   285 #endif
   286     if (ptn->edge_type(i) == PointsToNode::DeferredEdge) {
   287       ptn->remove_edge(t, PointsToNode::DeferredEdge);
   288       deferred_edges->append(t);
   289     } else {
   290       i++;
   291     }
   292   }
   293   for (int next = 0; next < deferred_edges->length(); ++next) {
   294     uint t = deferred_edges->at(next);
   295     PointsToNode *ptt = ptnode_adr(t);
   296     for (uint j = 0; j < ptt->edge_count(); j++) {
   297       uint n1 = ptt->edge_target(j);
   298       if (visited->test_set(n1))
   299         continue;
   300       switch(ptt->edge_type(j)) {
   301         case PointsToNode::PointsToEdge:
   302           add_pointsto_edge(ni, n1);
   303           if(n1 == _phantom_object) {
   304             // Special case - field set outside (globally escaping).
   305             ptn->set_escape_state(PointsToNode::GlobalEscape);
   306           }
   307           break;
   308         case PointsToNode::DeferredEdge:
   309           deferred_edges->append(n1);
   310           break;
   311         case PointsToNode::FieldEdge:
   312           assert(false, "invalid connection graph");
   313           break;
   314       }
   315     }
   316   }
   317 }
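// A sketch of the rewrite performed above (hypothetical indices):
//
//   before:  Var(7) -D-> Var(8),  Var(8) -P-> Obj(9)
//   after:   Var(7) -P-> Obj(9)      (the deferred edge is gone)
//
// Chains of deferred edges are followed transitively through the
// deferred_edges worklist, and the visited set keeps each target from
// being expanded more than once.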
   320 //  Add an edge to the node given by "to_i" from any field of adr_i whose offset
   321 //  matches "offset".  A deferred edge is added if to_i is a LocalVar, and
   322 //  a pointsto edge is added if it is a JavaObject.
   324 void ConnectionGraph::add_edge_from_fields(uint adr_i, uint to_i, int offs) {
   325   PointsToNode an = _nodes->at_grow(adr_i);
   326   PointsToNode to = _nodes->at_grow(to_i);
   327   bool deferred = (to.node_type() == PointsToNode::LocalVar);
   329   for (uint fe = 0; fe < an.edge_count(); fe++) {
   330     assert(an.edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
   331     int fi = an.edge_target(fe);
   332     PointsToNode pf = _nodes->at_grow(fi);
   333     int po = pf.offset();
   334     if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
   335       if (deferred)
   336         add_deferred_edge(fi, to_i);
   337       else
   338         add_pointsto_edge(fi, to_i);
   339     }
   340   }
   341 }
   343 // Add a deferred edge from the node given by "from_i" to any field of adr_i
   344 // whose offset matches "offset".
   345 void ConnectionGraph::add_deferred_edge_to_fields(uint from_i, uint adr_i, int offs) {
   346   PointsToNode an = _nodes->at_grow(adr_i);
   347   for (uint fe = 0; fe < an.edge_count(); fe++) {
   348     assert(an.edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
   349     int fi = an.edge_target(fe);
   350     PointsToNode pf = _nodes->at_grow(fi);
   351     int po = pf.offset();
   352     if (pf.edge_count() == 0) {
   353       // we have not seen any stores to this field, assume it was set outside this method
   354       add_pointsto_edge(fi, _phantom_object);
   355     }
   356     if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
   357       add_deferred_edge(from_i, fi);
   358     }
   359   }
   360 }
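// A sketch of how the two field helpers are typically driven from
// build_connection_graph() (assuming the usual modeling of loads and
// stores): for a store
//
//   p.f = q;    // illustrative source
//
// add_edge_from_fields(P, q, offset_of_f) is applied for each object P
// in p's points-to set, while a load x = p.f uses
// add_deferred_edge_to_fields(x, P, offset_of_f) so that x defers to
// the matching Field nodes.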
   362 // Helper functions
   364 static Node* get_addp_base(Node *addp) {
   365   assert(addp->is_AddP(), "must be AddP");
   366   //
   367   // AddP cases for Base and Address inputs:
   368   // case #1. Direct object's field reference:
   369   //     Allocate
   370   //       |
   371   //     Proj #5 ( oop result )
   372   //       |
   373   //     CheckCastPP (cast to instance type)
   374   //      | |
   375   //     AddP  ( base == address )
   376   //
   377   // case #2. Indirect object's field reference:
   378   //      Phi
   379   //       |
   380   //     CastPP (cast to instance type)
   381   //      | |
   382   //     AddP  ( base == address )
   383   //
   384   // case #3. Raw object's field reference for Initialize node:
   385   //      Allocate
   386   //        |
   387   //      Proj #5 ( oop result )
   388   //  top   |
   389   //     \  |
   390   //     AddP  ( base == top )
   391   //
   392   // case #4. Array's element reference:
   393   //   {CheckCastPP | CastPP}
   394   //     |  | |
   395   //     |  AddP ( array's element offset )
   396   //     |  |
   397   //     AddP ( array's offset )
   398   //
   399   // case #5. Raw object's field reference for arraycopy stub call:
   400   //          The inline_native_clone() case when the arraycopy stub is called
   401   //          after the allocation, before the Initialize and CheckCastPP nodes.
   402   //      Allocate
   403   //        |
   404   //      Proj #5 ( oop result )
   405   //       | |
   406   //       AddP  ( base == address )
   407   //
   408   // case #6. Constant Pool, ThreadLocal, CastX2P or
   409   //          Raw object's field reference:
   410   //      {ConP, ThreadLocal, CastX2P, raw Load}
   411   //  top   |
   412   //     \  |
   413   //     AddP  ( base == top )
   414   //
   415   // case #7. Klass's field reference.
   416   //      LoadKlass
   417   //       | |
   418   //       AddP  ( base == address )
   419   //
   420   // case #8. narrow Klass's field reference.
   421   //      LoadNKlass
   422   //       |
   423   //      DecodeN
   424   //       | |
   425   //       AddP  ( base == address )
   426   //
   427   Node *base = addp->in(AddPNode::Base)->uncast();
   428   if (base->is_top()) { // The AddP case #3 and #6.
   429     base = addp->in(AddPNode::Address)->uncast();
   430     assert(base->Opcode() == Op_ConP || base->Opcode() == Op_ThreadLocal ||
   431            base->Opcode() == Op_CastX2P || base->is_DecodeN() ||
   432            (base->is_Mem() && base->bottom_type() == TypeRawPtr::NOTNULL) ||
   433            (base->is_Proj() && base->in(0)->is_Allocate()), "sanity");
   434   }
   435   return base;
   436 }
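// For instance (a hypothetical compilation), the fragment
//
//   Foo p = new Foo();
//   p.x = 1;
//
// produces case #1 above: Allocate -> Proj #5 -> CheckCastPP, with the
// store's AddP using the CheckCastPP as both Base and Address, so
// get_addp_base() returns the CheckCastPP node.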
   438 static Node* find_second_addp(Node* addp, Node* n) {
   439   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
   441   Node* addp2 = addp->raw_out(0);
   442   if (addp->outcnt() == 1 && addp2->is_AddP() &&
   443       addp2->in(AddPNode::Base) == n &&
   444       addp2->in(AddPNode::Address) == addp) {
   446     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
   447     //
   448     // Find array's offset to push it on worklist first and
   449     // as result process an array's element offset first (pushed second)
   450     // to avoid CastPP for the array's offset.
   451     // Otherwise the inserted CastPP (LocalVar) will point to what
   452     // the AddP (Field) points to, which would be wrong since
   453     // the algorithm expects the CastPP to have the same points-to set
   454     // as AddP's base CheckCastPP (LocalVar).
   455     //
   456     //    ArrayAllocation
   457     //     |
   458     //    CheckCastPP
   459     //     |
   460     //    memProj (from ArrayAllocation CheckCastPP)
   461     //     |  ||
   462     //     |  ||   Int (element index)
   463     //     |  ||    |   ConI (log(element size))
   464     //     |  ||    |   /
   465     //     |  ||   LShift
   466     //     |  ||  /
   467     //     |  AddP (array's element offset)
   468     //     |  |
   469     //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
   470     //     | / /
   471     //     AddP (array's offset)
   472     //      |
   473     //     Load/Store (memory operation on array's element)
   474     //
   475     return addp2;
   476   }
   477   return NULL;
   478 }
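// A usage sketch: for the array pattern above, callers pass the
// element-offset AddP together with its base and get the array-offset
// AddP back. Appending the returned node to the worklist first (and the
// element-offset AddP second) makes the element-offset AddP be processed
// first, avoiding an extra CastPP for the array-offset AddP.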
   480 //
   481 // Adjust the type and inputs of an AddP which computes the
   482 // address of a field of an instance
   483 //
   484 void ConnectionGraph::split_AddP(Node *addp, Node *base,  PhaseGVN  *igvn) {
   485   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
   486   assert(base_t != NULL && base_t->is_instance(), "expecting instance oopptr");
   487   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
   488   if (t == NULL) {
   489     // We are computing a raw address for a store captured by an Initialize
   490     // compute an appropriate address type.
   491     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
   492     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
   493     int offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
   494     assert(offs != Type::OffsetBot, "offset must be a constant");
   495     t = base_t->add_offset(offs)->is_oopptr();
   496   }
   497   uint inst_id =  base_t->instance_id();
   498   assert(!t->is_instance() || t->instance_id() == inst_id,
   499                              "old type must be non-instance or match new type");
   500   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
   501   // Do NOT remove the next call: ensure a new alias index is allocated
   502   // for the instance type
   503   int alias_idx = _compile->get_alias_index(tinst);
   504   igvn->set_type(addp, tinst);
   505   // record the allocation in the node map
   506   set_map(addp->_idx, get_map(base->_idx));
   507   // if the Address input is not the appropriate instance type
   508   // (due to intervening casts) insert a cast
   509   Node *adr = addp->in(AddPNode::Address);
   510   const TypeOopPtr  *atype = igvn->type(adr)->isa_oopptr();
   511   if (atype != NULL && atype->instance_id() != inst_id) {
   512     assert(!atype->is_instance(), "no conflicting instances");
   513     const TypeOopPtr *new_atype = base_t->add_offset(atype->offset())->isa_oopptr();
   514     Node *acast = new (_compile, 2) CastPPNode(adr, new_atype);
   515     acast->set_req(0, adr->in(0));
   516     igvn->set_type(acast, new_atype);
   517     record_for_optimizer(acast);
   518     Node *bcast = acast;
   519     Node *abase = addp->in(AddPNode::Base);
   520     if (abase != adr) {
   521       bcast = new (_compile, 2) CastPPNode(abase, base_t);
   522       bcast->set_req(0, abase->in(0));
   523       igvn->set_type(bcast, base_t);
   524       record_for_optimizer(bcast);
   525     }
   526     igvn->hash_delete(addp);
   527     addp->set_req(AddPNode::Base, bcast);
   528     addp->set_req(AddPNode::Address, acast);
   529     igvn->hash_insert(addp);
   530   }
   531   // Put on IGVN worklist since at least addp's type was changed above.
   532   record_for_optimizer(addp);
   533 }
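// The effect on types, as a hypothetical example: if base is the
// CheckCastPP of an allocation with instance id 24 and type Foo:iid=24,
// and addp previously computed Foo+12 (alias index 4), then addp is
// retyped to Foo:iid=24+12, for which get_alias_index() above allocates
// a fresh, instance-specific alias index.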
   535 //
   536 // Create a new version of orig_phi if necessary. Returns either the newly
   537 // created phi or an existing phi.  Sets new_created to indicate whether a new
   538 // phi was created.  Cache the last newly created phi in the node map.
   539 //
   540 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn, bool &new_created) {
   541   Compile *C = _compile;
   542   new_created = false;
   543   int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
   544   // nothing to do if orig_phi is bottom memory or matches alias_idx
   545   if (phi_alias_idx == alias_idx) {
   546     return orig_phi;
   547   }
   548   // have we already created a Phi for this alias index?
   549   PhiNode *result = get_map_phi(orig_phi->_idx);
   550   if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
   551     return result;
   552   }
   553   if ((int)C->unique() + 2*NodeLimitFudgeFactor > MaxNodeLimit) {
   554     if (C->do_escape_analysis() == true && !C->failing()) {
   555       // Retry compilation without escape analysis.
   556       // If this is the first failure, the sentinel string will "stick"
   557       // to the Compile object, and the C2Compiler will see it and retry.
   558       C->record_failure(C2Compiler::retry_no_escape_analysis());
   559     }
   560     return NULL;
   561   }
   562   orig_phi_worklist.append_if_missing(orig_phi);
   563   const TypePtr *atype = C->get_adr_type(alias_idx);
   564   result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
   565   set_map_phi(orig_phi->_idx, result);
   566   igvn->set_type(result, result->bottom_type());
   567   record_for_optimizer(result);
   568   new_created = true;
   569   return result;
   570 }
   572 //
   573 // Return a new version of Memory Phi "orig_phi" with the inputs having the
   574 // specified alias index.
   575 //
   576 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn) {
   578   assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
   579   Compile *C = _compile;
   580   bool new_phi_created;
   581   PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, igvn, new_phi_created);
   582   if (!new_phi_created) {
   583     return result;
   584   }
   586   GrowableArray<PhiNode *>  phi_list;
   587   GrowableArray<uint>  cur_input;
   589   PhiNode *phi = orig_phi;
   590   uint idx = 1;
   591   bool finished = false;
   592   while(!finished) {
   593     while (idx < phi->req()) {
   594       Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, igvn);
   595       if (mem != NULL && mem->is_Phi()) {
   596         PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, igvn, new_phi_created);
   597         if (new_phi_created) {
   598           // found a phi for which we created a new split; push the current one on the worklist and begin
   599           // processing new one
   600           phi_list.push(phi);
   601           cur_input.push(idx);
   602           phi = mem->as_Phi();
   603           result = newphi;
   604           idx = 1;
   605           continue;
   606         } else {
   607           mem = newphi;
   608         }
   609       }
   610       if (C->failing()) {
   611         return NULL;
   612       }
   613       result->set_req(idx++, mem);
   614     }
   615 #ifdef ASSERT
   616     // verify that the new Phi has an input for each input of the original
   617     assert( phi->req() == result->req(), "must have same number of inputs.");
   618     assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
   619 #endif
   620     // Check if all new phi's inputs have specified alias index.
   621     // Otherwise use old phi.
   622     for (uint i = 1; i < phi->req(); i++) {
   623       Node* in = result->in(i);
   624       assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");
   625     }
   626     // we have finished processing a Phi, see if there are any more to do
   627     finished = (phi_list.length() == 0 );
   628     if (!finished) {
   629       phi = phi_list.pop();
   630       idx = cur_input.pop();
   631       PhiNode *prev_result = get_map_phi(phi->_idx);
   632       prev_result->set_req(idx++, result);
   633       result = prev_result;
   634     }
   635   }
   636   return result;
   637 }
   640 //
   641 // The next methods are derived from methods in MemNode.
   642 //
   643 static Node *step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *tinst) {
   644   Node *mem = mmem;
   645   // TypeInstPtr::NOTNULL+any is an OOP with unknown offset - generally
   646   // means an array I have not precisely typed yet.  Do not do any
   647   // alias stuff with it any time soon.
   648   if( tinst->base() != Type::AnyPtr &&
   649       !(tinst->klass()->is_java_lang_Object() &&
   650         tinst->offset() == Type::OffsetBot) ) {
   651     mem = mmem->memory_at(alias_idx);
   652     // Update input if it is progress over what we have now
   653   }
   654   return mem;
   655 }
   657 //
   658 // Search memory chain of "mem" to find a MemNode whose address
   659 // matches the specified alias index.
   660 //
   661 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *phase) {
   662   if (orig_mem == NULL)
   663     return orig_mem;
   664   Compile* C = phase->C;
   665   const TypeOopPtr *tinst = C->get_adr_type(alias_idx)->isa_oopptr();
   666   bool is_instance = (tinst != NULL) && tinst->is_instance();
   667   Node *prev = NULL;
   668   Node *result = orig_mem;
   669   while (prev != result) {
   670     prev = result;
   671     if (result->is_Mem()) {
   672       MemNode *mem = result->as_Mem();
   673       const Type *at = phase->type(mem->in(MemNode::Address));
   674       if (at != Type::TOP) {
   675         assert (at->isa_ptr() != NULL, "pointer type required.");
   676         int idx = C->get_alias_index(at->is_ptr());
   677         if (idx == alias_idx)
   678           break;
   679       }
   680       result = mem->in(MemNode::Memory);
   681     }
   682     if (!is_instance)
   683       continue;  // don't search further for non-instance types
   684     // skip over a call which does not affect this memory slice
   685     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
   686       Node *proj_in = result->in(0);
   687       if (proj_in->is_Call()) {
   688         CallNode *call = proj_in->as_Call();
   689         if (!call->may_modify(tinst, phase)) {
   690           result = call->in(TypeFunc::Memory);
   691         }
   692       } else if (proj_in->is_Initialize()) {
   693         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
   694         // Stop if this is the initialization for the object instance
   695         // which contains this memory slice, otherwise skip over it.
   696         if (alloc == NULL || alloc->_idx != tinst->instance_id()) {
   697           result = proj_in->in(TypeFunc::Memory);
   698         }
   699       } else if (proj_in->is_MemBar()) {
   700         result = proj_in->in(TypeFunc::Memory);
   701       }
   702     } else if (result->is_MergeMem()) {
   703       MergeMemNode *mmem = result->as_MergeMem();
   704       result = step_through_mergemem(mmem, alias_idx, tinst);
   705       if (result == mmem->base_memory()) {
   706         // Didn't find instance memory, search through general slice recursively.
   707         result = mmem->memory_at(C->get_general_index(alias_idx));
   708         result = find_inst_mem(result, alias_idx, orig_phis, phase);
   709         if (C->failing()) {
   710           return NULL;
   711         }
   712         mmem->set_memory_at(alias_idx, result);
   713       }
   714     } else if (result->is_Phi() &&
   715                C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
   716       Node *un = result->as_Phi()->unique_input(phase);
   717       if (un != NULL) {
   718         result = un;
   719       } else {
   720         break;
   721       }
   722     }
   723   }
   724   if (is_instance && result->is_Phi()) {
   725     PhiNode *mphi = result->as_Phi();
   726     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
   727     const TypePtr *t = mphi->adr_type();
   728     if (C->get_alias_index(t) != alias_idx) {
   729       result = split_memory_phi(mphi, alias_idx, orig_phis, phase);
   730     }
   731   }
   732   // the result is either a MemNode, a PhiNode or an InitializeNode.
   733   return result;
   734 }
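// A walk sketch (illustrative): searching for the slice of instance type
// Foo:iid=24+12, the loop steps from stores of other slices to their
// Memory inputs, skips Projs of calls that cannot modify the slice and
// MemBars, takes the matching input of a MergeMem, and stops at the
// first store with a matching alias index, at the instance's own
// Initialize, or at a memory Phi (which is then split with
// split_memory_phi()).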
   737 //
   738 //  Convert the types of unescaped objects to instance types where possible,
   739 //  propagate the new type information through the graph, and update memory
   740 //  edges and MergeMem inputs to reflect the new type.
   741 //
   742 //  We start with allocations (and calls which may be allocations) on alloc_worklist.
   743 //  The processing is done in 4 phases:
   744 //
   745 //  Phase 1:  Process possible allocations from alloc_worklist.  Create instance
   746 //            types for the CheckCastPP for allocations where possible.
   747 //            Propagate the new types through users as follows:
   748 //               casts and Phi:  push users on alloc_worklist
   749 //               AddP:  cast Base and Address inputs to the instance type
   750 //                      push any AddP users on alloc_worklist and push any memnode
   751 //                      users onto memnode_worklist.
   752 //  Phase 2:  Process MemNode's from memnode_worklist. Compute the new address
   753 //            type and search the Memory chain for a store with the appropriate
   754 //            address type.  If a Phi is found, create a new version with
   755 //            the appropriate memory slices from each of the Phi inputs.
   756 //            For stores, process the users as follows:
   757 //               MemNode:  push on memnode_worklist
   758 //               MergeMem: push on mergemem_worklist
   759 //  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory
   760 //            slice, moving the first node encountered of each instance type to
   761 //            the input corresponding to its alias index (the appropriate
   762 //            memory slice).
   763 //  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
   764 //
   765 // In the following example, the CheckCastPP nodes are the cast of allocation
   766 // results and the allocation of node 29 is unescaped and eligible to be an
   767 // instance type.
   768 //
   769 // We start with:
   770 //
   771 //     7 Parm #memory
   772 //    10  ConI  "12"
   773 //    19  CheckCastPP   "Foo"
   774 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
   775 //    29  CheckCastPP   "Foo"
   776 //    30  AddP  _ 29 29 10  Foo+12  alias_index=4
   777 //
   778 //    40  StoreP  25   7  20   ... alias_index=4
   779 //    50  StoreP  35  40  30   ... alias_index=4
   780 //    60  StoreP  45  50  20   ... alias_index=4
   781 //    70  LoadP    _  60  30   ... alias_index=4
   782 //    80  Phi     75  50  60   Memory alias_index=4
   783 //    90  LoadP    _  80  30   ... alias_index=4
   784 //   100  LoadP    _  80  20   ... alias_index=4
   785 //
   786 //
   787 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
   788 // and creating a new alias index for node 30.  This gives:
   789 //
   790 //     7 Parm #memory
   791 //    10  ConI  "12"
   792 //    19  CheckCastPP   "Foo"
   793 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
   794 //    29  CheckCastPP   "Foo"  iid=24
   795 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
   796 //
   797 //    40  StoreP  25   7  20   ... alias_index=4
   798 //    50  StoreP  35  40  30   ... alias_index=6
   799 //    60  StoreP  45  50  20   ... alias_index=4
   800 //    70  LoadP    _  60  30   ... alias_index=6
   801 //    80  Phi     75  50  60   Memory alias_index=4
   802 //    90  LoadP    _  80  30   ... alias_index=6
   803 //   100  LoadP    _  80  20   ... alias_index=4
   804 //
   805 // In phase 2, new memory inputs are computed for the loads and stores,
   806 // and a new version of the phi is created.  In phase 4, the inputs to
   807 // node 80 are updated and then the memory nodes are updated with the
   808 // values computed in phase 2.  This results in:
   809 //
   810 //     7 Parm #memory
   811 //    10  ConI  "12"
   812 //    19  CheckCastPP   "Foo"
   813 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
   814 //    29  CheckCastPP   "Foo"  iid=24
   815 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
   816 //
   817 //    40  StoreP  25  7   20   ... alias_index=4
   818 //    50  StoreP  35  7   30   ... alias_index=6
   819 //    60  StoreP  45  40  20   ... alias_index=4
   820 //    70  LoadP    _  50  30   ... alias_index=6
   821 //    80  Phi     75  40  60   Memory alias_index=4
   822 //   120  Phi     75  50  50   Memory alias_index=6
   823 //    90  LoadP    _ 120  30   ... alias_index=6
   824 //   100  LoadP    _  80  20   ... alias_index=4
   825 //
   826 void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist) {
   827   GrowableArray<Node *>  memnode_worklist;
   828   GrowableArray<Node *>  mergemem_worklist;
   829   GrowableArray<PhiNode *>  orig_phis;
   830   PhaseGVN  *igvn = _compile->initial_gvn();
   831   uint new_index_start = (uint) _compile->num_alias_types();
   832   VectorSet visited(Thread::current()->resource_area());
   833   VectorSet ptset(Thread::current()->resource_area());
   836   //  Phase 1:  Process possible allocations from alloc_worklist.
   837   //  Create instance types for the CheckCastPP for allocations where possible.
   838   while (alloc_worklist.length() != 0) {
   839     Node *n = alloc_worklist.pop();
   840     uint ni = n->_idx;
   841     const TypeOopPtr* tinst = NULL;
   842     if (n->is_Call()) {
   843       CallNode *alloc = n->as_Call();
   844       // copy escape information to call node
   845       PointsToNode* ptn = _nodes->adr_at(alloc->_idx);
   846       PointsToNode::EscapeState es = escape_state(alloc, igvn);
   847       // We have an allocation or call which returns a Java object,
   848       // see if it is unescaped.
   849       if (es != PointsToNode::NoEscape || !ptn->_scalar_replaceable)
   850         continue;
   851       if (alloc->is_Allocate()) {
   852         // Set the scalar_replaceable flag before the next check.
   853         alloc->as_Allocate()->_is_scalar_replaceable = true;
   854       }
   855       // find CheckCastPP of call return value
   856       n = alloc->result_cast();
   857       if (n == NULL ||          // No uses except Initialize or
   858           !n->is_CheckCastPP()) // not a unique CheckCastPP.
   859         continue;
   860       // The inline code for Object.clone() casts the allocation result to
   861       // java.lang.Object and then to the actual type of the allocated
   862       // object. Detect this case and use the second cast.
   863       if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
   864           && igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT) {
   865         Node *cast2 = NULL;
   866         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
   867           Node *use = n->fast_out(i);
   868           if (use->is_CheckCastPP()) {
   869             cast2 = use;
   870             break;
   871           }
   872         }
   873         if (cast2 != NULL) {
   874           n = cast2;
   875         } else {
   876           continue;
   877         }
   878       }
   879       set_escape_state(n->_idx, es);
   880       // in order for an object to be stack-allocatable, it must be:
   881       //   - a direct allocation (not a call returning an object)
   882       //   - non-escaping
   883       //   - eligible to be a unique type
   884       //   - not determined to be ineligible by escape analysis
   885       set_map(alloc->_idx, n);
   886       set_map(n->_idx, alloc);
   887       const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
   888       if (t == NULL)
   889         continue;  // not a TypeInstPtr
   890       tinst = t->cast_to_instance(ni);
   891       igvn->hash_delete(n);
   892       igvn->set_type(n,  tinst);
   893       n->raise_bottom_type(tinst);
   894       igvn->hash_insert(n);
   895       record_for_optimizer(n);
   896       if (alloc->is_Allocate() && ptn->_scalar_replaceable &&
   897           (t->isa_instptr() || t->isa_aryptr())) {
   899         // First, put on the worklist all Field edges from Connection Graph
   900         // which is more accurate than putting immediate users from the Ideal Graph.
   901         for (uint e = 0; e < ptn->edge_count(); e++) {
   902           Node *use = _nodes->adr_at(ptn->edge_target(e))->_node;
   903           assert(ptn->edge_type(e) == PointsToNode::FieldEdge && use->is_AddP(),
   904                  "only AddP nodes are Field edges in CG");
   905           if (use->outcnt() > 0) { // Don't process dead nodes
   906             Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
   907             if (addp2 != NULL) {
   908               assert(alloc->is_AllocateArray(),"array allocation was expected");
   909               alloc_worklist.append_if_missing(addp2);
   910             }
   911             alloc_worklist.append_if_missing(use);
   912           }
   913         }
   915         // An allocation may have an Initialize which has raw stores. Scan
   916         // the users of the raw allocation result and push AddP users
   917         // on alloc_worklist.
   918         Node *raw_result = alloc->proj_out(TypeFunc::Parms);
   919         assert (raw_result != NULL, "must have an allocation result");
   920         for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
   921           Node *use = raw_result->fast_out(i);
   922           if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
   923             Node* addp2 = find_second_addp(use, raw_result);
   924             if (addp2 != NULL) {
   925               assert(alloc->is_AllocateArray(),"array allocation was expected");
   926               alloc_worklist.append_if_missing(addp2);
   927             }
   928             alloc_worklist.append_if_missing(use);
   929           } else if (use->is_Initialize()) {
   930             memnode_worklist.append_if_missing(use);
   931           }
   932         }
   933       }
   934     } else if (n->is_AddP()) {
   935       ptset.Clear();
   936       PointsTo(ptset, get_addp_base(n), igvn);
   937       assert(ptset.Size() == 1, "AddP address is unique");
   938       uint elem = ptset.getelem(); // Allocation node's index
   939       if (elem == _phantom_object)
   940         continue; // Assume the value was set outside this method.
   941       Node *base = get_map(elem);  // CheckCastPP node
   942       split_AddP(n, base, igvn);
   943       tinst = igvn->type(base)->isa_oopptr();
   944     } else if (n->is_Phi() ||
   945                n->is_CheckCastPP() ||
   946                n->is_EncodeP() ||
   947                n->is_DecodeN() ||
   948                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
   949       if (visited.test_set(n->_idx)) {
   950         assert(n->is_Phi(), "loops only through Phi's");
   951         continue;  // already processed
   952       }
   953       ptset.Clear();
   954       PointsTo(ptset, n, igvn);
   955       if (ptset.Size() == 1) {
   956         uint elem = ptset.getelem(); // Allocation node's index
   957         if (elem == _phantom_object)
   958           continue; // Assume the value was set outside this method.
   959         Node *val = get_map(elem);   // CheckCastPP node
   960         TypeNode *tn = n->as_Type();
   961         tinst = igvn->type(val)->isa_oopptr();
   962         assert(tinst != NULL && tinst->is_instance() &&
   963                tinst->instance_id() == elem , "instance type expected.");
   965         const TypeOopPtr *tn_t = NULL;
   966         const Type *tn_type = igvn->type(tn);
   967         if (tn_type->isa_narrowoop()) {
   968           tn_t = tn_type->is_narrowoop()->make_oopptr()->isa_oopptr();
   969         } else {
   970           tn_t = tn_type->isa_oopptr();
   971         }
   973         if (tn_t != NULL &&
   974             tinst->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE)->higher_equal(tn_t)) {
   975           if (tn_type->isa_narrowoop()) {
   976             tn_type = tinst->make_narrowoop();
   977           } else {
   978             tn_type = tinst;
   979           }
   980           igvn->hash_delete(tn);
   981           igvn->set_type(tn, tn_type);
   982           tn->set_type(tn_type);
   983           igvn->hash_insert(tn);
   984           record_for_optimizer(n);
   985         }
   986       }
   987     } else {
   988       continue;
   989     }
   990     // push users on appropriate worklist
   991     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
   992       Node *use = n->fast_out(i);
   993       if(use->is_Mem() && use->in(MemNode::Address) == n) {
   994         memnode_worklist.append_if_missing(use);
   995       } else if (use->is_Initialize()) {
   996         memnode_worklist.append_if_missing(use);
   997       } else if (use->is_MergeMem()) {
   998         mergemem_worklist.append_if_missing(use);
   999       } else if (use->is_Call() && tinst != NULL) {
  1000         // Look for MergeMem nodes for calls which reference unique allocation
  1001         // (through CheckCastPP nodes) even for debug info.
  1002         Node* m = use->in(TypeFunc::Memory);
  1003         uint iid = tinst->instance_id();
  1004         while (m->is_Proj() && m->in(0)->is_Call() &&
  1005                m->in(0) != use && m->in(0)->_idx != iid) {
  1006           m = m->in(0)->in(TypeFunc::Memory);
  1007         }
  1008         if (m->is_MergeMem()) {
  1009           mergemem_worklist.append_if_missing(m);
  1010         }
  1011       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
  1012         Node* addp2 = find_second_addp(use, n);
  1013         if (addp2 != NULL) {
  1014           alloc_worklist.append_if_missing(addp2);
  1015         }
  1016         alloc_worklist.append_if_missing(use);
  1017       } else if (use->is_Phi() ||
  1018                  use->is_CheckCastPP() ||
  1019                  use->is_EncodeP() ||
  1020                  use->is_DecodeN() ||
  1021                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
  1022         alloc_worklist.append_if_missing(use);
  1023       }
  1024     }
  1025   }
  1027   // New alias types were created in split_AddP().
  1028   uint new_index_end = (uint) _compile->num_alias_types();
  1030   //  Phase 2:  Process MemNode's from memnode_worklist. Compute the new address
  1031   //            types and the new values for the Memory inputs  (the Memory inputs
  1032   //            are not actually updated until phase 4.)
  1033   if (memnode_worklist.length() == 0)
  1034     return;  // nothing to do
  1036   while (memnode_worklist.length() != 0) {
  1037     Node *n = memnode_worklist.pop();
  1038     if (visited.test_set(n->_idx))
  1039       continue;
  1040     if (n->is_Phi()) {
  1041       assert(n->as_Phi()->adr_type() != TypePtr::BOTTOM, "narrow memory slice required");
  1042       // we don't need to do anything, but the users must be pushed if we haven't processed
  1043       // this Phi before
  1044     } else if (n->is_Initialize()) {
  1045       // we don't need to do anything, but the users of the memory projection must be pushed
  1046       n = n->as_Initialize()->proj_out(TypeFunc::Memory);
  1047       if (n == NULL)
  1048         continue;
  1049     } else {
  1050       assert(n->is_Mem(), "memory node required.");
  1051       Node *addr = n->in(MemNode::Address);
  1052       assert(addr->is_AddP(), "AddP required");
  1053       const Type *addr_t = igvn->type(addr);
  1054       if (addr_t == Type::TOP)
  1055         continue;
  1056       assert (addr_t->isa_ptr() != NULL, "pointer type required.");
  1057       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
  1058       assert ((uint)alias_idx < new_index_end, "wrong alias index");
  1059       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis, igvn);
  1060       if (_compile->failing()) {
  1061         return;
  1062       }
  1063       if (mem != n->in(MemNode::Memory)) {
  1064         set_map(n->_idx, mem);
  1065         _nodes->adr_at(n->_idx)->_node = n;
  1066       }
  1067       if (n->is_Load()) {
  1068         continue;  // don't push users
  1069       } else if (n->is_LoadStore()) {
  1070         // get the memory projection
  1071         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1072           Node *use = n->fast_out(i);
  1073           if (use->Opcode() == Op_SCMemProj) {
  1074             n = use;
  1075             break;
  1076           }
  1077         }
  1078         assert(n->Opcode() == Op_SCMemProj, "memory projection required");
  1079       }
  1080     }
  1081     // push user on appropriate worklist
  1082     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1083       Node *use = n->fast_out(i);
  1084       if (use->is_Phi()) {
  1085         memnode_worklist.append_if_missing(use);
  1086       } else if(use->is_Mem() && use->in(MemNode::Memory) == n) {
  1087         memnode_worklist.append_if_missing(use);
  1088       } else if (use->is_Initialize()) {
  1089         memnode_worklist.append_if_missing(use);
  1090       } else if (use->is_MergeMem()) {
  1091         mergemem_worklist.append_if_missing(use);
  1092       }
  1093     }
  1094   }
  1096   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
  1097   //            Walk each memory slice, moving the first node encountered of each
  1098   //            instance type to the input corresponding to its alias index.
  1099   while (mergemem_worklist.length() != 0) {
  1100     Node *n = mergemem_worklist.pop();
  1101     assert(n->is_MergeMem(), "MergeMem node required.");
  1102     if (visited.test_set(n->_idx))
  1103       continue;
  1104     MergeMemNode *nmm = n->as_MergeMem();
  1105     // Note: we don't want to use MergeMemStream here because we only want to
  1106     //  scan inputs which exist at the start, not ones we add during processing.
  1107     uint nslices = nmm->req();
  1108     igvn->hash_delete(nmm);
  1109     for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
  1110       Node* mem = nmm->in(i);
  1111       Node* cur = NULL;
  1112       if (mem == NULL || mem->is_top())
  1113         continue;
  1114       while (mem->is_Mem()) {
  1115         const Type *at = igvn->type(mem->in(MemNode::Address));
  1116         if (at != Type::TOP) {
  1117           assert (at->isa_ptr() != NULL, "pointer type required.");
  1118           uint idx = (uint)_compile->get_alias_index(at->is_ptr());
  1119           if (idx == i) {
  1120             if (cur == NULL)
  1121               cur = mem;
  1122           } else {
  1123             if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
  1124               nmm->set_memory_at(idx, mem);
  1125             }
  1126           }
  1127         }
  1128         mem = mem->in(MemNode::Memory);
  1129       }
  1130       nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
  1131       // Find any instance of the current type if we haven't encountered
  1132       // a value of the instance along the chain.
  1133       for (uint ni = new_index_start; ni < new_index_end; ni++) {
  1134         if((uint)_compile->get_general_index(ni) == i) {
  1135           Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
  1136           if (nmm->is_empty_memory(m)) {
  1137             Node* result = find_inst_mem(mem, ni, orig_phis, igvn);
  1138             if (_compile->failing()) {
  1139               return;
  1140             }
  1141             nmm->set_memory_at(ni, result);
  1142           }
  1143         }
  1144       }
  1145     }
  1146     // Find the rest of the instance memory values
  1147     for (uint ni = new_index_start; ni < new_index_end; ni++) {
  1148       const TypeOopPtr *tinst = igvn->C->get_adr_type(ni)->isa_oopptr();
  1149       Node* result = step_through_mergemem(nmm, ni, tinst);
  1150       if (result == nmm->base_memory()) {
  1151         // Didn't find instance memory, search through general slice recursively.
  1152         result = nmm->memory_at(igvn->C->get_general_index(ni));
  1153         result = find_inst_mem(result, ni, orig_phis, igvn);
  1154         if (_compile->failing()) {
  1155           return;
  1156         }
  1157         nmm->set_memory_at(ni, result);
  1158       }
  1159     }
  1160     igvn->hash_insert(nmm);
  1161     record_for_optimizer(nmm);
  1163     // Propagate new memory slices to following MergeMem nodes.
  1164     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1165       Node *use = n->fast_out(i);
  1166       if (use->is_Call()) {
  1167         CallNode* in = use->as_Call();
  1168         if (in->proj_out(TypeFunc::Memory) != NULL) {
  1169           Node* m = in->proj_out(TypeFunc::Memory);
  1170           for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
  1171             Node* mm = m->fast_out(j);
  1172             if (mm->is_MergeMem()) {
  1173               mergemem_worklist.append_if_missing(mm);
  1174             }
  1175           }
  1176         }
  1177         if (use->is_Allocate()) {
  1178           use = use->as_Allocate()->initialization();
  1179           if (use == NULL) {
  1180             continue;
  1181           }
  1182         }
  1183       }
  1184       if (use->is_Initialize()) {
  1185         InitializeNode* in = use->as_Initialize();
  1186         if (in->proj_out(TypeFunc::Memory) != NULL) {
  1187           Node* m = in->proj_out(TypeFunc::Memory);
  1188           for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
  1189             Node* mm = m->fast_out(j);
  1190             if (mm->is_MergeMem()) {
  1191               mergemem_worklist.append_if_missing(mm);
  1192             }
  1193           }
  1194         }
  1195       }
  1196     }
  1197   }
  1199   //  Phase 4:  Update the inputs of non-instance memory Phis and
  1200   //            the Memory input of memnodes
  1201   // First update the inputs of any non-instance Phi's from
  1202   // which we split out an instance Phi.  Note we don't have
  1203   // to recursively process Phi's encountered on the input memory
  1204   // chains as is done in split_memory_phi() since they will
  1205   // also be processed here.
  1206   while (orig_phis.length() != 0) {
  1207     PhiNode *phi = orig_phis.pop();
  1208     int alias_idx = _compile->get_alias_index(phi->adr_type());
  1209     igvn->hash_delete(phi);
  1210     for (uint i = 1; i < phi->req(); i++) {
  1211       Node *mem = phi->in(i);
  1212       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis, igvn);
  1213       if (_compile->failing()) {
  1214         return;
  1215       }
  1216       if (mem != new_mem) {
  1217         phi->set_req(i, new_mem);
  1218       }
  1219     }
  1220     igvn->hash_insert(phi);
  1221     record_for_optimizer(phi);
  1222   }
  1224   // Update the memory inputs of MemNodes with the value we computed
  1225   // in Phase 2.
  1226   for (int i = 0; i < _nodes->length(); i++) {
  1227     Node *nmem = get_map(i);
  1228     if (nmem != NULL) {
  1229       Node *n = _nodes->adr_at(i)->_node;
  1230       if (n != NULL && n->is_Mem()) {
  1231         igvn->hash_delete(n);
  1232         n->set_req(MemNode::Memory, nmem);
  1233         igvn->hash_insert(n);
  1234         record_for_optimizer(n);
  1235       }
  1236     }
  1237   }
  1238 }
  1240 void ConnectionGraph::compute_escape() {
  1242   // 1. Populate Connection Graph (CG) with Ideal nodes.
  1244   Unique_Node_List worklist_init;
  1245   worklist_init.map(_compile->unique(), NULL);  // preallocate space
  1247   // Initialize worklist
  1248   if (_compile->root() != NULL) {
  1249     worklist_init.push(_compile->root());
  1250   }
  1252   GrowableArray<int> cg_worklist;
  1253   PhaseGVN* igvn = _compile->initial_gvn();
  1254   bool has_allocations = false;
  1256   // Push all useful nodes onto CG list and set their type.
  1257   for( uint next = 0; next < worklist_init.size(); ++next ) {
  1258     Node* n = worklist_init.at(next);
  1259     record_for_escape_analysis(n, igvn);
  1260     if (n->is_Call() &&
  1261         _nodes->adr_at(n->_idx)->node_type() == PointsToNode::JavaObject) {
  1262       has_allocations = true;
  1263     }
  1264     if(n->is_AddP())
  1265       cg_worklist.append(n->_idx);
  1266     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1267       Node* m = n->fast_out(i);   // Get user
  1268       worklist_init.push(m);
  1269     }
  1270   }
  1272   if (has_allocations) {
  1273     _has_allocations = true;
  1274   } else {
  1275     _has_allocations = false;
  1276     _collecting = false;
  1277     return; // Nothing to do.
  1278   }
  1280   // 2. First pass to create simple CG edges (doesn't require walking the CG).
  1281   for( uint next = 0; next < _delayed_worklist.size(); ++next ) {
  1282     Node* n = _delayed_worklist.at(next);
  1283     build_connection_graph(n, igvn);
  1284   }
  1286   // 3. Pass to create field edges (Allocate -F-> AddP).
  1287   for( int next = 0; next < cg_worklist.length(); ++next ) {
  1288     int ni = cg_worklist.at(next);
  1289     build_connection_graph(_nodes->adr_at(ni)->_node, igvn);
  1290   }
  1292   cg_worklist.clear();
  1293   cg_worklist.append(_phantom_object);
  1295   // 4. Build Connection Graph edges which require
  1296   //    walking the connection graph.
  1297   for (uint ni = 0; ni < (uint)_nodes->length(); ni++) {
  1298     PointsToNode* ptn = _nodes->adr_at(ni);
  1299     Node *n = ptn->_node;
  1300     if (n != NULL) { // Call, AddP, LoadP, StoreP
  1301       build_connection_graph(n, igvn);
  1302       if (ptn->node_type() != PointsToNode::UnknownType)
  1303         cg_worklist.append(n->_idx); // Collect CG nodes
  1304     }
  1305   }
  1307   VectorSet ptset(Thread::current()->resource_area());
  1308   GrowableArray<Node*> alloc_worklist;
  1309   GrowableArray<int>   worklist;
  1310   GrowableArray<uint>  deferred_edges;
  1311   VectorSet visited(Thread::current()->resource_area());
  1313   // remove deferred edges from the graph and collect
  1314   // information we will need for type splitting
  1315   for( int next = 0; next < cg_worklist.length(); ++next ) {
  1316     int ni = cg_worklist.at(next);
  1317     PointsToNode* ptn = _nodes->adr_at(ni);
  1318     PointsToNode::NodeType nt = ptn->node_type();
  1319     Node *n = ptn->_node;
  1320     if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
  1321       remove_deferred(ni, &deferred_edges, &visited);
  1322       if (n->is_AddP()) {
  1323         // If this AddP computes an address which may point to more than one
  1324         // object or more than one field (an array element), nothing the address
  1325         // points to can be scalar replaceable.
  1326         Node *base = get_addp_base(n);
  1327         ptset.Clear();
  1328         PointsTo(ptset, base, igvn);
  1329         if (ptset.Size() > 1 ||
  1330             (ptset.Size() != 0 && ptn->offset() == Type::OffsetBot)) {
  1331           for( VectorSetI j(&ptset); j.test(); ++j ) {
  1332             uint pt = j.elem;
  1333             ptnode_adr(pt)->_scalar_replaceable = false;
  1337     } else if (nt == PointsToNode::JavaObject && n->is_Call()) {
  1338       // Push call on alloc_worklist (allocations are calls)
  1339       // for processing by split_unique_types().
  1340       alloc_worklist.append(n);
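
       // The escape states form an order (NoEscape < ArgEscape <
       // GlobalEscape) and the three passes below propagate them from the
       // strongest down: everything reachable from a GlobalEscape node is
       // GlobalEscape, everything reachable from an ArgEscape node is at
       // least ArgEscape, and so on.  The "<" tests only ever strengthen
       // a node's state, so one pass per state suffices.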
  1344   // push all GlobalEscape nodes on the worklist
  1345   for( int next = 0; next < cg_worklist.length(); ++next ) {
  1346     int nk = cg_worklist.at(next);
  1347     if (_nodes->adr_at(nk)->escape_state() == PointsToNode::GlobalEscape)
  1348       worklist.append(nk);
  1350   // mark all nodes reachable from GlobalEscape nodes
  1351   while(worklist.length() > 0) {
  1352     PointsToNode n = _nodes->at(worklist.pop());
  1353     for (uint ei = 0; ei < n.edge_count(); ei++) {
  1354       uint npi = n.edge_target(ei);
  1355       PointsToNode *np = ptnode_adr(npi);
  1356       if (np->escape_state() < PointsToNode::GlobalEscape) {
  1357         np->set_escape_state(PointsToNode::GlobalEscape);
  1358         worklist.append_if_missing(npi);
  1363   // push all ArgEscape nodes on the worklist
  1364   for( int next = 0; next < cg_worklist.length(); ++next ) {
  1365     int nk = cg_worklist.at(next);
  1366     if (_nodes->adr_at(nk)->escape_state() == PointsToNode::ArgEscape)
  1367       worklist.push(nk);
  1369   // mark all nodes reachable from ArgEscape nodes
  1370   while(worklist.length() > 0) {
  1371     PointsToNode n = _nodes->at(worklist.pop());
  1372     for (uint ei = 0; ei < n.edge_count(); ei++) {
  1373       uint npi = n.edge_target(ei);
  1374       PointsToNode *np = ptnode_adr(npi);
  1375       if (np->escape_state() < PointsToNode::ArgEscape) {
  1376         np->set_escape_state(PointsToNode::ArgEscape);
  1377         worklist.append_if_missing(npi);
  1382   // push all NoEscape nodes on the worklist
  1383   for( int next = 0; next < cg_worklist.length(); ++next ) {
  1384     int nk = cg_worklist.at(next);
  1385     if (_nodes->adr_at(nk)->escape_state() == PointsToNode::NoEscape)
  1386       worklist.push(nk);
  1388   // mark all nodes reachable from NoEscape nodes
  1389   while(worklist.length() > 0) {
  1390     PointsToNode n = _nodes->at(worklist.pop());
  1391     for (uint ei = 0; ei < n.edge_count(); ei++) {
  1392       uint npi = n.edge_target(ei);
  1393       PointsToNode *np = ptnode_adr(npi);
  1394       if (np->escape_state() < PointsToNode::NoEscape) {
  1395         np->set_escape_state(PointsToNode::NoEscape);
  1396         worklist.append_if_missing(npi);
  1401   _collecting = false;
  1403   has_allocations = false; // Are there scalar replaceable allocations?
  1405   for( int next = 0; next < alloc_worklist.length(); ++next ) {
  1406     Node* n = alloc_worklist.at(next);
  1407     uint ni = n->_idx;
  1408     PointsToNode* ptn = _nodes->adr_at(ni);
  1409     PointsToNode::EscapeState es = ptn->escape_state();
  1410     if (ptn->escape_state() == PointsToNode::NoEscape &&
  1411         ptn->_scalar_replaceable) {
  1412       has_allocations = true;
  1413       break;
  1416   if (!has_allocations) {
  1417     return; // Nothing to do.
  1420   if(_compile->AliasLevel() >= 3 && EliminateAllocations) {
  1421     // Now use the escape information to create unique types for
  1422     // unescaped objects
  1423     split_unique_types(alloc_worklist);
  1424     if (_compile->failing())  return;
  1426     // Clean up after split unique types.
  1427     ResourceMark rm;
  1428     PhaseRemoveUseless pru(_compile->initial_gvn(), _compile->for_igvn());
  1430 #ifdef ASSERT
  1431   } else if (PrintEscapeAnalysis || PrintEliminateAllocations) {
  1432     tty->print("=== No allocations eliminated for ");
  1433     C()->method()->print_short_name();
  1434     if(!EliminateAllocations) {
  1435       tty->print(" since EliminateAllocations is off ===");
  1436     } else if(_compile->AliasLevel() < 3) {
  1437       tty->print(" since AliasLevel < 3 ===");
  1438     }
  1439     tty->cr();
  1440 #endif
  1441   }
  1442 }
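
       // process_call_arguments() classifies what a call may do to each
       // oop argument.  For a known Java method the bytecode-level
       // BCEscapeAnalyzer reports, per argument, whether the argument
       // escapes globally, stays local but may have its fields published,
       // or stays entirely local; the argument (and everything it may
       // point to) is marked GlobalEscape or ArgEscape accordingly.  For
       // calls with no such information the conservative default applies:
       // every oop argument globally escapes.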
  1444 void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {
  1446     switch (call->Opcode()) {
  1447 #ifdef ASSERT
  1448     case Op_Allocate:
  1449     case Op_AllocateArray:
  1450     case Op_Lock:
  1451     case Op_Unlock:
  1452       assert(false, "should be done already");
  1453       break;
  1454 #endif
  1455     case Op_CallLeafNoFP:
  1457       // Stub calls: objects do not escape but they are not scalar replaceable.
  1458       // Adjust escape state for outgoing arguments.
  1459       const TypeTuple * d = call->tf()->domain();
  1460       VectorSet ptset(Thread::current()->resource_area());
  1461       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  1462         const Type* at = d->field_at(i);
  1463         Node *arg = call->in(i)->uncast();
  1464         const Type *aat = phase->type(arg);
  1465         if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr()) {
  1466           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
  1467                  aat->isa_ptr() != NULL, "expecting a Ptr");
  1468           set_escape_state(arg->_idx, PointsToNode::ArgEscape);
  1469           if (arg->is_AddP()) {
  1470             //
  1471             // The inline_native_clone() case when the arraycopy stub is called
  1472             // after the allocation before Initialize and CheckCastPP nodes.
  1473             //
  1474             // Set AddP's base (Allocate) as not scalar replaceable since
  1475             // pointer to the base (with offset) is passed as argument.
  1476             //
  1477             arg = get_addp_base(arg);
  1479           ptset.Clear();
  1480           PointsTo(ptset, arg, phase);
  1481           for( VectorSetI j(&ptset); j.test(); ++j ) {
  1482             uint pt = j.elem;
  1483             set_escape_state(pt, PointsToNode::ArgEscape);
  1487       break;
  1490     case Op_CallStaticJava:
  1491     // For a static call, we know exactly what method is being called.
  1492     // Use bytecode estimator to record the call's escape effects
  1494       ciMethod *meth = call->as_CallJava()->method();
  1495       BCEscapeAnalyzer *call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
  1496       // fall-through if not a Java method or no analyzer information
  1497       if (call_analyzer != NULL) {
  1498         const TypeTuple * d = call->tf()->domain();
  1499         VectorSet ptset(Thread::current()->resource_area());
  1500         bool copy_dependencies = false;
  1501         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  1502           const Type* at = d->field_at(i);
  1503           int k = i - TypeFunc::Parms;
  1505           if (at->isa_oopptr() != NULL) {
  1506             Node *arg = call->in(i)->uncast();
  1508             bool global_escapes = false;
  1509             bool fields_escapes = false;
  1510             if (!call_analyzer->is_arg_stack(k)) {
  1511               // The argument globally escapes, mark everything it could point to
  1512               set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
  1513               global_escapes = true;
  1514             } else {
  1515               if (!call_analyzer->is_arg_local(k)) {
  1516                 // The argument itself doesn't escape, but any fields might
  1517                 fields_escapes = true;
  1519               set_escape_state(arg->_idx, PointsToNode::ArgEscape);
  1520               copy_dependencies = true;
  1523             ptset.Clear();
  1524             PointsTo(ptset, arg, phase);
  1525             for( VectorSetI j(&ptset); j.test(); ++j ) {
  1526               uint pt = j.elem;
  1527               if (global_escapes) {
  1528                 // The argument globally escapes, mark everything it could point to
  1529                 set_escape_state(pt, PointsToNode::GlobalEscape);
  1530               } else {
  1531                 if (fields_escapes) {
  1532                   // The argument itself doesn't escape, but any fields might
  1533                   add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
  1535                 set_escape_state(pt, PointsToNode::ArgEscape);
  1540         if (copy_dependencies)
  1541           call_analyzer->copy_dependencies(C()->dependencies());
  1542         break;
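
        // The copy_dependencies calls above record the assumptions made
        // by BCEscapeAnalyzer with this compilation, so (roughly) the
        // generated code is invalidated if later class loading breaks
        // those escape facts.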
  1546     default:
  1547     // Fall through here if this is not a Java method, there is no analyzer
  1548     // information, or it is some other type of call; assume the worst case:
  1549     // all arguments globally escape.
  1551       // adjust escape state for outgoing arguments
  1552       const TypeTuple * d = call->tf()->domain();
  1553       VectorSet ptset(Thread::current()->resource_area());
  1554       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  1555         const Type* at = d->field_at(i);
  1556         if (at->isa_oopptr() != NULL) {
  1557           Node *arg = call->in(i)->uncast();
  1558           set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
  1559           ptset.Clear();
  1560           PointsTo(ptset, arg, phase);
  1561           for( VectorSetI j(&ptset); j.test(); ++j ) {
  1562             uint pt = j.elem;
  1563             set_escape_state(pt, PointsToNode::GlobalEscape);
  1564             PointsToNode *ptadr = ptnode_adr(pt);
  1565           }
  1566         }
  1567       }
  1568     }
  1569   }
  1570 }
  1571 void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *phase) {
  1572   PointsToNode *ptadr = ptnode_adr(resproj->_idx);
  1574   CallNode *call = resproj->in(0)->as_Call();
  1575   switch (call->Opcode()) {
  1576     case Op_Allocate:
  1578       Node *k = call->in(AllocateNode::KlassNode);
  1579       const TypeKlassPtr *kt;
  1580       if (k->Opcode() == Op_LoadKlass) {
  1581         kt = k->as_Load()->type()->isa_klassptr();
  1582       } else {
  1583         // Also works for DecodeN(LoadNKlass).
  1584         kt = k->as_Type()->type()->isa_klassptr();
  1586       assert(kt != NULL, "TypeKlassPtr required.");
  1587       ciKlass* cik = kt->klass();
  1588       ciInstanceKlass* ciik = cik->as_instance_klass();
  1590       PointsToNode *ptadr = ptnode_adr(call->_idx);
  1591       PointsToNode::EscapeState es;
  1592       uint edge_to;
  1593       if (cik->is_subclass_of(_compile->env()->Thread_klass()) || ciik->has_finalizer()) {
  1594         es = PointsToNode::GlobalEscape;
  1595         edge_to = _phantom_object; // Could not be worse
  1596       } else {
  1597         es = PointsToNode::NoEscape;
  1598         edge_to = call->_idx;
  1600       set_escape_state(call->_idx, es);
  1601       add_pointsto_edge(resproj->_idx, edge_to);
  1602       _processed.set(resproj->_idx);
  1603       break;
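
             // The subclass-of-Thread / has_finalizer test above is about
             // hidden reachability: such objects become reachable through
             // VM-internal structures (thread lists, the finalizer queue),
             // so they are treated as GlobalEscape even when no store of
             // the reference is visible to the compiler here.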
  1606     case Op_AllocateArray:
  1608       PointsToNode *ptadr = ptnode_adr(call->_idx);
  1609       int length = call->in(AllocateNode::ALength)->find_int_con(-1);
  1610       if (length < 0 || length > EliminateAllocationArraySizeLimit) {
  1611         // Not scalar replaceable if the length is not constant or too big.
  1612         ptadr->_scalar_replaceable = false;
  1614       set_escape_state(call->_idx, PointsToNode::NoEscape);
  1615       add_pointsto_edge(resproj->_idx, call->_idx);
  1616       _processed.set(resproj->_idx);
  1617       break;
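
             // Illustrative sketch of what the length check above excludes
             // (the limit bounds how many scalar values the allocation
             // would expand into):
             //   int[] a = new int[3];   // constant, small: may be replaced
             //   int[] b = new int[n];   // unknown length: never replaced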
  1620     case Op_CallStaticJava:
  1621     // For a static call, we know exactly what method is being called.
  1622     // Use bytecode estimator to record whether the call's return value escapes
  1624       bool done = true;
  1625       const TypeTuple *r = call->tf()->range();
  1626       const Type* ret_type = NULL;
  1628       if (r->cnt() > TypeFunc::Parms)
  1629         ret_type = r->field_at(TypeFunc::Parms);
  1631       // Note:  we use isa_ptr() instead of isa_oopptr() here because the
  1632       //        _multianewarray functions return a TypeRawPtr.
  1633       if (ret_type == NULL || ret_type->isa_ptr() == NULL) {
  1634         _processed.set(resproj->_idx);
  1635         break;  // doesn't return a pointer type
  1637       ciMethod *meth = call->as_CallJava()->method();
  1638       const TypeTuple * d = call->tf()->domain();
  1639       if (meth == NULL) {
  1640         // not a Java method, assume global escape
  1641         set_escape_state(call->_idx, PointsToNode::GlobalEscape);
  1642         if (resproj != NULL)
  1643           add_pointsto_edge(resproj->_idx, _phantom_object);
  1644       } else {
  1645         BCEscapeAnalyzer *call_analyzer = meth->get_bcea();
  1646         VectorSet ptset(Thread::current()->resource_area());
  1647         bool copy_dependencies = false;
  1649         if (call_analyzer->is_return_allocated()) {
  1650           // Returns a newly allocated unescaped object, simply
  1651           // update dependency information.
  1652           // Mark it as NoEscape so that objects referenced by
  1653           // its fields will be marked as NoEscape at least.
  1654           set_escape_state(call->_idx, PointsToNode::NoEscape);
  1655           if (resproj != NULL)
  1656             add_pointsto_edge(resproj->_idx, call->_idx);
  1657           copy_dependencies = true;
  1658         } else if (call_analyzer->is_return_local() && resproj != NULL) {
  1659           // determine whether any arguments are returned
  1660           set_escape_state(call->_idx, PointsToNode::NoEscape);
  1661           for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  1662             const Type* at = d->field_at(i);
  1664             if (at->isa_oopptr() != NULL) {
  1665               Node *arg = call->in(i)->uncast();
  1667               if (call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
  1668                 PointsToNode *arg_esp = _nodes->adr_at(arg->_idx);
  1669                 if (arg_esp->node_type() == PointsToNode::UnknownType)
  1670                   done = false;
  1671                 else if (arg_esp->node_type() == PointsToNode::JavaObject)
  1672                   add_pointsto_edge(resproj->_idx, arg->_idx);
  1673                 else
  1674                   add_deferred_edge(resproj->_idx, arg->_idx);
  1675                 arg_esp->_hidden_alias = true;
  1679           copy_dependencies = true;
  1680         } else {
  1681           set_escape_state(call->_idx, PointsToNode::GlobalEscape);
  1682           if (resproj != NULL)
  1683             add_pointsto_edge(resproj->_idx, _phantom_object);
  1684           for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  1685             const Type* at = d->field_at(i);
  1686             if (at->isa_oopptr() != NULL) {
  1687               Node *arg = call->in(i)->uncast();
  1688               PointsToNode *arg_esp = _nodes->adr_at(arg->_idx);
  1689               arg_esp->_hidden_alias = true;
  1693         if (copy_dependencies)
  1694           call_analyzer->copy_dependencies(C()->dependencies());
  1696       if (done)
  1697         _processed.set(resproj->_idx);
  1698       break;
  1701     default:
  1702     // Some other type of call, assume the worst case that the
  1703     // returned value, if any, globally escapes.
  1705       const TypeTuple *r = call->tf()->range();
  1706       if (r->cnt() > TypeFunc::Parms) {
  1707         const Type* ret_type = r->field_at(TypeFunc::Parms);
  1709         // Note:  we use isa_ptr() instead of isa_oopptr() here because the
  1710         //        _multianewarray functions return a TypeRawPtr.
  1711         if (ret_type->isa_ptr() != NULL) {
  1712           PointsToNode *ptadr = ptnode_adr(call->_idx);
  1713           set_escape_state(call->_idx, PointsToNode::GlobalEscape);
  1714           if (resproj != NULL)
  1715             add_pointsto_edge(resproj->_idx, _phantom_object);
  1716         }
  1717       }
  1718       _processed.set(resproj->_idx);
  1719     }
  1720   }
  1721 }
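
       // The recording pass below and build_connection_graph() split the
       // work between them: record_for_escape_analysis() gives a node its
       // PointsToNode entry plus any edges derivable from the node and its
       // direct inputs alone, pushing nodes whose inputs are not typed yet
       // on _delayed_worklist.  A Deferred edge created here means "points
       // to whatever the target points to" and is collapsed later by
       // remove_deferred() once the graph is complete.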
  1723 // Populate Connection Graph with Ideal nodes and create simple
  1724 // connection graph edges (these do not require checking the node_type of
  1725 // inputs or calling PointsTo() to walk the connection graph).
  1726 void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase) {
  1727   if (_processed.test(n->_idx))
  1728     return; // No need to redefine node's state.
  1730   if (n->is_Call()) {
  1731     // Arguments to allocation and locking don't escape.
  1732     if (n->is_Allocate()) {
  1733       add_node(n, PointsToNode::JavaObject, PointsToNode::UnknownEscape, true);
  1734       record_for_optimizer(n);
  1735     } else if (n->is_Lock() || n->is_Unlock()) {
  1736       // Put Lock and Unlock nodes on IGVN worklist to process them during
  1737       // the first IGVN optimization when escape information is still available.
  1738       record_for_optimizer(n);
  1739       _processed.set(n->_idx);
  1740     } else {
  1741       // Have to process call's arguments first.
  1742       PointsToNode::NodeType nt = PointsToNode::UnknownType;
  1744       // Check if a call returns an object.
  1745       const TypeTuple *r = n->as_Call()->tf()->range();
  1746       if (r->cnt() > TypeFunc::Parms &&
  1747           n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
  1748         // Note:  use isa_ptr() instead of isa_oopptr() here because
  1749         //        the _multianewarray functions return a TypeRawPtr.
  1750         if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
  1751           nt = PointsToNode::JavaObject;
  1754       add_node(n, nt, PointsToNode::UnknownEscape, false);
  1756     return;
  1759   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  1760   // ThreadLocal has RawPtr type.
  1761   switch (n->Opcode()) {
  1762     case Op_AddP:
  1764       add_node(n, PointsToNode::Field, PointsToNode::UnknownEscape, false);
  1765       break;
  1767     case Op_CastX2P:
  1768     { // "Unsafe" memory access.
  1769       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
  1770       break;
  1772     case Op_CastPP:
  1773     case Op_CheckCastPP:
  1774     case Op_EncodeP:
  1775     case Op_DecodeN:
  1777       add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  1778       int ti = n->in(1)->_idx;
  1779       PointsToNode::NodeType nt = _nodes->adr_at(ti)->node_type();
  1780       if (nt == PointsToNode::UnknownType) {
  1781         _delayed_worklist.push(n); // Process it later.
  1782         break;
  1783       } else if (nt == PointsToNode::JavaObject) {
  1784         add_pointsto_edge(n->_idx, ti);
  1785       } else {
  1786         add_deferred_edge(n->_idx, ti);
  1788       _processed.set(n->_idx);
  1789       break;
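
             // This is the recurring pattern for pointer-copying nodes: a
             // direct PointsTo edge when the input is already a concrete
             // JavaObject, a Deferred edge when it is a LocalVar or Field,
             // and postponement on _delayed_worklist when its type is not
             // known yet.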
  1791     case Op_ConP:
  1793       // assume all pointer constants globally escape except for null
  1794       PointsToNode::EscapeState es;
  1795       if (phase->type(n) == TypePtr::NULL_PTR)
  1796         es = PointsToNode::NoEscape;
  1797       else
  1798         es = PointsToNode::GlobalEscape;
  1800       add_node(n, PointsToNode::JavaObject, es, true);
  1801       break;
  1803     case Op_ConN:
  1805       // assume all narrow oop constants globally escape except for null
  1806       PointsToNode::EscapeState es;
  1807       if (phase->type(n) == TypeNarrowOop::NULL_PTR)
  1808         es = PointsToNode::NoEscape;
  1809       else
  1810         es = PointsToNode::GlobalEscape;
  1812       add_node(n, PointsToNode::JavaObject, es, true);
  1813       break;
  1815     case Op_CreateEx:
  1817       // assume that all exception objects globally escape
  1818       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
  1819       break;
  1821     case Op_LoadKlass:
  1822     case Op_LoadNKlass:
  1824       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
  1825       break;
  1827     case Op_LoadP:
  1828     case Op_LoadN:
  1830       const Type *t = phase->type(n);
  1831       if (!t->isa_narrowoop() && t->isa_ptr() == NULL) {
  1832         _processed.set(n->_idx);
  1833         return;
  1835       add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  1836       break;
  1838     case Op_Parm:
  1840       _processed.set(n->_idx); // No need to redefine its state.
  1841       uint con = n->as_Proj()->_con;
  1842       if (con < TypeFunc::Parms)
  1843         return;
  1844       const Type *t = n->in(0)->as_Start()->_domain->field_at(con);
  1845       if (t->isa_ptr() == NULL)
  1846         return;
  1847       // We have to assume all input parameters globally escape
  1848       // (Note: passing 'false' since _processed is already set).
  1849       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, false);
  1850       break;
  1852     case Op_Phi:
  1854       if (n->as_Phi()->type()->isa_ptr() == NULL) {
  1855         // nothing to do if not an oop
  1856         _processed.set(n->_idx);
  1857         return;
  1859       add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  1860       uint i;
  1861       for (i = 1; i < n->req() ; i++) {
  1862         Node* in = n->in(i);
  1863         if (in == NULL)
  1864           continue;  // ignore NULL
  1865         in = in->uncast();
  1866         if (in->is_top() || in == n)
  1867           continue;  // ignore top or inputs which go back to this node
  1868         int ti = in->_idx;
  1869         PointsToNode::NodeType nt = _nodes->adr_at(ti)->node_type();
  1870         if (nt == PointsToNode::UnknownType) {
  1871           break;
  1872         } else if (nt == PointsToNode::JavaObject) {
  1873           add_pointsto_edge(n->_idx, ti);
  1874         } else {
  1875           add_deferred_edge(n->_idx, ti);
  1878       if (i >= n->req())
  1879         _processed.set(n->_idx);
  1880       else
  1881         _delayed_worklist.push(n);
  1882       break;
  1884     case Op_Proj:
  1886       // we are only interested in the result projection from a call
  1887       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
  1888         add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  1889         process_call_result(n->as_Proj(), phase);
  1890         if (!_processed.test(n->_idx)) {
  1891           // The call's result may need to be processed later if the call
  1892           // returns its argument and the argument is not processed yet.
  1893           _delayed_worklist.push(n);
  1895       } else {
  1896         _processed.set(n->_idx);
  1898       break;
  1900     case Op_Return:
  1902       if( n->req() > TypeFunc::Parms &&
  1903           phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
  1904         // Treat Return value as LocalVar with GlobalEscape escape state.
  1905         add_node(n, PointsToNode::LocalVar, PointsToNode::GlobalEscape, false);
  1906         int ti = n->in(TypeFunc::Parms)->_idx;
  1907         PointsToNode::NodeType nt = _nodes->adr_at(ti)->node_type();
  1908         if (nt == PointsToNode::UnknownType) {
  1909           _delayed_worklist.push(n); // Process it later.
  1910           break;
  1911         } else if (nt == PointsToNode::JavaObject) {
  1912           add_pointsto_edge(n->_idx, ti);
  1913         } else {
  1914           add_deferred_edge(n->_idx, ti);
  1917       _processed.set(n->_idx);
  1918       break;
  1920     case Op_StoreP:
  1921     case Op_StoreN:
  1923       const Type *adr_type = phase->type(n->in(MemNode::Address));
  1924       if (adr_type->isa_narrowoop()) {
  1925         adr_type = adr_type->is_narrowoop()->make_oopptr();
  1927       if (adr_type->isa_oopptr()) {
  1928         add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
  1929       } else {
  1930         Node* adr = n->in(MemNode::Address);
  1931         if (adr->is_AddP() && phase->type(adr) == TypeRawPtr::NOTNULL &&
  1932             adr->in(AddPNode::Address)->is_Proj() &&
  1933             adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
  1934           add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
  1935           // We are computing a raw address for a store captured
  1936           // by an Initialize; compute an appropriate address type.
  1937           int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
  1938           assert(offs != Type::OffsetBot, "offset must be a constant");
  1939         } else {
  1940           _processed.set(n->_idx);
  1941           return;
  1944       break;
  1946     case Op_StorePConditional:
  1947     case Op_CompareAndSwapP:
  1948     case Op_CompareAndSwapN:
  1950       const Type *adr_type = phase->type(n->in(MemNode::Address));
  1951       if (adr_type->isa_narrowoop()) {
  1952         adr_type = adr_type->is_narrowoop()->make_oopptr();
  1954       if (adr_type->isa_oopptr()) {
  1955         add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
  1956       } else {
  1957         _processed.set(n->_idx);
  1958         return;
  1960       break;
  1962     case Op_ThreadLocal:
  1964       add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
  1965       break;
  1967     default:
  1968       ;
  1969       // nothing to do
  1970   }
  1971   return;
  1972 }
  1974 void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
  1975   // Don't set processed bit for AddP, LoadP, StoreP since
  1976   // they may need more than one pass to process.
  1977   if (_processed.test(n->_idx))
  1978     return; // No need to redefine node's state.
  1980   PointsToNode *ptadr = ptnode_adr(n->_idx);
  1982   if (n->is_Call()) {
  1983     CallNode *call = n->as_Call();
  1984     process_call_arguments(call, phase);
  1985     _processed.set(n->_idx);
  1986     return;
  1989   switch (n->Opcode()) {
  1990     case Op_AddP:
  1992       Node *base = get_addp_base(n);
  1993       // Create a field edge to this node from everything base could point to.
  1994       VectorSet ptset(Thread::current()->resource_area());
  1995       PointsTo(ptset, base, phase);
  1996       for( VectorSetI i(&ptset); i.test(); ++i ) {
  1997         uint pt = i.elem;
  1998         add_field_edge(pt, n->_idx, address_offset(n, phase));
  2000       break;
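
             // AddP is a field-address computation; e.g. the store p.f = q
             // is roughly StoreP(AddP(p, #off_f), q) in Ideal form.  Every
             // object "base" may point to therefore gains a Field edge at
             // this offset, modelling "this object has such a field".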
  2002     case Op_CastX2P:
  2004       assert(false, "Op_CastX2P");
  2005       break;
  2007     case Op_CastPP:
  2008     case Op_CheckCastPP:
  2009     case Op_EncodeP:
  2010     case Op_DecodeN:
  2012       int ti = n->in(1)->_idx;
  2013       if (_nodes->adr_at(ti)->node_type() == PointsToNode::JavaObject) {
  2014         add_pointsto_edge(n->_idx, ti);
  2015       } else {
  2016         add_deferred_edge(n->_idx, ti);
  2018       _processed.set(n->_idx);
  2019       break;
  2021     case Op_ConP:
  2023       assert(false, "Op_ConP");
  2024       break;
  2026     case Op_ConN:
  2028       assert(false, "Op_ConN");
  2029       break;
  2031     case Op_CreateEx:
  2033       assert(false, "Op_CreateEx");
  2034       break;
  2036     case Op_LoadKlass:
  2037     case Op_LoadNKlass:
  2039       assert(false, "Op_LoadKlass");
  2040       break;
  2042     case Op_LoadP:
  2043     case Op_LoadN:
  2045       const Type *t = phase->type(n);
  2046 #ifdef ASSERT
  2047       if (!t->isa_narrowoop() && t->isa_ptr() == NULL)
  2048         assert(false, "Op_LoadP");
  2049 #endif
  2051       Node* adr = n->in(MemNode::Address)->uncast();
  2052       const Type *adr_type = phase->type(adr);
  2053       Node* adr_base;
  2054       if (adr->is_AddP()) {
  2055         adr_base = get_addp_base(adr);
  2056       } else {
  2057         adr_base = adr;
  2060       // For everything "adr_base" could point to, create a deferred edge from
  2061       // this node to each field with the same offset.
  2062       VectorSet ptset(Thread::current()->resource_area());
  2063       PointsTo(ptset, adr_base, phase);
  2064       int offset = address_offset(adr, phase);
  2065       for( VectorSetI i(&ptset); i.test(); ++i ) {
  2066         uint pt = i.elem;
  2067         add_deferred_edge_to_fields(n->_idx, pt, offset);
  2069       break;
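
             // For a load x = p.f the result may be anything stored in the
             // f-field of any object p may point to, hence the deferred
             // edges added above from this node to each matching field.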
  2071     case Op_Parm:
  2073       assert(false, "Op_Parm");
  2074       break;
  2076     case Op_Phi:
  2078 #ifdef ASSERT
  2079       if (n->as_Phi()->type()->isa_ptr() == NULL)
  2080         assert(false, "Op_Phi");
  2081 #endif
  2082       for (uint i = 1; i < n->req() ; i++) {
  2083         Node* in = n->in(i);
  2084         if (in == NULL)
  2085           continue;  // ignore NULL
  2086         in = in->uncast();
  2087         if (in->is_top() || in == n)
  2088           continue;  // ignore top or inputs which go back to this node
  2089         int ti = in->_idx;
  2090         if (_nodes->adr_at(in->_idx)->node_type() == PointsToNode::JavaObject) {
  2091           add_pointsto_edge(n->_idx, ti);
  2092         } else {
  2093           add_deferred_edge(n->_idx, ti);
  2096       _processed.set(n->_idx);
  2097       break;
  2099     case Op_Proj:
  2101       // we are only interested in the result projection from a call
  2102       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
  2103         process_call_result(n->as_Proj(), phase);
  2104         assert(_processed.test(n->_idx), "all call results should be processed");
  2105       } else {
  2106         assert(false, "Op_Proj");
  2108       break;
  2110     case Op_Return:
  2112 #ifdef ASSERT
  2113       if( n->req() <= TypeFunc::Parms ||
  2114           !phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
  2115         assert(false, "Op_Return");
  2117 #endif
  2118       int ti = n->in(TypeFunc::Parms)->_idx;
  2119       if (_nodes->adr_at(ti)->node_type() == PointsToNode::JavaObject) {
  2120         add_pointsto_edge(n->_idx, ti);
  2121       } else {
  2122         add_deferred_edge(n->_idx, ti);
  2124       _processed.set(n->_idx);
  2125       break;
  2127     case Op_StoreP:
  2128     case Op_StoreN:
  2129     case Op_StorePConditional:
  2130     case Op_CompareAndSwapP:
  2131     case Op_CompareAndSwapN:
  2133       Node *adr = n->in(MemNode::Address);
  2134       const Type *adr_type = phase->type(adr);
  2135       if (adr_type->isa_narrowoop()) {
  2136         adr_type = adr_type->is_narrowoop()->make_oopptr();
  2138 #ifdef ASSERT
  2139       if (!adr_type->isa_oopptr())
  2140         assert(phase->type(adr) == TypeRawPtr::NOTNULL, "Op_StoreP");
  2141 #endif
  2143       assert(adr->is_AddP(), "expecting an AddP");
  2144       Node *adr_base = get_addp_base(adr);
  2145       Node *val = n->in(MemNode::ValueIn)->uncast();
  2146       // For everything "adr_base" could point to, create a deferred edge
  2147       // to "val" from each field with the same offset.
  2148       VectorSet ptset(Thread::current()->resource_area());
  2149       PointsTo(ptset, adr_base, phase);
  2150       for( VectorSetI i(&ptset); i.test(); ++i ) {
  2151         uint pt = i.elem;
  2152         add_edge_from_fields(pt, val->_idx, address_offset(adr, phase));
  2154       break;
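
             // For a store p.f = val each matching field of each object p
             // may point to can now refer to val, hence the edges added
             // above from those Field nodes to "val".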
  2156     case Op_ThreadLocal:
  2158       assert(false, "Op_ThreadLocal");
  2159       break;
  2161     default:
  2162       ;
  2163       // nothing to do
  2164   }
  2165 }
  2167 #ifndef PRODUCT
  2168 void ConnectionGraph::dump() {
  2169   PhaseGVN  *igvn = _compile->initial_gvn();
  2170   bool first = true;
  2172   uint size = (uint)_nodes->length();
  2173   for (uint ni = 0; ni < size; ni++) {
  2174     PointsToNode *ptn = _nodes->adr_at(ni);
  2175     PointsToNode::NodeType ptn_type = ptn->node_type();
  2177     if (ptn_type != PointsToNode::JavaObject || ptn->_node == NULL)
  2178       continue;
  2179     PointsToNode::EscapeState es = escape_state(ptn->_node, igvn);
  2180     if (ptn->_node->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
  2181       if (first) {
  2182         tty->cr();
  2183         tty->print("======== Connection graph for ");
  2184         C()->method()->print_short_name();
  2185         tty->cr();
  2186         first = false;
  2188       tty->print("%6d ", ni);
  2189       ptn->dump();
  2190       // Print all locals which reference this allocation
  2191       for (uint li = ni; li < size; li++) {
  2192         PointsToNode *ptn_loc = _nodes->adr_at(li);
  2193         PointsToNode::NodeType ptn_loc_type = ptn_loc->node_type();
  2194         if ( ptn_loc_type == PointsToNode::LocalVar && ptn_loc->_node != NULL &&
  2195              ptn_loc->edge_count() == 1 && ptn_loc->edge_target(0) == ni ) {
  2196           tty->print("%6d  LocalVar [[%d]]", li, ni);
  2197           _nodes->adr_at(li)->_node->dump();
  2200       if (Verbose) {
  2201         // Print all fields which reference this allocation
  2202         for (uint i = 0; i < ptn->edge_count(); i++) {
  2203           uint ei = ptn->edge_target(i);
  2204           tty->print("%6d  Field [[%d]]", ei, ni);
  2205           _nodes->adr_at(ei)->_node->dump();
  2206         }
  2207       }
  2208       tty->cr();
  2209     }
  2210   }
  2211 }
  2212 #endif
