src/share/vm/opto/escape.cpp

author:    never
date:      Thu, 07 Oct 2010 21:40:55 -0700
changeset: 2199:75588558f1bf
parent:    2170:5867d89c129b
child:     2276:e4fcbeb5a698

6980792: Crash "exception happened outside interpreter, nmethods and vtable stubs (1)"
Reviewed-by: kvn

/*
 * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_escape.cpp.incl"
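
// Add an edge to the node identified by "targIdx". The target index and the
// edge type are packed into a single uint; the edge array is allocated
// lazily in the compile arena.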
void PointsToNode::add_edge(uint targIdx, PointsToNode::EdgeType et) {
  uint v = (targIdx << EdgeShift) + ((uint) et);
  if (_edges == NULL) {
    Arena *a = Compile::current()->comp_arena();
    _edges = new(a) GrowableArray<uint>(a, INITIAL_EDGE_COUNT, 0, 0);
  }
  _edges->append_if_missing(v);
}
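
// Remove the edge to the node identified by "targIdx", packed together with
// its edge type the same way as in add_edge() above.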
void PointsToNode::remove_edge(uint targIdx, PointsToNode::EdgeType et) {
  uint v = (targIdx << EdgeShift) + ((uint) et);

  _edges->remove(v);
}

#ifndef PRODUCT
static const char *node_type_names[] = {
  "UnknownType",
  "JavaObject",
  "LocalVar",
  "Field"
};

static const char *esc_names[] = {
  "UnknownEscape",
  "NoEscape",
  "ArgEscape",
  "GlobalEscape"
};

static const char *edge_type_suffix[] = {
 "?", // UnknownEdge
 "P", // PointsToEdge
 "D", // DeferredEdge
 "F"  // FieldEdge
};

void PointsToNode::dump(bool print_state) const {
  NodeType nt = node_type();
  tty->print("%s ", node_type_names[(int) nt]);
  if (print_state) {
    EscapeState es = escape_state();
    tty->print("%s %s ", esc_names[(int) es], _scalar_replaceable ? "":"NSR");
  }
  tty->print("[[");
  for (uint i = 0; i < edge_count(); i++) {
    tty->print(" %d%s", edge_target(i), edge_type_suffix[(int) edge_type(i)]);
  }
  tty->print("]]  ");
  if (_node == NULL)
    tty->print_cr("<null>");
  else
    _node->dump();
}
#endif
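
// Construct the connection graph: pre-size the nodes array for every ideal
// node known to the compile, then register the globally escaping phantom
// object (top) and the non-escaping null constants.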
ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), PointsToNode()),
  _processed(C->comp_arena()),
  _collecting(true),
  _compile(C),
  _igvn(igvn),
  _node_map(C->comp_arena()) {

  _phantom_object = C->top()->_idx;
  add_node(C->top(), PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);

  // Add ConP(#NULL) and ConN(#NULL) nodes.
  Node* oop_null = igvn->zerocon(T_OBJECT);
  _oop_null = oop_null->_idx;
  assert(_oop_null < C->unique(), "should be created already");
  add_node(oop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);

  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    _noop_null = noop_null->_idx;
    assert(_noop_null < C->unique(), "should be created already");
    add_node(noop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);
  }
}
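
// Add a PointsTo edge: the LocalVar or Field node "from_i" may reference
// the JavaObject "to_i" directly.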
void ConnectionGraph::add_pointsto_edge(uint from_i, uint to_i) {
  PointsToNode *f = ptnode_adr(from_i);
  PointsToNode *t = ptnode_adr(to_i);

  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
  assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of PointsTo edge");
  assert(t->node_type() == PointsToNode::JavaObject, "invalid destination of PointsTo edge");
  f->add_edge(to_i, PointsToNode::PointsToEdge);
}
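
// Add a Deferred edge: node "from_i" may point to whatever the LocalVar or
// Field node "to_i" points to; such edges are resolved by remove_deferred().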
void ConnectionGraph::add_deferred_edge(uint from_i, uint to_i) {
  PointsToNode *f = ptnode_adr(from_i);
  PointsToNode *t = ptnode_adr(to_i);

  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
  assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of Deferred edge");
  assert(t->node_type() == PointsToNode::LocalVar || t->node_type() == PointsToNode::Field, "invalid destination of Deferred edge");
  // Don't add a self-referential edge; this can occur during removal
  // of deferred edges.
  if (from_i != to_i)
    f->add_edge(to_i, PointsToNode::DeferredEdge);
}
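
// Return the field offset denoted by an address expression, handling raw
// addresses of stores captured by an Initialize node (AddP cases #3 and #5).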
int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
  const Type *adr_type = phase->type(adr);
  if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
      adr->in(AddPNode::Address)->is_Proj() &&
      adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
    // We are computing a raw address for a store captured by an Initialize;
    // compute an appropriate address type. AddP cases #3 and #5 (see below).
    int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
    assert(offs != Type::OffsetBot ||
           adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
           "offset must be a constant or it is initialization of array");
    return offs;
  }
  const TypePtr *t_ptr = adr_type->isa_ptr();
  assert(t_ptr != NULL, "must be a pointer type");
  return t_ptr->offset();
}
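
// Add a Field edge from the JavaObject "from_i" to the Field node "to_i",
// recording the field's offset on the target node.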
void ConnectionGraph::add_field_edge(uint from_i, uint to_i, int offset) {
  PointsToNode *f = ptnode_adr(from_i);
  PointsToNode *t = ptnode_adr(to_i);

  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
  assert(f->node_type() == PointsToNode::JavaObject, "invalid source of Field edge");
  assert(t->node_type() == PointsToNode::Field, "invalid destination of Field edge");
  assert(t->offset() == -1 || t->offset() == offset, "conflicting field offsets");
  t->set_offset(offset);

  f->add_edge(to_i, PointsToNode::FieldEdge);
}
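
// Raise the escape state of node "ni" to "es"; escape states only grow
// (NoEscape < ArgEscape < GlobalEscape).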
void ConnectionGraph::set_escape_state(uint ni, PointsToNode::EscapeState es) {
  PointsToNode *npt = ptnode_adr(ni);
  PointsToNode::EscapeState old_es = npt->escape_state();
  if (es > old_es)
    npt->set_escape_state(es);
}
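
// Register ideal node "n" in the connection graph with the given node type
// and initial escape state; "done" marks the node as fully processed.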
void ConnectionGraph::add_node(Node *n, PointsToNode::NodeType nt,
                               PointsToNode::EscapeState es, bool done) {
  PointsToNode* ptadr = ptnode_adr(n->_idx);
  ptadr->_node = n;
  ptadr->set_node_type(nt);

  // inline set_escape_state(idx, es);
  PointsToNode::EscapeState old_es = ptadr->escape_state();
  if (es > old_es)
    ptadr->set_escape_state(es);

  if (done)
    _processed.set(n->_idx);
}
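
// Return the escape state of node "n". If it is not a known JavaObject,
// compute (and cache) the maximum escape state of everything it may point to.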
PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n) {
  uint idx = n->_idx;
  PointsToNode::EscapeState es;

  // If we are still collecting or there were no non-escaping allocations,
  // we don't know the answer yet.
  if (_collecting)
    return PointsToNode::UnknownEscape;

  // If the node was created after the escape computation, return
  // UnknownEscape.
  if (idx >= nodes_size())
    return PointsToNode::UnknownEscape;

  es = ptnode_adr(idx)->escape_state();

  // If we have already computed a value, return it.
  if (es != PointsToNode::UnknownEscape &&
      ptnode_adr(idx)->node_type() == PointsToNode::JavaObject)
    return es;

  // PointsTo() calls n->uncast() which can return a new ideal node.
  if (n->uncast()->_idx >= nodes_size())
    return PointsToNode::UnknownEscape;

  PointsToNode::EscapeState orig_es = es;

  // Compute the max escape state of anything this node could point to.
  VectorSet ptset(Thread::current()->resource_area());
  PointsTo(ptset, n);
  for (VectorSetI i(&ptset); i.test() && es != PointsToNode::GlobalEscape; ++i) {
    uint pt = i.elem;
    PointsToNode::EscapeState pes = ptnode_adr(pt)->escape_state();
    if (pes > es)
      es = pes;
  }
  if (orig_es != es) {
    // cache the computed escape state
    assert(es != PointsToNode::UnknownEscape, "should have computed an escape state");
    ptnode_adr(idx)->set_escape_state(es);
  } // orig_es could be PointsToNode::UnknownEscape
  return es;
}
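
// Fill "ptset" with the indices of all JavaObject nodes that "n" may point
// to, transitively following Deferred edges through LocalVar and Field nodes.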
void ConnectionGraph::PointsTo(VectorSet &ptset, Node * n) {
  VectorSet visited(Thread::current()->resource_area());
  GrowableArray<uint>  worklist;

#ifdef ASSERT
  Node *orig_n = n;
#endif

  n = n->uncast();
  PointsToNode* npt = ptnode_adr(n->_idx);

  // If we have a JavaObject, return just that object.
  if (npt->node_type() == PointsToNode::JavaObject) {
    ptset.set(n->_idx);
    return;
  }
#ifdef ASSERT
  if (npt->_node == NULL) {
    if (orig_n != n)
      orig_n->dump();
    n->dump();
    assert(npt->_node != NULL, "unregistered node");
  }
#endif
  worklist.push(n->_idx);
  while (worklist.length() > 0) {
    int ni = worklist.pop();
    if (visited.test_set(ni))
      continue;

    PointsToNode* pn = ptnode_adr(ni);
    // Ensure that all inputs of a Phi have been processed.
    assert(!_collecting || !pn->_node->is_Phi() || _processed.test(ni), "");

    int edges_processed = 0;
    uint e_cnt = pn->edge_count();
    for (uint e = 0; e < e_cnt; e++) {
      uint etgt = pn->edge_target(e);
      PointsToNode::EdgeType et = pn->edge_type(e);
      if (et == PointsToNode::PointsToEdge) {
        ptset.set(etgt);
        edges_processed++;
      } else if (et == PointsToNode::DeferredEdge) {
        worklist.push(etgt);
        edges_processed++;
      } else {
        assert(false, "neither PointsToEdge nor DeferredEdge");
      }
    }
    if (edges_processed == 0) {
      // No deferred or pointsto edges found. Assume the value was set
      // outside this method. Add the phantom object to the pointsto set.
      ptset.set(_phantom_object);
    }
  }
}
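
// Resolve the Deferred edges of node "ni": follow the deferred chains and
// replace them with direct PointsTo edges on "ni".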
void ConnectionGraph::remove_deferred(uint ni, GrowableArray<uint>* deferred_edges, VectorSet* visited) {
  // This method is most expensive during ConnectionGraph construction.
  // Reuse the VectorSet and an additional growable array for deferred edges.
  deferred_edges->clear();
  visited->Clear();

  visited->set(ni);
  PointsToNode *ptn = ptnode_adr(ni);

  // Mark current edges as visited and move deferred edges to a separate array.
  for (uint i = 0; i < ptn->edge_count(); ) {
    uint t = ptn->edge_target(i);
#ifdef ASSERT
    assert(!visited->test_set(t), "expecting no duplications");
#else
    visited->set(t);
#endif
    if (ptn->edge_type(i) == PointsToNode::DeferredEdge) {
      ptn->remove_edge(t, PointsToNode::DeferredEdge);
      deferred_edges->append(t);
    } else {
      i++;
    }
  }
  for (int next = 0; next < deferred_edges->length(); ++next) {
    uint t = deferred_edges->at(next);
    PointsToNode *ptt = ptnode_adr(t);
    uint e_cnt = ptt->edge_count();
    for (uint e = 0; e < e_cnt; e++) {
      uint etgt = ptt->edge_target(e);
      if (visited->test_set(etgt))
        continue;

      PointsToNode::EdgeType et = ptt->edge_type(e);
      if (et == PointsToNode::PointsToEdge) {
        add_pointsto_edge(ni, etgt);
        if (etgt == _phantom_object) {
          // Special case - field set outside (globally escaping).
          ptn->set_escape_state(PointsToNode::GlobalEscape);
        }
      } else if (et == PointsToNode::DeferredEdge) {
        deferred_edges->append(etgt);
      } else {
        assert(false, "invalid connection graph");
      }
    }
  }
}

//  Add an edge to the node given by "to_i" from any field of adr_i whose offset
//  matches "offset". A deferred edge is added if to_i is a LocalVar, and
//  a pointsto edge is added if it is a JavaObject.
void ConnectionGraph::add_edge_from_fields(uint adr_i, uint to_i, int offs) {
  PointsToNode* an = ptnode_adr(adr_i);
  PointsToNode* to = ptnode_adr(to_i);
  bool deferred = (to->node_type() == PointsToNode::LocalVar);

  for (uint fe = 0; fe < an->edge_count(); fe++) {
    assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
    int fi = an->edge_target(fe);
    PointsToNode* pf = ptnode_adr(fi);
    int po = pf->offset();
    if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
      if (deferred)
        add_deferred_edge(fi, to_i);
      else
        add_pointsto_edge(fi, to_i);
    }
  }
}

// Add a deferred edge from the node given by "from_i" to any field of adr_i
// whose offset matches "offset".
void ConnectionGraph::add_deferred_edge_to_fields(uint from_i, uint adr_i, int offs) {
  PointsToNode* an = ptnode_adr(adr_i);
  for (uint fe = 0; fe < an->edge_count(); fe++) {
    assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
    int fi = an->edge_target(fe);
    PointsToNode* pf = ptnode_adr(fi);
    int po = pf->offset();
    if (pf->edge_count() == 0) {
      // We have not seen any stores to this field; assume it was set outside
      // this method.
      add_pointsto_edge(fi, _phantom_object);
    }
    if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
      add_deferred_edge(from_i, fi);
    }
  }
}

// Helper functions
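
// Strip casts and return the object node that an AddP address computation
// starts from; the recognized input shapes are enumerated case by case below.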
static Node* get_addp_base(Node *addp) {
  assert(addp->is_AddP(), "must be AddP");
  //
  // AddP cases for Base and Address inputs:
  // case #1. Direct object's field reference:
  //     Allocate
  //       |
  //     Proj #5 ( oop result )
  //       |
  //     CheckCastPP (cast to instance type)
  //      | |
  //     AddP  ( base == address )
  //
  // case #2. Indirect object's field reference:
  //      Phi
  //       |
  //     CastPP (cast to instance type)
  //      | |
  //     AddP  ( base == address )
  //
  // case #3. Raw object's field reference for Initialize node:
  //      Allocate
  //        |
  //      Proj #5 ( oop result )
  //  top   |
  //     \  |
  //     AddP  ( base == top )
  //
  // case #4. Array's element reference:
  //   {CheckCastPP | CastPP}
  //     |  | |
  //     |  AddP ( array's element offset )
  //     |  |
  //     AddP ( array's offset )
  //
  // case #5. Raw object's field reference for arraycopy stub call:
  //          The inline_native_clone() case when the arraycopy stub is called
  //          after the allocation before Initialize and CheckCastPP nodes.
  //      Allocate
  //        |
  //      Proj #5 ( oop result )
  //       | |
  //       AddP  ( base == address )
  //
  // case #6. Constant Pool, ThreadLocal, CastX2P or
  //          Raw object's field reference:
  //      {ConP, ThreadLocal, CastX2P, raw Load}
  //  top   |
  //     \  |
  //     AddP  ( base == top )
  //
  // case #7. Klass's field reference.
  //      LoadKlass
  //       | |
  //       AddP  ( base == address )
  //
  // case #8. narrow Klass's field reference.
  //      LoadNKlass
  //       |
  //      DecodeN
  //       | |
  //       AddP  ( base == address )
  //
  Node *base = addp->in(AddPNode::Base)->uncast();
  if (base->is_top()) { // AddP cases #3 and #6.
    base = addp->in(AddPNode::Address)->uncast();
    while (base->is_AddP()) {
      // Case #6 (unsafe access) may have several chained AddP nodes.
      assert(base->in(AddPNode::Base)->is_top(), "expected unsafe access address only");
      base = base->in(AddPNode::Address)->uncast();
    }
    assert(base->Opcode() == Op_ConP || base->Opcode() == Op_ThreadLocal ||
           base->Opcode() == Op_CastX2P || base->is_DecodeN() ||
           (base->is_Mem() && base->bottom_type() == TypeRawPtr::NOTNULL) ||
           (base->is_Proj() && base->in(0)->is_Allocate()), "sanity");
  }
  return base;
}
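
// For the two-AddP chain produced for an array element address (case #4
// above), return the second AddP chained to "addp"; otherwise return NULL.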
static Node* find_second_addp(Node* addp, Node* n) {
  assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");

  Node* addp2 = addp->raw_out(0);
  if (addp->outcnt() == 1 && addp2->is_AddP() &&
      addp2->in(AddPNode::Base) == n &&
      addp2->in(AddPNode::Address) == addp) {

    assert(addp->in(AddPNode::Base) == n, "expecting the same base");
    //
    // Find the array's offset to push it on the worklist first and,
    // as a result, process the array's element offset first (pushed second)
    // to avoid a CastPP for the array's offset.
    // Otherwise the inserted CastPP (LocalVar) will point to what
    // the AddP (Field) points to, which would be wrong since
    // the algorithm expects the CastPP to point to the same object as
    // the AddP's base CheckCastPP (LocalVar).
    //
    //    ArrayAllocation
    //     |
    //    CheckCastPP
    //     |
    //    memProj (from ArrayAllocation CheckCastPP)
    //     |  ||
    //     |  ||   Int (element index)
    //     |  ||    |   ConI (log(element size))
    //     |  ||    |   /
    //     |  ||   LShift
    //     |  ||  /
    //     |  AddP (array's element offset)
    //     |  |
    //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
    //     | / /
    //     AddP (array's offset)
    //      |
    //     Load/Store (memory operation on array's element)
    //
    return addp2;
  }
  return NULL;
}

//
// Adjust the type and inputs of an AddP which computes the
// address of a field of an instance.
//
bool ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
  const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
  assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
  const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
  if (t == NULL) {
    // We are computing a raw address for a store captured by an Initialize;
    // compute an appropriate address type (cases #3 and #5).
    assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
    assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
    intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
    assert(offs != Type::OffsetBot, "offset must be a constant");
    t = base_t->add_offset(offs)->is_oopptr();
  }
  int inst_id = base_t->instance_id();
  assert(!t->is_known_instance() || t->instance_id() == inst_id,
                             "old type must be non-instance or match new type");

  // The type 't' could be a subclass of 'base_t'. As a result, t->offset()
  // could be larger than base_t's size, which will cause a failure in
  // add_offset() with narrow oops since the TypeOopPtr() constructor
  // verifies the correctness of the offset.
  //
  // This can happen on a subclass's branch (from the type profiling
  // inlining) which was not eliminated during parsing since the exactness
  // of the allocation type was not propagated to the subclass type check.
  //
  // Or the type 't' could be unrelated to 'base_t' altogether.
  // This can happen when the CHA type differs from the MDO type on a dead
  // path (for example, from an instanceof check) which is not collapsed
  // during parsing.
  //
  // Do nothing for such an AddP node and don't process its users since
  // this code branch will go away.
  //
  if (!t->is_known_instance() &&
      !base_t->klass()->is_subtype_of(t->klass())) {
     return false; // bail out
  }

  const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
  // Do NOT remove the next line: ensure a new alias index is allocated
  // for the instance type. Note: C++ will not remove it since the call
  // has a side effect.
  int alias_idx = _compile->get_alias_index(tinst);
  igvn->set_type(addp, tinst);
  // Record the allocation in the node map.
  assert(ptnode_adr(addp->_idx)->_node != NULL, "should be registered");
  set_map(addp->_idx, get_map(base->_idx));

  // Set addp's Base and Address to 'base'.
  Node *abase = addp->in(AddPNode::Base);
  Node *adr   = addp->in(AddPNode::Address);
  if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
      adr->in(0)->_idx == (uint)inst_id) {
    // Skip AddP cases #3 and #5.
  } else {
    assert(!abase->is_top(), "sanity"); // AddP case #3
    if (abase != base) {
      igvn->hash_delete(addp);
      addp->set_req(AddPNode::Base, base);
      if (abase == adr) {
        addp->set_req(AddPNode::Address, base);
      } else {
        // AddP case #4 (adr is array's element offset AddP node)
#ifdef ASSERT
        const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
        assert(adr->is_AddP() && atype != NULL &&
               atype->instance_id() == inst_id, "array's element offset should be processed first");
#endif
      }
      igvn->hash_insert(addp);
    }
  }
  // Put on IGVN worklist since at least addp's type was changed above.
  record_for_optimizer(addp);
  return true;
}

//
// Create a new version of orig_phi if necessary. Returns either the newly
// created phi or an existing phi. Sets new_created to indicate whether a new
// phi was created. Cache the last newly created phi in the node map.
//
PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn, bool &new_created) {
  Compile *C = _compile;
  new_created = false;
  int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
  // Nothing to do if orig_phi is bottom memory or matches alias_idx.
  if (phi_alias_idx == alias_idx) {
    return orig_phi;
  }
  // Have we recently created a Phi for this alias index?
  PhiNode *result = get_map_phi(orig_phi->_idx);
  if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
    return result;
  }
  // Previous check may fail when the same wide memory Phi was split into Phis
  // for different memory slices. Search all Phis for this region.
  if (result != NULL) {
    Node* region = orig_phi->in(0);
    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
      Node* phi = region->fast_out(i);
      if (phi->is_Phi() &&
          C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
        assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
        return phi->as_Phi();
      }
    }
  }
  if ((int)C->unique() + 2*NodeLimitFudgeFactor > MaxNodeLimit) {
    if (C->do_escape_analysis() == true && !C->failing()) {
      // Retry compilation without escape analysis.
      // If this is the first failure, the sentinel string will "stick"
      // to the Compile object, and the C2Compiler will see it and retry.
      C->record_failure(C2Compiler::retry_no_escape_analysis());
    }
    return NULL;
  }
  orig_phi_worklist.append_if_missing(orig_phi);
  const TypePtr *atype = C->get_adr_type(alias_idx);
  result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
  C->copy_node_notes_to(result, orig_phi);
  igvn->set_type(result, result->bottom_type());
  record_for_optimizer(result);

  debug_only(Node* pn = ptnode_adr(orig_phi->_idx)->_node;)
  assert(pn == NULL || pn == orig_phi, "wrong node");
  set_map(orig_phi->_idx, result);
  ptnode_adr(orig_phi->_idx)->_node = orig_phi;

  new_created = true;
  return result;
}

//
// Return a new version of Memory Phi "orig_phi" with the inputs having the
// specified alias index.
//
PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn) {

  assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
  Compile *C = _compile;
  bool new_phi_created;
  PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, igvn, new_phi_created);
  if (!new_phi_created) {
    return result;
  }

  GrowableArray<PhiNode *>  phi_list;
  GrowableArray<uint>  cur_input;

  PhiNode *phi = orig_phi;
  uint idx = 1;
  bool finished = false;
  while (!finished) {
    while (idx < phi->req()) {
      Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, igvn);
      if (mem != NULL && mem->is_Phi()) {
        PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, igvn, new_phi_created);
        if (new_phi_created) {
          // Found a phi for which we created a new split; push the current
          // one on the worklist and begin processing the new one.
          phi_list.push(phi);
          cur_input.push(idx);
          phi = mem->as_Phi();
          result = newphi;
          idx = 1;
          continue;
        } else {
          mem = newphi;
        }
      }
      if (C->failing()) {
        return NULL;
      }
      result->set_req(idx++, mem);
    }
#ifdef ASSERT
    // Verify that the new Phi has an input for each input of the original.
    assert( phi->req() == result->req(), "must have same number of inputs.");
    assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
#endif
    // Check if all new phi's inputs have the specified alias index.
    // Otherwise use the old phi.
    for (uint i = 1; i < phi->req(); i++) {
      Node* in = result->in(i);
      assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");
    }
    // We have finished processing a Phi; see if there are any more to do.
    finished = (phi_list.length() == 0);
    if (!finished) {
      phi = phi_list.pop();
      idx = cur_input.pop();
      PhiNode *prev_result = get_map_phi(phi->_idx);
      prev_result->set_req(idx++, result);
      result = prev_result;
    }
  }
  return result;
}

//
// The next methods are derived from methods in MemNode.
//
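// Pick out of a MergeMem the memory slice for "alias_idx", unless the type
// is too imprecise to do alias analysis on (see the check below).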
static Node *step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
  Node *mem = mmem;
  // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
  // means an array I have not precisely typed yet.  Do not do any
  // alias stuff with it any time soon.
  if (toop->base() != Type::AnyPtr &&
      !(toop->klass() != NULL &&
        toop->klass()->is_java_lang_Object() &&
        toop->offset() == Type::OffsetBot)) {
    mem = mmem->memory_at(alias_idx);
    // Update input if it is progress over what we have now.
  }
  return mem;
}

//
// Move memory users to their memory slices.
//
void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *igvn) {
  Compile* C = _compile;

  const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
  assert(tp != NULL, "ptr type");
  int alias_idx = C->get_alias_index(tp);
  int general_idx = C->get_general_index(alias_idx);

  // Move users first.
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);
    if (use->is_MergeMem()) {
      MergeMemNode* mmem = use->as_MergeMem();
      assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
      if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
        continue; // Nothing to do
      }
      // Replace previous general reference to mem node.
      uint orig_uniq = C->unique();
      Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
      assert(orig_uniq == C->unique(), "no new nodes");
      mmem->set_memory_at(general_idx, m);
      --imax;
      --i;
    } else if (use->is_MemBar()) {
      assert(!use->is_Initialize(), "initializing stores should not be moved");
      if (use->req() > MemBarNode::Precedent &&
          use->in(MemBarNode::Precedent) == n) {
        // Don't move related membars.
        record_for_optimizer(use);
        continue;
      }
      tp = use->as_MemBar()->adr_type()->isa_ptr();
      if ((tp != NULL && C->get_alias_index(tp) == alias_idx) ||
          alias_idx == general_idx) {
        continue; // Nothing to do
      }
      // Move to general memory slice.
      uint orig_uniq = C->unique();
      Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
      assert(orig_uniq == C->unique(), "no new nodes");
      igvn->hash_delete(use);
      imax -= use->replace_edge(n, m);
      igvn->hash_insert(use);
      record_for_optimizer(use);
      --i;
#ifdef ASSERT
    } else if (use->is_Mem()) {
      if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
        // Don't move related cardmark.
        continue;
      }
      // Memory nodes should have new memory input.
      tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
      assert(tp != NULL, "ptr type");
      int idx = C->get_alias_index(tp);
      assert(get_map(use->_idx) != NULL || idx == alias_idx,
             "Following memory nodes should have new memory input or be on the same memory slice");
    } else if (use->is_Phi()) {
      // Phi nodes should be split and moved already.
      tp = use->as_Phi()->adr_type()->isa_ptr();
      assert(tp != NULL, "ptr type");
      int idx = C->get_alias_index(tp);
      assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
    } else {
      use->dump();
      assert(false, "should not be here");
#endif
    }
  }
}

//
// Search the memory chain of "mem" to find a MemNode whose address
// matches the specified alias index.
//
Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *phase) {
  if (orig_mem == NULL)
    return orig_mem;
  Compile* C = phase->C;
  const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
  bool is_instance = (toop != NULL) && toop->is_known_instance();
  Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
  Node *prev = NULL;
  Node *result = orig_mem;
  while (prev != result) {
    prev = result;
    if (result == start_mem)
      break;  // hit one of our sentinels
    if (result->is_Mem()) {
      const Type *at = phase->type(result->in(MemNode::Address));
      if (at != Type::TOP) {
        assert(at->isa_ptr() != NULL, "pointer type required.");
        int idx = C->get_alias_index(at->is_ptr());
        if (idx == alias_idx)
          break;
      }
      result = result->in(MemNode::Memory);
    }
    if (!is_instance)
      continue;  // don't search further for non-instance types
    // Skip over a call which does not affect this memory slice.
    if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
      Node *proj_in = result->in(0);
      if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
        break;  // hit one of our sentinels
      } else if (proj_in->is_Call()) {
        CallNode *call = proj_in->as_Call();
        if (!call->may_modify(toop, phase)) {
          result = call->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_Initialize()) {
        AllocateNode* alloc = proj_in->as_Initialize()->allocation();
        // Stop if this is the initialization for the object instance which
        // contains this memory slice, otherwise skip over it.
        if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) {
          result = proj_in->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_MemBar()) {
        result = proj_in->in(TypeFunc::Memory);
      }
    } else if (result->is_MergeMem()) {
      MergeMemNode *mmem = result->as_MergeMem();
      result = step_through_mergemem(mmem, alias_idx, toop);
      if (result == mmem->base_memory()) {
        // Didn't find instance memory; search through the general slice
        // recursively.
        result = mmem->memory_at(C->get_general_index(alias_idx));
        result = find_inst_mem(result, alias_idx, orig_phis, phase);
        if (C->failing()) {
          return NULL;
        }
        mmem->set_memory_at(alias_idx, result);
      }
    } else if (result->is_Phi() &&
               C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
      Node *un = result->as_Phi()->unique_input(phase);
      if (un != NULL) {
        orig_phis.append_if_missing(result->as_Phi());
        result = un;
      } else {
        break;
      }
    } else if (result->is_ClearArray()) {
      if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), phase)) {
        // Cannot bypass initialization of the instance
        // we are looking for.
        break;
      }
      // Otherwise skip it (the call updated 'result' value).
    } else if (result->Opcode() == Op_SCMemProj) {
      assert(result->in(0)->is_LoadStore(), "sanity");
      const Type *at = phase->type(result->in(0)->in(MemNode::Address));
      if (at != Type::TOP) {
        assert(at->isa_ptr() != NULL, "pointer type required.");
        int idx = C->get_alias_index(at->is_ptr());
        assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node accesses its field");
        break;
      }
      result = result->in(0)->in(MemNode::Memory);
    }
  }
  if (result->is_Phi()) {
    PhiNode *mphi = result->as_Phi();
    assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
    const TypePtr *t = mphi->adr_type();
    if (C->get_alias_index(t) != alias_idx) {
      // Create a new Phi with the specified alias index type.
      result = split_memory_phi(mphi, alias_idx, orig_phis, phase);
    } else if (!is_instance) {
      // Push all non-instance Phis on the orig_phis worklist to update inputs
      // during Phase 4 if needed.
      orig_phis.append_if_missing(mphi);
    }
  }
  // The result is either a MemNode, PhiNode, or InitializeNode.
  return result;
}

//
//  Convert the types of unescaped objects to instance types where possible,
//  propagate the new type information through the graph, and update memory
//  edges and MergeMem inputs to reflect the new type.
//
//  We start with allocations (and calls which may be allocations) on alloc_worklist.
//  The processing is done in 4 phases:
//
//  Phase 1:  Process possible allocations from alloc_worklist.  Create instance
//            types for the CheckCastPP for allocations where possible.
//            Propagate the new types through users as follows:
//               casts and Phi:  push users on alloc_worklist
//               AddP:  cast Base and Address inputs to the instance type
//                      push any AddP users on alloc_worklist and push any memnode
//                      users onto memnode_worklist.
//  Phase 2:  Process MemNodes from memnode_worklist. Compute the new address
//            type and search the Memory chain for a store with the appropriate
//            address type.  If a Phi is found, create a new version with
//            the appropriate memory slices from each of the Phi inputs.
//            For stores, process the users as follows:
//               MemNode:  push on memnode_worklist
//               MergeMem: push on mergemem_worklist
//  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory
//            slice, moving the first node encountered of each instance type to
//            the input corresponding to its alias index.
//  Phase 4:  Update the inputs of non-instance memory Phis and the Memory
//            input of memnodes.
//
// In the following example, the CheckCastPP nodes are the cast of allocation
// results and the allocation of node 29 is unescaped and eligible to be an
// instance type.
//
// We start with:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"
//    30  AddP  _ 29 29 10  Foo+12  alias_index=4
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35  40  30   ... alias_index=4
//    60  StoreP  45  50  20   ... alias_index=4
//    70  LoadP    _  60  30   ... alias_index=4
//    80  Phi     75  50  60   Memory alias_index=4
//    90  LoadP    _  80  30   ... alias_index=4
//   100  LoadP    _  80  20   ... alias_index=4
//
//
// Phase 1 creates an instance type for node 29, assigning it an instance id of 24
// and creating a new alias index for node 30.  This gives:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"  iid=24
//    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35  40  30   ... alias_index=6
//    60  StoreP  45  50  20   ... alias_index=4
//    70  LoadP    _  60  30   ... alias_index=6
//    80  Phi     75  50  60   Memory alias_index=4
//    90  LoadP    _  80  30   ... alias_index=6
//   100  LoadP    _  80  20   ... alias_index=4
//
// In phase 2, new memory inputs are computed for the loads and stores,
// and a new version of the phi is created.  In phase 4, the inputs to
// node 80 are updated and then the memory nodes are updated with the
// values computed in phase 2.  This results in:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"  iid=24
//    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
//
//    40  StoreP  25  7   20   ... alias_index=4
//    50  StoreP  35  7   30   ... alias_index=6
//    60  StoreP  45  40  20   ... alias_index=4
//    70  LoadP    _  50  30   ... alias_index=6
//    80  Phi     75  40  60   Memory alias_index=4
//   120  Phi     75  50  50   Memory alias_index=6
//    90  LoadP    _ 120  30   ... alias_index=6
//   100  LoadP    _  80  20   ... alias_index=4
//
void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist) {
  GrowableArray<Node *>  memnode_worklist;
  GrowableArray<PhiNode *>  orig_phis;

  PhaseGVN  *igvn = _igvn;
  uint new_index_start = (uint) _compile->num_alias_types();
  Arena* arena = Thread::current()->resource_area();
  VectorSet visited(arena);
  VectorSet ptset(arena);

  //  Phase 1:  Process possible allocations from alloc_worklist.
  //  Create instance types for the CheckCastPP for allocations where possible.
  //
  // (Note: don't forget to change the order of the second AddP node on
  //  the alloc_worklist if the order of the worklist processing is changed,
  //  see the comment in find_second_addp().)
  //
  while (alloc_worklist.length() != 0) {
    Node *n = alloc_worklist.pop();
    uint ni = n->_idx;
    const TypeOopPtr* tinst = NULL;
    if (n->is_Call()) {
      CallNode *alloc = n->as_Call();
      // Copy escape information to the call node.
      PointsToNode* ptn = ptnode_adr(alloc->_idx);
      PointsToNode::EscapeState es = escape_state(alloc);
      // We have an allocation or call which returns a Java object;
      // see if it is unescaped.
      if (es != PointsToNode::NoEscape || !ptn->_scalar_replaceable)
        continue;

      // Find CheckCastPP for the allocate or for the return value of a call.
      n = alloc->result_cast();
      if (n == NULL) {            // No uses except Initialize node
        if (alloc->is_Allocate()) {
          // Set the scalar_replaceable flag for allocation
          // so it could be eliminated if it has no uses.
          alloc->as_Allocate()->_is_scalar_replaceable = true;
        }
        continue;
      }
      if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
        assert(!alloc->is_Allocate(), "allocation should have unique type");
        continue;
      }

      // The inline code for Object.clone() casts the allocation result to
      // java.lang.Object and then to the actual type of the allocated
      // object. Detect this case and use the second cast.
      // Also detect the j.l.reflect.Array.newInstance(jobject, jint) case when
      // the allocation result is cast to java.lang.Object and then
      // to the actual Array type.
      if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
          && (alloc->is_AllocateArray() ||
              igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {
        Node *cast2 = NULL;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node *use = n->fast_out(i);
          if (use->is_CheckCastPP()) {
            cast2 = use;
            break;
          }
        }
        if (cast2 != NULL) {
          n = cast2;
        } else {
          // Non-scalar replaceable if the allocation type is unknown statically
          // (reflection allocation); the object can't be restored during
          // deoptimization without a precise type.
          continue;
        }
      }
      if (alloc->is_Allocate()) {
        // Set the scalar_replaceable flag for allocation
        // so it could be eliminated.
        alloc->as_Allocate()->_is_scalar_replaceable = true;
      }
      set_escape_state(n->_idx, es);
      // In order for an object to be scalar-replaceable, it must be:
      //   - a direct allocation (not a call returning an object)
      //   - non-escaping
      //   - eligible to be a unique type
      //   - not determined to be ineligible by escape analysis
      assert(ptnode_adr(alloc->_idx)->_node != NULL &&
             ptnode_adr(n->_idx)->_node != NULL, "should be registered");
      set_map(alloc->_idx, n);
      set_map(n->_idx, alloc);
      const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
      if (t == NULL)
        continue;  // not a TypeInstPtr
      tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni);
      igvn->hash_delete(n);
      igvn->set_type(n, tinst);
      n->raise_bottom_type(tinst);
      igvn->hash_insert(n);
      record_for_optimizer(n);
      if (alloc->is_Allocate() && ptn->_scalar_replaceable &&
          (t->isa_instptr() || t->isa_aryptr())) {

        // First, put on the worklist all Field edges from the Connection Graph,
        // which is more accurate than putting immediate users from the Ideal Graph.
        for (uint e = 0; e < ptn->edge_count(); e++) {
          Node *use = ptnode_adr(ptn->edge_target(e))->_node;
          assert(ptn->edge_type(e) == PointsToNode::FieldEdge && use->is_AddP(),
                 "only AddP nodes are Field edges in CG");
          if (use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(), "array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          }
        }

        // An allocation may have an Initialize which has raw stores. Scan
        // the users of the raw allocation result and push AddP users
        // on alloc_worklist.
        Node *raw_result = alloc->proj_out(TypeFunc::Parms);
        assert(raw_result != NULL, "must have an allocation result");
        for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
          Node *use = raw_result->fast_out(i);
          if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, raw_result);
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(), "array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          } else if (use->is_MemBar()) {
            memnode_worklist.append_if_missing(use);
          }
        }
      }
    } else if (n->is_AddP()) {
      ptset.Clear();
      PointsTo(ptset, get_addp_base(n));
      assert(ptset.Size() == 1, "AddP address is unique");
      uint elem = ptset.getelem(); // Allocation node's index
      if (elem == _phantom_object) {
        assert(false, "escaped allocation");
        continue; // Assume the value was set outside this method.
      }
      Node *base = get_map(elem);  // CheckCastPP node
      if (!split_AddP(n, base, igvn)) continue; // wrong type from dead path
      tinst = igvn->type(base)->isa_oopptr();
    } else if (n->is_Phi() ||
               n->is_CheckCastPP() ||
               n->is_EncodeP() ||
               n->is_DecodeN() ||
               (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
      if (visited.test_set(n->_idx)) {
        assert(n->is_Phi(), "loops only through Phi's");
        continue;  // already processed
      }
      ptset.Clear();
      PointsTo(ptset, n);
      if (ptset.Size() == 1) {
        uint elem = ptset.getelem(); // Allocation node's index
        if (elem == _phantom_object) {
          assert(false, "escaped allocation");
          continue; // Assume the value was set outside this method.
        }
        Node *val = get_map(elem);   // CheckCastPP node
        TypeNode *tn = n->as_Type();
        tinst = igvn->type(val)->isa_oopptr();
        assert(tinst != NULL && tinst->is_known_instance() &&
               (uint)tinst->instance_id() == elem, "instance type expected.");

        const Type *tn_type = igvn->type(tn);
        const TypeOopPtr *tn_t;
        if (tn_type->isa_narrowoop()) {
          tn_t = tn_type->make_ptr()->isa_oopptr();
        } else {
          tn_t = tn_type->isa_oopptr();
        }

        if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
          if (tn_type->isa_narrowoop()) {
            tn_type = tinst->make_narrowoop();
          } else {
            tn_type = tinst;
          }
          igvn->hash_delete(tn);
          igvn->set_type(tn, tn_type);
          tn->set_type(tn_type);
          igvn->hash_insert(tn);
          record_for_optimizer(n);
        } else {
          assert(tn_type == TypePtr::NULL_PTR ||
                 (tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass())),
                 "unexpected type");
          continue; // Skip dead path with different type
        }
      }
    } else {
      debug_only(n->dump();)
      assert(false, "EA: unexpected node");
      continue;
    }
    // Push allocation's users on the appropriate worklist.
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Mem() && use->in(MemNode::Address) == n) {
        // Load/store to instance's field
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
        Node* addp2 = find_second_addp(use, n);
        if (addp2 != NULL) {
          alloc_worklist.append_if_missing(addp2);
        }
        alloc_worklist.append_if_missing(use);
      } else if (use->is_Phi() ||
                 use->is_CheckCastPP() ||
                 use->is_EncodeP() ||
                 use->is_DecodeN() ||
                 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
        alloc_worklist.append_if_missing(use);
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->is_SafePoint()) {
        // Look for MergeMem nodes for calls which reference unique allocation
        // (through CheckCastPP nodes) even for debug info.
        Node* m = use->in(TypeFunc::Memory);
        if (m->is_MergeMem()) {
          assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
        }
      } else {
        uint op = use->Opcode();
        if (!(op == Op_CmpP || op == Op_Conv2B ||
              op == Op_CastP2X || op == Op_StoreCM ||
              op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
              op == Op_StrEquals || op == Op_StrIndexOf)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing allocation reference path");
        }
#endif
      }
    }
  }

  // New alias types were created in split_AddP().
  uint new_index_end = (uint) _compile->num_alias_types();

  //  Phase 2:  Process MemNodes from memnode_worklist. Compute the new address
  //            type and new values for the Memory inputs (the Memory inputs
  //            are not actually updated until phase 4).
  if (memnode_worklist.length() == 0)
    return;  // nothing to do
  1251   while (memnode_worklist.length() != 0) {
  1252     Node *n = memnode_worklist.pop();
  1253     if (visited.test_set(n->_idx))
  1254       continue;
  1255     if (n->is_Phi() || n->is_ClearArray()) {
  1256       // we don't need to do anything, but the users must be pushed
  1257     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
  1258       // we don't need to do anything, but the users must be pushed
  1259       n = n->as_MemBar()->proj_out(TypeFunc::Memory);
  1260       if (n == NULL)
  1261         continue;
  1262     } else {
  1263       assert(n->is_Mem(), "memory node required.");
  1264       Node *addr = n->in(MemNode::Address);
  1265       const Type *addr_t = igvn->type(addr);
  1266       if (addr_t == Type::TOP)
  1267         continue;
  1268       assert (addr_t->isa_ptr() != NULL, "pointer type required.");
  1269       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
  1270       assert ((uint)alias_idx < new_index_end, "wrong alias index");
  1271       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis, igvn);
  1272       if (_compile->failing()) {
  1273         return;
  1275       if (mem != n->in(MemNode::Memory)) {
  1276         // We delay the memory edge update since we need old one in
  1277         // MergeMem code below when instances memory slices are separated.
  1278         debug_only(Node* pn = ptnode_adr(n->_idx)->_node;)
  1279         assert(pn == NULL || pn == n, "wrong node");
  1280         set_map(n->_idx, mem);
  1281         ptnode_adr(n->_idx)->_node = n;
  1282       }
  1283       if (n->is_Load()) {
  1284         continue;  // don't push users
  1285       } else if (n->is_LoadStore()) {
  1286         // get the memory projection
  1287         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1288           Node *use = n->fast_out(i);
  1289           if (use->Opcode() == Op_SCMemProj) {
  1290             n = use;
  1291             break;
  1292           }
  1293         }
  1294         assert(n->Opcode() == Op_SCMemProj, "memory projection required");
  1295       }
  1296     }
  1297     // push user on appropriate worklist
  1298     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1299       Node *use = n->fast_out(i);
  1300       if (use->is_Phi() || use->is_ClearArray()) {
  1301         memnode_worklist.append_if_missing(use);
  1302       } else if(use->is_Mem() && use->in(MemNode::Memory) == n) {
  1303         if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
  1304           continue;
  1305         memnode_worklist.append_if_missing(use);
  1306       } else if (use->is_MemBar()) {
  1307         memnode_worklist.append_if_missing(use);
  1308 #ifdef ASSERT
  1309       } else if(use->is_Mem()) {
  1310         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
  1311       } else if (use->is_MergeMem()) {
  1312         assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
  1313       } else {
  1314         uint op = use->Opcode();
  1315         if (!(op == Op_StoreCM ||
  1316               (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
  1317                strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
  1318               op == Op_AryEq || op == Op_StrComp ||
  1319               op == Op_StrEquals || op == Op_StrIndexOf)) {
  1320           n->dump();
  1321           use->dump();
  1322           assert(false, "EA: missing memory path");
  1323         }
  1324 #endif
  1325       }
  1326     }
  1327   }
  1328
  1329   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
  1330   //            Walk each memory slice moving the first node encountered of each
  1331   //            instance type to the input corresponding to its alias index.
  1332   uint length = _mergemem_worklist.length();
  1333   for( uint next = 0; next < length; ++next ) {
  1334     MergeMemNode* nmm = _mergemem_worklist.at(next);
  1335     assert(!visited.test_set(nmm->_idx), "should not be visited before");
  1336     // Note: we don't want to use MergeMemStream here because we only want to
  1337     // scan inputs which exist at the start, not ones we add during processing.
  1338     // Note 2: MergeMem may already contain instance memory slices added
  1339     // during the find_inst_mem() call when memory nodes were processed above.
  1340     igvn->hash_delete(nmm);
  1341     uint nslices = nmm->req();
  1342     for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
  1343       Node* mem = nmm->in(i);
  1344       Node* cur = NULL;
  1345       if (mem == NULL || mem->is_top())
  1346         continue;
  1347       // First, update mergemem by moving memory nodes to corresponding slices
  1348       // if their type became more precise since this mergemem was created.
  1349       while (mem->is_Mem()) {
  1350         const Type *at = igvn->type(mem->in(MemNode::Address));
  1351         if (at != Type::TOP) {
  1352           assert (at->isa_ptr() != NULL, "pointer type required.");
  1353           uint idx = (uint)_compile->get_alias_index(at->is_ptr());
  1354           if (idx == i) {
  1355             if (cur == NULL)
  1356               cur = mem;
  1357           } else {
  1358             if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
  1359               nmm->set_memory_at(idx, mem);
  1360             }
  1361           }
  1362         }
  1363         mem = mem->in(MemNode::Memory);
  1364       }
  1365       nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
  1366       // Find any instance of the current type if we haven't already
  1367       // encountered a memory slice of the instance along the memory chain.
  1368       for (uint ni = new_index_start; ni < new_index_end; ni++) {
  1369         if((uint)_compile->get_general_index(ni) == i) {
  1370           Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
  1371           if (nmm->is_empty_memory(m)) {
  1372             Node* result = find_inst_mem(mem, ni, orig_phis, igvn);
  1373             if (_compile->failing()) {
  1374               return;
  1375             }
  1376             nmm->set_memory_at(ni, result);
  1377           }
  1378         }
  1379       }
  1380     }
  1381     // Find the rest of the instance values
  1382     for (uint ni = new_index_start; ni < new_index_end; ni++) {
  1383       const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
  1384       Node* result = step_through_mergemem(nmm, ni, tinst);
  1385       if (result == nmm->base_memory()) {
  1386         // Didn't find instance memory, search through general slice recursively.
  1387         result = nmm->memory_at(_compile->get_general_index(ni));
  1388         result = find_inst_mem(result, ni, orig_phis, igvn);
  1389         if (_compile->failing()) {
  1390           return;
  1391         }
  1392         nmm->set_memory_at(ni, result);
  1393       }
  1394     }
  1395     igvn->hash_insert(nmm);
  1396     record_for_optimizer(nmm);
  1397   }
  1398
  1399   //  Phase 4:  Update the inputs of non-instance memory Phis and
  1400   //            the Memory input of memnodes
  1401   // First update the inputs of any non-instance Phi's from
  1402   // which we split out an instance Phi.  Note we don't have
  1403   // to recursively process Phis encountered on the input memory
  1404   // chains, as is done in split_memory_phi(), since they will
  1405   // also be processed here.
  1406   for (int j = 0; j < orig_phis.length(); j++) {
  1407     PhiNode *phi = orig_phis.at(j);
  1408     int alias_idx = _compile->get_alias_index(phi->adr_type());
  1409     igvn->hash_delete(phi);
  1410     for (uint i = 1; i < phi->req(); i++) {
  1411       Node *mem = phi->in(i);
  1412       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis, igvn);
  1413       if (_compile->failing()) {
  1414         return;
  1415       }
  1416       if (mem != new_mem) {
  1417         phi->set_req(i, new_mem);
  1418       }
  1419     }
  1420     igvn->hash_insert(phi);
  1421     record_for_optimizer(phi);
  1422   }
  1423
  1424   // Update the memory inputs of MemNodes with the value we computed
  1425   // in Phase 2 and move stores' memory users to corresponding memory slices.
  1426 #ifdef ASSERT
  1427   visited.Clear();
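         // Note: remember each rewired node's old memory input together with its
         // out-count so the assert below can verify no memory user was lost.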
  1428   Node_Stack old_mems(arena, _compile->unique() >> 2);
  1429 #endif
  1430   for (uint i = 0; i < nodes_size(); i++) {
  1431     Node *nmem = get_map(i);
  1432     if (nmem != NULL) {
  1433       Node *n = ptnode_adr(i)->_node;
  1434       assert(n != NULL, "sanity");
  1435       if (n->is_Mem()) {
  1436 #ifdef ASSERT
  1437         Node* old_mem = n->in(MemNode::Memory);
  1438         if (!visited.test_set(old_mem->_idx)) {
  1439           old_mems.push(old_mem, old_mem->outcnt());
  1440         }
  1441 #endif
  1442         assert(n->in(MemNode::Memory) != nmem, "sanity");
  1443         if (!n->is_Load()) {
  1444           // Move memory users of a store first.
  1445           move_inst_mem(n, orig_phis, igvn);
  1447         // Now update memory input
  1448         igvn->hash_delete(n);
  1449         n->set_req(MemNode::Memory, nmem);
  1450         igvn->hash_insert(n);
  1451         record_for_optimizer(n);
  1452       } else {
  1453         assert(n->is_Allocate() || n->is_CheckCastPP() ||
  1454                n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
  1455       }
  1456     }
  1457   }
  1458 #ifdef ASSERT
  1459   // Verify that memory was split correctly
  1460   while (old_mems.is_nonempty()) {
  1461     Node* old_mem = old_mems.node();
  1462     uint  old_cnt = old_mems.index();
  1463     old_mems.pop();
  1464     assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
  1465   }
  1466 #endif
  1467 }
  1468
  1469 bool ConnectionGraph::has_candidates(Compile *C) {
  1470   // EA brings benefits only when the code has allocations and/or locks which
  1471   // are represented by ideal Macro nodes.
  1472   int cnt = C->macro_count();
  1473   for( int i=0; i < cnt; i++ ) {
  1474     Node *n = C->macro_node(i);
  1475     if ( n->is_Allocate() )
  1476       return true;
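           // A lock on a parameter or constant cannot be eliminated since such
           // objects escape by definition, so it does not make EA worthwhile.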
  1477     if( n->is_Lock() ) {
  1478       Node* obj = n->as_Lock()->obj_node()->uncast();
  1479       if( !(obj->is_Parm() || obj->is_Con()) )
  1480         return true;
  1481     }
  1482   }
  1483   return false;
  1484 }
  1485
  1486 void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  1487   // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
  1488   // to create space for them in ConnectionGraph::_nodes[].
  1489   Node* oop_null = igvn->zerocon(T_OBJECT);
  1490   Node* noop_null = igvn->zerocon(T_NARROWOOP);
  1492   ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
  1493   // Perform escape analysis
  1494   if (congraph->compute_escape()) {
  1495     // There are non-escaping objects.
  1496     C->set_congraph(congraph);
  1497   }
  1498
  1499   // Cleanup.
  1500   if (oop_null->outcnt() == 0)
  1501     igvn->hash_delete(oop_null);
  1502   if (noop_null->outcnt() == 0)
  1503     igvn->hash_delete(noop_null);
  1504 }
  1505
  1506 bool ConnectionGraph::compute_escape() {
  1507   Compile* C = _compile;
  1509   // 1. Populate Connection Graph (CG) with Ideal nodes.
  1511   Unique_Node_List worklist_init;
  1512   worklist_init.map(C->unique(), NULL);  // preallocate space
  1514   // Initialize worklist
  1515   if (C->root() != NULL) {
  1516     worklist_init.push(C->root());
  1517   }
  1518
  1519   GrowableArray<int> cg_worklist;
  1520   PhaseGVN* igvn = _igvn;
  1521   bool has_allocations = false;
  1523   // Push all useful nodes onto CG list and set their type.
  1524   for( uint next = 0; next < worklist_init.size(); ++next ) {
  1525     Node* n = worklist_init.at(next);
  1526     record_for_escape_analysis(n, igvn);
  1527     // Only the results of allocations and Java static calls are checked
  1528     // for an escape status. See process_call_result() below.
  1529     if (n->is_Allocate() || (n->is_CallStaticJava() &&
  1530         ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject)) {
  1531       has_allocations = true;
  1532     }
  1533     if(n->is_AddP()) {
  1534       // Collect address nodes which directly reference an allocation.
  1535       // Use them during stage 3 below to build initial connection graph
  1536       // field edges. Other field edges could be added after StoreP/LoadP
  1537       // nodes are processed during stage 4 below.
  1538       Node* base = get_addp_base(n);
  1539       if(base->is_Proj() && base->in(0)->is_Allocate()) {
  1540         cg_worklist.append(n->_idx);
  1541       }
  1542     } else if (n->is_MergeMem()) {
  1543       // Collect all MergeMem nodes to add memory slices for
  1544       // scalar replaceable objects in split_unique_types().
  1545       _mergemem_worklist.append(n->as_MergeMem());
  1546     }
  1547     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1548       Node* m = n->fast_out(i);   // Get user
  1549       worklist_init.push(m);
  1550     }
  1551   }
  1552
  1553   if (!has_allocations) {
  1554     _collecting = false;
  1555     return false; // Nothing to do.
  1556   }
  1557
  1558   // 2. First pass to create simple CG edges (doesn't require to walk CG).
  1559   uint delayed_size = _delayed_worklist.size();
  1560   for( uint next = 0; next < delayed_size; ++next ) {
  1561     Node* n = _delayed_worklist.at(next);
  1562     build_connection_graph(n, igvn);
  1563   }
  1564
  1565   // 3. Pass to create fields edges (Allocate -F-> AddP).
  1566   uint cg_length = cg_worklist.length();
  1567   for( uint next = 0; next < cg_length; ++next ) {
  1568     int ni = cg_worklist.at(next);
  1569     build_connection_graph(ptnode_adr(ni)->_node, igvn);
  1570   }
  1571
  1572   cg_worklist.clear();
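         // Note: _phantom_object represents an unknown, globally escaping object;
         // keep it on the worklist so its state is propagated in the steps below.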
  1573   cg_worklist.append(_phantom_object);
  1575   // 4. Build Connection Graph edges for nodes which require
  1576   //    walking the connection graph.
  1577   for (uint ni = 0; ni < nodes_size(); ni++) {
  1578     PointsToNode* ptn = ptnode_adr(ni);
  1579     Node *n = ptn->_node;
  1580     if (n != NULL) { // Call, AddP, LoadP, StoreP
  1581       build_connection_graph(n, igvn);
  1582       if (ptn->node_type() != PointsToNode::UnknownType)
  1583         cg_worklist.append(n->_idx); // Collect CG nodes
  1584     }
  1585   }
  1586
  1587   Arena* arena = Thread::current()->resource_area();
  1588   VectorSet ptset(arena);
  1589   GrowableArray<uint>  deferred_edges;
  1590   VectorSet visited(arena);
  1592   // 5. Remove deferred edges from the graph and adjust
  1593   //    escape state of nonescaping objects.
  1594   cg_length = cg_worklist.length();
  1595   for( uint next = 0; next < cg_length; ++next ) {
  1596     int ni = cg_worklist.at(next);
  1597     PointsToNode* ptn = ptnode_adr(ni);
  1598     PointsToNode::NodeType nt = ptn->node_type();
  1599     if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
  1600       remove_deferred(ni, &deferred_edges, &visited);
  1601       Node *n = ptn->_node;
  1602       if (n->is_AddP()) {
  1603         // Search for objects which are not scalar replaceable
  1604         // and adjust their escape state.
  1605         verify_escape_state(ni, ptset, igvn);
  1606       }
  1607     }
  1608   }
  1609
  1610   // 6. Propagate escape states.
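         // Note: states are propagated from strongest (GlobalEscape) to weakest
         // (NoEscape); the '<' checks ensure a node's state is never downgraded.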
  1611   GrowableArray<int>  worklist;
  1612   bool has_non_escaping_obj = false;
  1614   // push all GlobalEscape nodes on the worklist
  1615   for( uint next = 0; next < cg_length; ++next ) {
  1616     int nk = cg_worklist.at(next);
  1617     if (ptnode_adr(nk)->escape_state() == PointsToNode::GlobalEscape)
  1618       worklist.push(nk);
  1619   }
  1620   // mark all nodes reachable from GlobalEscape nodes
  1621   while(worklist.length() > 0) {
  1622     PointsToNode* ptn = ptnode_adr(worklist.pop());
  1623     uint e_cnt = ptn->edge_count();
  1624     for (uint ei = 0; ei < e_cnt; ei++) {
  1625       uint npi = ptn->edge_target(ei);
  1626       PointsToNode *np = ptnode_adr(npi);
  1627       if (np->escape_state() < PointsToNode::GlobalEscape) {
  1628         np->set_escape_state(PointsToNode::GlobalEscape);
  1629         worklist.push(npi);
  1630       }
  1631     }
  1632   }
  1633
  1634   // push all ArgEscape nodes on the worklist
  1635   for( uint next = 0; next < cg_length; ++next ) {
  1636     int nk = cg_worklist.at(next);
  1637     if (ptnode_adr(nk)->escape_state() == PointsToNode::ArgEscape)
  1638       worklist.push(nk);
  1639   }
  1640   // mark all nodes reachable from ArgEscape nodes
  1641   while(worklist.length() > 0) {
  1642     PointsToNode* ptn = ptnode_adr(worklist.pop());
  1643     if (ptn->node_type() == PointsToNode::JavaObject)
  1644       has_non_escaping_obj = true; // Non GlobalEscape
  1645     uint e_cnt = ptn->edge_count();
  1646     for (uint ei = 0; ei < e_cnt; ei++) {
  1647       uint npi = ptn->edge_target(ei);
  1648       PointsToNode *np = ptnode_adr(npi);
  1649       if (np->escape_state() < PointsToNode::ArgEscape) {
  1650         np->set_escape_state(PointsToNode::ArgEscape);
  1651         worklist.push(npi);
  1652       }
  1653     }
  1654   }
  1655
  1656   GrowableArray<Node*> alloc_worklist;
  1658   // push all NoEscape nodes on the worklist
  1659   for( uint next = 0; next < cg_length; ++next ) {
  1660     int nk = cg_worklist.at(next);
  1661     if (ptnode_adr(nk)->escape_state() == PointsToNode::NoEscape)
  1662       worklist.push(nk);
  1663   }
  1664   // mark all nodes reachable from NoEscape nodes
  1665   while(worklist.length() > 0) {
  1666     PointsToNode* ptn = ptnode_adr(worklist.pop());
  1667     if (ptn->node_type() == PointsToNode::JavaObject)
  1668       has_non_escaping_obj = true; // Non GlobalEscape
  1669     Node* n = ptn->_node;
  1670     if (n->is_Allocate() && ptn->_scalar_replaceable ) {
  1671       // Push scalar replaceable allocations on alloc_worklist
  1672       // for processing in split_unique_types().
  1673       alloc_worklist.append(n);
  1674     }
  1675     uint e_cnt = ptn->edge_count();
  1676     for (uint ei = 0; ei < e_cnt; ei++) {
  1677       uint npi = ptn->edge_target(ei);
  1678       PointsToNode *np = ptnode_adr(npi);
  1679       if (np->escape_state() < PointsToNode::NoEscape) {
  1680         np->set_escape_state(PointsToNode::NoEscape);
  1681         worklist.push(npi);
  1682       }
  1683     }
  1684   }
  1685
  1686   _collecting = false;
  1687   assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build");
  1689 #ifndef PRODUCT
  1690   if (PrintEscapeAnalysis) {
  1691     dump(); // Dump ConnectionGraph
  1692   }
  1693 #endif
  1695   bool has_scalar_replaceable_candidates = alloc_worklist.length() > 0;
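         // Note: AliasLevel >= 3 means unique-instance alias classes are
         // available, which split_unique_types() depends on.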
  1696   if ( has_scalar_replaceable_candidates &&
  1697        C->AliasLevel() >= 3 && EliminateAllocations ) {
  1699     // Now use the escape information to create unique types for
  1700     // scalar replaceable objects.
  1701     split_unique_types(alloc_worklist);
  1703     if (C->failing())  return false;
  1705     C->print_method("After Escape Analysis", 2);
  1707 #ifdef ASSERT
  1708   } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
  1709     tty->print("=== No allocations eliminated for ");
  1710     C->method()->print_short_name();
  1711     if(!EliminateAllocations) {
  1712       tty->print(" since EliminateAllocations is off ===");
  1713     } else if(!has_scalar_replaceable_candidates) {
  1714       tty->print(" since there are no scalar replaceable candidates ===");
  1715     } else if(C->AliasLevel() < 3) {
  1716       tty->print(" since AliasLevel < 3 ===");
  1717     }
  1718     tty->cr();
  1719 #endif
  1720   }
  1721   return has_non_escaping_obj;
  1722 }
  1723
  1724 // Search for objects which are not scalar replaceable.
  1725 void ConnectionGraph::verify_escape_state(int nidx, VectorSet& ptset, PhaseTransform* phase) {
  1726   PointsToNode* ptn = ptnode_adr(nidx);
  1727   Node* n = ptn->_node;
  1728   assert(n->is_AddP(), "Should be called for AddP nodes only");
  1729   // Search for objects which are not scalar replaceable.
  1730   // Mark their escape state as ArgEscape to propagate the state
  1731   // to referenced objects.
  1732   // Note: currently there is no difference in compiler optimizations
  1733   // for ArgEscape objects and NoEscape objects which are not
  1734   // scalar replaceable.
  1736   Compile* C = _compile;
  1738   int offset = ptn->offset();
  1739   Node* base = get_addp_base(n);
  1740   ptset.Clear();
  1741   PointsTo(ptset, base);
  1742   int ptset_size = ptset.Size();
  1744   // Check if an oop field's initializing value is recorded and add
  1745   // a corresponding NULL field's value if it is not recorded.
  1746   // Connection Graph does not record a default initialization by NULL
  1747   // captured by an Initialize node.
  1748   //
  1749   // Note: it will disable scalar replacement in some cases:
  1750   //
  1751   //    Point p[] = new Point[1];
  1752   //    p[0] = new Point(); // Will not be scalar replaced
  1753   //
  1754   // but it will save us from incorrect optimizations in cases like:
  1755   //
  1756   //    Point p[] = new Point[1];
  1757   //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
  1758   //
  1759   // Do a simple control flow analysis to distinguish above cases.
  1760   //
  1761   if (offset != Type::OffsetBot && ptset_size == 1) {
  1762     uint elem = ptset.getelem(); // Allocation node's index
  1763     // It does not matter if it is not an Allocation node since
  1764     // only non-escaping allocations are scalar replaced.
  1765     if (ptnode_adr(elem)->_node->is_Allocate() &&
  1766         ptnode_adr(elem)->escape_state() == PointsToNode::NoEscape) {
  1767       AllocateNode* alloc = ptnode_adr(elem)->_node->as_Allocate();
  1768       InitializeNode* ini = alloc->initialization();
  1770       // Check only oop fields.
  1771       const Type* adr_type = n->as_AddP()->bottom_type();
  1772       BasicType basic_field_type = T_INT;
  1773       if (adr_type->isa_instptr()) {
  1774         ciField* field = C->alias_type(adr_type->isa_instptr())->field();
  1775         if (field != NULL) {
  1776           basic_field_type = field->layout_type();
  1777         } else {
  1778           // Ignore non field load (for example, klass load)
  1779         }
  1780       } else if (adr_type->isa_aryptr()) {
  1781         const Type* elemtype = adr_type->isa_aryptr()->elem();
  1782         basic_field_type = elemtype->array_element_basic_type();
  1783       } else {
  1784         // Raw pointers are used for initializing stores so skip it.
  1785         assert(adr_type->isa_rawptr() && base->is_Proj() &&
  1786                (base->in(0) == alloc),"unexpected pointer type");
  1787       }
  1788       if (basic_field_type == T_OBJECT ||
  1789           basic_field_type == T_NARROWOOP ||
  1790           basic_field_type == T_ARRAY) {
  1791         Node* value = NULL;
  1792         if (ini != NULL) {
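               // Note: with compressed oops an oop field is stored as a narrow
               // oop, so look up the captured store with the matching size.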
  1793           BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
  1794           Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
  1795           if (store != NULL && store->is_Store()) {
  1796             value = store->in(MemNode::ValueIn);
  1797           } else if (ptn->edge_count() > 0) { // Are there oop stores?
  1798             // Check for a store which follows allocation without branches.
  1799             // For example, a volatile field store is not collected
  1800             // by the Initialize node. TODO: it would be nice to use idom() here.
  1801             for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1802               store = n->fast_out(i);
  1803               if (store->is_Store() && store->in(0) != NULL) {
  1804                 Node* ctrl = store->in(0);
  1805                 while(!(ctrl == ini || ctrl == alloc || ctrl == NULL ||
  1806                         ctrl == C->root() || ctrl == C->top() || ctrl->is_Region() ||
  1807                         ctrl->is_IfTrue() || ctrl->is_IfFalse())) {
  1808                    ctrl = ctrl->in(0);
  1809                 }
  1810                 if (ctrl == ini || ctrl == alloc) {
  1811                   value = store->in(MemNode::ValueIn);
  1812                   break;
  1813                 }
  1814               }
  1815             }
  1816           }
  1817         }
  1818         if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
  1819           // A field's initializing value was not recorded. Add NULL.
  1820           uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
  1821           add_pointsto_edge(nidx, null_idx);
  1822         }
  1823       }
  1824     }
  1825   }
  1826
  1827   // An object is not scalar replaceable if the field which may point
  1828   // to it has unknown offset (unknown element of an array of objects).
  1829   //
  1830   if (offset == Type::OffsetBot) {
  1831     uint e_cnt = ptn->edge_count();
  1832     for (uint ei = 0; ei < e_cnt; ei++) {
  1833       uint npi = ptn->edge_target(ei);
  1834       set_escape_state(npi, PointsToNode::ArgEscape);
  1835       ptnode_adr(npi)->_scalar_replaceable = false;
  1836     }
  1837   }
  1838
  1839   // Currently an object is not scalar replaceable if a LoadStore node
  1840   // access its field since the field value is unknown after it.
  1841   //
  1842   bool has_LoadStore = false;
  1843   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1844     Node *use = n->fast_out(i);
  1845     if (use->is_LoadStore()) {
  1846       has_LoadStore = true;
  1847       break;
  1848     }
  1849   }
  1850   // An object is not scalar replaceable if the address points
  1851   // to unknown field (unknown element for arrays, offset is OffsetBot).
  1852   //
  1853   // Or the address may point to more than one object. This may produce
  1854   // the false positive result (set scalar_replaceable to false)
  1855   // since the flow-insensitive escape analysis can't separate
  1856   // the case when stores overwrite the field's value from the case
  1857   // when stores happened on different control branches.
  1858   //
  1859   if (ptset_size > 1 || (ptset_size != 0 &&
  1860       (has_LoadStore || offset == Type::OffsetBot))) {
  1861     for( VectorSetI j(&ptset); j.test(); ++j ) {
  1862       set_escape_state(j.elem, PointsToNode::ArgEscape);
  1863       ptnode_adr(j.elem)->_scalar_replaceable = false;
  1864     }
  1865   }
  1866 }
  1867
  1868 void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {
  1870     switch (call->Opcode()) {
  1871 #ifdef ASSERT
  1872     case Op_Allocate:
  1873     case Op_AllocateArray:
  1874     case Op_Lock:
  1875     case Op_Unlock:
  1876       assert(false, "should be done already");
  1877       break;
  1878 #endif
  1879     case Op_CallLeaf:
  1880     case Op_CallLeafNoFP:
  1881     {
  1882       // Stub calls, objects do not escape but they are not scalar replaceable.
  1883       // Adjust escape state for outgoing arguments.
  1884       const TypeTuple * d = call->tf()->domain();
  1885       VectorSet ptset(Thread::current()->resource_area());
  1886       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  1887         const Type* at = d->field_at(i);
  1888         Node *arg = call->in(i)->uncast();
  1889         const Type *aat = phase->type(arg);
  1890         if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr() &&
  1891             ptnode_adr(arg->_idx)->escape_state() < PointsToNode::ArgEscape) {
  1893           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
  1894                  aat->isa_ptr() != NULL, "expecting a Ptr");
  1895 #ifdef ASSERT
  1896           if (!(call->Opcode() == Op_CallLeafNoFP &&
  1897                 call->as_CallLeaf()->_name != NULL &&
  1898                 (strstr(call->as_CallLeaf()->_name, "arraycopy")  != 0) ||
  1899                 call->as_CallLeaf()->_name != NULL &&
  1900                 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre")  == 0 ||
  1901                  strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ))
  1902           ) {
  1903             call->dump();
  1904             assert(false, "EA: unexpected CallLeaf");
  1905           }
  1906 #endif
  1907           set_escape_state(arg->_idx, PointsToNode::ArgEscape);
  1908           if (arg->is_AddP()) {
  1909             //
  1910             // The inline_native_clone() case when the arraycopy stub is called
  1911             // after the allocation before Initialize and CheckCastPP nodes.
  1912             //
  1913             // Set AddP's base (Allocate) as not scalar replaceable since
  1914             // pointer to the base (with offset) is passed as argument.
  1915             //
  1916             arg = get_addp_base(arg);
  1917           }
  1918           ptset.Clear();
  1919           PointsTo(ptset, arg);
  1920           for( VectorSetI j(&ptset); j.test(); ++j ) {
  1921             uint pt = j.elem;
  1922             set_escape_state(pt, PointsToNode::ArgEscape);
  1923           }
  1924         }
  1925       }
  1926       break;
  1927     }
  1928
  1929     case Op_CallStaticJava:
  1930     // For a static call, we know exactly what method is being called.
  1931     // Use bytecode estimator to record the call's escape effects
  1932     {
  1933       ciMethod *meth = call->as_CallJava()->method();
  1934       BCEscapeAnalyzer *call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
  1935       // fall-through if not a Java method or no analyzer information
  1936       if (call_analyzer != NULL) {
  1937         const TypeTuple * d = call->tf()->domain();
  1938         VectorSet ptset(Thread::current()->resource_area());
  1939         bool copy_dependencies = false;
  1940         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  1941           const Type* at = d->field_at(i);
  1942           int k = i - TypeFunc::Parms;
  1943           Node *arg = call->in(i)->uncast();
  1945           if (at->isa_oopptr() != NULL &&
  1946               ptnode_adr(arg->_idx)->escape_state() < PointsToNode::GlobalEscape) {
  1948             bool global_escapes = false;
  1949             bool fields_escapes = false;
  1950             if (!call_analyzer->is_arg_stack(k)) {
  1951               // The argument global escapes, mark everything it could point to
  1952               set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
  1953               global_escapes = true;
  1954             } else {
  1955               if (!call_analyzer->is_arg_local(k)) {
  1956                 // The argument itself doesn't escape, but any fields might
  1957                 fields_escapes = true;
  1958               }
  1959               set_escape_state(arg->_idx, PointsToNode::ArgEscape);
  1960               copy_dependencies = true;
  1961             }
  1962
  1963             ptset.Clear();
  1964             PointsTo(ptset, arg);
  1965             for( VectorSetI j(&ptset); j.test(); ++j ) {
  1966               uint pt = j.elem;
  1967               if (global_escapes) {
  1968                 //The argument global escapes, mark everything it could point to
  1969                 set_escape_state(pt, PointsToNode::GlobalEscape);
  1970               } else {
  1971                 if (fields_escapes) {
  1972                   // The argument itself doesn't escape, but any fields might
  1973                   add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
  1974                 }
  1975                 set_escape_state(pt, PointsToNode::ArgEscape);
  1976               }
  1977             }
  1978           }
  1979         }
  1980         if (copy_dependencies)
  1981           call_analyzer->copy_dependencies(_compile->dependencies());
  1982         break;
  1983       }
  1984     }
  1985
  1986     default:
  1987     // Fall-through here if not a Java method or no analyzer information
  1988     // or some other type of call, assume the worst case: all arguments
  1989     // globally escape.
  1990     {
  1991       // adjust escape state for  outgoing arguments
  1992       const TypeTuple * d = call->tf()->domain();
  1993       VectorSet ptset(Thread::current()->resource_area());
  1994       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  1995         const Type* at = d->field_at(i);
  1996         if (at->isa_oopptr() != NULL) {
  1997           Node *arg = call->in(i)->uncast();
  1998           set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
  1999           ptset.Clear();
  2000           PointsTo(ptset, arg);
  2001           for( VectorSetI j(&ptset); j.test(); ++j ) {
  2002             uint pt = j.elem;
  2003             set_escape_state(pt, PointsToNode::GlobalEscape);
  2004           }
  2005         }
  2006       }
  2007     }
  2008   }
  2009 }
  2010 void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *phase) {
  2011   CallNode   *call = resproj->in(0)->as_Call();
  2012   uint    call_idx = call->_idx;
  2013   uint resproj_idx = resproj->_idx;
  2015   switch (call->Opcode()) {
  2016     case Op_Allocate:
  2017     {
  2018       Node *k = call->in(AllocateNode::KlassNode);
  2019       const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
  2020       assert(kt != NULL, "TypeKlassPtr  required.");
  2021       ciKlass* cik = kt->klass();
  2023       PointsToNode::EscapeState es;
  2024       uint edge_to;
  2025       if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
  2026          !cik->is_instance_klass() || // StressReflectiveCode
  2027           cik->as_instance_klass()->has_finalizer()) {
  2028         es = PointsToNode::GlobalEscape;
  2029         edge_to = _phantom_object; // Could not be worse
  2030       } else {
  2031         es = PointsToNode::NoEscape;
  2032         edge_to = call_idx;
  2033       }
  2034       set_escape_state(call_idx, es);
  2035       add_pointsto_edge(resproj_idx, edge_to);
  2036       _processed.set(resproj_idx);
  2037       break;
  2038     }
  2039
  2040     case Op_AllocateArray:
  2041     {
  2042
  2043       Node *k = call->in(AllocateNode::KlassNode);
  2044       const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
  2045       assert(kt != NULL, "TypeKlassPtr  required.");
  2046       ciKlass* cik = kt->klass();
  2048       PointsToNode::EscapeState es;
  2049       uint edge_to;
  2050       if (!cik->is_array_klass()) { // StressReflectiveCode
  2051         es = PointsToNode::GlobalEscape;
  2052         edge_to = _phantom_object;
  2053       } else {
  2054         es = PointsToNode::NoEscape;
  2055         edge_to = call_idx;
  2056         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
  2057         if (length < 0 || length > EliminateAllocationArraySizeLimit) {
  2058           // Not scalar replaceable if the length is not constant or too big.
  2059           ptnode_adr(call_idx)->_scalar_replaceable = false;
  2060         }
  2061       }
  2062       set_escape_state(call_idx, es);
  2063       add_pointsto_edge(resproj_idx, edge_to);
  2064       _processed.set(resproj_idx);
  2065       break;
  2066     }
  2067
  2068     case Op_CallStaticJava:
  2069     // For a static call, we know exactly what method is being called.
  2070     // Use bytecode estimator to record whether the call's return value escapes
  2071     {
  2072       bool done = true;
  2073       const TypeTuple *r = call->tf()->range();
  2074       const Type* ret_type = NULL;
  2076       if (r->cnt() > TypeFunc::Parms)
  2077         ret_type = r->field_at(TypeFunc::Parms);
  2079       // Note:  we use isa_ptr() instead of isa_oopptr()  here because the
  2080       //        _multianewarray functions return a TypeRawPtr.
  2081       if (ret_type == NULL || ret_type->isa_ptr() == NULL) {
  2082         _processed.set(resproj_idx);
  2083         break;  // doesn't return a pointer type
  2084       }
  2085       ciMethod *meth = call->as_CallJava()->method();
  2086       const TypeTuple * d = call->tf()->domain();
  2087       if (meth == NULL) {
  2088         // not a Java method, assume global escape
  2089         set_escape_state(call_idx, PointsToNode::GlobalEscape);
  2090         add_pointsto_edge(resproj_idx, _phantom_object);
  2091       } else {
  2092         BCEscapeAnalyzer *call_analyzer = meth->get_bcea();
  2093         bool copy_dependencies = false;
  2095         if (call_analyzer->is_return_allocated()) {
  2096           // Returns a newly allocated unescaped object, simply
  2097           // update dependency information.
  2098           // Mark it as NoEscape so that objects referenced by
  2099           // its fields will be marked as NoEscape at least.
  2100           set_escape_state(call_idx, PointsToNode::NoEscape);
  2101           add_pointsto_edge(resproj_idx, call_idx);
  2102           copy_dependencies = true;
  2103         } else if (call_analyzer->is_return_local()) {
  2104           // determine whether any arguments are returned
  2105           set_escape_state(call_idx, PointsToNode::NoEscape);
  2106           bool ret_arg = false;
  2107           for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  2108             const Type* at = d->field_at(i);
  2110             if (at->isa_oopptr() != NULL) {
  2111               Node *arg = call->in(i)->uncast();
  2113               if (call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
  2114                 ret_arg = true;
  2115                 PointsToNode *arg_esp = ptnode_adr(arg->_idx);
  2116                 if (arg_esp->node_type() == PointsToNode::UnknownType)
  2117                   done = false;
  2118                 else if (arg_esp->node_type() == PointsToNode::JavaObject)
  2119                   add_pointsto_edge(resproj_idx, arg->_idx);
  2120                 else
  2121                   add_deferred_edge(resproj_idx, arg->_idx);
  2122                 arg_esp->_hidden_alias = true;
  2123               }
  2124             }
  2125           }
  2126           if (done && !ret_arg) {
  2127             // Returns unknown object.
  2128             set_escape_state(call_idx, PointsToNode::GlobalEscape);
  2129             add_pointsto_edge(resproj_idx, _phantom_object);
  2130           }
  2131           copy_dependencies = true;
  2132         } else {
  2133           set_escape_state(call_idx, PointsToNode::GlobalEscape);
  2134           add_pointsto_edge(resproj_idx, _phantom_object);
  2135           for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  2136             const Type* at = d->field_at(i);
  2137             if (at->isa_oopptr() != NULL) {
  2138               Node *arg = call->in(i)->uncast();
  2139               PointsToNode *arg_esp = ptnode_adr(arg->_idx);
  2140               arg_esp->_hidden_alias = true;
  2141             }
  2142           }
  2143         }
  2144         if (copy_dependencies)
  2145           call_analyzer->copy_dependencies(_compile->dependencies());
  2146       }
  2147       if (done)
  2148         _processed.set(resproj_idx);
  2149       break;
  2150     }
  2151
  2152     default:
  2153     // Some other type of call, assume the worst case that the
  2154     // returned value, if any, globally escapes.
  2155     {
  2156       const TypeTuple *r = call->tf()->range();
  2157       if (r->cnt() > TypeFunc::Parms) {
  2158         const Type* ret_type = r->field_at(TypeFunc::Parms);
  2160         // Note:  we use isa_ptr() instead of isa_oopptr()  here because the
  2161         //        _multianewarray functions return a TypeRawPtr.
  2162         if (ret_type->isa_ptr() != NULL) {
  2163           set_escape_state(call_idx, PointsToNode::GlobalEscape);
  2164           add_pointsto_edge(resproj_idx, _phantom_object);
  2165         }
  2166       }
  2167       _processed.set(resproj_idx);
  2168     }
  2169   }
  2170 }
  2171
  2172 // Populate Connection Graph with Ideal nodes and create simple
  2173 // connection graph edges (do not need to check the node_type of inputs
  2174 // or to call PointsTo() to walk the connection graph).
  2175 void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase) {
  2176   if (_processed.test(n->_idx))
  2177     return; // No need to redefine node's state.
  2179   if (n->is_Call()) {
  2180     // Arguments to allocation and locking don't escape.
  2181     if (n->is_Allocate()) {
  2182       add_node(n, PointsToNode::JavaObject, PointsToNode::UnknownEscape, true);
  2183       record_for_optimizer(n);
  2184     } else if (n->is_Lock() || n->is_Unlock()) {
  2185       // Put Lock and Unlock nodes on IGVN worklist to process them during
  2186       // the first IGVN optimization when escape information is still available.
  2187       record_for_optimizer(n);
  2188       _processed.set(n->_idx);
  2189     } else {
  2190       // Don't mark as processed since call's arguments have to be processed.
  2191       PointsToNode::NodeType nt = PointsToNode::UnknownType;
  2192       PointsToNode::EscapeState es = PointsToNode::UnknownEscape;
  2194       // Check if a call returns an object.
  2195       const TypeTuple *r = n->as_Call()->tf()->range();
  2196       if (r->cnt() > TypeFunc::Parms &&
  2197           r->field_at(TypeFunc::Parms)->isa_ptr() &&
  2198           n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
  2199         nt = PointsToNode::JavaObject;
  2200         if (!n->is_CallStaticJava()) {
  2201           // Since the called method is statically unknown assume
  2202           // the worst case that the returned value globally escapes.
  2203           es = PointsToNode::GlobalEscape;
  2204         }
  2205       }
  2206       add_node(n, nt, es, false);
  2207     }
  2208     return;
  2209   }
  2210
  2211   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  2212   // ThreadLocal has RawPtr type.
  2213   switch (n->Opcode()) {
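           // Note: an AddP computes a field address; it becomes a Field node and
           // its field edges are built later from its base's points-to set.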
  2214     case Op_AddP:
  2215     {
  2216       add_node(n, PointsToNode::Field, PointsToNode::UnknownEscape, false);
  2217       break;
  2218     }
  2219     case Op_CastX2P:
  2220     { // "Unsafe" memory access.
  2221       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
  2222       break;
  2223     }
  2224     case Op_CastPP:
  2225     case Op_CheckCastPP:
  2226     case Op_EncodeP:
  2227     case Op_DecodeN:
  2228     {
  2229       add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  2230       int ti = n->in(1)->_idx;
  2231       PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
  2232       if (nt == PointsToNode::UnknownType) {
  2233         _delayed_worklist.push(n); // Process it later.
  2234         break;
  2235       } else if (nt == PointsToNode::JavaObject) {
  2236         add_pointsto_edge(n->_idx, ti);
  2237       } else {
  2238         add_deferred_edge(n->_idx, ti);
  2239       }
  2240       _processed.set(n->_idx);
  2241       break;
  2242     }
  2243     case Op_ConP:
  2244     {
  2245       // assume all pointer constants globally escape except for null
  2246       PointsToNode::EscapeState es;
  2247       if (phase->type(n) == TypePtr::NULL_PTR)
  2248         es = PointsToNode::NoEscape;
  2249       else
  2250         es = PointsToNode::GlobalEscape;
  2252       add_node(n, PointsToNode::JavaObject, es, true);
  2253       break;
  2254     }
  2255     case Op_ConN:
  2256     {
  2257       // assume all narrow oop constants globally escape except for null
  2258       PointsToNode::EscapeState es;
  2259       if (phase->type(n) == TypeNarrowOop::NULL_PTR)
  2260         es = PointsToNode::NoEscape;
  2261       else
  2262         es = PointsToNode::GlobalEscape;
  2264       add_node(n, PointsToNode::JavaObject, es, true);
  2265       break;
  2266     }
  2267     case Op_CreateEx:
  2268     {
  2269       // assume that all exception objects globally escape
  2270       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
  2271       break;
  2272     }
  2273     case Op_LoadKlass:
  2274     case Op_LoadNKlass:
  2275     {
  2276       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
  2277       break;
  2278     }
  2279     case Op_LoadP:
  2280     case Op_LoadN:
  2281     {
  2282       const Type *t = phase->type(n);
  2283       if (t->make_ptr() == NULL) {
  2284         _processed.set(n->_idx);
  2285         return;
  2286       }
  2287       add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  2288       break;
  2289     }
  2290     case Op_Parm:
  2291     {
  2292       _processed.set(n->_idx); // No need to redefine its state.
  2293       uint con = n->as_Proj()->_con;
  2294       if (con < TypeFunc::Parms)
  2295         return;
  2296       const Type *t = n->in(0)->as_Start()->_domain->field_at(con);
  2297       if (t->isa_ptr() == NULL)
  2298         return;
  2299       // We have to assume all input parameters globally escape
  2300       // (Note: passing 'false' since _processed is already set).
  2301       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, false);
  2302       break;
  2303     }
  2304     case Op_Phi:
  2305     {
  2306       const Type *t = n->as_Phi()->type();
  2307       if (t->make_ptr() == NULL) {
  2308         // nothing to do if not an oop or narrow oop
  2309         _processed.set(n->_idx);
  2310         return;
  2311       }
  2312       add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  2313       uint i;
  2314       for (i = 1; i < n->req() ; i++) {
  2315         Node* in = n->in(i);
  2316         if (in == NULL)
  2317           continue;  // ignore NULL
  2318         in = in->uncast();
  2319         if (in->is_top() || in == n)
  2320           continue;  // ignore top or inputs which go back to this node
  2321         int ti = in->_idx;
  2322         PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
  2323         if (nt == PointsToNode::UnknownType) {
  2324           break;
  2325         } else if (nt == PointsToNode::JavaObject) {
  2326           add_pointsto_edge(n->_idx, ti);
  2327         } else {
  2328           add_deferred_edge(n->_idx, ti);
  2329         }
  2330       }
  2331       if (i >= n->req())
  2332         _processed.set(n->_idx);
  2333       else
  2334         _delayed_worklist.push(n);
  2335       break;
  2336     }
  2337     case Op_Proj:
  2338     {
  2339       // we are only interested in the oop result projection from a call
  2340       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
  2341         const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
  2342         assert(r->cnt() > TypeFunc::Parms, "sanity");
  2343         if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
  2344           add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  2345           int ti = n->in(0)->_idx;
  2346           // The call may not be registered yet (since not all its inputs are registered)
  2347           // if this is the projection from backbranch edge of Phi.
  2348           if (ptnode_adr(ti)->node_type() != PointsToNode::UnknownType) {
  2349             process_call_result(n->as_Proj(), phase);
  2350           }
  2351           if (!_processed.test(n->_idx)) {
  2352             // The call's result may need to be processed later if the call
  2353             // returns its argument and the argument is not processed yet.
  2354             _delayed_worklist.push(n);
  2355           }
  2356           break;
  2357         }
  2358       }
  2359       _processed.set(n->_idx);
  2360       break;
  2361     }
  2362     case Op_Return:
  2363     {
  2364       if( n->req() > TypeFunc::Parms &&
  2365           phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
  2366         // Treat Return value as LocalVar with GlobalEscape escape state.
  2367         add_node(n, PointsToNode::LocalVar, PointsToNode::GlobalEscape, false);
  2368         int ti = n->in(TypeFunc::Parms)->_idx;
  2369         PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
  2370         if (nt == PointsToNode::UnknownType) {
  2371           _delayed_worklist.push(n); // Process it later.
  2372           break;
  2373         } else if (nt == PointsToNode::JavaObject) {
  2374           add_pointsto_edge(n->_idx, ti);
  2375         } else {
  2376           add_deferred_edge(n->_idx, ti);
  2377         }
  2378       }
  2379       _processed.set(n->_idx);
  2380       break;
  2381     }
  2382     case Op_StoreP:
  2383     case Op_StoreN:
  2384     {
  2385       const Type *adr_type = phase->type(n->in(MemNode::Address));
  2386       adr_type = adr_type->make_ptr();
  2387       if (adr_type->isa_oopptr()) {
  2388         add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
  2389       } else {
  2390         Node* adr = n->in(MemNode::Address);
  2391         if (adr->is_AddP() && phase->type(adr) == TypeRawPtr::NOTNULL &&
  2392             adr->in(AddPNode::Address)->is_Proj() &&
  2393             adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
  2394           add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
  2395           // We are computing a raw address for a store captured
  2396           // by an Initialize; compute an appropriate address type.
  2397           int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
  2398           assert(offs != Type::OffsetBot, "offset must be a constant");
  2399         } else {
  2400           _processed.set(n->_idx);
  2401           return;
  2402         }
  2403       }
  2404       break;
  2405     }
  2406     case Op_StorePConditional:
  2407     case Op_CompareAndSwapP:
  2408     case Op_CompareAndSwapN:
  2409     {
  2410       const Type *adr_type = phase->type(n->in(MemNode::Address));
  2411       adr_type = adr_type->make_ptr();
  2412       if (adr_type->isa_oopptr()) {
  2413         add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
  2414       } else {
  2415         _processed.set(n->_idx);
  2416         return;
  2417       }
  2418       break;
  2419     }
  2420     case Op_AryEq:
  2421     case Op_StrComp:
  2422     case Op_StrEquals:
  2423     case Op_StrIndexOf:
  2424     {
  2425       // char[] arrays passed to string intrinsics are not scalar replaceable.
  2426       add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
  2427       break;
  2428     }
  2429     case Op_ThreadLocal:
  2430     {
  2431       add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
  2432       break;
  2433     }
  2434     default:
  2435       ;
  2436       // nothing to do
  2437   }
  2438   return;
  2439 }
  2440
  2441 void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
  2442   uint n_idx = n->_idx;
  2443   assert(ptnode_adr(n_idx)->_node != NULL, "node should be registered");
  2445   // Don't set processed bit for AddP, LoadP, StoreP since
  2446   // they may need more than one pass to process.
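         // Note: their points-to sets may still grow while the graph is built,
         // so these nodes can be revisited until all their inputs are known.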
  2447   if (_processed.test(n_idx))
  2448     return; // No need to redefine node's state.
  2450   if (n->is_Call()) {
  2451     CallNode *call = n->as_Call();
  2452     process_call_arguments(call, phase);
  2453     _processed.set(n_idx);
  2454     return;
  2455   }
  2456
  2457   switch (n->Opcode()) {
  2458     case Op_AddP:
  2459     {
  2460       Node *base = get_addp_base(n);
  2461       // Create a field edge to this node from everything base could point to.
  2462       VectorSet ptset(Thread::current()->resource_area());
  2463       PointsTo(ptset, base);
  2464       for( VectorSetI i(&ptset); i.test(); ++i ) {
  2465         uint pt = i.elem;
  2466         add_field_edge(pt, n_idx, address_offset(n, phase));
  2467       }
  2468       break;
  2469     }
  2470     case Op_CastX2P:
  2471     {
  2472       assert(false, "Op_CastX2P");
  2473       break;
  2474     }
  2475     case Op_CastPP:
  2476     case Op_CheckCastPP:
  2477     case Op_EncodeP:
  2478     case Op_DecodeN:
  2479     {
  2480       int ti = n->in(1)->_idx;
  2481       assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "all nodes should be registered");
  2482       if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
  2483         add_pointsto_edge(n_idx, ti);
  2484       } else {
  2485         add_deferred_edge(n_idx, ti);
  2486       }
  2487       _processed.set(n_idx);
  2488       break;
  2489     }
  2490     case Op_ConP:
  2491     {
  2492       assert(false, "Op_ConP");
  2493       break;
  2494     }
  2495     case Op_ConN:
  2496     {
  2497       assert(false, "Op_ConN");
  2498       break;
  2499     }
  2500     case Op_CreateEx:
  2501     {
  2502       assert(false, "Op_CreateEx");
  2503       break;
  2504     }
  2505     case Op_LoadKlass:
  2506     case Op_LoadNKlass:
  2507     {
  2508       assert(false, "Op_LoadKlass");
  2509       break;
  2510     }
  2511     case Op_LoadP:
  2512     case Op_LoadN:
  2513     {
  2514       const Type *t = phase->type(n);
  2515 #ifdef ASSERT
  2516       if (t->make_ptr() == NULL)
  2517         assert(false, "Op_LoadP");
  2518 #endif
  2520       Node* adr = n->in(MemNode::Address)->uncast();
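             // Note: when the address is not an AddP the load is directly from
             // the object reference itself, so the base is the address.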
  2521       Node* adr_base;
  2522       if (adr->is_AddP()) {
  2523         adr_base = get_addp_base(adr);
  2524       } else {
  2525         adr_base = adr;
  2526       }
  2527
  2528       // For everything "adr_base" could point to, create a deferred edge from
  2529       // this node to each field with the same offset.
  2530       VectorSet ptset(Thread::current()->resource_area());
  2531       PointsTo(ptset, adr_base);
  2532       int offset = address_offset(adr, phase);
  2533       for( VectorSetI i(&ptset); i.test(); ++i ) {
  2534         uint pt = i.elem;
  2535         add_deferred_edge_to_fields(n_idx, pt, offset);
  2536       }
  2537       break;
  2538     }
  2539     case Op_Parm:
  2540     {
  2541       assert(false, "Op_Parm");
  2542       break;
  2543     }
  2544     case Op_Phi:
  2545     {
  2546 #ifdef ASSERT
  2547       const Type *t = n->as_Phi()->type();
  2548       if (t->make_ptr() == NULL)
  2549         assert(false, "Op_Phi");
  2550 #endif
  2551       for (uint i = 1; i < n->req() ; i++) {
  2552         Node* in = n->in(i);
  2553         if (in == NULL)
  2554           continue;  // ignore NULL
  2555         in = in->uncast();
  2556         if (in->is_top() || in == n)
  2557           continue;  // ignore top or inputs which go back to this node
  2558         int ti = in->_idx;
  2559         PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
  2560         assert(nt != PointsToNode::UnknownType, "all nodes should be known");
  2561         if (nt == PointsToNode::JavaObject) {
  2562           add_pointsto_edge(n_idx, ti);
  2563         } else {
  2564           add_deferred_edge(n_idx, ti);
  2565         }
  2566       }
  2567       _processed.set(n_idx);
  2568       break;
  2569     }
  2570     case Op_Proj:
  2571     {
  2572       // we are only interested in the oop result projection from a call
  2573       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
  2574         assert(ptnode_adr(n->in(0)->_idx)->node_type() != PointsToNode::UnknownType,
  2575                "all nodes should be registered");
  2576         const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
  2577         assert(r->cnt() > TypeFunc::Parms, "sanity");
  2578         if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
  2579           process_call_result(n->as_Proj(), phase);
  2580           assert(_processed.test(n_idx), "all call results should be processed");
  2581           break;
  2582         }
  2583       }
  2584       assert(false, "Op_Proj");
  2585       break;
  2586     }
  2587     case Op_Return:
  2588     {
  2589 #ifdef ASSERT
  2590       if( n->req() <= TypeFunc::Parms ||
  2591           !phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
  2592         assert(false, "Op_Return");
  2593       }
  2594 #endif
  2595       int ti = n->in(TypeFunc::Parms)->_idx;
  2596       assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "node should be registered");
  2597       if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
  2598         add_pointsto_edge(n_idx, ti);
  2599       } else {
  2600         add_deferred_edge(n_idx, ti);
  2602       _processed.set(n_idx);
  2603       break;
  2604     }
  2605     case Op_StoreP:
  2606     case Op_StoreN:
  2607     case Op_StorePConditional:
  2608     case Op_CompareAndSwapP:
  2609     case Op_CompareAndSwapN:
  2610     {
  2611       Node *adr = n->in(MemNode::Address);
  2612       const Type *adr_type = phase->type(adr)->make_ptr();
  2613 #ifdef ASSERT
  2614       if (!adr_type->isa_oopptr())
  2615         assert(phase->type(adr) == TypeRawPtr::NOTNULL, "Op_StoreP");
  2616 #endif
  2618       assert(adr->is_AddP(), "expecting an AddP");
  2619       Node *adr_base = get_addp_base(adr);
  2620       Node *val = n->in(MemNode::ValueIn)->uncast();
  2621       // For everything "adr_base" could point to, create a deferred edge
  2622       // to "val" from each field with the same offset.
  2623       VectorSet ptset(Thread::current()->resource_area());
  2624       PointsTo(ptset, adr_base);
  2625       for( VectorSetI i(&ptset); i.test(); ++i ) {
  2626         uint pt = i.elem;
  2627         add_edge_from_fields(pt, val->_idx, address_offset(adr, phase));
  2628       }
  2629       break;
  2630     }
  2631     case Op_AryEq:
  2632     case Op_StrComp:
  2633     case Op_StrEquals:
  2634     case Op_StrIndexOf:
  2635     {
  2636       // char[] arrays passed to string intrinsics do not escape but
  2637       // they are not scalar replaceable. Adjust escape state for them.
  2638       // Start from in(2) edge since in(1) is memory edge.
  2639       for (uint i = 2; i < n->req(); i++) {
  2640         Node* adr = n->in(i)->uncast();
  2641         const Type *at = phase->type(adr);
  2642         if (!adr->is_top() && at->isa_ptr()) {
  2643           assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
  2644                  at->isa_ptr() != NULL, "expecting a Ptr");
  2645           if (adr->is_AddP()) {
  2646             adr = get_addp_base(adr);
  2647           }
  2648           // Mark as ArgEscape everything "adr" could point to.
  2649           set_escape_state(adr->_idx, PointsToNode::ArgEscape);
  2650         }
  2651       }
  2652       _processed.set(n_idx);
  2653       break;
  2654     }
  2655     case Op_ThreadLocal:
  2656     {
  2657       assert(false, "Op_ThreadLocal");
  2658       break;
  2659     }
  2660     default:
  2661       // This method should be called only for EA specific nodes.
  2662       ShouldNotReachHere();
  2663   }
  2664 }
  2665
  2666 #ifndef PRODUCT
  2667 void ConnectionGraph::dump() {
  2668   bool first = true;
  2670   uint size = nodes_size();
  2671   for (uint ni = 0; ni < size; ni++) {
  2672     PointsToNode *ptn = ptnode_adr(ni);
  2673     PointsToNode::NodeType ptn_type = ptn->node_type();
  2675     if (ptn_type != PointsToNode::JavaObject || ptn->_node == NULL)
  2676       continue;
  2677     PointsToNode::EscapeState es = escape_state(ptn->_node);
  2678     if (ptn->_node->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
  2679       if (first) {
  2680         tty->cr();
  2681         tty->print("======== Connection graph for ");
  2682         _compile->method()->print_short_name();
  2683         tty->cr();
  2684         first = false;
  2685       }
  2686       tty->print("%6d ", ni);
  2687       ptn->dump();
  2688       // Print all locals which reference this allocation
  2689       for (uint li = ni; li < size; li++) {
  2690         PointsToNode *ptn_loc = ptnode_adr(li);
  2691         PointsToNode::NodeType ptn_loc_type = ptn_loc->node_type();
  2692         if ( ptn_loc_type == PointsToNode::LocalVar && ptn_loc->_node != NULL &&
  2693              ptn_loc->edge_count() == 1 && ptn_loc->edge_target(0) == ni ) {
  2694           ptnode_adr(li)->dump(false);
  2695         }
  2696       }
  2697       if (Verbose) {
  2698         // Print all fields which reference this allocation
  2699         for (uint i = 0; i < ptn->edge_count(); i++) {
  2700           uint ei = ptn->edge_target(i);
  2701           ptnode_adr(ei)->dump(false);
  2702         }
  2703       }
  2704       tty->cr();
  2705     }
  2706   }
  2707 }
  2708 #endif
