src/share/vm/opto/escape.cpp

author      dcubed
date        Mon, 01 Feb 2010 17:35:05 -0700
changeset   1648:6deeaebad47a
parent      1571:4b84186a8248
child       1894:c52275c698d1
permissions -rw-r--r--

6902182: 4/4 Starting with jdwp agent should not incur performance penalty
Summary: Rename can_post_exceptions support to can_post_on_exceptions. Add support for should_post_on_exceptions flag to permit per JavaThread optimizations.
Reviewed-by: never, kvn, dcubed
Contributed-by: tom.deneau@amd.com

/*
 * Copyright 2005-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_escape.cpp.incl"

void PointsToNode::add_edge(uint targIdx, PointsToNode::EdgeType et) {
  uint v = (targIdx << EdgeShift) + ((uint) et);
  if (_edges == NULL) {
    Arena *a = Compile::current()->comp_arena();
    _edges = new(a) GrowableArray<uint>(a, INITIAL_EDGE_COUNT, 0, 0);
  }
  _edges->append_if_missing(v);
}

void PointsToNode::remove_edge(uint targIdx, PointsToNode::EdgeType et) {
  uint v = (targIdx << EdgeShift) + ((uint) et);

  _edges->remove(v);
}
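
// A minimal sketch of the edge encoding used above, assuming the
// EdgeShift == 2 / EdgeMask == 3 constants declared in escape.hpp:
// the target node index and the edge type share a single uint.
//
//   uint v = (targIdx << EdgeShift) + (uint)et;
//   // e.g. targIdx == 5, et == PointsToEdge:  v == (5 << 2) + 1 == 21
//   // edge_target(i) then recovers  v >> EdgeShift == 5
//   // edge_type(i)   then recovers  v &  EdgeMask  == PointsToEdge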

#ifndef PRODUCT
static const char *node_type_names[] = {
  "UnknownType",
  "JavaObject",
  "LocalVar",
  "Field"
};

static const char *esc_names[] = {
  "UnknownEscape",
  "NoEscape",
  "ArgEscape",
  "GlobalEscape"
};

static const char *edge_type_suffix[] = {
 "?", // UnknownEdge
 "P", // PointsToEdge
 "D", // DeferredEdge
 "F"  // FieldEdge
};

void PointsToNode::dump(bool print_state) const {
  NodeType nt = node_type();
  tty->print("%s ", node_type_names[(int) nt]);
  if (print_state) {
    EscapeState es = escape_state();
    tty->print("%s %s ", esc_names[(int) es], _scalar_replaceable ? "":"NSR");
  }
  tty->print("[[");
  for (uint i = 0; i < edge_count(); i++) {
    tty->print(" %d%s", edge_target(i), edge_type_suffix[(int) edge_type(i)]);
  }
  tty->print("]]  ");
  if (_node == NULL)
    tty->print_cr("<null>");
  else
    _node->dump();
}
#endif

ConnectionGraph::ConnectionGraph(Compile * C) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), PointsToNode()),
  _processed(C->comp_arena()),
  _collecting(true),
  _compile(C),
  _node_map(C->comp_arena()) {

  _phantom_object = C->top()->_idx;
  add_node(C->top(), PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);

  // Add ConP(#NULL) and ConN(#NULL) nodes.
  PhaseGVN* igvn = C->initial_gvn();
  Node* oop_null = igvn->zerocon(T_OBJECT);
  _oop_null = oop_null->_idx;
  assert(_oop_null < C->unique(), "should be created already");
  add_node(oop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);

  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    _noop_null = noop_null->_idx;
    assert(_noop_null < C->unique(), "should be created already");
    add_node(noop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);
  }
}

void ConnectionGraph::add_pointsto_edge(uint from_i, uint to_i) {
  PointsToNode *f = ptnode_adr(from_i);
  PointsToNode *t = ptnode_adr(to_i);

  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
  assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of PointsTo edge");
  assert(t->node_type() == PointsToNode::JavaObject, "invalid destination of PointsTo edge");
  f->add_edge(to_i, PointsToNode::PointsToEdge);
}

void ConnectionGraph::add_deferred_edge(uint from_i, uint to_i) {
  PointsToNode *f = ptnode_adr(from_i);
  PointsToNode *t = ptnode_adr(to_i);

  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
  assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of Deferred edge");
  assert(t->node_type() == PointsToNode::LocalVar || t->node_type() == PointsToNode::Field, "invalid destination of Deferred edge");
  // Don't add a self-referential edge; one can occur during the removal
  // of deferred edges.
  if (from_i != to_i)
    f->add_edge(to_i, PointsToNode::DeferredEdge);
}
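
// Illustrative only: for a hypothetical snippet
//
//   Foo a = new Foo();   // allocation, a JavaObject node A
//   Foo b = a;           // copy between locals
//
// the graph builder adds a PointsToEdge from LocalVar a to A and a
// DeferredEdge from LocalVar b to a; deferred edges stand for assignments
// whose points-to sets are resolved later by remove_deferred().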

int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
  const Type *adr_type = phase->type(adr);
  if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
      adr->in(AddPNode::Address)->is_Proj() &&
      adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
    // We are computing a raw address for a store captured by an Initialize;
    // compute an appropriate address type. AddP cases #3 and #5 (see below).
    int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
    assert(offs != Type::OffsetBot ||
           adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
           "offset must be a constant or it is initialization of array");
    return offs;
  }
  const TypePtr *t_ptr = adr_type->isa_ptr();
  assert(t_ptr != NULL, "must be a pointer type");
  return t_ptr->offset();
}

void ConnectionGraph::add_field_edge(uint from_i, uint to_i, int offset) {
  PointsToNode *f = ptnode_adr(from_i);
  PointsToNode *t = ptnode_adr(to_i);

  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
  assert(f->node_type() == PointsToNode::JavaObject, "invalid source of Field edge");
  assert(t->node_type() == PointsToNode::Field, "invalid destination of Field edge");
  assert (t->offset() == -1 || t->offset() == offset, "conflicting field offsets");
  t->set_offset(offset);

  f->add_edge(to_i, PointsToNode::FieldEdge);
}
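
// EscapeState values are ordered
//   UnknownEscape < NoEscape < ArgEscape < GlobalEscape,
// so keeping the maximum below is a conservative join: raising a node
// from NoEscape to ArgEscape sticks, while a later attempt to lower it
// back to NoEscape is ignored.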

void ConnectionGraph::set_escape_state(uint ni, PointsToNode::EscapeState es) {
  PointsToNode *npt = ptnode_adr(ni);
  PointsToNode::EscapeState old_es = npt->escape_state();
  if (es > old_es)
    npt->set_escape_state(es);
}

void ConnectionGraph::add_node(Node *n, PointsToNode::NodeType nt,
                               PointsToNode::EscapeState es, bool done) {
  PointsToNode* ptadr = ptnode_adr(n->_idx);
  ptadr->_node = n;
  ptadr->set_node_type(nt);

  // inline set_escape_state(idx, es);
  PointsToNode::EscapeState old_es = ptadr->escape_state();
  if (es > old_es)
    ptadr->set_escape_state(es);

  if (done)
    _processed.set(n->_idx);
}

PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n, PhaseTransform *phase) {
  uint idx = n->_idx;
  PointsToNode::EscapeState es;

  // If we are still collecting or there were no non-escaping allocations
  // we don't know the answer yet.
  if (_collecting)
    return PointsToNode::UnknownEscape;

  // If the node was created after the escape computation, return
  // UnknownEscape.
  if (idx >= nodes_size())
    return PointsToNode::UnknownEscape;

  es = ptnode_adr(idx)->escape_state();

  // If we have already computed a value, return it.
  if (es != PointsToNode::UnknownEscape &&
      ptnode_adr(idx)->node_type() == PointsToNode::JavaObject)
    return es;

  // PointsTo() calls n->uncast() which can return a new ideal node.
  if (n->uncast()->_idx >= nodes_size())
    return PointsToNode::UnknownEscape;

  // Compute the max escape state of anything this node could point to.
  VectorSet ptset(Thread::current()->resource_area());
  PointsTo(ptset, n, phase);
  for(VectorSetI i(&ptset); i.test() && es != PointsToNode::GlobalEscape; ++i) {
    uint pt = i.elem;
    PointsToNode::EscapeState pes = ptnode_adr(pt)->escape_state();
    if (pes > es)
      es = pes;
  }
  // Cache the computed escape state.
  assert(es != PointsToNode::UnknownEscape, "should have computed an escape state");
  ptnode_adr(idx)->set_escape_state(es);
  return es;
}

void ConnectionGraph::PointsTo(VectorSet &ptset, Node * n, PhaseTransform *phase) {
  VectorSet visited(Thread::current()->resource_area());
  GrowableArray<uint>  worklist;

#ifdef ASSERT
  Node *orig_n = n;
#endif

  n = n->uncast();
  PointsToNode* npt = ptnode_adr(n->_idx);

  // If we have a JavaObject, return just that object.
  if (npt->node_type() == PointsToNode::JavaObject) {
    ptset.set(n->_idx);
    return;
  }
#ifdef ASSERT
  if (npt->_node == NULL) {
    if (orig_n != n)
      orig_n->dump();
    n->dump();
    assert(npt->_node != NULL, "unregistered node");
  }
#endif
  worklist.push(n->_idx);
  while(worklist.length() > 0) {
    int ni = worklist.pop();
    if (visited.test_set(ni))
      continue;

    PointsToNode* pn = ptnode_adr(ni);
    // Ensure that all inputs of a Phi have been processed.
    assert(!_collecting || !pn->_node->is_Phi() || _processed.test(ni),"");

    int edges_processed = 0;
    uint e_cnt = pn->edge_count();
    for (uint e = 0; e < e_cnt; e++) {
      uint etgt = pn->edge_target(e);
      PointsToNode::EdgeType et = pn->edge_type(e);
      if (et == PointsToNode::PointsToEdge) {
        ptset.set(etgt);
        edges_processed++;
      } else if (et == PointsToNode::DeferredEdge) {
        worklist.push(etgt);
        edges_processed++;
      } else {
        assert(false,"neither PointsToEdge nor DeferredEdge");
      }
    }
    if (edges_processed == 0) {
      // No deferred or pointsto edges found.  Assume the value was set
      // outside this method.  Add the phantom object to the pointsto set.
      ptset.set(_phantom_object);
    }
  }
}
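
// A small worked example of the traversal above, with hypothetical node
// indices: given LocalVar 10 -D-> LocalVar 11 and LocalVar 11 -P-> JavaObject 7,
// PointsTo() on node 10 follows the deferred edge to 11, records the
// points-to edge, and leaves ptset == {7}. A variable with no outgoing
// edges at all yields {_phantom_object} instead.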

void ConnectionGraph::remove_deferred(uint ni, GrowableArray<uint>* deferred_edges, VectorSet* visited) {
  // This method is most expensive during ConnectionGraph construction.
  // Reuse the passed VectorSet and an additional growable array for deferred edges.
  deferred_edges->clear();
  visited->Clear();

  visited->set(ni);
  PointsToNode *ptn = ptnode_adr(ni);

  // Mark current edges as visited and move deferred edges to separate array.
  for (uint i = 0; i < ptn->edge_count(); ) {
    uint t = ptn->edge_target(i);
#ifdef ASSERT
    assert(!visited->test_set(t), "expecting no duplications");
#else
    visited->set(t);
#endif
    if (ptn->edge_type(i) == PointsToNode::DeferredEdge) {
      ptn->remove_edge(t, PointsToNode::DeferredEdge);
      deferred_edges->append(t);
    } else {
      i++;
    }
  }
  for (int next = 0; next < deferred_edges->length(); ++next) {
    uint t = deferred_edges->at(next);
    PointsToNode *ptt = ptnode_adr(t);
    uint e_cnt = ptt->edge_count();
    for (uint e = 0; e < e_cnt; e++) {
      uint etgt = ptt->edge_target(e);
      if (visited->test_set(etgt))
        continue;

      PointsToNode::EdgeType et = ptt->edge_type(e);
      if (et == PointsToNode::PointsToEdge) {
        add_pointsto_edge(ni, etgt);
        if(etgt == _phantom_object) {
          // Special case - field set outside (globally escaping).
          ptn->set_escape_state(PointsToNode::GlobalEscape);
        }
      } else if (et == PointsToNode::DeferredEdge) {
        deferred_edges->append(etgt);
      } else {
        assert(false,"invalid connection graph");
      }
    }
  }
}
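
// For example, with edges a -D-> b and b -P-> O, remove_deferred() on a
// deletes the deferred edge and installs the direct edge a -P-> O; if the
// walk ever reaches _phantom_object, the node is also forced to
// GlobalEscape, matching the special case above.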

// Add an edge to the node given by "to_i" from any field of adr_i whose offset
// matches "offset". A deferred edge is added if to_i is a LocalVar, and
// a pointsto edge is added if it is a JavaObject.

void ConnectionGraph::add_edge_from_fields(uint adr_i, uint to_i, int offs) {
  PointsToNode* an = ptnode_adr(adr_i);
  PointsToNode* to = ptnode_adr(to_i);
  bool deferred = (to->node_type() == PointsToNode::LocalVar);

  for (uint fe = 0; fe < an->edge_count(); fe++) {
    assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
    int fi = an->edge_target(fe);
    PointsToNode* pf = ptnode_adr(fi);
    int po = pf->offset();
    if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
      if (deferred)
        add_deferred_edge(fi, to_i);
      else
        add_pointsto_edge(fi, to_i);
    }
  }
}

// Add a deferred edge from the node given by "from_i" to any field of adr_i
// whose offset matches "offset".
void ConnectionGraph::add_deferred_edge_to_fields(uint from_i, uint adr_i, int offs) {
  PointsToNode* an = ptnode_adr(adr_i);
  for (uint fe = 0; fe < an->edge_count(); fe++) {
    assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
    int fi = an->edge_target(fe);
    PointsToNode* pf = ptnode_adr(fi);
    int po = pf->offset();
    if (pf->edge_count() == 0) {
      // We have not seen any stores to this field; assume it was set outside this method.
      add_pointsto_edge(fi, _phantom_object);
    }
    if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
      add_deferred_edge(from_i, fi);
    }
  }
}

// Helper functions

static Node* get_addp_base(Node *addp) {
  assert(addp->is_AddP(), "must be AddP");
  //
  // AddP cases for Base and Address inputs:
  // case #1. Direct object's field reference:
  //     Allocate
  //       |
  //     Proj #5 ( oop result )
  //       |
  //     CheckCastPP (cast to instance type)
  //      | |
  //     AddP  ( base == address )
  //
  // case #2. Indirect object's field reference:
  //      Phi
  //       |
  //     CastPP (cast to instance type)
  //      | |
  //     AddP  ( base == address )
  //
  // case #3. Raw object's field reference for Initialize node:
  //      Allocate
  //        |
  //      Proj #5 ( oop result )
  //  top   |
  //     \  |
  //     AddP  ( base == top )
  //
  // case #4. Array's element reference:
  //   {CheckCastPP | CastPP}
  //     |  | |
  //     |  AddP ( array's element offset )
  //     |  |
  //     AddP ( array's offset )
  //
  // case #5. Raw object's field reference for arraycopy stub call:
  //          The inline_native_clone() case when the arraycopy stub is called
  //          after the allocation before Initialize and CheckCastPP nodes.
  //      Allocate
  //        |
  //      Proj #5 ( oop result )
  //       | |
  //       AddP  ( base == address )
  //
  // case #6. Constant Pool, ThreadLocal, CastX2P or
  //          Raw object's field reference:
  //      {ConP, ThreadLocal, CastX2P, raw Load}
  //  top   |
  //     \  |
  //     AddP  ( base == top )
  //
  // case #7. Klass's field reference.
  //      LoadKlass
  //       | |
  //       AddP  ( base == address )
  //
  // case #8. narrow Klass's field reference.
  //      LoadNKlass
  //       |
  //      DecodeN
  //       | |
  //       AddP  ( base == address )
  //
  Node *base = addp->in(AddPNode::Base)->uncast();
  if (base->is_top()) { // AddP cases #3 and #6.
    base = addp->in(AddPNode::Address)->uncast();
    while (base->is_AddP()) {
      // Case #6 (unsafe access) may have several chained AddP nodes.
      assert(base->in(AddPNode::Base)->is_top(), "expected unsafe access address only");
      base = base->in(AddPNode::Address)->uncast();
    }
    assert(base->Opcode() == Op_ConP || base->Opcode() == Op_ThreadLocal ||
           base->Opcode() == Op_CastX2P || base->is_DecodeN() ||
           (base->is_Mem() && base->bottom_type() == TypeRawPtr::NOTNULL) ||
           (base->is_Proj() && base->in(0)->is_Allocate()), "sanity");
  }
  return base;
}
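
// Illustrative mapping back to source: a field store "p.f = q" on a freshly
// allocated p compiles to AddP case #1, so get_addp_base() returns the
// allocation's CheckCastPP; a raw store captured by an Initialize (case #3)
// has a top Base, and the loop above walks the Address side down to the
// allocation's Proj instead.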

static Node* find_second_addp(Node* addp, Node* n) {
  assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");

  Node* addp2 = addp->raw_out(0);
  if (addp->outcnt() == 1 && addp2->is_AddP() &&
      addp2->in(AddPNode::Base) == n &&
      addp2->in(AddPNode::Address) == addp) {

    assert(addp->in(AddPNode::Base) == n, "expecting the same base");
    //
    // Find the array's offset to push it on the worklist first so that the
    // array's element offset (pushed second) is processed first, avoiding
    // a CastPP for the array's offset.
    // Otherwise the inserted CastPP (LocalVar) would point to what the
    // AddP (Field) points to, which would be wrong since the algorithm
    // expects the CastPP to have the same points-to set as the AddP's
    // base CheckCastPP (LocalVar).
    //
    //    ArrayAllocation
    //     |
    //    CheckCastPP
    //     |
    //    memProj (from ArrayAllocation CheckCastPP)
    //     |  ||
    //     |  ||   Int (element index)
    //     |  ||    |   ConI (log(element size))
    //     |  ||    |   /
    //     |  ||   LShift
    //     |  ||  /
    //     |  AddP (array's element offset)
    //     |  |
    //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
    //     | / /
    //     AddP (array's offset)
    //      |
    //     Load/Store (memory operation on array's element)
    //
    return addp2;
  }
  return NULL;
}

//
// Adjust the type and inputs of an AddP which computes the
// address of a field of an instance
//
bool ConnectionGraph::split_AddP(Node *addp, Node *base,  PhaseGVN  *igvn) {
  const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
  assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
  const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
  if (t == NULL) {
    // We are computing a raw address for a store captured by an Initialize;
    // compute an appropriate address type (cases #3 and #5).
    assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
    assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
    intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
    assert(offs != Type::OffsetBot, "offset must be a constant");
    t = base_t->add_offset(offs)->is_oopptr();
  }
  int inst_id = base_t->instance_id();
  assert(!t->is_known_instance() || t->instance_id() == inst_id,
                             "old type must be non-instance or match new type");

  // The type 't' could be a subclass of 'base_t'.
  // As a result t->offset() could be larger than base_t's size, which would
  // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
  // constructor verifies the correctness of the offset.
  //
  // It can happen on a subclass's branch (from type-profile inlining) which
  // was not eliminated during parsing since the exactness of the allocation
  // type was not propagated to the subclass type check.
  //
  // Or the type 't' could be unrelated to 'base_t' altogether.
  // That can happen when the CHA type differs from the MDO type on a dead path
  // (for example, from an instanceof check) which is not collapsed during parsing.
  //
  // Do nothing for such AddP nodes and don't process their users since
  // this code branch will go away.
  //
  if (!t->is_known_instance() &&
      !base_t->klass()->is_subtype_of(t->klass())) {
     return false; // bail out
  }

  const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
  // Do NOT remove the next line: ensure a new alias index is allocated
  // for the instance type. Note: C++ will not remove it since the call
  // has side effect.
  int alias_idx = _compile->get_alias_index(tinst);
  igvn->set_type(addp, tinst);
  // record the allocation in the node map
  assert(ptnode_adr(addp->_idx)->_node != NULL, "should be registered");
  set_map(addp->_idx, get_map(base->_idx));

  // Set addp's Base and Address to 'base'.
  Node *abase = addp->in(AddPNode::Base);
  Node *adr   = addp->in(AddPNode::Address);
  if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
      adr->in(0)->_idx == (uint)inst_id) {
    // Skip AddP cases #3 and #5.
  } else {
    assert(!abase->is_top(), "sanity"); // AddP case #3
    if (abase != base) {
      igvn->hash_delete(addp);
      addp->set_req(AddPNode::Base, base);
      if (abase == adr) {
        addp->set_req(AddPNode::Address, base);
      } else {
        // AddP case #4 (adr is array's element offset AddP node)
#ifdef ASSERT
        const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
        assert(adr->is_AddP() && atype != NULL &&
               atype->instance_id() == inst_id, "array's element offset should be processed first");
#endif
      }
      igvn->hash_insert(addp);
    }
  }
  // Put on IGVN worklist since at least addp's type was changed above.
  record_for_optimizer(addp);
  return true;
}
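
// Illustrative outcome, in the notation of the worked example before
// split_unique_types() below: an AddP of type Foo+12 (alias_index=4) whose
// base is the unique allocation with instance id 24 is retyped to
// Foo+12 iid=24, and get_alias_index() assigns the new type its own
// memory slice (alias_index=6 in that example).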

//
// Create a new version of orig_phi if necessary. Returns either the newly
// created phi or an existing phi.  Sets new_created to indicate whether a new
// phi was created.  Cache the last newly created phi in the node map.
//
PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn, bool &new_created) {
  Compile *C = _compile;
  new_created = false;
  int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
  // nothing to do if orig_phi is bottom memory or matches alias_idx
  if (phi_alias_idx == alias_idx) {
    return orig_phi;
  }
  // Have we recently created a Phi for this alias index?
  PhiNode *result = get_map_phi(orig_phi->_idx);
  if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
    return result;
  }
  // Previous check may fail when the same wide memory Phi was split into Phis
  // for different memory slices. Search all Phis for this region.
  if (result != NULL) {
    Node* region = orig_phi->in(0);
    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
      Node* phi = region->fast_out(i);
      if (phi->is_Phi() &&
          C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
        assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
        return phi->as_Phi();
      }
    }
  }
  if ((int)C->unique() + 2*NodeLimitFudgeFactor > MaxNodeLimit) {
    if (C->do_escape_analysis() == true && !C->failing()) {
      // Retry compilation without escape analysis.
      // If this is the first failure, the sentinel string will "stick"
      // to the Compile object, and the C2Compiler will see it and retry.
      C->record_failure(C2Compiler::retry_no_escape_analysis());
    }
    return NULL;
  }
  orig_phi_worklist.append_if_missing(orig_phi);
  const TypePtr *atype = C->get_adr_type(alias_idx);
  result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
  C->copy_node_notes_to(result, orig_phi);
  igvn->set_type(result, result->bottom_type());
  record_for_optimizer(result);

  debug_only(Node* pn = ptnode_adr(orig_phi->_idx)->_node;)
  assert(pn == NULL || pn == orig_phi, "wrong node");
  set_map(orig_phi->_idx, result);
  ptnode_adr(orig_phi->_idx)->_node = orig_phi;

  new_created = true;
  return result;
}

//
// Return a new version of Memory Phi "orig_phi" with the inputs having the
// specified alias index.
//
PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn) {

  assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
  Compile *C = _compile;
  bool new_phi_created;
  PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, igvn, new_phi_created);
  if (!new_phi_created) {
    return result;
  }

  GrowableArray<PhiNode *>  phi_list;
  GrowableArray<uint>  cur_input;

  PhiNode *phi = orig_phi;
  uint idx = 1;
  bool finished = false;
  while(!finished) {
    while (idx < phi->req()) {
      Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, igvn);
      if (mem != NULL && mem->is_Phi()) {
        PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, igvn, new_phi_created);
        if (new_phi_created) {
          // Found a Phi for which we created a new split; push the current
          // one on the worklist and begin processing the new one.
          phi_list.push(phi);
          cur_input.push(idx);
          phi = mem->as_Phi();
          result = newphi;
          idx = 1;
          continue;
        } else {
          mem = newphi;
        }
      }
      if (C->failing()) {
        return NULL;
      }
      result->set_req(idx++, mem);
    }
#ifdef ASSERT
    // verify that the new Phi has an input for each input of the original
    assert( phi->req() == result->req(), "must have same number of inputs.");
    assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
#endif
    // Check if all new phi's inputs have specified alias index.
    // Otherwise use old phi.
    for (uint i = 1; i < phi->req(); i++) {
      Node* in = result->in(i);
      assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");
    }
    // we have finished processing a Phi, see if there are any more to do
    finished = (phi_list.length() == 0 );
    if (!finished) {
      phi = phi_list.pop();
      idx = cur_input.pop();
      PhiNode *prev_result = get_map_phi(phi->_idx);
      prev_result->set_req(idx++, result);
      result = prev_result;
    }
  }
  return result;
}

//
// The next methods are derived from methods in MemNode.
//
static Node *step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *tinst) {
  Node *mem = mmem;
  // TypeInstPtr::NOTNULL+any is an OOP with unknown offset - generally
  // means an array I have not precisely typed yet.  Do not do any
  // alias stuff with it any time soon.
  if( tinst->base() != Type::AnyPtr &&
      !(tinst->klass()->is_java_lang_Object() &&
        tinst->offset() == Type::OffsetBot) ) {
    mem = mmem->memory_at(alias_idx);
    // Update input if it is progress over what we have now
  }
  return mem;
}
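
// Sketch of the lookup above: a MergeMem holds one memory input per alias
// slice, so for a precisely typed pointer memory_at(alias_idx) returns that
// slice's current memory state, while for TypeInstPtr::NOTNULL+any (an OOP
// with unknown offset) the wide memory input is kept unchanged.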

//
// Move memory users to their memory slices.
//
void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *igvn) {
  Compile* C = _compile;

  const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
  assert(tp != NULL, "ptr type");
  int alias_idx = C->get_alias_index(tp);
  int general_idx = C->get_general_index(alias_idx);

  // Move users first
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);
    if (use->is_MergeMem()) {
      MergeMemNode* mmem = use->as_MergeMem();
      assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
      if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
        continue; // Nothing to do
      }
      // Replace previous general reference to mem node.
      uint orig_uniq = C->unique();
      Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
      assert(orig_uniq == C->unique(), "no new nodes");
      mmem->set_memory_at(general_idx, m);
      --imax;
      --i;
    } else if (use->is_MemBar()) {
      assert(!use->is_Initialize(), "initializing stores should not be moved");
      if (use->req() > MemBarNode::Precedent &&
          use->in(MemBarNode::Precedent) == n) {
        // Don't move related membars.
        record_for_optimizer(use);
        continue;
      }
      tp = use->as_MemBar()->adr_type()->isa_ptr();
      if ((tp != NULL && C->get_alias_index(tp) == alias_idx) ||
          alias_idx == general_idx) {
        continue; // Nothing to do
      }
      // Move to general memory slice.
      uint orig_uniq = C->unique();
      Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
      assert(orig_uniq == C->unique(), "no new nodes");
      igvn->hash_delete(use);
      imax -= use->replace_edge(n, m);
      igvn->hash_insert(use);
      record_for_optimizer(use);
      --i;
#ifdef ASSERT
    } else if (use->is_Mem()) {
      if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
        // Don't move related cardmark.
        continue;
      }
      // Memory nodes should have new memory input.
      tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
      assert(tp != NULL, "ptr type");
      int idx = C->get_alias_index(tp);
      assert(get_map(use->_idx) != NULL || idx == alias_idx,
             "Following memory nodes should have new memory input or be on the same memory slice");
    } else if (use->is_Phi()) {
      // Phi nodes should be split and moved already.
      tp = use->as_Phi()->adr_type()->isa_ptr();
      assert(tp != NULL, "ptr type");
      int idx = C->get_alias_index(tp);
      assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
    } else {
      use->dump();
      assert(false, "should not be here");
#endif
    }
  }
}

//
// Search memory chain of "mem" to find a MemNode whose address
// is the specified alias index.
//
Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *phase) {
  if (orig_mem == NULL)
    return orig_mem;
  Compile* C = phase->C;
  const TypeOopPtr *tinst = C->get_adr_type(alias_idx)->isa_oopptr();
  bool is_instance = (tinst != NULL) && tinst->is_known_instance();
  Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
  Node *prev = NULL;
  Node *result = orig_mem;
  while (prev != result) {
    prev = result;
    if (result == start_mem)
      break;  // hit one of our sentinels
    if (result->is_Mem()) {
      const Type *at = phase->type(result->in(MemNode::Address));
      if (at != Type::TOP) {
        assert (at->isa_ptr() != NULL, "pointer type required.");
        int idx = C->get_alias_index(at->is_ptr());
        if (idx == alias_idx)
          break;
      }
      result = result->in(MemNode::Memory);
    }
    if (!is_instance)
      continue;  // don't search further for non-instance types
    // skip over a call which does not affect this memory slice
    if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
      Node *proj_in = result->in(0);
      if (proj_in->is_Allocate() && proj_in->_idx == (uint)tinst->instance_id()) {
        break;  // hit one of our sentinels
      } else if (proj_in->is_Call()) {
        CallNode *call = proj_in->as_Call();
        if (!call->may_modify(tinst, phase)) {
          result = call->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_Initialize()) {
        AllocateNode* alloc = proj_in->as_Initialize()->allocation();
        // Stop if this is the initialization for the object instance which
        // contains this memory slice, otherwise skip over it.
        if (alloc == NULL || alloc->_idx != (uint)tinst->instance_id()) {
          result = proj_in->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_MemBar()) {
        result = proj_in->in(TypeFunc::Memory);
      }
    } else if (result->is_MergeMem()) {
      MergeMemNode *mmem = result->as_MergeMem();
      result = step_through_mergemem(mmem, alias_idx, tinst);
      if (result == mmem->base_memory()) {
        // Didn't find instance memory, search through general slice recursively.
        result = mmem->memory_at(C->get_general_index(alias_idx));
        result = find_inst_mem(result, alias_idx, orig_phis, phase);
        if (C->failing()) {
          return NULL;
        }
        mmem->set_memory_at(alias_idx, result);
      }
    } else if (result->is_Phi() &&
               C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
      Node *un = result->as_Phi()->unique_input(phase);
      if (un != NULL) {
        orig_phis.append_if_missing(result->as_Phi());
        result = un;
      } else {
        break;
      }
    } else if (result->is_ClearArray()) {
      if (!ClearArrayNode::step_through(&result, (uint)tinst->instance_id(), phase)) {
        // Can not bypass initialization of the instance
        // we are looking for.
        break;
      }
      // Otherwise skip it (the call updated 'result' value).
    } else if (result->Opcode() == Op_SCMemProj) {
      assert(result->in(0)->is_LoadStore(), "sanity");
      const Type *at = phase->type(result->in(0)->in(MemNode::Address));
      if (at != Type::TOP) {
        assert (at->isa_ptr() != NULL, "pointer type required.");
        int idx = C->get_alias_index(at->is_ptr());
        assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node accesses its field");
        break;
      }
      result = result->in(0)->in(MemNode::Memory);
    }
  }
  if (result->is_Phi()) {
    PhiNode *mphi = result->as_Phi();
    assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
    const TypePtr *t = mphi->adr_type();
    if (C->get_alias_index(t) != alias_idx) {
      // Create a new Phi with the specified alias index type.
      result = split_memory_phi(mphi, alias_idx, orig_phis, phase);
    } else if (!is_instance) {
      // Push all non-instance Phis on the orig_phis worklist to update inputs
      // during Phase 4 if needed.
      orig_phis.append_if_missing(mphi);
    }
  }
  // the result is either a MemNode, a PhiNode or an InitializeNode.
  return result;
}
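
// Illustrative walk, in the notation of the example below: asked for
// alias_index=6 starting at StoreP 60 (alias_index=4), the loop steps to
// 60's memory input StoreP 50 (alias_index=6) and stops there; starting
// at memory Phi 80 (alias_index=4) instead, it falls through to
// split_memory_phi(), which creates the new Phi 120.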

//
//  Convert the types of unescaped objects to instance types where possible,
//  propagate the new type information through the graph, and update memory
//  edges and MergeMem inputs to reflect the new type.
//
//  We start with allocations (and calls which may be allocations) on alloc_worklist.
//  The processing is done in 4 phases:
//
//  Phase 1:  Process possible allocations from alloc_worklist.  Create instance
//            types for the CheckCastPP for allocations where possible.
//            Propagate the new types through users as follows:
//               casts and Phi:  push users on alloc_worklist
//               AddP:  cast Base and Address inputs to the instance type
//                      push any AddP users on alloc_worklist and push any memnode
//                      users onto memnode_worklist.
//  Phase 2:  Process MemNode's from memnode_worklist. Compute a new address type and
//            search the Memory chain for a store with the appropriate address type.
//            If a Phi is found, create a new version with the appropriate memory
//            slices from each of the Phi inputs.
//            For stores, process the users as follows:
//               MemNode:  push on memnode_worklist
//               MergeMem: push on mergemem_worklist
//  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory slice,
//            moving the first node encountered of each instance type to the
//            input corresponding to its alias index (the appropriate memory slice).
//  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
//
// In the following example, the CheckCastPP nodes are the casts of allocation
// results, and the allocation of node 29 is unescaped and eligible to be an
// instance type.
//
// We start with:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"
//    30  AddP  _ 29 29 10  Foo+12  alias_index=4
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35  40  30   ... alias_index=4
//    60  StoreP  45  50  20   ... alias_index=4
//    70  LoadP    _  60  30   ... alias_index=4
//    80  Phi     75  50  60   Memory alias_index=4
//    90  LoadP    _  80  30   ... alias_index=4
//   100  LoadP    _  80  20   ... alias_index=4
//
//
// Phase 1 creates an instance type for node 29, assigning it an instance id of 24
// and creating a new alias index for node 30.  This gives:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"  iid=24
//    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35  40  30   ... alias_index=6
//    60  StoreP  45  50  20   ... alias_index=4
//    70  LoadP    _  60  30   ... alias_index=6
//    80  Phi     75  50  60   Memory alias_index=4
//    90  LoadP    _  80  30   ... alias_index=6
//   100  LoadP    _  80  20   ... alias_index=4
//
// In phase 2, new memory inputs are computed for the loads and stores,
// and a new version of the phi is created.  In phase 4, the inputs to
// node 80 are updated and then the memory nodes are updated with the
// values computed in phase 2.  This results in:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"  iid=24
//    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
//
//    40  StoreP  25  7   20   ... alias_index=4
//    50  StoreP  35  7   30   ... alias_index=6
//    60  StoreP  45  40  20   ... alias_index=4
//    70  LoadP    _  50  30   ... alias_index=6
//    80  Phi     75  40  60   Memory alias_index=4
//   120  Phi     75  50  50   Memory alias_index=6
//    90  LoadP    _ 120  30   ... alias_index=6
//   100  LoadP    _  80  20   ... alias_index=4
//
void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist) {
  GrowableArray<Node *>  memnode_worklist;
  GrowableArray<PhiNode *>  orig_phis;

  PhaseGVN  *igvn = _compile->initial_gvn();
  uint new_index_start = (uint) _compile->num_alias_types();
  Arena* arena = Thread::current()->resource_area();
  VectorSet visited(arena);
  VectorSet ptset(arena);

  //  Phase 1:  Process possible allocations from alloc_worklist.
  //  Create instance types for the CheckCastPP for allocations where possible.
  //
  // (Note: don't forget to change the order of the second AddP node on
  //  the alloc_worklist if the order of the worklist processing is changed,
  //  see the comment in find_second_addp().)
  //
  while (alloc_worklist.length() != 0) {
    Node *n = alloc_worklist.pop();
    uint ni = n->_idx;
    const TypeOopPtr* tinst = NULL;
    if (n->is_Call()) {
      CallNode *alloc = n->as_Call();
      // copy escape information to call node
      PointsToNode* ptn = ptnode_adr(alloc->_idx);
      PointsToNode::EscapeState es = escape_state(alloc, igvn);
      // We have an allocation or call which returns a Java object,
      // see if it is unescaped.
      if (es != PointsToNode::NoEscape || !ptn->_scalar_replaceable)
        continue;

      // Find CheckCastPP for the allocate or for the return value of a call
      n = alloc->result_cast();
      if (n == NULL) {            // No uses except Initialize node
        if (alloc->is_Allocate()) {
          // Set the scalar_replaceable flag for allocation
          // so it could be eliminated if it has no uses.
          alloc->as_Allocate()->_is_scalar_replaceable = true;
        }
        continue;
      }
      if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
        assert(!alloc->is_Allocate(), "allocation should have unique type");
        continue;
      }

      // The inline code for Object.clone() casts the allocation result to
      // java.lang.Object and then to the actual type of the allocated
      // object. Detect this case and use the second cast.
      // Also detect the j.l.reflect.Array.newInstance(jobject, jint) case when
      // the allocation result is cast to java.lang.Object and then
      // to the actual Array type.
      if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
          && (alloc->is_AllocateArray() ||
              igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {
        Node *cast2 = NULL;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node *use = n->fast_out(i);
          if (use->is_CheckCastPP()) {
            cast2 = use;
            break;
          }
        }
        if (cast2 != NULL) {
          n = cast2;
        } else {
          // Non-scalar replaceable if the allocation type is unknown statically
          // (reflection allocation): the object can't be restored during
          // deoptimization without a precise type.
          continue;
        }
      }
      if (alloc->is_Allocate()) {
        // Set the scalar_replaceable flag for allocation
        // so it could be eliminated.
        alloc->as_Allocate()->_is_scalar_replaceable = true;
      }
      set_escape_state(n->_idx, es);
      // in order for an object to be scalar-replaceable, it must be:
      //   - a direct allocation (not a call returning an object)
      //   - non-escaping
      //   - eligible to be a unique type
      //   - not determined to be ineligible by escape analysis
      assert(ptnode_adr(alloc->_idx)->_node != NULL &&
             ptnode_adr(n->_idx)->_node != NULL, "should be registered");
      set_map(alloc->_idx, n);
      set_map(n->_idx, alloc);
      const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
      if (t == NULL)
        continue;  // not a TypeInstPtr
      tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni);
      igvn->hash_delete(n);
      igvn->set_type(n, tinst);
      n->raise_bottom_type(tinst);
      igvn->hash_insert(n);
      record_for_optimizer(n);
      if (alloc->is_Allocate() && ptn->_scalar_replaceable &&
          (t->isa_instptr() || t->isa_aryptr())) {

        // First, put on the worklist all Field edges from the Connection Graph,
        // which is more accurate than putting immediate users from the Ideal Graph.
        for (uint e = 0; e < ptn->edge_count(); e++) {
          Node *use = ptnode_adr(ptn->edge_target(e))->_node;
          assert(ptn->edge_type(e) == PointsToNode::FieldEdge && use->is_AddP(),
                 "only AddP nodes are Field edges in CG");
          if (use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(),"array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          }
        }

        // An allocation may have an Initialize which has raw stores. Scan
        // the users of the raw allocation result and push AddP users
        // on alloc_worklist.
        Node *raw_result = alloc->proj_out(TypeFunc::Parms);
        assert (raw_result != NULL, "must have an allocation result");
        for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
          Node *use = raw_result->fast_out(i);
          if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, raw_result);
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(),"array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          } else if (use->is_MemBar()) {
            memnode_worklist.append_if_missing(use);
          }
        }
      }
    } else if (n->is_AddP()) {
      ptset.Clear();
      PointsTo(ptset, get_addp_base(n), igvn);
      assert(ptset.Size() == 1, "AddP address is unique");
      uint elem = ptset.getelem(); // Allocation node's index
      if (elem == _phantom_object) {
        assert(false, "escaped allocation");
        continue; // Assume the value was set outside this method.
      }
      Node *base = get_map(elem);  // CheckCastPP node
      if (!split_AddP(n, base, igvn)) continue; // wrong type from dead path
      tinst = igvn->type(base)->isa_oopptr();
    } else if (n->is_Phi() ||
               n->is_CheckCastPP() ||
               n->is_EncodeP() ||
               n->is_DecodeN() ||
               (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
      if (visited.test_set(n->_idx)) {
        assert(n->is_Phi(), "loops only through Phi's");
        continue;  // already processed
      }
      ptset.Clear();
      PointsTo(ptset, n, igvn);
      if (ptset.Size() == 1) {
        uint elem = ptset.getelem(); // Allocation node's index
        if (elem == _phantom_object) {
          assert(false, "escaped allocation");
          continue; // Assume the value was set outside this method.
        }
        Node *val = get_map(elem);   // CheckCastPP node
        TypeNode *tn = n->as_Type();
        tinst = igvn->type(val)->isa_oopptr();
        assert(tinst != NULL && tinst->is_known_instance() &&
               (uint)tinst->instance_id() == elem , "instance type expected.");

        const Type *tn_type = igvn->type(tn);
        const TypeOopPtr *tn_t;
        if (tn_type->isa_narrowoop()) {
          tn_t = tn_type->make_ptr()->isa_oopptr();
        } else {
          tn_t = tn_type->isa_oopptr();
        }

        if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
          if (tn_type->isa_narrowoop()) {
            tn_type = tinst->make_narrowoop();
          } else {
            tn_type = tinst;
          }
          igvn->hash_delete(tn);
          igvn->set_type(tn, tn_type);
          tn->set_type(tn_type);
          igvn->hash_insert(tn);
          record_for_optimizer(n);
        } else {
          assert(tn_type == TypePtr::NULL_PTR ||
                 (tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass())),
                 "unexpected type");
          continue; // Skip dead path with different type
        }
      }
    } else {
      debug_only(n->dump();)
      assert(false, "EA: unexpected node");
      continue;
    }
    // push allocation's users on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if(use->is_Mem() && use->in(MemNode::Address) == n) {
        // Load/store to instance's field
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
        Node* addp2 = find_second_addp(use, n);
        if (addp2 != NULL) {
          alloc_worklist.append_if_missing(addp2);
        }
        alloc_worklist.append_if_missing(use);
      } else if (use->is_Phi() ||
                 use->is_CheckCastPP() ||
                 use->is_EncodeP() ||
                 use->is_DecodeN() ||
                 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
        alloc_worklist.append_if_missing(use);
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->is_SafePoint()) {
        // Look for MergeMem nodes for calls which reference unique allocation
        // (through CheckCastPP nodes) even for debug info.
        Node* m = use->in(TypeFunc::Memory);
        if (m->is_MergeMem()) {
          assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
        }
      } else {
        uint op = use->Opcode();
        if (!(op == Op_CmpP || op == Op_Conv2B ||
              op == Op_CastP2X || op == Op_StoreCM ||
              op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
              op == Op_StrEquals || op == Op_StrIndexOf)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing allocation reference path");
        }
#endif
      }
    }
  }

  // New alias types were created in split_AddP().
  uint new_index_end = (uint) _compile->num_alias_types();

  //  Phase 2:  Process MemNode's from memnode_worklist. Compute a new address
  //            type and new values for the Memory inputs (the Memory inputs
  //            are not actually updated until phase 4).
  if (memnode_worklist.length() == 0)
    return;  // nothing to do
  1246   while (memnode_worklist.length() != 0) {
  1247     Node *n = memnode_worklist.pop();
  1248     if (visited.test_set(n->_idx))
  1249       continue;
  1250     if (n->is_Phi() || n->is_ClearArray()) {
  1251       // we don't need to do anything, but the users must be pushed
  1252     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
  1253       // we don't need to do anything, but the users must be pushed
  1254       n = n->as_MemBar()->proj_out(TypeFunc::Memory);
  1255       if (n == NULL)
  1256         continue;
  1257     } else {
  1258       assert(n->is_Mem(), "memory node required.");
  1259       Node *addr = n->in(MemNode::Address);
  1260       const Type *addr_t = igvn->type(addr);
  1261       if (addr_t == Type::TOP)
  1262         continue;
  1263       assert (addr_t->isa_ptr() != NULL, "pointer type required.");
  1264       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
  1265       assert ((uint)alias_idx < new_index_end, "wrong alias index");
  1266       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis, igvn);
  1267       if (_compile->failing()) {
  1268         return;
  1269       }
  1270       if (mem != n->in(MemNode::Memory)) {
  1271         // We delay the memory edge update since we need old one in
  1272         // MergeMem code below when instances memory slices are separated.
  1273         debug_only(Node* pn = ptnode_adr(n->_idx)->_node;)
  1274         assert(pn == NULL || pn == n, "wrong node");
  1275         set_map(n->_idx, mem);
  1276         ptnode_adr(n->_idx)->_node = n;
  1277       }
  1278       if (n->is_Load()) {
  1279         continue;  // don't push users
  1280       } else if (n->is_LoadStore()) {
  1281         // get the memory projection
  1282         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1283           Node *use = n->fast_out(i);
  1284           if (use->Opcode() == Op_SCMemProj) {
  1285             n = use;
  1286             break;
  1287           }
  1288         }
  1289         assert(n->Opcode() == Op_SCMemProj, "memory projection required");
  1290       }
  1291     }
  1292     // push user on appropriate worklist
  1293     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1294       Node *use = n->fast_out(i);
  1295       if (use->is_Phi() || use->is_ClearArray()) {
  1296         memnode_worklist.append_if_missing(use);
  1297       } else if(use->is_Mem() && use->in(MemNode::Memory) == n) {
  1298         if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
  1299           continue;
  1300         memnode_worklist.append_if_missing(use);
  1301       } else if (use->is_MemBar()) {
  1302         memnode_worklist.append_if_missing(use);
  1303 #ifdef ASSERT
  1304       } else if(use->is_Mem()) {
  1305         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
  1306       } else if (use->is_MergeMem()) {
  1307         assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
  1308       } else {
  1309         uint op = use->Opcode();
  1310         if (!(op == Op_StoreCM ||
  1311               (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
  1312                strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
  1313               op == Op_AryEq || op == Op_StrComp ||
  1314               op == Op_StrEquals || op == Op_StrIndexOf)) {
  1315           n->dump();
  1316           use->dump();
  1317           assert(false, "EA: missing memory path");
  1318         }
  1319 #endif
  1320       }
  1321     }
  1322   }
  1324   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
  1325   //            Walk each memory slice moving the first node encountered of each
  1326   //            instance type to the input corresponding to its alias index.
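         // Editor's note (illustrative): a MergeMem keeps one memory input per
         // alias index. A store whose address type was later narrowed to a
         // unique instance may still sit on the general (wide) slice; the walk
         // below re-files such nodes under the instance's own alias index.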
  1327   uint length = _mergemem_worklist.length();
  1328   for( uint next = 0; next < length; ++next ) {
  1329     MergeMemNode* nmm = _mergemem_worklist.at(next);
  1330     assert(!visited.test_set(nmm->_idx), "should not be visited before");
  1331     // Note: we don't want to use MergeMemStream here because we only want to
  1332     // scan inputs which exist at the start, not ones we add during processing.
  1333     // Note 2: MergeMem may already contain instance memory slices added
  1334     // by find_inst_mem() calls when memory nodes were processed above.
  1335     igvn->hash_delete(nmm);
  1336     uint nslices = nmm->req();
  1337     for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
  1338       Node* mem = nmm->in(i);
  1339       Node* cur = NULL;
  1340       if (mem == NULL || mem->is_top())
  1341         continue;
  1342       // First, update mergemem by moving memory nodes to corresponding slices
  1343       // if their type became more precise since this mergemem was created.
  1344       while (mem->is_Mem()) {
  1345         const Type *at = igvn->type(mem->in(MemNode::Address));
  1346         if (at != Type::TOP) {
  1347           assert (at->isa_ptr() != NULL, "pointer type required.");
  1348           uint idx = (uint)_compile->get_alias_index(at->is_ptr());
  1349           if (idx == i) {
  1350             if (cur == NULL)
  1351               cur = mem;
  1352           } else {
  1353             if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
  1354               nmm->set_memory_at(idx, mem);
  1355             }
  1356           }
  1357         }
  1358         mem = mem->in(MemNode::Memory);
  1359       }
  1360       nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
  1361       // Find any instance of the current type if we haven't already
  1362       // encountered a memory slice of the instance along the memory chain.
  1363       for (uint ni = new_index_start; ni < new_index_end; ni++) {
  1364         if((uint)_compile->get_general_index(ni) == i) {
  1365           Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
  1366           if (nmm->is_empty_memory(m)) {
  1367             Node* result = find_inst_mem(mem, ni, orig_phis, igvn);
  1368             if (_compile->failing()) {
  1369               return;
  1370             }
  1371             nmm->set_memory_at(ni, result);
  1372           }
  1373         }
  1374       }
  1375     }
  1376     // Find the rest of the instances' values.
  1377     for (uint ni = new_index_start; ni < new_index_end; ni++) {
  1378       const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
  1379       Node* result = step_through_mergemem(nmm, ni, tinst);
  1380       if (result == nmm->base_memory()) {
  1381         // Didn't find instance memory, search through general slice recursively.
  1382         result = nmm->memory_at(_compile->get_general_index(ni));
  1383         result = find_inst_mem(result, ni, orig_phis, igvn);
  1384         if (_compile->failing()) {
  1385           return;
  1386         }
  1387         nmm->set_memory_at(ni, result);
  1388       }
  1389     }
  1390     igvn->hash_insert(nmm);
  1391     record_for_optimizer(nmm);
  1392   }
  1394   //  Phase 4:  Update the inputs of non-instance memory Phis and
  1395   //            the Memory input of memnodes.
  1396   // First update the inputs of any non-instance Phi's from
  1397   // which we split out an instance Phi.  Note we don't have
  1398   // to recursively process Phi's encountered on the input memory
  1399   // chains as is done in split_memory_phi() since they will
  1400   // also be processed here.
  1401   for (int j = 0; j < orig_phis.length(); j++) {
  1402     PhiNode *phi = orig_phis.at(j);
  1403     int alias_idx = _compile->get_alias_index(phi->adr_type());
  1404     igvn->hash_delete(phi);
  1405     for (uint i = 1; i < phi->req(); i++) {
  1406       Node *mem = phi->in(i);
  1407       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis, igvn);
  1408       if (_compile->failing()) {
  1409         return;
  1410       }
  1411       if (mem != new_mem) {
  1412         phi->set_req(i, new_mem);
  1413       }
  1414     }
  1415     igvn->hash_insert(phi);
  1416     record_for_optimizer(phi);
  1417   }
  1419   // Update the memory inputs of MemNodes with the value we computed
  1420   // in Phase 2 and move stores' memory users to the corresponding memory slices.
  1421 #ifdef ASSERT
  1422   visited.Clear();
  1423   Node_Stack old_mems(arena, _compile->unique() >> 2);
  1424 #endif
  1425   for (uint i = 0; i < nodes_size(); i++) {
  1426     Node *nmem = get_map(i);
  1427     if (nmem != NULL) {
  1428       Node *n = ptnode_adr(i)->_node;
  1429       assert(n != NULL, "sanity");
  1430       if (n->is_Mem()) {
  1431 #ifdef ASSERT
  1432         Node* old_mem = n->in(MemNode::Memory);
  1433         if (!visited.test_set(old_mem->_idx)) {
  1434           old_mems.push(old_mem, old_mem->outcnt());
  1435         }
  1436 #endif
  1437         assert(n->in(MemNode::Memory) != nmem, "sanity");
  1438         if (!n->is_Load()) {
  1439           // Move memory users of a store first.
  1440           move_inst_mem(n, orig_phis, igvn);
  1441         }
  1442         // Now update memory input
  1443         igvn->hash_delete(n);
  1444         n->set_req(MemNode::Memory, nmem);
  1445         igvn->hash_insert(n);
  1446         record_for_optimizer(n);
  1447       } else {
  1448         assert(n->is_Allocate() || n->is_CheckCastPP() ||
  1449                n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
  1450       }
  1451     }
  1452   }
  1453 #ifdef ASSERT
  1454   // Verify that memory was split correctly
  1455   while (old_mems.is_nonempty()) {
  1456     Node* old_mem = old_mems.node();
  1457     uint  old_cnt = old_mems.index();
  1458     old_mems.pop();
  1459     assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
  1460   }
  1461 #endif
  1462 }
  1464 bool ConnectionGraph::has_candidates(Compile *C) {
  1465   // EA brings benefits only when the code has allocations and/or locks which
  1466   // are represented by ideal Macro nodes.
  1467   int cnt = C->macro_count();
  1468   for( int i=0; i < cnt; i++ ) {
  1469     Node *n = C->macro_node(i);
  1470     if ( n->is_Allocate() )
  1471       return true;
  1472     if( n->is_Lock() ) {
  1473       Node* obj = n->as_Lock()->obj_node()->uncast();
  1474       if( !(obj->is_Parm() || obj->is_Con()) )
  1475         return true;
  1476     }
  1477   }
  1478   return false;
  1479 }
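       // Editor's note, an illustrative example (not from the original source):
       //
       //    void m() {
       //      Point p = new Point();         // Allocate macro node => candidate
       //      synchronized (new Object()) {} // Lock on a non-Parm, non-Con object => candidate
       //    }
       //
       // Locks on incoming parameters (Parm) or constants (Con) are skipped
       // since escape analysis cannot prove anything useful about objects
       // created outside the method.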
  1481 bool ConnectionGraph::compute_escape() {
  1482   Compile* C = _compile;
  1484   // 1. Populate Connection Graph (CG) with Ideal nodes.
  1486   Unique_Node_List worklist_init;
  1487   worklist_init.map(C->unique(), NULL);  // preallocate space
  1489   // Initialize worklist
  1490   if (C->root() != NULL) {
  1491     worklist_init.push(C->root());
  1492   }
  1494   GrowableArray<int> cg_worklist;
  1495   PhaseGVN* igvn = C->initial_gvn();
  1496   bool has_allocations = false;
  1498   // Push all useful nodes onto CG list and set their type.
  1499   for( uint next = 0; next < worklist_init.size(); ++next ) {
  1500     Node* n = worklist_init.at(next);
  1501     record_for_escape_analysis(n, igvn);
  1502     // Only the results of allocations and Java static calls are checked
  1503     // for an escape status. See process_call_result() below.
  1504     if (n->is_Allocate() || (n->is_CallStaticJava() &&
  1505         ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject)) {
  1506       has_allocations = true;
  1507     }
  1508     if(n->is_AddP()) {
  1509       // Collect address nodes which directly reference an allocation.
  1510       // Use them during stage 3 below to build initial connection graph
  1511       // field edges. Other field edges could be added after StoreP/LoadP
  1512       // nodes are processed during stage 4 below.
  1513       Node* base = get_addp_base(n);
  1514       if(base->is_Proj() && base->in(0)->is_Allocate()) {
  1515         cg_worklist.append(n->_idx);
  1516       }
  1517     } else if (n->is_MergeMem()) {
  1518       // Collect all MergeMem nodes to add memory slices for
  1519       // scalar replaceable objects in split_unique_types().
  1520       _mergemem_worklist.append(n->as_MergeMem());
  1521     }
  1522     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1523       Node* m = n->fast_out(i);   // Get user
  1524       worklist_init.push(m);
  1525     }
  1526   }
  1528   if (!has_allocations) {
  1529     _collecting = false;
  1530     return false; // Nothing to do.
  1531   }
  1533   // 2. First pass to create simple CG edges (does not require walking the CG).
  1534   uint delayed_size = _delayed_worklist.size();
  1535   for( uint next = 0; next < delayed_size; ++next ) {
  1536     Node* n = _delayed_worklist.at(next);
  1537     build_connection_graph(n, igvn);
  1538   }
  1540   // 3. Pass to create field edges (Allocate -F-> AddP).
  1541   uint cg_length = cg_worklist.length();
  1542   for( uint next = 0; next < cg_length; ++next ) {
  1543     int ni = cg_worklist.at(next);
  1544     build_connection_graph(ptnode_adr(ni)->_node, igvn);
  1545   }
  1547   cg_worklist.clear();
  1548   cg_worklist.append(_phantom_object);
  1550   // 4. Build Connection Graph edges which require
  1551   //    walking the connection graph.
  1552   for (uint ni = 0; ni < nodes_size(); ni++) {
  1553     PointsToNode* ptn = ptnode_adr(ni);
  1554     Node *n = ptn->_node;
  1555     if (n != NULL) { // Call, AddP, LoadP, StoreP
  1556       build_connection_graph(n, igvn);
  1557       if (ptn->node_type() != PointsToNode::UnknownType)
  1558         cg_worklist.append(n->_idx); // Collect CG nodes
  1559     }
  1560   }
  1562   Arena* arena = Thread::current()->resource_area();
  1563   VectorSet ptset(arena);
  1564   GrowableArray<uint>  deferred_edges;
  1565   VectorSet visited(arena);
  1567   // 5. Remove deferred edges from the graph and adjust
  1568   //    escape state of nonescaping objects.
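         // Editor's note (illustrative): a Deferred edge means "points to
         // whatever the target points to". remove_deferred() collapses chains
         // such as
         //
         //    LocalVar a -D-> LocalVar b -P-> JavaObject o
         //
         // into a direct a -P-> o edge, so the later passes only need to
         // follow PointsTo and Field edges.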
  1569   cg_length = cg_worklist.length();
  1570   for( uint next = 0; next < cg_length; ++next ) {
  1571     int ni = cg_worklist.at(next);
  1572     PointsToNode* ptn = ptnode_adr(ni);
  1573     PointsToNode::NodeType nt = ptn->node_type();
  1574     if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
  1575       remove_deferred(ni, &deferred_edges, &visited);
  1576       Node *n = ptn->_node;
  1577       if (n->is_AddP()) {
  1578         // Search for objects which are not scalar replaceable
  1579         // and adjust their escape state.
  1580         verify_escape_state(ni, ptset, igvn);
  1581       }
  1582     }
  1583   }
  1585   // 6. Propagate escape states.
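         // Editor's note: the three passes below run from the most conservative
         // state to the least (GlobalEscape, then ArgEscape, then NoEscape) and
         // only ever raise a node's escape state, so a node reachable from
         // several roots ends up with its worst-case state.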
  1586   GrowableArray<int>  worklist;
  1587   bool has_non_escaping_obj = false;
  1589   // push all GlobalEscape nodes on the worklist
  1590   for( uint next = 0; next < cg_length; ++next ) {
  1591     int nk = cg_worklist.at(next);
  1592     if (ptnode_adr(nk)->escape_state() == PointsToNode::GlobalEscape)
  1593       worklist.push(nk);
  1594   }
  1595   // mark all nodes reachable from GlobalEscape nodes
  1596   while(worklist.length() > 0) {
  1597     PointsToNode* ptn = ptnode_adr(worklist.pop());
  1598     uint e_cnt = ptn->edge_count();
  1599     for (uint ei = 0; ei < e_cnt; ei++) {
  1600       uint npi = ptn->edge_target(ei);
  1601       PointsToNode *np = ptnode_adr(npi);
  1602       if (np->escape_state() < PointsToNode::GlobalEscape) {
  1603         np->set_escape_state(PointsToNode::GlobalEscape);
  1604         worklist.push(npi);
  1605       }
  1606     }
  1607   }
  1609   // push all ArgEscape nodes on the worklist
  1610   for( uint next = 0; next < cg_length; ++next ) {
  1611     int nk = cg_worklist.at(next);
  1612     if (ptnode_adr(nk)->escape_state() == PointsToNode::ArgEscape)
  1613       worklist.push(nk);
  1614   }
  1615   // mark all nodes reachable from ArgEscape nodes
  1616   while(worklist.length() > 0) {
  1617     PointsToNode* ptn = ptnode_adr(worklist.pop());
  1618     if (ptn->node_type() == PointsToNode::JavaObject)
  1619       has_non_escaping_obj = true; // Non GlobalEscape
  1620     uint e_cnt = ptn->edge_count();
  1621     for (uint ei = 0; ei < e_cnt; ei++) {
  1622       uint npi = ptn->edge_target(ei);
  1623       PointsToNode *np = ptnode_adr(npi);
  1624       if (np->escape_state() < PointsToNode::ArgEscape) {
  1625         np->set_escape_state(PointsToNode::ArgEscape);
  1626         worklist.push(npi);
  1627       }
  1628     }
  1629   }
  1631   GrowableArray<Node*> alloc_worklist;
  1633   // push all NoEscape nodes on the worklist
  1634   for( uint next = 0; next < cg_length; ++next ) {
  1635     int nk = cg_worklist.at(next);
  1636     if (ptnode_adr(nk)->escape_state() == PointsToNode::NoEscape)
  1637       worklist.push(nk);
  1638   }
  1639   // mark all nodes reachable from NoEscape nodes
  1640   while(worklist.length() > 0) {
  1641     PointsToNode* ptn = ptnode_adr(worklist.pop());
  1642     if (ptn->node_type() == PointsToNode::JavaObject)
  1643       has_non_escaping_obj = true; // Non GlobalEscape
  1644     Node* n = ptn->_node;
  1645     if (n->is_Allocate() && ptn->_scalar_replaceable ) {
  1646       // Push scalar replaceable allocations on alloc_worklist
  1647       // for processing in split_unique_types().
  1648       alloc_worklist.append(n);
  1649     }
  1650     uint e_cnt = ptn->edge_count();
  1651     for (uint ei = 0; ei < e_cnt; ei++) {
  1652       uint npi = ptn->edge_target(ei);
  1653       PointsToNode *np = ptnode_adr(npi);
  1654       if (np->escape_state() < PointsToNode::NoEscape) {
  1655         np->set_escape_state(PointsToNode::NoEscape);
  1656         worklist.push(npi);
  1657       }
  1658     }
  1659   }
  1661   _collecting = false;
  1662   assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build");
  1664   bool has_scalar_replaceable_candidates = alloc_worklist.length() > 0;
  1665   if ( has_scalar_replaceable_candidates &&
  1666        C->AliasLevel() >= 3 && EliminateAllocations ) {
  1668     // Now use the escape information to create unique types for
  1669     // scalar replaceable objects.
  1670     split_unique_types(alloc_worklist);
  1672     if (C->failing())  return false;
  1674     // Clean up after split unique types.
  1675     ResourceMark rm;
  1676     PhaseRemoveUseless pru(C->initial_gvn(), C->for_igvn());
  1678     C->print_method("After Escape Analysis", 2);
  1680 #ifdef ASSERT
  1681   } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
  1682     tty->print("=== No allocations eliminated for ");
  1683     C->method()->print_short_name();
  1684     if(!EliminateAllocations) {
  1685       tty->print(" since EliminateAllocations is off ===");
  1686     } else if(!has_scalar_replaceable_candidates) {
  1687       tty->print(" since there are no scalar replaceable candidates ===");
  1688     } else if(C->AliasLevel() < 3) {
  1689       tty->print(" since AliasLevel < 3 ===");
  1690     }
  1691     tty->cr();
  1692 #endif
  1693   }
  1694   return has_non_escaping_obj;
  1695 }
  1697 // Search for objects which are not scalar replaceable.
  1698 void ConnectionGraph::verify_escape_state(int nidx, VectorSet& ptset, PhaseTransform* phase) {
  1699   PointsToNode* ptn = ptnode_adr(nidx);
  1700   Node* n = ptn->_node;
  1701   assert(n->is_AddP(), "Should be called for AddP nodes only");
  1702   // Search for objects which are not scalar replaceable.
  1703   // Mark their escape state as ArgEscape to propagate the state
  1704   // to referenced objects.
  1705   // Note: currently there is no difference in compiler optimizations
  1706   // between ArgEscape objects and NoEscape objects which are not
  1707   // scalar replaceable.
  1709   Compile* C = _compile;
  1711   int offset = ptn->offset();
  1712   Node* base = get_addp_base(n);
  1713   ptset.Clear();
  1714   PointsTo(ptset, base, phase);
  1715   int ptset_size = ptset.Size();
  1717   // Check if an oop field's initializing value is recorded and add
  1718   // a corresponding NULL value for the field if it is not recorded.
  1719   // The Connection Graph does not record a default initialization by NULL
  1720   // captured by an Initialize node.
  1721   //
  1722   // Note: it will disable scalar replacement in some cases:
  1723   //
  1724   //    Point p[] = new Point[1];
  1725   //    p[0] = new Point(); // Will not be scalar replaced
  1726   //
  1727   // but it will save us from incorrect optimizations in cases like this:
  1728   //
  1729   //    Point p[] = new Point[1];
  1730   //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
  1731   //
  1732   // Do a simple control flow analysis to distinguish the above cases.
  1733   //
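         // Editor's note: the "simple control flow analysis" below walks a
         // store's control input upward; if it reaches the Initialize or the
         // Allocate node without passing a Region or an If projection, the
         // store executes on every path from the allocation, so its value can
         // serve as the field's initializing value.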
  1734   if (offset != Type::OffsetBot && ptset_size == 1) {
  1735     uint elem = ptset.getelem(); // Allocation node's index
  1736     // It does not matter if it is not an Allocation node, since
  1737     // only non-escaping allocations are scalar replaced.
  1738     if (ptnode_adr(elem)->_node->is_Allocate() &&
  1739         ptnode_adr(elem)->escape_state() == PointsToNode::NoEscape) {
  1740       AllocateNode* alloc = ptnode_adr(elem)->_node->as_Allocate();
  1741       InitializeNode* ini = alloc->initialization();
  1743       // Check only oop fields.
  1744       const Type* adr_type = n->as_AddP()->bottom_type();
  1745       BasicType basic_field_type = T_INT;
  1746       if (adr_type->isa_instptr()) {
  1747         ciField* field = C->alias_type(adr_type->isa_instptr())->field();
  1748         if (field != NULL) {
  1749           basic_field_type = field->layout_type();
  1750         } else {
  1751           // Ignore a non-field load (for example, a klass load).
  1752         }
  1753       } else if (adr_type->isa_aryptr()) {
  1754         const Type* elemtype = adr_type->isa_aryptr()->elem();
  1755         basic_field_type = elemtype->array_element_basic_type();
  1756       } else {
  1757         // Raw pointers are used for initializing stores, so skip them.
  1758         assert(adr_type->isa_rawptr() && base->is_Proj() &&
  1759                (base->in(0) == alloc),"unexpected pointer type");
  1760       }
  1761       if (basic_field_type == T_OBJECT ||
  1762           basic_field_type == T_NARROWOOP ||
  1763           basic_field_type == T_ARRAY) {
  1764         Node* value = NULL;
  1765         if (ini != NULL) {
  1766           BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
  1767           Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
  1768           if (store != NULL && store->is_Store()) {
  1769             value = store->in(MemNode::ValueIn);
  1770           } else if (ptn->edge_count() > 0) { // Are there oop stores?
  1771             // Check for a store which follows allocation without branches.
  1772             // For example, a volatile field store is not collected
  1773             // by Initialize node. TODO: it would be nice to use idom() here.
  1774             for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1775               store = n->fast_out(i);
  1776               if (store->is_Store() && store->in(0) != NULL) {
  1777                 Node* ctrl = store->in(0);
  1778                 while(!(ctrl == ini || ctrl == alloc || ctrl == NULL ||
  1779                         ctrl == C->root() || ctrl == C->top() || ctrl->is_Region() ||
  1780                         ctrl->is_IfTrue() || ctrl->is_IfFalse())) {
  1781                    ctrl = ctrl->in(0);
  1782                 }
  1783                 if (ctrl == ini || ctrl == alloc) {
  1784                   value = store->in(MemNode::ValueIn);
  1785                   break;
  1786                 }
  1787               }
  1788             }
  1789           }
  1790         }
  1791         if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
  1792           // A field's initializing value was not recorded. Add NULL.
  1793           uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
  1794           add_pointsto_edge(nidx, null_idx);
  1795         }
  1796       }
  1797     }
  1798   }
  1800   // An object is not scalar replaceable if the field which may point
  1801   // to it has unknown offset (unknown element of an array of objects).
  1802   //
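         // Editor's note, an illustrative example (not from the source):
         //
         //    Point p[] = new Point[2];
         //    p[i] = new Point();   // index not constant => offset is OffsetBot
         //
         // The new Point() is then reachable through an unknown array element
         // and cannot be scalar replaced.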
  1803   if (offset == Type::OffsetBot) {
  1804     uint e_cnt = ptn->edge_count();
  1805     for (uint ei = 0; ei < e_cnt; ei++) {
  1806       uint npi = ptn->edge_target(ei);
  1807       set_escape_state(npi, PointsToNode::ArgEscape);
  1808       ptnode_adr(npi)->_scalar_replaceable = false;
  1809     }
  1810   }
  1812   // Currently an object is not scalar replaceable if a LoadStore node
  1813   // accesses its field, since the field's value is unknown after that.
  1814   //
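         // Editor's note (illustrative): LoadStore nodes come from atomic
         // updates, e.g. a compareAndSet intrinsic compiles to
         // CompareAndSwapP/CompareAndSwapN, which both reads and writes the
         // field, so the field's value afterwards is unknown to this analysis.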
  1815   bool has_LoadStore = false;
  1816   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1817     Node *use = n->fast_out(i);
  1818     if (use->is_LoadStore()) {
  1819       has_LoadStore = true;
  1820       break;
  1821     }
  1822   }
  1823   // An object is not scalar replaceable if the address points
  1824   // to unknown field (unknown element for arrays, offset is OffsetBot).
  1825   //
  1826   // Or the address may point to more than one object. This may produce
  1827   // a false positive result (setting scalar_replaceable to false)
  1828   // since the flow-insensitive escape analysis can't separate
  1829   // the case when stores overwrite the field's value from the case
  1830   // when stores happened on different control branches.
  1831   //
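         // Editor's note, an illustrative example (not from the source):
         //
         //    Point p = cond ? new Point() : new Point();
         //    p.x = 0;   // the store's base points to two allocations
         //
         // Both allocations are conservatively marked not scalar replaceable.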
  1832   if (ptset_size > 1 || (ptset_size != 0 &&
  1833       (has_LoadStore || offset == Type::OffsetBot))) {
  1834     for( VectorSetI j(&ptset); j.test(); ++j ) {
  1835       set_escape_state(j.elem, PointsToNode::ArgEscape);
  1836       ptnode_adr(j.elem)->_scalar_replaceable = false;
  1837     }
  1838   }
  1839 }
  1841 void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {
  1843     switch (call->Opcode()) {
  1844 #ifdef ASSERT
  1845     case Op_Allocate:
  1846     case Op_AllocateArray:
  1847     case Op_Lock:
  1848     case Op_Unlock:
  1849       assert(false, "should be done already");
  1850       break;
  1851 #endif
  1852     case Op_CallLeaf:
  1853     case Op_CallLeafNoFP:
  1854     {
  1855       // Stub calls: objects do not escape but they are not scalar replaceable.
  1856       // Adjust escape state for outgoing arguments.
  1857       const TypeTuple * d = call->tf()->domain();
  1858       VectorSet ptset(Thread::current()->resource_area());
  1859       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  1860         const Type* at = d->field_at(i);
  1861         Node *arg = call->in(i)->uncast();
  1862         const Type *aat = phase->type(arg);
  1863         if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr() &&
  1864             ptnode_adr(arg->_idx)->escape_state() < PointsToNode::ArgEscape) {
  1866           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
  1867                  aat->isa_ptr() != NULL, "expecting a Ptr");
  1868 #ifdef ASSERT
  1869           if (!((call->Opcode() == Op_CallLeafNoFP &&
  1870                  call->as_CallLeaf()->_name != NULL &&
  1871                  strstr(call->as_CallLeaf()->_name, "arraycopy") != 0) ||
  1872                 (call->as_CallLeaf()->_name != NULL &&
  1873                  (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre")  == 0 ||
  1874                   strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0)))
  1875           ) {
  1876             call->dump();
  1877             assert(false, "EA: unexpected CallLeaf");
  1878           }
  1879 #endif
  1880           set_escape_state(arg->_idx, PointsToNode::ArgEscape);
  1881           if (arg->is_AddP()) {
  1882             //
  1883             // The inline_native_clone() case when the arraycopy stub is called
  1884             // after the allocation before Initialize and CheckCastPP nodes.
  1885             //
  1886             // Set AddP's base (Allocate) as not scalar replaceable since
  1887             // pointer to the base (with offset) is passed as argument.
  1888             //
  1889             arg = get_addp_base(arg);
  1890           }
  1891           ptset.Clear();
  1892           PointsTo(ptset, arg, phase);
  1893           for( VectorSetI j(&ptset); j.test(); ++j ) {
  1894             uint pt = j.elem;
  1895             set_escape_state(pt, PointsToNode::ArgEscape);
  1896           }
  1897         }
  1898       }
  1899       break;
  1900     }
  1902     case Op_CallStaticJava:
  1903     // For a static call, we know exactly what method is being called.
  1904     // Use the bytecode estimator to record the call's escape effects.
  1905     {
  1906       ciMethod *meth = call->as_CallJava()->method();
  1907       BCEscapeAnalyzer *call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
  1908       // fall-through if not a Java method or no analyzer information
  1909       if (call_analyzer != NULL) {
  1910         const TypeTuple * d = call->tf()->domain();
  1911         VectorSet ptset(Thread::current()->resource_area());
  1912         bool copy_dependencies = false;
  1913         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  1914           const Type* at = d->field_at(i);
  1915           int k = i - TypeFunc::Parms;
  1916           Node *arg = call->in(i)->uncast();
  1918           if (at->isa_oopptr() != NULL &&
  1919               ptnode_adr(arg->_idx)->escape_state() < PointsToNode::GlobalEscape) {
  1921             bool global_escapes = false;
  1922             bool fields_escapes = false;
  1923             if (!call_analyzer->is_arg_stack(k)) {
  1924               // The argument globally escapes; mark everything it could point to.
  1925               set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
  1926               global_escapes = true;
  1927             } else {
  1928               if (!call_analyzer->is_arg_local(k)) {
  1929                 // The argument itself doesn't escape, but any fields might
  1930                 fields_escapes = true;
  1931               }
  1932               set_escape_state(arg->_idx, PointsToNode::ArgEscape);
  1933               copy_dependencies = true;
  1934             }
  1936             ptset.Clear();
  1937             PointsTo(ptset, arg, phase);
  1938             for( VectorSetI j(&ptset); j.test(); ++j ) {
  1939               uint pt = j.elem;
  1940               if (global_escapes) {
  1941                 // The argument globally escapes; mark everything it could point to.
  1942                 set_escape_state(pt, PointsToNode::GlobalEscape);
  1943               } else {
  1944                 if (fields_escapes) {
  1945                   // The argument itself doesn't escape, but any fields might
  1946                   add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
  1947                 }
  1948                 set_escape_state(pt, PointsToNode::ArgEscape);
  1949               }
  1950             }
  1951           }
  1952         }
  1953         if (copy_dependencies)
  1954           call_analyzer->copy_dependencies(_compile->dependencies());
  1955         break;
  1956       }
  1957     }
  1959     default:
  1960     // Fall-through here if not a Java method or no analyzer information
  1961     // or some other type of call, assume the worst case: all arguments
  1962     // globally escape.
  1963     {
  1964       // adjust escape state for outgoing arguments
  1965       const TypeTuple * d = call->tf()->domain();
  1966       VectorSet ptset(Thread::current()->resource_area());
  1967       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  1968         const Type* at = d->field_at(i);
  1969         if (at->isa_oopptr() != NULL) {
  1970           Node *arg = call->in(i)->uncast();
  1971           set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
  1972           ptset.Clear();
  1973           PointsTo(ptset, arg, phase);
  1974           for( VectorSetI j(&ptset); j.test(); ++j ) {
  1975             uint pt = j.elem;
  1976             set_escape_state(pt, PointsToNode::GlobalEscape);
  1977           }
  1978         }
  1979       }
  1980     }
  1981   }
  1982 }
  1983 void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *phase) {
  1984   CallNode   *call = resproj->in(0)->as_Call();
  1985   uint    call_idx = call->_idx;
  1986   uint resproj_idx = resproj->_idx;
  1988   switch (call->Opcode()) {
  1989     case Op_Allocate:
  1990     {
  1991       Node *k = call->in(AllocateNode::KlassNode);
  1992       const TypeKlassPtr *kt;
  1993       if (k->Opcode() == Op_LoadKlass) {
  1994         kt = k->as_Load()->type()->isa_klassptr();
  1995       } else {
  1996         // Also works for DecodeN(LoadNKlass).
  1997         kt = k->as_Type()->type()->isa_klassptr();
  1998       }
  1999       assert(kt != NULL, "TypeKlassPtr required.");
  2000       ciKlass* cik = kt->klass();
  2001       ciInstanceKlass* ciik = cik->as_instance_klass();
  2003       PointsToNode::EscapeState es;
  2004       uint edge_to;
  2005       if (cik->is_subclass_of(_compile->env()->Thread_klass()) || ciik->has_finalizer()) {
  2006         es = PointsToNode::GlobalEscape;
  2007         edge_to = _phantom_object; // Could not be worse
  2008       } else {
  2009         es = PointsToNode::NoEscape;
  2010         edge_to = call_idx;
  2012       set_escape_state(call_idx, es);
  2013       add_pointsto_edge(resproj_idx, edge_to);
  2014       _processed.set(resproj_idx);
  2015       break;
  2016     }
  2018     case Op_AllocateArray:
  2019     {
  2020       int length = call->in(AllocateNode::ALength)->find_int_con(-1);
  2021       if (length < 0 || length > EliminateAllocationArraySizeLimit) {
  2022         // Not scalar replaceable if the length is not constant or too big.
  2023         ptnode_adr(call_idx)->_scalar_replaceable = false;
  2024       }
  2025       set_escape_state(call_idx, PointsToNode::NoEscape);
  2026       add_pointsto_edge(resproj_idx, call_idx);
  2027       _processed.set(resproj_idx);
  2028       break;
  2029     }
  2031     case Op_CallStaticJava:
  2032     // For a static call, we know exactly what method is being called.
  2033     // Use bytecode estimator to record whether the call's return value escapes
  2034     {
  2035       bool done = true;
  2036       const TypeTuple *r = call->tf()->range();
  2037       const Type* ret_type = NULL;
  2039       if (r->cnt() > TypeFunc::Parms)
  2040         ret_type = r->field_at(TypeFunc::Parms);
  2042       // Note:  we use isa_ptr() instead of isa_oopptr()  here because the
  2043       //        _multianewarray functions return a TypeRawPtr.
  2044       if (ret_type == NULL || ret_type->isa_ptr() == NULL) {
  2045         _processed.set(resproj_idx);
  2046         break;  // doesn't return a pointer type
  2047       }
  2048       ciMethod *meth = call->as_CallJava()->method();
  2049       const TypeTuple * d = call->tf()->domain();
  2050       if (meth == NULL) {
  2051         // not a Java method, assume global escape
  2052         set_escape_state(call_idx, PointsToNode::GlobalEscape);
  2053         add_pointsto_edge(resproj_idx, _phantom_object);
  2054       } else {
  2055         BCEscapeAnalyzer *call_analyzer = meth->get_bcea();
  2056         bool copy_dependencies = false;
  2058         if (call_analyzer->is_return_allocated()) {
  2059           // Returns a newly allocated unescaped object, simply
  2060           // update dependency information.
  2061           // Mark it as NoEscape so that objects referenced by
  2062           // its fields will be marked as NoEscape at least.
  2063           set_escape_state(call_idx, PointsToNode::NoEscape);
  2064           add_pointsto_edge(resproj_idx, call_idx);
  2065           copy_dependencies = true;
  2066         } else if (call_analyzer->is_return_local()) {
  2067           // determine whether any arguments are returned
  2068           set_escape_state(call_idx, PointsToNode::NoEscape);
  2069           bool ret_arg = false;
  2070           for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  2071             const Type* at = d->field_at(i);
  2073             if (at->isa_oopptr() != NULL) {
  2074               Node *arg = call->in(i)->uncast();
  2076               if (call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
  2077                 ret_arg = true;
  2078                 PointsToNode *arg_esp = ptnode_adr(arg->_idx);
  2079                 if (arg_esp->node_type() == PointsToNode::UnknownType)
  2080                   done = false;
  2081                 else if (arg_esp->node_type() == PointsToNode::JavaObject)
  2082                   add_pointsto_edge(resproj_idx, arg->_idx);
  2083                 else
  2084                   add_deferred_edge(resproj_idx, arg->_idx);
  2085                 arg_esp->_hidden_alias = true;
  2086               }
  2087             }
  2088           }
  2089           if (done && !ret_arg) {
  2090             // Returns unknown object.
  2091             set_escape_state(call_idx, PointsToNode::GlobalEscape);
  2092             add_pointsto_edge(resproj_idx, _phantom_object);
  2093           }
  2094           copy_dependencies = true;
  2095         } else {
  2096           set_escape_state(call_idx, PointsToNode::GlobalEscape);
  2097           add_pointsto_edge(resproj_idx, _phantom_object);
  2098           for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  2099             const Type* at = d->field_at(i);
  2100             if (at->isa_oopptr() != NULL) {
  2101               Node *arg = call->in(i)->uncast();
  2102               PointsToNode *arg_esp = ptnode_adr(arg->_idx);
  2103               arg_esp->_hidden_alias = true;
  2104             }
  2105           }
  2106         }
  2107         if (copy_dependencies)
  2108           call_analyzer->copy_dependencies(_compile->dependencies());
  2109       }
  2110       if (done)
  2111         _processed.set(resproj_idx);
  2112       break;
  2113     }
  2115     default:
  2116     // Some other type of call, assume the worst case that the
  2117     // returned value, if any, globally escapes.
  2118     {
  2119       const TypeTuple *r = call->tf()->range();
  2120       if (r->cnt() > TypeFunc::Parms) {
  2121         const Type* ret_type = r->field_at(TypeFunc::Parms);
  2123         // Note:  we use isa_ptr() instead of isa_oopptr()  here because the
  2124         //        _multianewarray functions return a TypeRawPtr.
  2125         if (ret_type->isa_ptr() != NULL) {
  2126           set_escape_state(call_idx, PointsToNode::GlobalEscape);
  2127           add_pointsto_edge(resproj_idx, _phantom_object);
  2128         }
  2129       }
  2130       _processed.set(resproj_idx);
  2131     }
  2132   }
  2133 }
  2135 // Populate Connection Graph with Ideal nodes and create simple
  2136 // connection graph edges (do not need to check the node_type of inputs
  2137 // or to call PointsTo() to walk the connection graph).
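       // Editor's note: nodes whose inputs are not yet typed are pushed onto
       // _delayed_worklist here and finished later by build_connection_graph()
       // (pass 2 in compute_escape()), so this routine itself never needs to
       // walk the graph.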
  2138 void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase) {
  2139   if (_processed.test(n->_idx))
  2140     return; // No need to redefine node's state.
  2142   if (n->is_Call()) {
  2143     // Arguments to allocation and locking don't escape.
  2144     if (n->is_Allocate()) {
  2145       add_node(n, PointsToNode::JavaObject, PointsToNode::UnknownEscape, true);
  2146       record_for_optimizer(n);
  2147     } else if (n->is_Lock() || n->is_Unlock()) {
  2148       // Put Lock and Unlock nodes on IGVN worklist to process them during
  2149       // the first IGVN optimization when escape information is still available.
  2150       record_for_optimizer(n);
  2151       _processed.set(n->_idx);
  2152     } else {
  2153       // Don't mark as processed since call's arguments have to be processed.
  2154       PointsToNode::NodeType nt = PointsToNode::UnknownType;
  2155       PointsToNode::EscapeState es = PointsToNode::UnknownEscape;
  2157       // Check if a call returns an object.
  2158       const TypeTuple *r = n->as_Call()->tf()->range();
  2159       if (r->cnt() > TypeFunc::Parms &&
  2160           r->field_at(TypeFunc::Parms)->isa_ptr() &&
  2161           n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
  2162         nt = PointsToNode::JavaObject;
  2163         if (!n->is_CallStaticJava()) {
  2164           // Since the called method is statically unknown, assume
  2165           // the worst case that the returned value globally escapes.
  2166           es = PointsToNode::GlobalEscape;
  2167         }
  2168       }
  2169       add_node(n, nt, es, false);
  2170     }
  2171     return;
  2172   }
  2174   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  2175   // ThreadLocal has RawPtr type.
  2176   switch (n->Opcode()) {
  2177     case Op_AddP:
  2178     {
  2179       add_node(n, PointsToNode::Field, PointsToNode::UnknownEscape, false);
  2180       break;
  2181     }
  2182     case Op_CastX2P:
  2183     { // "Unsafe" memory access.
  2184       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
  2185       break;
  2186     }
  2187     case Op_CastPP:
  2188     case Op_CheckCastPP:
  2189     case Op_EncodeP:
  2190     case Op_DecodeN:
  2191     {
  2192       add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  2193       int ti = n->in(1)->_idx;
  2194       PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
  2195       if (nt == PointsToNode::UnknownType) {
  2196         _delayed_worklist.push(n); // Process it later.
  2197         break;
  2198       } else if (nt == PointsToNode::JavaObject) {
  2199         add_pointsto_edge(n->_idx, ti);
  2200       } else {
  2201         add_deferred_edge(n->_idx, ti);
  2203       _processed.set(n->_idx);
  2204       break;
  2205     }
  2206     case Op_ConP:
  2207     {
  2208       // assume all pointer constants globally escape except for null
  2209       PointsToNode::EscapeState es;
  2210       if (phase->type(n) == TypePtr::NULL_PTR)
  2211         es = PointsToNode::NoEscape;
  2212       else
  2213         es = PointsToNode::GlobalEscape;
  2215       add_node(n, PointsToNode::JavaObject, es, true);
  2216       break;
  2217     }
  2218     case Op_ConN:
  2219     {
  2220       // assume all narrow oop constants globally escape except for null
  2221       PointsToNode::EscapeState es;
  2222       if (phase->type(n) == TypeNarrowOop::NULL_PTR)
  2223         es = PointsToNode::NoEscape;
  2224       else
  2225         es = PointsToNode::GlobalEscape;
  2227       add_node(n, PointsToNode::JavaObject, es, true);
  2228       break;
  2229     }
  2230     case Op_CreateEx:
  2231     {
  2232       // assume that all exception objects globally escape
  2233       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
  2234       break;
  2235     }
  2236     case Op_LoadKlass:
  2237     case Op_LoadNKlass:
  2238     {
  2239       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
  2240       break;
  2241     }
  2242     case Op_LoadP:
  2243     case Op_LoadN:
  2244     {
  2245       const Type *t = phase->type(n);
  2246       if (t->make_ptr() == NULL) {
  2247         _processed.set(n->_idx);
  2248         return;
  2249       }
  2250       add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  2251       break;
  2252     }
  2253     case Op_Parm:
  2254     {
  2255       _processed.set(n->_idx); // No need to redefine its state.
  2256       uint con = n->as_Proj()->_con;
  2257       if (con < TypeFunc::Parms)
  2258         return;
  2259       const Type *t = n->in(0)->as_Start()->_domain->field_at(con);
  2260       if (t->isa_ptr() == NULL)
  2261         return;
  2262       // We have to assume all input parameters globally escape
  2263       // (Note: passing 'false' since _processed is already set).
  2264       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, false);
  2265       break;
  2266     }
  2267     case Op_Phi:
  2268     {
  2269       const Type *t = n->as_Phi()->type();
  2270       if (t->make_ptr() == NULL) {
  2271         // nothing to do if not an oop or narrow oop
  2272         _processed.set(n->_idx);
  2273         return;
  2274       }
  2275       add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  2276       uint i;
  2277       for (i = 1; i < n->req() ; i++) {
  2278         Node* in = n->in(i);
  2279         if (in == NULL)
  2280           continue;  // ignore NULL
  2281         in = in->uncast();
  2282         if (in->is_top() || in == n)
  2283           continue;  // ignore top or inputs which go back to this node
  2284         int ti = in->_idx;
  2285         PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
  2286         if (nt == PointsToNode::UnknownType) {
  2287           break;
  2288         } else if (nt == PointsToNode::JavaObject) {
  2289           add_pointsto_edge(n->_idx, ti);
  2290         } else {
  2291           add_deferred_edge(n->_idx, ti);
  2292         }
  2293       }
  2294       if (i >= n->req())
  2295         _processed.set(n->_idx);
  2296       else
  2297         _delayed_worklist.push(n);
  2298       break;
  2299     }
  2300     case Op_Proj:
  2301     {
  2302       // we are only interested in the oop result projection from a call
  2303       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
  2304         const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
  2305         assert(r->cnt() > TypeFunc::Parms, "sanity");
  2306         if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
  2307           add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  2308           int ti = n->in(0)->_idx;
  2309           // The call may not be registered yet (since not all its inputs are registered)
  2310           // if this is the projection from the backbranch edge of a Phi.
  2311           if (ptnode_adr(ti)->node_type() != PointsToNode::UnknownType) {
  2312             process_call_result(n->as_Proj(), phase);
  2313           }
  2314           if (!_processed.test(n->_idx)) {
  2315             // The call's result may need to be processed later if the call
  2316             // returns its argument and the argument is not processed yet.
  2317             _delayed_worklist.push(n);
  2318           }
  2319           break;
  2320         }
  2321       }
  2322       _processed.set(n->_idx);
  2323       break;
  2324     }
  2325     case Op_Return:
  2326     {
  2327       if( n->req() > TypeFunc::Parms &&
  2328           phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
  2329         // Treat Return value as LocalVar with GlobalEscape escape state.
  2330         add_node(n, PointsToNode::LocalVar, PointsToNode::GlobalEscape, false);
  2331         int ti = n->in(TypeFunc::Parms)->_idx;
  2332         PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
  2333         if (nt == PointsToNode::UnknownType) {
  2334           _delayed_worklist.push(n); // Process it later.
  2335           break;
  2336         } else if (nt == PointsToNode::JavaObject) {
  2337           add_pointsto_edge(n->_idx, ti);
  2338         } else {
  2339           add_deferred_edge(n->_idx, ti);
  2340         }
  2341       }
  2342       _processed.set(n->_idx);
  2343       break;
  2344     }
  2345     case Op_StoreP:
  2346     case Op_StoreN:
  2347     {
  2348       const Type *adr_type = phase->type(n->in(MemNode::Address));
  2349       adr_type = adr_type->make_ptr();
  2350       if (adr_type->isa_oopptr()) {
  2351         add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
  2352       } else {
  2353         Node* adr = n->in(MemNode::Address);
  2354         if (adr->is_AddP() && phase->type(adr) == TypeRawPtr::NOTNULL &&
  2355             adr->in(AddPNode::Address)->is_Proj() &&
  2356             adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
  2357           add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
  2358           // We are computing a raw address for a store captured
  2359           // by an Initialize node; compute an appropriate address type.
  2360           int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
  2361           assert(offs != Type::OffsetBot, "offset must be a constant");
  2362         } else {
  2363           _processed.set(n->_idx);
  2364           return;
  2365         }
  2366       }
  2367       break;
  2368     }
  2369     case Op_StorePConditional:
  2370     case Op_CompareAndSwapP:
  2371     case Op_CompareAndSwapN:
  2372     {
  2373       const Type *adr_type = phase->type(n->in(MemNode::Address));
  2374       adr_type = adr_type->make_ptr();
  2375       if (adr_type->isa_oopptr()) {
  2376         add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
  2377       } else {
  2378         _processed.set(n->_idx);
  2379         return;
  2380       }
  2381       break;
  2382     }
  2383     case Op_AryEq:
  2384     case Op_StrComp:
  2385     case Op_StrEquals:
  2386     case Op_StrIndexOf:
  2387     {
  2388       // char[] arrays passed to string intrinsics are not scalar replaceable.
  2389       add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
  2390       break;
  2391     }
  2392     case Op_ThreadLocal:
  2393     {
  2394       add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
  2395       break;
  2396     }
  2397     default:
  2398       ;
  2399       // nothing to do
  2400   }
  2401   return;
  2402 }
  2404 void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
  2405   uint n_idx = n->_idx;
  2406   assert(ptnode_adr(n_idx)->_node != NULL, "node should be registered");
  2408   // Don't set the processed bit for AddP, LoadP and StoreP since
  2409   // they may need more than one pass to process.
  2410   if (_processed.test(n_idx))
  2411     return; // No need to redefine node's state.
  2413   if (n->is_Call()) {
  2414     CallNode *call = n->as_Call();
  2415     process_call_arguments(call, phase);
  2416     _processed.set(n_idx);
  2417     return;
  2418   }
  2420   switch (n->Opcode()) {
  2421     case Op_AddP:
  2422     {
  2423       Node *base = get_addp_base(n);
  2424       // Create a field edge to this node from everything base could point to.
  2425       VectorSet ptset(Thread::current()->resource_area());
  2426       PointsTo(ptset, base, phase);
  2427       for( VectorSetI i(&ptset); i.test(); ++i ) {
  2428         uint pt = i.elem;
  2429         add_field_edge(pt, n_idx, address_offset(n, phase));
  2430       }
  2431       break;
  2432     }
  2433     case Op_CastX2P:
  2434     {
  2435       assert(false, "Op_CastX2P");
  2436       break;
  2437     }
  2438     case Op_CastPP:
  2439     case Op_CheckCastPP:
  2440     case Op_EncodeP:
  2441     case Op_DecodeN:
  2442     {
  2443       int ti = n->in(1)->_idx;
  2444       assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "all nodes should be registered");
  2445       if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
  2446         add_pointsto_edge(n_idx, ti);
  2447       } else {
  2448         add_deferred_edge(n_idx, ti);
  2450       _processed.set(n_idx);
  2451       break;
  2452     }
  2453     case Op_ConP:
  2454     {
  2455       assert(false, "Op_ConP");
  2456       break;
  2457     }
  2458     case Op_ConN:
  2459     {
  2460       assert(false, "Op_ConN");
  2461       break;
  2462     }
  2463     case Op_CreateEx:
  2464     {
  2465       assert(false, "Op_CreateEx");
  2466       break;
  2467     }
  2468     case Op_LoadKlass:
  2469     case Op_LoadNKlass:
  2470     {
  2471       assert(false, "Op_LoadKlass");
  2472       break;
  2473     }
  2474     case Op_LoadP:
  2475     case Op_LoadN:
  2476     {
  2477       const Type *t = phase->type(n);
  2478 #ifdef ASSERT
  2479       if (t->make_ptr() == NULL)
  2480         assert(false, "Op_LoadP");
  2481 #endif
  2483       Node* adr = n->in(MemNode::Address)->uncast();
  2484       Node* adr_base;
  2485       if (adr->is_AddP()) {
  2486         adr_base = get_addp_base(adr);
  2487       } else {
  2488         adr_base = adr;
  2491       // For everything "adr_base" could point to, create a deferred edge from
  2492       // this node to each field with the same offset.
  2493       VectorSet ptset(Thread::current()->resource_area());
  2494       PointsTo(ptset, adr_base, phase);
  2495       int offset = address_offset(adr, phase);
  2496       for( VectorSetI i(&ptset); i.test(); ++i ) {
  2497         uint pt = i.elem;
  2498         add_deferred_edge_to_fields(n_idx, pt, offset);
  2499       }
  2500       break;
  2501     }
  2502     case Op_Parm:
  2503     {
  2504       assert(false, "Op_Parm");
  2505       break;
  2506     }
  2507     case Op_Phi:
  2508     {
  2509 #ifdef ASSERT
  2510       const Type *t = n->as_Phi()->type();
  2511       if (t->make_ptr() == NULL)
  2512         assert(false, "Op_Phi");
  2513 #endif
  2514       for (uint i = 1; i < n->req() ; i++) {
  2515         Node* in = n->in(i);
  2516         if (in == NULL)
  2517           continue;  // ignore NULL
  2518         in = in->uncast();
  2519         if (in->is_top() || in == n)
  2520           continue;  // ignore top or inputs which go back to this node
  2521         int ti = in->_idx;
  2522         PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
  2523         assert(nt != PointsToNode::UnknownType, "all nodes should be known");
  2524         if (nt == PointsToNode::JavaObject) {
  2525           add_pointsto_edge(n_idx, ti);
  2526         } else {
  2527           add_deferred_edge(n_idx, ti);
  2528         }
  2529       }
  2530       _processed.set(n_idx);
  2531       break;
  2532     }
  2533     case Op_Proj:
  2534     {
  2535       // we are only interested in the oop result projection from a call
  2536       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
  2537         assert(ptnode_adr(n->in(0)->_idx)->node_type() != PointsToNode::UnknownType,
  2538                "all nodes should be registered");
  2539         const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
  2540         assert(r->cnt() > TypeFunc::Parms, "sanity");
  2541         if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
  2542           process_call_result(n->as_Proj(), phase);
  2543           assert(_processed.test(n_idx), "all call results should be processed");
  2544           break;
  2545         }
  2546       }
  2547       assert(false, "Op_Proj");
  2548       break;
  2549     }
  2550     case Op_Return:
  2551     {
  2552 #ifdef ASSERT
  2553       if( n->req() <= TypeFunc::Parms ||
  2554           !phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
  2555         assert(false, "Op_Return");
  2556       }
  2557 #endif
  2558       int ti = n->in(TypeFunc::Parms)->_idx;
  2559       assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "node should be registered");
  2560       if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
  2561         add_pointsto_edge(n_idx, ti);
  2562       } else {
  2563         add_deferred_edge(n_idx, ti);
  2564       }
  2565       _processed.set(n_idx);
  2566       break;
  2567     }
  2568     case Op_StoreP:
  2569     case Op_StoreN:
  2570     case Op_StorePConditional:
  2571     case Op_CompareAndSwapP:
  2572     case Op_CompareAndSwapN:
  2573     {
  2574       Node *adr = n->in(MemNode::Address);
  2575       const Type *adr_type = phase->type(adr)->make_ptr();
  2576 #ifdef ASSERT
  2577       if (!adr_type->isa_oopptr())
  2578         assert(phase->type(adr) == TypeRawPtr::NOTNULL, "Op_StoreP");
  2579 #endif
  2581       assert(adr->is_AddP(), "expecting an AddP");
  2582       Node *adr_base = get_addp_base(adr);
  2583       Node *val = n->in(MemNode::ValueIn)->uncast();
  2584       // For everything "adr_base" could point to, create a deferred edge
  2585       // to "val" from each field with the same offset.
  2586       VectorSet ptset(Thread::current()->resource_area());
  2587       PointsTo(ptset, adr_base, phase);
  2588       for( VectorSetI i(&ptset); i.test(); ++i ) {
  2589         uint pt = i.elem;
  2590         add_edge_from_fields(pt, val->_idx, address_offset(adr, phase));
  2591       }
  2592       break;
  2593     }
  2594     case Op_AryEq:
  2595     case Op_StrComp:
  2596     case Op_StrEquals:
  2597     case Op_StrIndexOf:
  2598     {
  2599       // char[] arrays passed to string intrinsics do not escape but
  2600       // they are not scalar replaceable. Adjust the escape state for them.
  2601       // Start from the in(2) edge since in(1) is the memory edge.
  2602       for (uint i = 2; i < n->req(); i++) {
  2603         Node* adr = n->in(i)->uncast();
  2604         const Type *at = phase->type(adr);
  2605         if (!adr->is_top() && at->isa_ptr()) {
  2606           assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
  2607                  at->isa_ptr() != NULL, "expecting a Ptr");
  2608           if (adr->is_AddP()) {
  2609             adr = get_addp_base(adr);
  2611           // Mark as ArgEscape everything "adr" could point to.
  2612           set_escape_state(adr->_idx, PointsToNode::ArgEscape);
  2613           }
  2614         }
  2615       _processed.set(n_idx);
  2616       break;
  2617     }
  2618     case Op_ThreadLocal:
  2619     {
  2620       assert(false, "Op_ThreadLocal");
  2621       break;
  2622     }
  2623     default:
  2624       // This method should be called only for EA specific nodes.
  2625       ShouldNotReachHere();
  2626   }
  2627 }
  2629 #ifndef PRODUCT
  2630 void ConnectionGraph::dump() {
  2631   PhaseGVN  *igvn = _compile->initial_gvn();
  2632   bool first = true;
  2634   uint size = nodes_size();
  2635   for (uint ni = 0; ni < size; ni++) {
  2636     PointsToNode *ptn = ptnode_adr(ni);
  2637     PointsToNode::NodeType ptn_type = ptn->node_type();
  2639     if (ptn_type != PointsToNode::JavaObject || ptn->_node == NULL)
  2640       continue;
  2641     PointsToNode::EscapeState es = escape_state(ptn->_node, igvn);
  2642     if (ptn->_node->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
  2643       if (first) {
  2644         tty->cr();
  2645         tty->print("======== Connection graph for ");
  2646         _compile->method()->print_short_name();
  2647         tty->cr();
  2648         first = false;
  2649       }
  2650       tty->print("%6d ", ni);
  2651       ptn->dump();
  2652       // Print all locals which reference this allocation
  2653       for (uint li = ni; li < size; li++) {
  2654         PointsToNode *ptn_loc = ptnode_adr(li);
  2655         PointsToNode::NodeType ptn_loc_type = ptn_loc->node_type();
  2656         if ( ptn_loc_type == PointsToNode::LocalVar && ptn_loc->_node != NULL &&
  2657              ptn_loc->edge_count() == 1 && ptn_loc->edge_target(0) == ni ) {
  2658           ptnode_adr(li)->dump(false);
  2659         }
  2660       }
  2661       if (Verbose) {
  2662         // Print all fields which reference this allocation
  2663         for (uint i = 0; i < ptn->edge_count(); i++) {
  2664           uint ei = ptn->edge_target(i);
  2665           ptnode_adr(ei)->dump(false);
  2666         }
  2667       }
  2668       tty->cr();
  2669     }
  2670   }
  2671 }
  2672 #endif
