7147744: CTW: assert(false) failed: infinite EA connection graph build

Mon, 12 Mar 2012 10:46:47 -0700

author
kvn
date
Mon, 12 Mar 2012 10:46:47 -0700
changeset 3651
ee138854b3a6
parent 3636
fde683df4c27
child 3652
2537ebc321d4

7147744: CTW: assert(false) failed: infinite EA connection graph build
Summary: rewrote Connection graph construction code in EA to reduce time spent there.
Reviewed-by: never

src/share/vm/opto/c2_globals.hpp file | annotate | diff | comparison | revisions
src/share/vm/opto/callnode.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/callnode.hpp file | annotate | diff | comparison | revisions
src/share/vm/opto/compile.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/escape.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/escape.hpp file | annotate | diff | comparison | revisions
src/share/vm/opto/phase.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/phase.hpp file | annotate | diff | comparison | revisions
src/share/vm/utilities/growableArray.hpp file | annotate | diff | comparison | revisions
     1.1 --- a/src/share/vm/opto/c2_globals.hpp	Fri Mar 09 13:34:45 2012 -0800
     1.2 +++ b/src/share/vm/opto/c2_globals.hpp	Mon Mar 12 10:46:47 2012 -0700
     1.3 @@ -1,5 +1,5 @@
     1.4  /*
     1.5 - * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
     1.6 + * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
     1.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.8   *
     1.9   * This code is free software; you can redistribute it and/or modify it
    1.10 @@ -465,6 +465,9 @@
    1.11    notproduct(bool, PrintOptimizePtrCompare, false,                          \
    1.12            "Print information about optimized pointers compare")             \
    1.13                                                                              \
    1.14 +  notproduct(bool, VerifyConnectionGraph , true,                            \
    1.15 +          "Verify Connection Graph construction in Escape Analysis")        \
    1.16 +                                                                            \
    1.17    product(bool, UseOptoBiasInlining, true,                                  \
    1.18            "Generate biased locking code in C2 ideal graph")                 \
    1.19                                                                              \
     2.1 --- a/src/share/vm/opto/callnode.cpp	Fri Mar 09 13:34:45 2012 -0800
     2.2 +++ b/src/share/vm/opto/callnode.cpp	Mon Mar 12 10:46:47 2012 -0700
     2.3 @@ -1,5 +1,5 @@
     2.4  /*
     2.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
     2.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
     2.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     2.8   *
     2.9   * This code is free software; you can redistribute it and/or modify it
    2.10 @@ -1538,10 +1538,7 @@
    2.11      // If we are locking an unescaped object, the lock/unlock is unnecessary
    2.12      //
    2.13      ConnectionGraph *cgr = phase->C->congraph();
    2.14 -    PointsToNode::EscapeState es = PointsToNode::GlobalEscape;
    2.15 -    if (cgr != NULL)
    2.16 -      es = cgr->escape_state(obj_node());
    2.17 -    if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
    2.18 +    if (cgr != NULL && cgr->not_global_escape(obj_node())) {
    2.19        assert(!is_eliminated() || is_coarsened(), "sanity");
    2.20        // The lock could be marked eliminated by lock coarsening
    2.21        // code during first IGVN before EA. Replace coarsened flag
    2.22 @@ -1680,10 +1677,7 @@
    2.23      // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
    2.24      //
    2.25      ConnectionGraph *cgr = phase->C->congraph();
    2.26 -    PointsToNode::EscapeState es = PointsToNode::GlobalEscape;
    2.27 -    if (cgr != NULL)
    2.28 -      es = cgr->escape_state(obj_node());
    2.29 -    if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
    2.30 +    if (cgr != NULL && cgr->not_global_escape(obj_node())) {
    2.31        assert(!is_eliminated() || is_coarsened(), "sanity");
    2.32        // The lock could be marked eliminated by lock coarsening
    2.33        // code during first IGVN before EA. Replace coarsened flag
     3.1 --- a/src/share/vm/opto/callnode.hpp	Fri Mar 09 13:34:45 2012 -0800
     3.2 +++ b/src/share/vm/opto/callnode.hpp	Mon Mar 12 10:46:47 2012 -0700
     3.3 @@ -1,5 +1,5 @@
     3.4  /*
     3.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
     3.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
     3.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     3.8   *
     3.9   * This code is free software; you can redistribute it and/or modify it
    3.10 @@ -546,6 +546,12 @@
    3.11    // or result projection is there are several CheckCastPP
    3.12    // or returns NULL if there is no one.
    3.13    Node *result_cast();
     3.14 +  // Does this node return a pointer?
    3.15 +  bool returns_pointer() const {
    3.16 +    const TypeTuple *r = tf()->range();
    3.17 +    return (r->cnt() > TypeFunc::Parms &&
    3.18 +            r->field_at(TypeFunc::Parms)->isa_ptr());
    3.19 +  }
    3.20  
    3.21    // Collect all the interesting edges from a call for use in
    3.22    // replacing the call by something else.  Used by macro expansion
     4.1 --- a/src/share/vm/opto/compile.cpp	Fri Mar 09 13:34:45 2012 -0800
     4.2 +++ b/src/share/vm/opto/compile.cpp	Mon Mar 12 10:46:47 2012 -0700
     4.3 @@ -1,5 +1,5 @@
     4.4  /*
     4.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
     4.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
     4.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4.8   *
     4.9   * This code is free software; you can redistribute it and/or modify it
    4.10 @@ -1707,7 +1707,6 @@
    4.11        if (major_progress()) print_method("PhaseIdealLoop before EA", 2);
    4.12        if (failing())  return;
    4.13      }
    4.14 -    TracePhase t2("escapeAnalysis", &_t_escapeAnalysis, true);
    4.15      ConnectionGraph::do_analysis(this, &igvn);
    4.16  
    4.17      if (failing())  return;
    4.18 @@ -1719,6 +1718,7 @@
    4.19      if (failing())  return;
    4.20  
    4.21      if (congraph() != NULL && macro_count() > 0) {
    4.22 +      NOT_PRODUCT( TracePhase t2("macroEliminate", &_t_macroEliminate, TimeCompiler); )
    4.23        PhaseMacroExpand mexp(igvn);
    4.24        mexp.eliminate_macro_nodes();
    4.25        igvn.set_delay_transform(false);
     5.1 --- a/src/share/vm/opto/escape.cpp	Fri Mar 09 13:34:45 2012 -0800
     5.2 +++ b/src/share/vm/opto/escape.cpp	Mon Mar 12 10:46:47 2012 -0700
     5.3 @@ -24,6 +24,7 @@
     5.4  
     5.5  #include "precompiled.hpp"
     5.6  #include "ci/bcEscapeAnalyzer.hpp"
     5.7 +#include "compiler/compileLog.hpp"
     5.8  #include "libadt/vectset.hpp"
     5.9  #include "memory/allocation.hpp"
    5.10  #include "opto/c2compiler.hpp"
    5.11 @@ -34,125 +35,1901 @@
    5.12  #include "opto/phaseX.hpp"
    5.13  #include "opto/rootnode.hpp"
    5.14  
    5.15 -void PointsToNode::add_edge(uint targIdx, PointsToNode::EdgeType et) {
    5.16 -  uint v = (targIdx << EdgeShift) + ((uint) et);
    5.17 -  if (_edges == NULL) {
    5.18 -     Arena *a = Compile::current()->comp_arena();
    5.19 -    _edges = new(a) GrowableArray<uint>(a, INITIAL_EDGE_COUNT, 0, 0);
    5.20 -  }
    5.21 -  _edges->append_if_missing(v);
    5.22 -}
    5.23 -
    5.24 -void PointsToNode::remove_edge(uint targIdx, PointsToNode::EdgeType et) {
    5.25 -  uint v = (targIdx << EdgeShift) + ((uint) et);
    5.26 -
    5.27 -  _edges->remove(v);
    5.28 -}
    5.29 -
    5.30 -#ifndef PRODUCT
    5.31 -static const char *node_type_names[] = {
    5.32 -  "UnknownType",
    5.33 -  "JavaObject",
    5.34 -  "LocalVar",
    5.35 -  "Field"
    5.36 -};
    5.37 -
    5.38 -static const char *esc_names[] = {
    5.39 -  "UnknownEscape",
    5.40 -  "NoEscape",
    5.41 -  "ArgEscape",
    5.42 -  "GlobalEscape"
    5.43 -};
    5.44 -
    5.45 -static const char *edge_type_suffix[] = {
    5.46 - "?", // UnknownEdge
    5.47 - "P", // PointsToEdge
    5.48 - "D", // DeferredEdge
    5.49 - "F"  // FieldEdge
    5.50 -};
    5.51 -
    5.52 -void PointsToNode::dump(bool print_state) const {
    5.53 -  NodeType nt = node_type();
    5.54 -  tty->print("%s ", node_type_names[(int) nt]);
    5.55 -  if (print_state) {
    5.56 -    EscapeState es = escape_state();
    5.57 -    tty->print("%s %s ", esc_names[(int) es], _scalar_replaceable ? "":"NSR");
    5.58 -  }
    5.59 -  tty->print("[[");
    5.60 -  for (uint i = 0; i < edge_count(); i++) {
    5.61 -    tty->print(" %d%s", edge_target(i), edge_type_suffix[(int) edge_type(i)]);
    5.62 -  }
    5.63 -  tty->print("]]  ");
    5.64 -  if (_node == NULL)
    5.65 -    tty->print_cr("<null>");
    5.66 -  else
    5.67 -    _node->dump();
    5.68 -}
    5.69 -#endif
    5.70 -
    5.71  ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
    5.72 -  _nodes(C->comp_arena(), C->unique(), C->unique(), PointsToNode()),
    5.73 -  _processed(C->comp_arena()),
    5.74 -  pt_ptset(C->comp_arena()),
    5.75 -  pt_visited(C->comp_arena()),
    5.76 -  pt_worklist(C->comp_arena(), 4, 0, 0),
    5.77 +  _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
    5.78    _collecting(true),
    5.79 -  _progress(false),
    5.80 +  _verify(false),
    5.81    _compile(C),
    5.82    _igvn(igvn),
    5.83    _node_map(C->comp_arena()) {
    5.84 -
    5.85 -  _phantom_object = C->top()->_idx,
    5.86 -  add_node(C->top(), PointsToNode::JavaObject, PointsToNode::GlobalEscape,true);
    5.87 -
    5.88 +  // Add unknown java object.
    5.89 +  add_java_object(C->top(), PointsToNode::GlobalEscape);
    5.90 +  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
    5.91    // Add ConP(#NULL) and ConN(#NULL) nodes.
    5.92    Node* oop_null = igvn->zerocon(T_OBJECT);
    5.93 -  _oop_null = oop_null->_idx;
    5.94 -  assert(_oop_null < nodes_size(), "should be created already");
    5.95 -  add_node(oop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);
    5.96 -
    5.97 +  assert(oop_null->_idx < nodes_size(), "should be created already");
    5.98 +  add_java_object(oop_null, PointsToNode::NoEscape);
    5.99 +  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
   5.100    if (UseCompressedOops) {
   5.101      Node* noop_null = igvn->zerocon(T_NARROWOOP);
   5.102 -    _noop_null = noop_null->_idx;
   5.103 -    assert(_noop_null < nodes_size(), "should be created already");
   5.104 -    add_node(noop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);
   5.105 -  } else {
   5.106 -    _noop_null = _oop_null; // Should be initialized
   5.107 +    assert(noop_null->_idx < nodes_size(), "should be created already");
   5.108 +    map_ideal_node(noop_null, null_obj);
   5.109    }
   5.110    _pcmp_neq = NULL; // Should be initialized
   5.111    _pcmp_eq  = NULL;
   5.112  }
   5.113  
   5.114 -void ConnectionGraph::add_pointsto_edge(uint from_i, uint to_i) {
   5.115 -  PointsToNode *f = ptnode_adr(from_i);
   5.116 -  PointsToNode *t = ptnode_adr(to_i);
   5.117 -
   5.118 -  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
   5.119 -  assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of PointsTo edge");
   5.120 -  assert(t->node_type() == PointsToNode::JavaObject, "invalid destination of PointsTo edge");
   5.121 -  if (to_i == _phantom_object) { // Quick test for most common object
   5.122 -    if (f->has_unknown_ptr()) {
   5.123 -      return;
   5.124 -    } else {
   5.125 -      f->set_has_unknown_ptr();
   5.126 +bool ConnectionGraph::has_candidates(Compile *C) {
   5.127 +  // EA brings benefits only when the code has allocations and/or locks which
   5.128 +  // are represented by ideal Macro nodes.
   5.129 +  int cnt = C->macro_count();
   5.130 +  for( int i=0; i < cnt; i++ ) {
   5.131 +    Node *n = C->macro_node(i);
   5.132 +    if ( n->is_Allocate() )
   5.133 +      return true;
   5.134 +    if( n->is_Lock() ) {
   5.135 +      Node* obj = n->as_Lock()->obj_node()->uncast();
   5.136 +      if( !(obj->is_Parm() || obj->is_Con()) )
   5.137 +        return true;
   5.138      }
   5.139    }
   5.140 -  add_edge(f, to_i, PointsToNode::PointsToEdge);
   5.141 +  return false;
   5.142  }
   5.143  
   5.144 -void ConnectionGraph::add_deferred_edge(uint from_i, uint to_i) {
   5.145 -  PointsToNode *f = ptnode_adr(from_i);
   5.146 -  PointsToNode *t = ptnode_adr(to_i);
   5.147 +void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
   5.148 +  Compile::TracePhase t2("escapeAnalysis", &Phase::_t_escapeAnalysis, true);
   5.149 +  ResourceMark rm;
   5.150  
   5.151 -  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
   5.152 -  assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of Deferred edge");
   5.153 -  assert(t->node_type() == PointsToNode::LocalVar || t->node_type() == PointsToNode::Field, "invalid destination of Deferred edge");
   5.154 -  // don't add a self-referential edge, this can occur during removal of
   5.155 -  // deferred edges
   5.156 -  if (from_i != to_i)
   5.157 -    add_edge(f, to_i, PointsToNode::DeferredEdge);
   5.158 +  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
   5.159 +  // to create space for them in ConnectionGraph::_nodes[].
   5.160 +  Node* oop_null = igvn->zerocon(T_OBJECT);
   5.161 +  Node* noop_null = igvn->zerocon(T_NARROWOOP);
   5.162 +  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
   5.163 +  // Perform escape analysis
   5.164 +  if (congraph->compute_escape()) {
   5.165 +    // There are non escaping objects.
   5.166 +    C->set_congraph(congraph);
   5.167 +  }
   5.168 +  // Cleanup.
   5.169 +  if (oop_null->outcnt() == 0)
   5.170 +    igvn->hash_delete(oop_null);
   5.171 +  if (noop_null->outcnt() == 0)
   5.172 +    igvn->hash_delete(noop_null);
   5.173  }
   5.174  
   5.175 +bool ConnectionGraph::compute_escape() {
   5.176 +  Compile* C = _compile;
   5.177 +  PhaseGVN* igvn = _igvn;
   5.178 +
   5.179 +  // Worklists used by EA.
   5.180 +  Unique_Node_List delayed_worklist;
   5.181 +  GrowableArray<Node*> alloc_worklist;
   5.182 +  GrowableArray<Node*> ptr_cmp_worklist;
   5.183 +  GrowableArray<Node*> storestore_worklist;
   5.184 +  GrowableArray<PointsToNode*>   ptnodes_worklist;
   5.185 +  GrowableArray<JavaObjectNode*> java_objects_worklist;
   5.186 +  GrowableArray<JavaObjectNode*> non_escaped_worklist;
   5.187 +  GrowableArray<FieldNode*>      oop_fields_worklist;
   5.188 +  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
   5.189 +
   5.190 +  { Compile::TracePhase t3("connectionGraph", &Phase::_t_connectionGraph, true);
   5.191 +
   5.192 +  // 1. Populate Connection Graph (CG) with PointsTo nodes.
   5.193 +  ideal_nodes.map(C->unique(), NULL);  // preallocate space
   5.194 +  // Initialize worklist
   5.195 +  if (C->root() != NULL) {
   5.196 +    ideal_nodes.push(C->root());
   5.197 +  }
   5.198 +  for( uint next = 0; next < ideal_nodes.size(); ++next ) {
   5.199 +    Node* n = ideal_nodes.at(next);
   5.200 +    // Create PointsTo nodes and add them to Connection Graph. Called
    5.201 +    // only once per ideal node since ideal_nodes is a Unique_Node_List.
   5.202 +    add_node_to_connection_graph(n, &delayed_worklist);
   5.203 +    PointsToNode* ptn = ptnode_adr(n->_idx);
   5.204 +    if (ptn != NULL) {
   5.205 +      ptnodes_worklist.append(ptn);
   5.206 +      if (ptn->is_JavaObject()) {
   5.207 +        java_objects_worklist.append(ptn->as_JavaObject());
   5.208 +        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
   5.209 +            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
   5.210 +          // Only allocations and java static calls results are interesting.
   5.211 +          non_escaped_worklist.append(ptn->as_JavaObject());
   5.212 +        }
   5.213 +      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
   5.214 +        oop_fields_worklist.append(ptn->as_Field());
   5.215 +      }
   5.216 +    }
   5.217 +    if (n->is_MergeMem()) {
   5.218 +      // Collect all MergeMem nodes to add memory slices for
   5.219 +      // scalar replaceable objects in split_unique_types().
   5.220 +      _mergemem_worklist.append(n->as_MergeMem());
   5.221 +    } else if (OptimizePtrCompare && n->is_Cmp() &&
   5.222 +               (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
   5.223 +      // Collect compare pointers nodes.
   5.224 +      ptr_cmp_worklist.append(n);
   5.225 +    } else if (n->is_MemBarStoreStore()) {
   5.226 +      // Collect all MemBarStoreStore nodes so that depending on the
   5.227 +      // escape status of the associated Allocate node some of them
   5.228 +      // may be eliminated.
   5.229 +      storestore_worklist.append(n);
   5.230 +#ifdef ASSERT
   5.231 +    } else if(n->is_AddP()) {
   5.232 +      // Collect address nodes for graph verification.
   5.233 +      addp_worklist.append(n);
   5.234 +#endif
   5.235 +    }
   5.236 +    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
   5.237 +      Node* m = n->fast_out(i);   // Get user
   5.238 +      ideal_nodes.push(m);
   5.239 +    }
   5.240 +  }
   5.241 +  if (non_escaped_worklist.length() == 0) {
   5.242 +    _collecting = false;
   5.243 +    return false; // Nothing to do.
   5.244 +  }
   5.245 +  // Add final simple edges to graph.
   5.246 +  while(delayed_worklist.size() > 0) {
   5.247 +    Node* n = delayed_worklist.pop();
   5.248 +    add_final_edges(n);
   5.249 +  }
   5.250 +  int ptnodes_length = ptnodes_worklist.length();
   5.251 +
   5.252 +#ifdef ASSERT
   5.253 +  if (VerifyConnectionGraph) {
   5.254 +    // Verify that no new simple edges could be created and all
   5.255 +    // local vars has edges.
   5.256 +    _verify = true;
   5.257 +    for (int next = 0; next < ptnodes_length; ++next) {
   5.258 +      PointsToNode* ptn = ptnodes_worklist.at(next);
   5.259 +      add_final_edges(ptn->ideal_node());
   5.260 +      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
   5.261 +        ptn->dump();
   5.262 +        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
   5.263 +      }
   5.264 +    }
   5.265 +    _verify = false;
   5.266 +  }
   5.267 +#endif
   5.268 +
   5.269 +  // 2. Finish Graph construction by propagating references to all
   5.270 +  //    java objects through graph.
   5.271 +  if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,
   5.272 +                                 java_objects_worklist, oop_fields_worklist)) {
   5.273 +    // All objects escaped or hit time or iterations limits.
   5.274 +    _collecting = false;
   5.275 +    return false;
   5.276 +  }
   5.277 +
   5.278 +  // 3. Adjust scalar_replaceable state of nonescaping objects and push
   5.279 +  //    scalar replaceable allocations on alloc_worklist for processing
   5.280 +  //    in split_unique_types().
   5.281 +  int non_escaped_length = non_escaped_worklist.length();
   5.282 +  for (int next = 0; next < non_escaped_length; next++) {
   5.283 +    JavaObjectNode* ptn = non_escaped_worklist.at(next);
   5.284 +    if (ptn->escape_state() == PointsToNode::NoEscape &&
   5.285 +        ptn->scalar_replaceable()) {
   5.286 +      adjust_scalar_replaceable_state(ptn);
   5.287 +      if (ptn->scalar_replaceable()) {
   5.288 +        alloc_worklist.append(ptn->ideal_node());
   5.289 +      }
   5.290 +    }
   5.291 +  }
   5.292 +
   5.293 +#ifdef ASSERT
   5.294 +  if (VerifyConnectionGraph) {
   5.295 +    // Verify that graph is complete - no new edges could be added or needed.
   5.296 +    verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
   5.297 +                            java_objects_worklist, addp_worklist);
   5.298 +  }
   5.299 +  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
   5.300 +  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
   5.301 +         null_obj->edge_count() == 0 &&
   5.302 +         !null_obj->arraycopy_src() &&
   5.303 +         !null_obj->arraycopy_dst(), "sanity");
   5.304 +#endif
   5.305 +
   5.306 +  _collecting = false;
   5.307 +
   5.308 +  } // TracePhase t3("connectionGraph")
   5.309 +
   5.310 +  // 4. Optimize ideal graph based on EA information.
   5.311 +  bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
   5.312 +  if (has_non_escaping_obj) {
   5.313 +    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
   5.314 +  }
   5.315 +
   5.316 +#ifndef PRODUCT
   5.317 +  if (PrintEscapeAnalysis) {
   5.318 +    dump(ptnodes_worklist); // Dump ConnectionGraph
   5.319 +  }
   5.320 +#endif
   5.321 +
   5.322 +  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
   5.323 +#ifdef ASSERT
   5.324 +  if (VerifyConnectionGraph) {
   5.325 +    int alloc_length = alloc_worklist.length();
   5.326 +    for (int next = 0; next < alloc_length; ++next) {
   5.327 +      Node* n = alloc_worklist.at(next);
   5.328 +      PointsToNode* ptn = ptnode_adr(n->_idx);
   5.329 +      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
   5.330 +    }
   5.331 +  }
   5.332 +#endif
   5.333 +
    5.334 +  // 5. Separate memory graph for scalar replaceable allocations.
   5.335 +  if (has_scalar_replaceable_candidates &&
   5.336 +      C->AliasLevel() >= 3 && EliminateAllocations) {
   5.337 +    // Now use the escape information to create unique types for
   5.338 +    // scalar replaceable objects.
   5.339 +    split_unique_types(alloc_worklist);
   5.340 +    if (C->failing())  return false;
   5.341 +    C->print_method("After Escape Analysis", 2);
   5.342 +
   5.343 +#ifdef ASSERT
   5.344 +  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
   5.345 +    tty->print("=== No allocations eliminated for ");
   5.346 +    C->method()->print_short_name();
   5.347 +    if(!EliminateAllocations) {
   5.348 +      tty->print(" since EliminateAllocations is off ===");
   5.349 +    } else if(!has_scalar_replaceable_candidates) {
   5.350 +      tty->print(" since there are no scalar replaceable candidates ===");
   5.351 +    } else if(C->AliasLevel() < 3) {
   5.352 +      tty->print(" since AliasLevel < 3 ===");
   5.353 +    }
   5.354 +    tty->cr();
   5.355 +#endif
   5.356 +  }
   5.357 +  return has_non_escaping_obj;
   5.358 +}
   5.359 +
   5.360 +// Populate Connection Graph with PointsTo nodes and create simple
   5.361 +// connection graph edges.
   5.362 +void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
    5.363 +  assert(!_verify, "this method should not be called for verification");
   5.364 +  PhaseGVN* igvn = _igvn;
   5.365 +  uint n_idx = n->_idx;
   5.366 +  PointsToNode* n_ptn = ptnode_adr(n_idx);
   5.367 +  if (n_ptn != NULL)
   5.368 +    return; // No need to redefine PointsTo node during first iteration.
   5.369 +
   5.370 +  if (n->is_Call()) {
   5.371 +    // Arguments to allocation and locking don't escape.
   5.372 +    if (n->is_AbstractLock()) {
   5.373 +      // Put Lock and Unlock nodes on IGVN worklist to process them during
   5.374 +      // first IGVN optimization when escape information is still available.
   5.375 +      record_for_optimizer(n);
   5.376 +    } else if (n->is_Allocate()) {
   5.377 +      add_call_node(n->as_Call());
   5.378 +      record_for_optimizer(n);
   5.379 +    } else {
   5.380 +      if (n->is_CallStaticJava()) {
   5.381 +        const char* name = n->as_CallStaticJava()->_name;
   5.382 +        if (name != NULL && strcmp(name, "uncommon_trap") == 0)
   5.383 +          return; // Skip uncommon traps
   5.384 +      }
   5.385 +      // Don't mark as processed since call's arguments have to be processed.
   5.386 +      delayed_worklist->push(n);
   5.387 +      // Check if a call returns an object.
   5.388 +      if (n->as_Call()->returns_pointer() &&
   5.389 +          n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
   5.390 +        add_call_node(n->as_Call());
   5.391 +      }
   5.392 +    }
   5.393 +    return;
   5.394 +  }
   5.395 +  // Put this check here to process call arguments since some call nodes
   5.396 +  // point to phantom_obj.
   5.397 +  if (n_ptn == phantom_obj || n_ptn == null_obj)
   5.398 +    return; // Skip predefined nodes.
   5.399 +
   5.400 +  int opcode = n->Opcode();
   5.401 +  switch (opcode) {
   5.402 +    case Op_AddP: {
   5.403 +      Node* base = get_addp_base(n);
   5.404 +      PointsToNode* ptn_base = ptnode_adr(base->_idx);
   5.405 +      // Field nodes are created for all field types. They are used in
   5.406 +      // adjust_scalar_replaceable_state() and split_unique_types().
   5.407 +      // Note, non-oop fields will have only base edges in Connection
   5.408 +      // Graph because such fields are not used for oop loads and stores.
   5.409 +      int offset = address_offset(n, igvn);
   5.410 +      add_field(n, PointsToNode::NoEscape, offset);
   5.411 +      if (ptn_base == NULL) {
   5.412 +        delayed_worklist->push(n); // Process it later.
   5.413 +      } else {
   5.414 +        n_ptn = ptnode_adr(n_idx);
   5.415 +        add_base(n_ptn->as_Field(), ptn_base);
   5.416 +      }
   5.417 +      break;
   5.418 +    }
   5.419 +    case Op_CastX2P: {
   5.420 +      map_ideal_node(n, phantom_obj);
   5.421 +      break;
   5.422 +    }
   5.423 +    case Op_CastPP:
   5.424 +    case Op_CheckCastPP:
   5.425 +    case Op_EncodeP:
   5.426 +    case Op_DecodeN: {
   5.427 +      add_local_var_and_edge(n, PointsToNode::NoEscape,
   5.428 +                             n->in(1), delayed_worklist);
   5.429 +      break;
   5.430 +    }
   5.431 +    case Op_CMoveP: {
   5.432 +      add_local_var(n, PointsToNode::NoEscape);
    5.433 +      // Do not add edges during the first iteration because some may
    5.434 +      // not be defined yet.
   5.435 +      delayed_worklist->push(n);
   5.436 +      break;
   5.437 +    }
   5.438 +    case Op_ConP:
   5.439 +    case Op_ConN: {
   5.440 +      // assume all oop constants globally escape except for null
   5.441 +      PointsToNode::EscapeState es;
   5.442 +      if (igvn->type(n) == TypePtr::NULL_PTR ||
   5.443 +          igvn->type(n) == TypeNarrowOop::NULL_PTR) {
   5.444 +        es = PointsToNode::NoEscape;
   5.445 +      } else {
   5.446 +        es = PointsToNode::GlobalEscape;
   5.447 +      }
   5.448 +      add_java_object(n, es);
   5.449 +      break;
   5.450 +    }
   5.451 +    case Op_CreateEx: {
   5.452 +      // assume that all exception objects globally escape
   5.453 +      add_java_object(n, PointsToNode::GlobalEscape);
   5.454 +      break;
   5.455 +    }
   5.456 +    case Op_LoadKlass:
   5.457 +    case Op_LoadNKlass: {
   5.458 +      // Unknown class is loaded
   5.459 +      map_ideal_node(n, phantom_obj);
   5.460 +      break;
   5.461 +    }
   5.462 +    case Op_LoadP:
   5.463 +    case Op_LoadN:
   5.464 +    case Op_LoadPLocked: {
   5.465 +      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
    5.466 +      // ThreadLocal has RawPtr type.
   5.467 +      const Type* t = igvn->type(n);
   5.468 +      if (t->make_ptr() != NULL) {
   5.469 +        Node* adr = n->in(MemNode::Address);
   5.470 +#ifdef ASSERT
   5.471 +        if (!adr->is_AddP()) {
   5.472 +          assert(igvn->type(adr)->isa_rawptr(), "sanity");
   5.473 +        } else {
   5.474 +          assert((ptnode_adr(adr->_idx) == NULL ||
   5.475 +                  ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
   5.476 +        }
   5.477 +#endif
   5.478 +        add_local_var_and_edge(n, PointsToNode::NoEscape,
   5.479 +                               adr, delayed_worklist);
   5.480 +      }
   5.481 +      break;
   5.482 +    }
   5.483 +    case Op_Parm: {
   5.484 +      map_ideal_node(n, phantom_obj);
   5.485 +      break;
   5.486 +    }
   5.487 +    case Op_PartialSubtypeCheck: {
    5.488 +      // Produces Null or notNull and is used only in CmpP so
   5.489 +      // phantom_obj could be used.
   5.490 +      map_ideal_node(n, phantom_obj); // Result is unknown
   5.491 +      break;
   5.492 +    }
   5.493 +    case Op_Phi: {
   5.494 +      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
    5.495 +      // ThreadLocal has RawPtr type.
   5.496 +      const Type* t = n->as_Phi()->type();
   5.497 +      if (t->make_ptr() != NULL) {
   5.498 +        add_local_var(n, PointsToNode::NoEscape);
    5.499 +        // Do not add edges during the first iteration because some may
    5.500 +        // not be defined yet.
   5.501 +        delayed_worklist->push(n);
   5.502 +      }
   5.503 +      break;
   5.504 +    }
   5.505 +    case Op_Proj: {
   5.506 +      // we are only interested in the oop result projection from a call
   5.507 +      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
   5.508 +          n->in(0)->as_Call()->returns_pointer()) {
   5.509 +        add_local_var_and_edge(n, PointsToNode::NoEscape,
   5.510 +                               n->in(0), delayed_worklist);
   5.511 +      }
   5.512 +      break;
   5.513 +    }
   5.514 +    case Op_Rethrow: // Exception object escapes
   5.515 +    case Op_Return: {
   5.516 +      if (n->req() > TypeFunc::Parms &&
   5.517 +          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
   5.518 +        // Treat Return value as LocalVar with GlobalEscape escape state.
   5.519 +        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
   5.520 +                               n->in(TypeFunc::Parms), delayed_worklist);
   5.521 +      }
   5.522 +      break;
   5.523 +    }
   5.524 +    case Op_StoreP:
   5.525 +    case Op_StoreN:
   5.526 +    case Op_StorePConditional:
   5.527 +    case Op_CompareAndSwapP:
   5.528 +    case Op_CompareAndSwapN: {
   5.529 +      Node* adr = n->in(MemNode::Address);
   5.530 +      const Type *adr_type = igvn->type(adr);
   5.531 +      adr_type = adr_type->make_ptr();
   5.532 +      if (adr_type->isa_oopptr() ||
   5.533 +          (opcode == Op_StoreP || opcode == Op_StoreN) &&
   5.534 +                        (adr_type == TypeRawPtr::NOTNULL &&
   5.535 +                         adr->in(AddPNode::Address)->is_Proj() &&
   5.536 +                         adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
   5.537 +        delayed_worklist->push(n); // Process it later.
   5.538 +#ifdef ASSERT
   5.539 +        assert(adr->is_AddP(), "expecting an AddP");
   5.540 +        if (adr_type == TypeRawPtr::NOTNULL) {
   5.541 +          // Verify a raw address for a store captured by Initialize node.
   5.542 +          int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
   5.543 +          assert(offs != Type::OffsetBot, "offset must be a constant");
   5.544 +        }
   5.545 +      } else {
    5.546 +        // Ignore copying of the displaced header to the BoxNode (OSR compilation).
   5.547 +        if (adr->is_BoxLock())
   5.548 +          break;
   5.549 +
   5.550 +        if (!adr->is_AddP()) {
   5.551 +          n->dump(1);
   5.552 +          assert(adr->is_AddP(), "expecting an AddP");
   5.553 +        }
   5.554 +        // Ignore G1 barrier's stores.
   5.555 +        if (!UseG1GC || (opcode != Op_StoreP) ||
   5.556 +            (adr_type != TypeRawPtr::BOTTOM)) {
   5.557 +          n->dump(1);
   5.558 +          assert(false, "not G1 barrier raw StoreP");
   5.559 +        }
   5.560 +#endif
   5.561 +      }
   5.562 +      break;
   5.563 +    }
   5.564 +    case Op_AryEq:
   5.565 +    case Op_StrComp:
   5.566 +    case Op_StrEquals:
   5.567 +    case Op_StrIndexOf: {
   5.568 +      add_local_var(n, PointsToNode::ArgEscape);
   5.569 +      delayed_worklist->push(n); // Process it later.
   5.570 +      break;
   5.571 +    }
   5.572 +    case Op_ThreadLocal: {
   5.573 +      add_java_object(n, PointsToNode::ArgEscape);
   5.574 +      break;
   5.575 +    }
   5.576 +    default:
   5.577 +      ; // Do nothing for nodes not related to EA.
   5.578 +  }
   5.579 +  return;
   5.580 +}
   5.581 +
#ifdef ASSERT
// ELSE_FAIL(name): used at the end of switch cases in add_final_edges() which
// expect a pointer-typed node. In debug builds it dumps the offending node and
// asserts (the case condition should always hold there); in product builds it
// simply breaks out of the switch.
#define ELSE_FAIL(name)                               \
      /* Should not be called for not pointer type. */  \
      n->dump(1);                                       \
      assert(false, name);                              \
      break;
#else
#define ELSE_FAIL(name) \
      break;
#endif
   5.592 +
   5.593 +// Add final simple edges to graph.
   5.594 +void ConnectionGraph::add_final_edges(Node *n) {
   5.595 +  PointsToNode* n_ptn = ptnode_adr(n->_idx);
   5.596 +#ifdef ASSERT
   5.597 +  if (_verify && n_ptn->is_JavaObject())
   5.598 +    return; // This method does not change graph for JavaObject.
   5.599 +#endif
   5.600 +
   5.601 +  if (n->is_Call()) {
   5.602 +    process_call_arguments(n->as_Call());
   5.603 +    return;
   5.604 +  }
   5.605 +  assert(n->is_Store() || n->is_LoadStore() ||
   5.606 +         (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
   5.607 +         "node should be registered already");
   5.608 +  int opcode = n->Opcode();
   5.609 +  switch (opcode) {
   5.610 +    case Op_AddP: {
   5.611 +      Node* base = get_addp_base(n);
   5.612 +      PointsToNode* ptn_base = ptnode_adr(base->_idx);
   5.613 +      assert(ptn_base != NULL, "field's base should be registered");
   5.614 +      add_base(n_ptn->as_Field(), ptn_base);
   5.615 +      break;
   5.616 +    }
   5.617 +    case Op_CastPP:
   5.618 +    case Op_CheckCastPP:
   5.619 +    case Op_EncodeP:
   5.620 +    case Op_DecodeN: {
   5.621 +      add_local_var_and_edge(n, PointsToNode::NoEscape,
   5.622 +                             n->in(1), NULL);
   5.623 +      break;
   5.624 +    }
   5.625 +    case Op_CMoveP: {
   5.626 +      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
   5.627 +        Node* in = n->in(i);
   5.628 +        if (in == NULL)
   5.629 +          continue;  // ignore NULL
   5.630 +        Node* uncast_in = in->uncast();
   5.631 +        if (uncast_in->is_top() || uncast_in == n)
   5.632 +          continue;  // ignore top or inputs which go back this node
   5.633 +        PointsToNode* ptn = ptnode_adr(in->_idx);
   5.634 +        assert(ptn != NULL, "node should be registered");
   5.635 +        add_edge(n_ptn, ptn);
   5.636 +      }
   5.637 +      break;
   5.638 +    }
   5.639 +    case Op_LoadP:
   5.640 +    case Op_LoadN:
   5.641 +    case Op_LoadPLocked: {
   5.642 +      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
   5.643 +      // ThreadLocal has RawPrt type.
   5.644 +      const Type* t = _igvn->type(n);
   5.645 +      if (t->make_ptr() != NULL) {
   5.646 +        Node* adr = n->in(MemNode::Address);
   5.647 +        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
   5.648 +        break;
   5.649 +      }
   5.650 +      ELSE_FAIL("Op_LoadP");
   5.651 +    }
   5.652 +    case Op_Phi: {
   5.653 +      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
   5.654 +      // ThreadLocal has RawPrt type.
   5.655 +      const Type* t = n->as_Phi()->type();
   5.656 +      if (t->make_ptr() != NULL) {
   5.657 +        for (uint i = 1; i < n->req(); i++) {
   5.658 +          Node* in = n->in(i);
   5.659 +          if (in == NULL)
   5.660 +            continue;  // ignore NULL
   5.661 +          Node* uncast_in = in->uncast();
   5.662 +          if (uncast_in->is_top() || uncast_in == n)
   5.663 +            continue;  // ignore top or inputs which go back this node
   5.664 +          PointsToNode* ptn = ptnode_adr(in->_idx);
   5.665 +          assert(ptn != NULL, "node should be registered");
   5.666 +          add_edge(n_ptn, ptn);
   5.667 +        }
   5.668 +        break;
   5.669 +      }
   5.670 +      ELSE_FAIL("Op_Phi");
   5.671 +    }
   5.672 +    case Op_Proj: {
   5.673 +      // we are only interested in the oop result projection from a call
   5.674 +      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
   5.675 +          n->in(0)->as_Call()->returns_pointer()) {
   5.676 +        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
   5.677 +        break;
   5.678 +      }
   5.679 +      ELSE_FAIL("Op_Proj");
   5.680 +    }
   5.681 +    case Op_Rethrow: // Exception object escapes
   5.682 +    case Op_Return: {
   5.683 +      if (n->req() > TypeFunc::Parms &&
   5.684 +          _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
   5.685 +        // Treat Return value as LocalVar with GlobalEscape escape state.
   5.686 +        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
   5.687 +                               n->in(TypeFunc::Parms), NULL);
   5.688 +        break;
   5.689 +      }
   5.690 +      ELSE_FAIL("Op_Return");
   5.691 +    }
   5.692 +    case Op_StoreP:
   5.693 +    case Op_StoreN:
   5.694 +    case Op_StorePConditional:
   5.695 +    case Op_CompareAndSwapP:
   5.696 +    case Op_CompareAndSwapN: {
   5.697 +      Node* adr = n->in(MemNode::Address);
   5.698 +      const Type *adr_type = _igvn->type(adr);
   5.699 +      adr_type = adr_type->make_ptr();
   5.700 +      if (adr_type->isa_oopptr() ||
   5.701 +          (opcode == Op_StoreP || opcode == Op_StoreN) &&
   5.702 +                        (adr_type == TypeRawPtr::NOTNULL &&
   5.703 +                         adr->in(AddPNode::Address)->is_Proj() &&
   5.704 +                         adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
   5.705 +        // Point Address to Value
   5.706 +        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
   5.707 +        assert(adr_ptn != NULL &&
   5.708 +               adr_ptn->as_Field()->is_oop(), "node should be registered");
   5.709 +        Node *val = n->in(MemNode::ValueIn);
   5.710 +        PointsToNode* ptn = ptnode_adr(val->_idx);
   5.711 +        assert(ptn != NULL, "node should be registered");
   5.712 +        add_edge(adr_ptn, ptn);
   5.713 +        break;
   5.714 +      }
   5.715 +      ELSE_FAIL("Op_StoreP");
   5.716 +    }
   5.717 +    case Op_AryEq:
   5.718 +    case Op_StrComp:
   5.719 +    case Op_StrEquals:
   5.720 +    case Op_StrIndexOf: {
   5.721 +      // char[] arrays passed to string intrinsic do not escape but
   5.722 +      // they are not scalar replaceable. Adjust escape state for them.
   5.723 +      // Start from in(2) edge since in(1) is memory edge.
   5.724 +      for (uint i = 2; i < n->req(); i++) {
   5.725 +        Node* adr = n->in(i);
   5.726 +        const Type* at = _igvn->type(adr);
   5.727 +        if (!adr->is_top() && at->isa_ptr()) {
   5.728 +          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
   5.729 +                 at->isa_ptr() != NULL, "expecting a pointer");
   5.730 +          if (adr->is_AddP()) {
   5.731 +            adr = get_addp_base(adr);
   5.732 +          }
   5.733 +          PointsToNode* ptn = ptnode_adr(adr->_idx);
   5.734 +          assert(ptn != NULL, "node should be registered");
   5.735 +          add_edge(n_ptn, ptn);
   5.736 +        }
   5.737 +      }
   5.738 +      break;
   5.739 +    }
   5.740 +    default: {
   5.741 +      // This method should be called only for EA specific nodes which may
   5.742 +      // miss some edges when they were created.
   5.743 +#ifdef ASSERT
   5.744 +      n->dump(1);
   5.745 +#endif
   5.746 +      guarantee(false, "unknown node");
   5.747 +    }
   5.748 +  }
   5.749 +  return;
   5.750 +}
   5.751 +
// Create the Connection Graph node for a call which returns an oop and
// classify the escape state of the value it returns (Allocate nodes get a
// JavaObject node; Java calls are classified with the bytecode escape
// analyzer when available; everything else maps to phantom_obj).
void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer(), "only for call which returns pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != NULL, "TypeKlassPtr  required.");
    ciKlass* cik = kt->klass();
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    if (call->is_AllocateArray()) {
      if (!cik->is_array_klass()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is not constant or too big.
          scalar_replaceable = false;
        }
      }
    } else {  // Allocate instance
      // Thread objects, reflective allocations of unknown shape and objects
      // with finalizers are treated as globally escaping.
      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
         !cik->is_instance_klass() || // StressReflectiveCode
          cik->as_instance_klass()->has_finalizer()) {
        es = PointsToNode::GlobalEscape;
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      ptn->set_scalar_replaceable(false);
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes could be different types:
    //
    // 1. CallDynamicJavaNode (what happened during call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //    - mapped to NoEscape JavaObject node if non-escaping object allocated
    //      during call is returned;
    //    - mapped to ArgEscape LocalVar node pointed to object arguments
    //      which are returned and does not escape during call;
    //
    //    - oop arguments escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes.
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == NULL) {
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
      // Returns a newly allocated unescaped object.
      add_java_object(call, PointsToNode::NoEscape);
      ptnode_adr(call_idx)->set_scalar_replaceable(false);
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated unescaped object, simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // it's fields will be marked as NoEscape at least.
        add_java_object(call, PointsToNode::NoEscape);
        ptnode_adr(call_idx)->set_scalar_replaceable(false);
      } else {
        // Determine whether any arguments are returned.
        const TypeTuple* d = call->tf()->domain();
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          if (d->field_at(i)->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
            ret_arg = true;
            break;
          }
        }
        if (ret_arg) {
          // Edges from the LocalVar to the returned arguments are added
          // later in process_call_arguments().
          add_local_var(call, PointsToNode::ArgEscape);
        } else {
          // Returns unknown object.
          map_ideal_node(call, phantom_obj);
        }
      }
    }
  } else {
    // Another type of call, assume the worst case:
    // returned value is unknown and globally escapes.
    assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
    map_ideal_node(call, phantom_obj);
  }
}
   5.850 +
// Set the escape state of a call's oop arguments and, where the bytecode
// escape analyzer proves an argument is returned, add edges from the call's
// result to those arguments. Unknown call types fall through to the default
// case which marks all oop arguments as globally escaping.
void ConnectionGraph::process_call_arguments(CallNode *call) {
    bool is_arraycopy = false;
    switch (call->Opcode()) {
#ifdef ASSERT
    case Op_Allocate:
    case Op_AllocateArray:
    case Op_Lock:
    case Op_Unlock:
      assert(false, "should be done already");
      break;
#endif
    case Op_CallLeafNoFP:
      // Recognize arraycopy stubs by name; they need special edge handling
      // below so that stores into the destination see the source's fields.
      is_arraycopy = (call->as_CallLeaf()->_name != NULL &&
                      strstr(call->as_CallLeaf()->_name, "arraycopy") != 0);
      // fall through
    case Op_CallLeaf: {
      // Stub calls, objects do not escape but they are not scalar replaceable.
      // Adjust escape state for outgoing arguments.
      const TypeTuple * d = call->tf()->domain();
      bool src_has_oops = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        Node *arg = call->in(i);
        const Type *aat = _igvn->type(arg);
        if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
          continue;
        if (arg->is_AddP()) {
          //
          // The inline_native_clone() case when the arraycopy stub is called
          // after the allocation before Initialize and CheckCastPP nodes.
          // Or normal arraycopy for object arrays case.
          //
          // Set AddP's base (Allocate) as not scalar replaceable since
          // pointer to the base (with offset) is passed as argument.
          //
          arg = get_addp_base(arg);
        }
        PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
        assert(arg_ptn != NULL, "should be registered");
        PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
        if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != NULL, "expecting an Ptr");
          // An argument "has oops" when it is an instance or an object
          // array (or its klass is unknown).
          bool arg_has_oops = aat->isa_oopptr() &&
                              (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
                               (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
          if (i == TypeFunc::Parms) {
            src_has_oops = arg_has_oops;
          }
          //
          // src or dst could be j.l.Object when other is basic type array:
          //
          //   arraycopy(char[],0,Object*,0,size);
          //   arraycopy(Object*,0,char[],0,size);
          //
          // Don't add edges in such cases.
          //
          bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
                                       arg_has_oops && (i > TypeFunc::Parms);
#ifdef ASSERT
          if (!(is_arraycopy ||
                call->as_CallLeaf()->_name != NULL &&
                (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre")  == 0 ||
                 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ))
          ) {
            call->dump();
            assert(false, "EA: unexpected CallLeaf");
          }
#endif
          // Always process arraycopy's destination object since
          // we need to add all possible edges to references in
          // source object.
          if (arg_esc >= PointsToNode::ArgEscape &&
              !arg_is_arraycopy_dest) {
            continue;
          }
          set_escape_state(arg_ptn, PointsToNode::ArgEscape);
          if (arg_is_arraycopy_dest) {
            Node* src = call->in(TypeFunc::Parms);
            if (src->is_AddP()) {
              src = get_addp_base(src);
            }
            PointsToNode* src_ptn = ptnode_adr(src->_idx);
            assert(src_ptn != NULL, "should be registered");
            if (arg_ptn != src_ptn) {
              // Special arraycopy edge:
              // A destination object's field can't have the source object
              // as base since objects escape states are not related.
              // Only escape state of destination object's fields affects
              // escape state of fields in source object.
              add_arraycopy(call, PointsToNode::ArgEscape, src_ptn, arg_ptn);
            }
          }
        }
      }
      break;
    }
    case Op_CallStaticJava: {
      // For a static call, we know exactly what method is being called.
      // Use bytecode estimator to record the call's escape affects
#ifdef ASSERT
      const char* name = call->as_CallStaticJava()->_name;
      assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
#endif
      ciMethod* meth = call->as_CallJava()->method();
      BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
      // fall-through if not a Java method or no analyzer information
      if (call_analyzer != NULL) {
        PointsToNode* call_ptn = ptnode_adr(call->_idx);
        const TypeTuple* d = call->tf()->domain();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          int k = i - TypeFunc::Parms;
          Node* arg = call->in(i);
          PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
          if (at->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(k)) {
            // The call returns arguments.
            if (call_ptn != NULL) { // Is call's result used?
              assert(call_ptn->is_LocalVar(), "node should be registered");
              assert(arg_ptn != NULL, "node should be registered");
              add_edge(call_ptn, arg_ptn);
            }
          }
          if (at->isa_oopptr() != NULL &&
              arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
            if (!call_analyzer->is_arg_stack(k)) {
              // The argument global escapes
              set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
            } else {
              set_escape_state(arg_ptn, PointsToNode::ArgEscape);
              if (!call_analyzer->is_arg_local(k)) {
                // The argument itself doesn't escape, but any fields might
                set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
              }
            }
          }
        }
        if (call_ptn != NULL && call_ptn->is_LocalVar()) {
          // The call returns arguments.
          assert(call_ptn->edge_count() > 0, "sanity");
          if (!call_analyzer->is_return_local()) {
            // Returns also unknown object.
            add_edge(call_ptn, phantom_obj);
          }
        }
        break;
      }
      // NOTE: intentionally falls through into default when no analyzer.
    }
    default: {
      // Fall-through here if not a Java method or no analyzer information
      // or some other type of call, assume the worst case: all arguments
      // globally escape.
      const TypeTuple* d = call->tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != NULL) {
          Node* arg = call->in(i);
          if (arg->is_AddP()) {
            arg = get_addp_base(arg);
          }
          assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
          set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
        }
      }
    }
  }
}
  5.1019 +
  5.1020 +
// Finish Graph construction: iterate until the Connection Graph reaches a
// fixed point (no new edges added), bailing out if the iteration count or
// wall-clock time limits are exceeded. Returns false on bailout or when no
// non-escaping objects remain; true when the graph is complete.
bool ConnectionGraph::complete_connection_graph(
                         GrowableArray<PointsToNode*>&   ptnodes_worklist,
                         GrowableArray<JavaObjectNode*>& non_escaped_worklist,
                         GrowableArray<JavaObjectNode*>& java_objects_worklist,
                         GrowableArray<FieldNode*>&      oop_fields_worklist) {
  // Normally only 1-3 passes needed to build Connection Graph depending
  // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
  // Set limit to 20 to catch situation when something did go wrong and
  // bailout Escape Analysis.
  // Also limit build time to 30 sec (60 in debug VM).
#define CG_BUILD_ITER_LIMIT 20
#ifdef ASSERT
#define CG_BUILD_TIME_LIMIT 60.0
#else
#define CG_BUILD_TIME_LIMIT 30.0
#endif

  // Propagate GlobalEscape and ArgEscape escape states and check that
  // we still have non-escaping objects. The method pushes on _worklist
  // Field nodes which reference phantom_object.
  if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
    return false; // Nothing to do.
  }
  // Now propagate references to all JavaObject nodes.
  int java_objects_length = java_objects_worklist.length();
  elapsedTimer time;
  int new_edges = 1;
  int iterations = 0;
  do {
    // Inner loop: propagate references until no new edges appear or a
    // limit is reached; time is accumulated across start()/stop() pairs.
    while ((new_edges > 0) &&
          (iterations++   < CG_BUILD_ITER_LIMIT) &&
          (time.seconds() < CG_BUILD_TIME_LIMIT)) {
      time.start();
      new_edges = 0;
      // Propagate references to phantom_object for nodes pushed on _worklist
      // by find_non_escaped_objects() and find_field_value().
      new_edges += add_java_object_edges(phantom_obj, false);
      for (int next = 0; next < java_objects_length; ++next) {
        JavaObjectNode* ptn = java_objects_worklist.at(next);
        new_edges += add_java_object_edges(ptn, true);
      }
      if (new_edges > 0) {
        // Update escape states on each iteration if graph was updated.
        if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
          return false; // Nothing to do.
        }
      }
      time.stop();
    }
    if ((iterations     < CG_BUILD_ITER_LIMIT) &&
        (time.seconds() < CG_BUILD_TIME_LIMIT)) {
      time.start();
      // Find fields which have unknown value.
      int fields_length = oop_fields_worklist.length();
      for (int next = 0; next < fields_length; next++) {
        FieldNode* field = oop_fields_worklist.at(next);
        if (field->edge_count() == 0) {
          new_edges += find_field_value(field);
          // This code may have added new edges to phantom_object.
          // Need another cycle to propagate references to phantom_object.
        }
      }
      time.stop();
    } else {
      new_edges = 0; // Bailout
    }
  } while (new_edges > 0);

  // Bailout if passed limits.
  if ((iterations     >= CG_BUILD_ITER_LIMIT) ||
      (time.seconds() >= CG_BUILD_TIME_LIMIT)) {
    Compile* C = _compile;
    if (C->log() != NULL) {
      C->log()->begin_elem("connectionGraph_bailout reason='reached ");
      C->log()->text("%s", (iterations >= CG_BUILD_ITER_LIMIT) ? "iterations" : "time");
      C->log()->end_elem(" limit'");
    }
    assert(false, err_msg("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
           time.seconds(), iterations, nodes_size(), ptnodes_worklist.length()));
    // Possible infinite build_connection_graph loop,
    // bailout (no changes to ideal graph were made).
    return false;
  }
#ifdef ASSERT
  if (Verbose && PrintEscapeAnalysis) {
    tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d",
                  iterations, nodes_size(), ptnodes_worklist.length());
  }
#endif

#undef CG_BUILD_ITER_LIMIT
#undef CG_BUILD_TIME_LIMIT

  // Find fields initialized by NULL for non-escaping Allocations.
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    PointsToNode::EscapeState es = ptn->escape_state();
    assert(es <= PointsToNode::ArgEscape, "sanity");
    if (es == PointsToNode::NoEscape) {
      if (find_init_values(ptn, null_obj, _igvn) > 0) {
        // Adding references to NULL object does not change escape states
        // since it does not escape. Also no fields are added to NULL object.
        add_java_object_edges(null_obj, false);
      }
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      // The object allocated by this Allocate node will never be
      // seen by another thread. Mark it so that when it is
      // expanded no MemBarStoreStore is added.
      InitializeNode* ini = n->as_Allocate()->initialization();
      if (ini != NULL)
        ini->set_does_not_escape();
    }
  }
  return true; // Finished graph construction.
}
  5.1140 +
  5.1141 +// Propagate GlobalEscape and ArgEscape escape states to all nodes
  5.1142 +// and check that we still have non-escaping java objects.
  5.1143 +bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
  5.1144 +                                               GrowableArray<JavaObjectNode*>& non_escaped_worklist) {
  5.1145 +  GrowableArray<PointsToNode*> escape_worklist;
  5.1146 +  // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
  5.1147 +  int ptnodes_length = ptnodes_worklist.length();
  5.1148 +  for (int next = 0; next < ptnodes_length; ++next) {
  5.1149 +    PointsToNode* ptn = ptnodes_worklist.at(next);
  5.1150 +    if (ptn->escape_state() >= PointsToNode::ArgEscape ||
  5.1151 +        ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
  5.1152 +      escape_worklist.push(ptn);
  5.1153 +    }
  5.1154 +  }
  5.1155 +  // Set escape states to referenced nodes (edges list).
  5.1156 +  while (escape_worklist.length() > 0) {
  5.1157 +    PointsToNode* ptn = escape_worklist.pop();
  5.1158 +    PointsToNode::EscapeState es  = ptn->escape_state();
  5.1159 +    PointsToNode::EscapeState field_es = ptn->fields_escape_state();
  5.1160 +    if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
  5.1161 +        es >= PointsToNode::ArgEscape) {
  5.1162 +      // GlobalEscape or ArgEscape state of field means it has unknown value.
  5.1163 +      if (add_edge(ptn, phantom_obj)) {
  5.1164 +        // New edge was added
  5.1165 +        add_field_uses_to_worklist(ptn->as_Field());
  5.1166 +      }
  5.1167 +    }
  5.1168 +    for (EdgeIterator i(ptn); i.has_next(); i.next()) {
  5.1169 +      PointsToNode* e = i.get();
  5.1170 +      if (e->is_Arraycopy()) {
  5.1171 +        assert(ptn->arraycopy_dst(), "sanity");
  5.1172 +        // Propagate only fields escape state through arraycopy edge.
  5.1173 +        if (e->fields_escape_state() < field_es) {
  5.1174 +          set_fields_escape_state(e, field_es);
  5.1175 +          escape_worklist.push(e);
  5.1176 +        }
  5.1177 +      } else if (es >= field_es) {
  5.1178 +        // fields_escape_state is also set to 'es' if it is less than 'es'.
  5.1179 +        if (e->escape_state() < es) {
  5.1180 +          set_escape_state(e, es);
  5.1181 +          escape_worklist.push(e);
  5.1182 +        }
  5.1183 +      } else {
  5.1184 +        // Propagate field escape state.
  5.1185 +        bool es_changed = false;
  5.1186 +        if (e->fields_escape_state() < field_es) {
  5.1187 +          set_fields_escape_state(e, field_es);
  5.1188 +          es_changed = true;
  5.1189 +        }
  5.1190 +        if ((e->escape_state() < field_es) &&
  5.1191 +            e->is_Field() && ptn->is_JavaObject() &&
  5.1192 +            e->as_Field()->is_oop()) {
  5.1193 +          // Change escape state of referenced fileds.
  5.1194 +          set_escape_state(e, field_es);
  5.1195 +          es_changed = true;;
  5.1196 +        } else if (e->escape_state() < es) {
  5.1197 +          set_escape_state(e, es);
  5.1198 +          es_changed = true;;
  5.1199 +        }
  5.1200 +        if (es_changed) {
  5.1201 +          escape_worklist.push(e);
  5.1202 +        }
  5.1203 +      }
  5.1204 +    }
  5.1205 +  }
  5.1206 +  // Remove escaped objects from non_escaped list.
  5.1207 +  for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) {
  5.1208 +    JavaObjectNode* ptn = non_escaped_worklist.at(next);
  5.1209 +    if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
  5.1210 +      non_escaped_worklist.delete_at(next);
  5.1211 +    }
  5.1212 +    if (ptn->escape_state() == PointsToNode::NoEscape) {
  5.1213 +      // Find fields in non-escaped allocations which have unknown value.
  5.1214 +      find_init_values(ptn, phantom_obj, NULL);
  5.1215 +    }
  5.1216 +  }
  5.1217 +  return (non_escaped_worklist.length() > 0);
  5.1218 +}
  5.1219 +
// Add all references to JavaObject node by walking over all uses.
// Returns the number of new edges added to the graph. If 'populate_worklist'
// is true, _worklist is first seeded from jobj's uses; otherwise the caller
// has already pushed the nodes to be (re)connected to jobj.
int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
  int new_edges = 0;
  if (populate_worklist) {
    // Populate _worklist by uses of jobj's uses.
    for (UseIterator i(jobj); i.has_next(); i.next()) {
      PointsToNode* use = i.get();
      if (use->is_Arraycopy())
        continue;
      add_uses_to_worklist(use);
      if (use->is_Field() && use->as_Field()->is_oop()) {
        // Put on worklist all field's uses (loads) and
        // related field nodes (same base and offset).
        add_field_uses_to_worklist(use->as_Field());
      }
    }
  }
  // Drain the worklist, connecting each popped node to jobj and
  // transitively queueing nodes affected by the new edge.
  while(_worklist.length() > 0) {
    PointsToNode* use = _worklist.pop();
    if (PointsToNode::is_base_use(use)) {
      // Tagged entry: this use represents a field whose base set must
      // include jobj. Add reference from jobj to field and from field
      // to jobj (field's base).
      use = PointsToNode::get_use_node(use)->as_Field();
      if (add_base(use->as_Field(), jobj)) {
        new_edges++;
      }
      continue;
    }
    assert(!use->is_JavaObject(), "sanity");
    if (use->is_Arraycopy()) {
      if (jobj == null_obj) // NULL object does not have field edges
        continue;
      // Added edge from Arraycopy node to arraycopy's source java object
      if (add_edge(use, jobj)) {
        jobj->set_arraycopy_src();
        new_edges++;
      }
      // and stop here.
      continue;
    }
    if (!add_edge(use, jobj))
      continue; // No new edge added, there was such edge already.
    new_edges++;
    if (use->is_LocalVar()) {
      add_uses_to_worklist(use);
      if (use->arraycopy_dst()) {
        // The LocalVar is a destination of some arraycopy: jobj's
        // contents may be copied, so connect jobj to those Arraycopy nodes.
        for (EdgeIterator i(use); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_Arraycopy()) {
            if (jobj == null_obj) // NULL object does not have field edges
              continue;
            // Add edge from arraycopy's destination java object to Arraycopy node.
            if (add_edge(jobj, e)) {
              new_edges++;
              jobj->set_arraycopy_dst();
            }
          }
        }
      }
    } else {
      // Added new edge to stored in field values.
      // Put on worklist all field's uses (loads) and
      // related field nodes (same base and offset).
      add_field_uses_to_worklist(use->as_Field());
    }
  }
  return new_edges;
}
  5.1287 +
  5.1288 +// Put on worklist all related field nodes.
  5.1289 +void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
  5.1290 +  assert(field->is_oop(), "sanity");
  5.1291 +  int offset = field->offset();
  5.1292 +  add_uses_to_worklist(field);
  5.1293 +  // Loop over all bases of this field and push on worklist Field nodes
  5.1294 +  // with the same offset and base (since they may reference the same field).
  5.1295 +  for (BaseIterator i(field); i.has_next(); i.next()) {
  5.1296 +    PointsToNode* base = i.get();
  5.1297 +    add_fields_to_worklist(field, base);
  5.1298 +    // Check if the base was source object of arraycopy and go over arraycopy's
  5.1299 +    // destination objects since values stored to a field of source object are
  5.1300 +    // accessable by uses (loads) of fields of destination objects.
  5.1301 +    if (base->arraycopy_src()) {
  5.1302 +      for (UseIterator j(base); j.has_next(); j.next()) {
  5.1303 +        PointsToNode* arycp = j.get();
  5.1304 +        if (arycp->is_Arraycopy()) {
  5.1305 +          for (UseIterator k(arycp); k.has_next(); k.next()) {
  5.1306 +            PointsToNode* abase = k.get();
  5.1307 +            if (abase->arraycopy_dst() && abase != base) {
  5.1308 +              // Look for the same arracopy reference.
  5.1309 +              add_fields_to_worklist(field, abase);
  5.1310 +            }
  5.1311 +          }
  5.1312 +        }
  5.1313 +      }
  5.1314 +    }
  5.1315 +  }
  5.1316 +}
  5.1317 +
  5.1318 +// Put on worklist all related field nodes.
  5.1319 +void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
  5.1320 +  int offset = field->offset();
  5.1321 +  if (base->is_LocalVar()) {
  5.1322 +    for (UseIterator j(base); j.has_next(); j.next()) {
  5.1323 +      PointsToNode* f = j.get();
  5.1324 +      if (PointsToNode::is_base_use(f)) { // Field
  5.1325 +        f = PointsToNode::get_use_node(f);
  5.1326 +        if (f == field || !f->as_Field()->is_oop())
  5.1327 +          continue;
  5.1328 +        int offs = f->as_Field()->offset();
  5.1329 +        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
  5.1330 +          add_to_worklist(f);
  5.1331 +        }
  5.1332 +      }
  5.1333 +    }
  5.1334 +  } else {
  5.1335 +    assert(base->is_JavaObject(), "sanity");
  5.1336 +    if (// Skip phantom_object since it is only used to indicate that
  5.1337 +        // this field's content globally escapes.
  5.1338 +        (base != phantom_obj) &&
  5.1339 +        // NULL object node does not have fields.
  5.1340 +        (base != null_obj)) {
  5.1341 +      for (EdgeIterator i(base); i.has_next(); i.next()) {
  5.1342 +        PointsToNode* f = i.get();
  5.1343 +        // Skip arraycopy edge since store to destination object field
  5.1344 +        // does not update value in source object field.
  5.1345 +        if (f->is_Arraycopy()) {
  5.1346 +          assert(base->arraycopy_dst(), "sanity");
  5.1347 +          continue;
  5.1348 +        }
  5.1349 +        if (f == field || !f->as_Field()->is_oop())
  5.1350 +          continue;
  5.1351 +        int offs = f->as_Field()->offset();
  5.1352 +        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
  5.1353 +          add_to_worklist(f);
  5.1354 +        }
  5.1355 +      }
  5.1356 +    }
  5.1357 +  }
  5.1358 +}
  5.1359 +
  5.1360 +// Find fields which have unknown value.
  5.1361 +int ConnectionGraph::find_field_value(FieldNode* field) {
  5.1362 +  // Escaped fields should have init value already.
  5.1363 +  assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
  5.1364 +  int new_edges = 0;
  5.1365 +  for (BaseIterator i(field); i.has_next(); i.next()) {
  5.1366 +    PointsToNode* base = i.get();
  5.1367 +    if (base->is_JavaObject()) {
  5.1368 +      // Skip Allocate's fields which will be processed later.
  5.1369 +      if (base->ideal_node()->is_Allocate())
  5.1370 +        return 0;
  5.1371 +      assert(base == null_obj, "only NULL ptr base expected here");
  5.1372 +    }
  5.1373 +  }
  5.1374 +  if (add_edge(field, phantom_obj)) {
  5.1375 +    // New edge was added
  5.1376 +    new_edges++;
  5.1377 +    add_field_uses_to_worklist(field);
  5.1378 +  }
  5.1379 +  return new_edges;
  5.1380 +}
  5.1381 +
// Find fields initializing values for allocations.
// Returns the number of new edges added. 'init_val' selects the mode:
//  - phantom_obj: mark oop fields of a non-escaped call result (non-Allocate)
//    as having unknown values;
//  - null_obj: for an Allocate, add the default NULL value to oop fields
//    whose initializing store was not captured by the Initialize node.
int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) {
  assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
  int new_edges = 0;
  Node* alloc = pta->ideal_node();
  if (init_val == phantom_obj) {
    // Do nothing for Allocate nodes since its fields values are "known".
    if (alloc->is_Allocate())
      return 0;
    assert(alloc->as_CallStaticJava(), "sanity");
#ifdef ASSERT
    if (alloc->as_CallStaticJava()->method() == NULL) {
      // Runtime calls without a method: only _multianewarray is expected here.
      const char* name = alloc->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
    }
#endif
    // Non-escaped allocation returned from Java or runtime call have
    // unknown values in fields.
    for (EdgeIterator i(pta); i.has_next(); i.next()) {
      PointsToNode* ptn = i.get();
      if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        if (add_edge(ptn, phantom_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(ptn->as_Field());
        }
      }
    }
    return new_edges;
  }
  assert(init_val == null_obj, "sanity");
  // Do nothing for Call nodes since its fields values are unknown.
  if (!alloc->is_Allocate())
    return 0;

  InitializeNode* ini = alloc->as_Allocate()->initialization();
  Compile* C = _compile;
  bool visited_bottom_offset = false;
  GrowableArray<int> offsets_worklist;

  // Check if an oop field's initializing value is recorded and add
  // a corresponding NULL as the field's value if it is not recorded.
  // Connection Graph does not record a default initialization by NULL
  // captured by Initialize node.
  //
  for (EdgeIterator i(pta); i.has_next(); i.next()) {
    PointsToNode* ptn = i.get(); // Field (AddP)
    if (!ptn->is_Field() || !ptn->as_Field()->is_oop())
      continue; // Not oop field
    int offset = ptn->as_Field()->offset();
    if (offset == Type::OffsetBot) {
      if (!visited_bottom_offset) {
        // OffsetBot is used to reference array's element,
        // always add reference to NULL to all Field nodes since we don't
        // known which element is referenced.
        if (add_edge(ptn, null_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(ptn->as_Field());
          visited_bottom_offset = true;
        }
      }
    } else {
      // Check only oop fields.
      const Type* adr_type = ptn->ideal_node()->as_AddP()->bottom_type();
      if (adr_type->isa_rawptr()) {
#ifdef ASSERT
        // Raw pointers are used for initializing stores so skip it
        // since it should be recorded already
        Node* base = get_addp_base(ptn->ideal_node());
        assert(adr_type->isa_rawptr() && base->is_Proj() &&
               (base->in(0) == alloc),"unexpected pointer type");
#endif
        continue;
      }
      // Process each distinct offset only once.
      if (!offsets_worklist.contains(offset)) {
        offsets_worklist.append(offset);
        Node* value = NULL;
        if (ini != NULL) {
          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
          Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
          if (store != NULL && store->is_Store()) {
            value = store->in(MemNode::ValueIn);
          } else {
            // There could be initializing stores which follow allocation.
            // For example, a volatile field store is not collected
            // by Initialize node.
            //
            // Need to check for dependent loads to separate such stores from
            // stores which follow loads. For now, add initial value NULL so
            // that compare pointers optimization works correctly.
          }
        }
        if (value == NULL) {
          // A field's initializing value was not recorded. Add NULL.
          if (add_edge(ptn, null_obj)) {
            // New edge was added
            new_edges++;
            add_field_uses_to_worklist(ptn->as_Field());
          }
        }
      }
    }
  }
  return new_edges;
}
  5.1488 +
// Adjust scalar_replaceable state after Connection Graph is built.
// Clears the scalar-replaceable flag on 'jobj' (and on objects/bases merged
// with it) when any of five disqualifying conditions below holds.
void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
  // Search for non-escaping objects which are not scalar replaceable
  // and mark them to propagate the state to referenced objects.

  // 1. An object is not scalar replaceable if the field into which it is
  // stored has unknown offset (stored into unknown element of an array).
  //
  for (UseIterator i(jobj); i.has_next(); i.next()) {
    PointsToNode* use = i.get();
    assert(!use->is_Arraycopy(), "sanity");
    if (use->is_Field()) {
      FieldNode* field = use->as_Field();
      assert(field->is_oop() && field->scalar_replaceable() &&
             field->fields_escape_state() == PointsToNode::NoEscape, "sanity");
      if (field->offset() == Type::OffsetBot) {
        jobj->set_scalar_replaceable(false);
        return;
      }
    }
    assert(use->is_Field() || use->is_LocalVar(), "sanity");
    // 2. An object is not scalar replaceable if it is merged with other objects
    // (some LocalVar or Field that points to jobj also points to another object).
    for (EdgeIterator j(use); j.has_next(); j.next()) {
      PointsToNode* ptn = j.get();
      if (ptn->is_JavaObject() && ptn != jobj) {
        // Mark all objects.
        jobj->set_scalar_replaceable(false);
        ptn->set_scalar_replaceable(false);
      }
    }
    if (!jobj->scalar_replaceable()) {
      return;
    }
  }

  for (EdgeIterator j(jobj); j.has_next(); j.next()) {
    // Non-escaping object node should point only to field nodes.
    FieldNode* field = j.get()->as_Field();
    int offset = field->as_Field()->offset();

    // 3. An object is not scalar replaceable if it has a field with unknown
    // offset (array's element is accessed in loop).
    if (offset == Type::OffsetBot) {
      jobj->set_scalar_replaceable(false);
      return;
    }
    // 4. Currently an object is not scalar replaceable if a LoadStore node
    // access its field since the field value is unknown after it.
    //
    Node* n = field->ideal_node();
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      if (n->fast_out(i)->is_LoadStore()) {
        jobj->set_scalar_replaceable(false);
        return;
      }
    }

    // 5. Or the address may point to more than one object. This may produce
    // the false positive result (set not scalar replaceable)
    // since the flow-insensitive escape analysis can't separate
    // the case when stores overwrite the field's value from the case
    // when stores happened on different control branches.
    //
    // Note: it will disable scalar replacement in some cases:
    //
    //    Point p[] = new Point[1];
    //    p[0] = new Point(); // Will be not scalar replaced
    //
    // but it will save us from incorrect optimizations in next cases:
    //
    //    Point p[] = new Point[1];
    //    if ( x ) p[0] = new Point(); // Will be not scalar replaced
    //
    if (field->base_count() > 1) {
      for (BaseIterator i(field); i.has_next(); i.next()) {
        PointsToNode* base = i.get();
        // Don't take into account LocalVar nodes which
        // may point to only one object which should be also
        // this field's base by now.
        if (base->is_JavaObject() && base != jobj) {
          // Mark all bases.
          jobj->set_scalar_replaceable(false);
          base->set_scalar_replaceable(false);
        }
      }
    }
  }
}
  5.1577 +
#ifdef ASSERT
// Debug-only verification that the Connection Graph reached a fixed point:
// no new edges can be added, escape states are final, and every oop field
// has all of its bases and at least one initializing value edge.
void ConnectionGraph::verify_connection_graph(
                         GrowableArray<PointsToNode*>&   ptnodes_worklist,
                         GrowableArray<JavaObjectNode*>& non_escaped_worklist,
                         GrowableArray<JavaObjectNode*>& java_objects_worklist,
                         GrowableArray<Node*>& addp_worklist) {
  // Verify that graph is complete - no new edges could be added.
  int java_objects_length = java_objects_worklist.length();
  int non_escaped_length  = non_escaped_worklist.length();
  int new_edges = 0;
  for (int next = 0; next < java_objects_length; ++next) {
    JavaObjectNode* ptn = java_objects_worklist.at(next);
    new_edges += add_java_object_edges(ptn, true);
  }
  assert(new_edges == 0, "graph was not complete");
  // Verify that escape state is final: re-running propagation must not
  // change the set of non-escaped objects or leave worklist entries.
  int length = non_escaped_worklist.length();
  find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist);
  assert((non_escaped_length == non_escaped_worklist.length()) &&
         (non_escaped_length == length) &&
         (_worklist.length() == 0), "escape state was not final");

  // Verify fields information.
  int addp_length = addp_worklist.length();
  for (int next = 0; next < addp_length; ++next ) {
    Node* n = addp_worklist.at(next);
    FieldNode* field = ptnode_adr(n->_idx)->as_Field();
    if (field->is_oop()) {
      // Verify that field has all bases: the AddP's base (or, for a
      // LocalVar base, every java object it points to) must be recorded.
      Node* base = get_addp_base(n);
      PointsToNode* ptn = ptnode_adr(base->_idx);
      if (ptn->is_JavaObject()) {
        assert(field->has_base(ptn->as_JavaObject()), "sanity");
      } else {
        assert(ptn->is_LocalVar(), "sanity");
        for (EdgeIterator i(ptn); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_JavaObject()) {
            assert(field->has_base(e->as_JavaObject()), "sanity");
          }
        }
      }
      // Verify that all fields have initializing values.
      if (field->edge_count() == 0) {
        field->dump();
        assert(field->edge_count() > 0, "sanity");
      }
    }
  }
}
#endif
  5.1629 +
// Optimize ideal graph using the completed Connection Graph:
//  - mark locks/unlocks on non-escaping objects for elimination,
//  - fold pointer compares whose result is known,
//  - relax MemBarStoreStore barriers guarding non-escaping allocations.
void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
                                           GrowableArray<Node*>& storestore_worklist) {
  Compile* C = _compile;
  PhaseIterGVN* igvn = _igvn;
  if (EliminateLocks) {
    // Mark locks before changing ideal graph.
    int cnt = C->macro_count();
    for( int i=0; i < cnt; i++ ) {
      Node *n = C->macro_node(i);
      if (n->is_AbstractLock()) { // Lock and Unlock nodes
        AbstractLockNode* alock = n->as_AbstractLock();
        if (!alock->is_non_esc_obj()) {
          if (not_global_escape(alock->obj_node())) {
            assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
            // The lock could be marked eliminated by lock coarsening
            // code during first IGVN before EA. Replace coarsened flag
            // to eliminate all associated locks/unlocks.
            alock->set_non_esc_obj();
          }
        }
      }
    }
  }

  if (OptimizePtrCompare) {
    // Add ConI(#CC_GT) and ConI(#CC_EQ) constants used as replacement
    // results for folded compares.
    _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
    _pcmp_eq = igvn->makecon(TypeInt::CC_EQ);
    // Optimize objects compare.
    while (ptr_cmp_worklist.length() != 0) {
      Node *n = ptr_cmp_worklist.pop();
      Node *res = optimize_ptr_compare(n);
      if (res != NULL) {
#ifndef PRODUCT
        if (PrintOptimizePtrCompare) {
          tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
          if (Verbose) {
            n->dump(1);
          }
        }
#endif
        igvn->replace_node(n, res);
      }
    }
    // cleanup: drop the helper constants if nothing ended up using them.
    if (_pcmp_neq->outcnt() == 0)
      igvn->hash_delete(_pcmp_neq);
    if (_pcmp_eq->outcnt()  == 0)
      igvn->hash_delete(_pcmp_eq);
  }

  // For MemBarStoreStore nodes added in library_call.cpp, check
  // escape status of associated AllocateNode and optimize out
  // MemBarStoreStore node if the allocated object never escapes.
  while (storestore_worklist.length() != 0) {
    Node *n = storestore_worklist.pop();
    MemBarStoreStoreNode *storestore = n->as_MemBarStoreStore();
    Node *alloc = storestore->in(MemBarNode::Precedent)->in(0);
    assert (alloc->is_Allocate(), "storestore should point to AllocateNode");
    if (not_global_escape(alloc)) {
      // Replace the barrier with a cheaper CPU-order membar.
      MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
      mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
      mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
      igvn->register_new_node_with_optimizer(mb);
      igvn->replace_node(storestore, mb);
    }
  }
}
  5.1699 +
  5.1700 +// Optimize objects compare.
  5.1701 +Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
  5.1702 +  assert(OptimizePtrCompare, "sanity");
  5.1703 +  PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
  5.1704 +  PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
  5.1705 +  JavaObjectNode* jobj1 = unique_java_object(n->in(1));
  5.1706 +  JavaObjectNode* jobj2 = unique_java_object(n->in(2));
  5.1707 +  assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
  5.1708 +  assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
  5.1709 +
  5.1710 +  // Check simple cases first.
  5.1711 +  if (jobj1 != NULL) {
  5.1712 +    if (jobj1->escape_state() == PointsToNode::NoEscape) {
  5.1713 +      if (jobj1 == jobj2) {
  5.1714 +        // Comparing the same not escaping object.
  5.1715 +        return _pcmp_eq;
  5.1716 +      }
  5.1717 +      Node* obj = jobj1->ideal_node();
  5.1718 +      // Comparing not escaping allocation.
  5.1719 +      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
  5.1720 +          !ptn2->points_to(jobj1)) {
  5.1721 +        return _pcmp_neq; // This includes nullness check.
  5.1722 +      }
  5.1723 +    }
  5.1724 +  }
  5.1725 +  if (jobj2 != NULL) {
  5.1726 +    if (jobj2->escape_state() == PointsToNode::NoEscape) {
  5.1727 +      Node* obj = jobj2->ideal_node();
  5.1728 +      // Comparing not escaping allocation.
  5.1729 +      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
  5.1730 +          !ptn1->points_to(jobj2)) {
  5.1731 +        return _pcmp_neq; // This includes nullness check.
  5.1732 +      }
  5.1733 +    }
  5.1734 +  }
  5.1735 +  if (jobj1 != NULL && jobj1 != phantom_obj &&
  5.1736 +      jobj2 != NULL && jobj2 != phantom_obj &&
  5.1737 +      jobj1->ideal_node()->is_Con() &&
  5.1738 +      jobj2->ideal_node()->is_Con()) {
  5.1739 +    // Klass or String constants compare. Need to be careful with
  5.1740 +    // compressed pointers - compare types of ConN and ConP instead of nodes.
  5.1741 +    const Type* t1 = jobj1->ideal_node()->bottom_type()->make_ptr();
  5.1742 +    const Type* t2 = jobj2->ideal_node()->bottom_type()->make_ptr();
  5.1743 +    assert(t1 != NULL && t2 != NULL, "sanity");
  5.1744 +    if (t1->make_ptr() == t2->make_ptr()) {
  5.1745 +      return _pcmp_eq;
  5.1746 +    } else {
  5.1747 +      return _pcmp_neq;
  5.1748 +    }
  5.1749 +  }
  5.1750 +  if (ptn1->meet(ptn2)) {
  5.1751 +    return NULL; // Sets are not disjoint
  5.1752 +  }
  5.1753 +
  5.1754 +  // Sets are disjoint.
  5.1755 +  bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
  5.1756 +  bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
  5.1757 +  bool set1_has_null_ptr    = ptn1->points_to(null_obj);
  5.1758 +  bool set2_has_null_ptr    = ptn2->points_to(null_obj);
  5.1759 +  if (set1_has_unknown_ptr && set2_has_null_ptr ||
  5.1760 +      set2_has_unknown_ptr && set1_has_null_ptr) {
  5.1761 +    // Check nullness of unknown object.
  5.1762 +    return NULL;
  5.1763 +  }
  5.1764 +
  5.1765 +  // Disjointness by itself is not sufficient since
  5.1766 +  // alias analysis is not complete for escaped objects.
  5.1767 +  // Disjoint sets are definitely unrelated only when
  5.1768 +  // at least one set has only not escaping allocations.
  5.1769 +  if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
  5.1770 +    if (ptn1->non_escaping_allocation()) {
  5.1771 +      return _pcmp_neq;
  5.1772 +    }
  5.1773 +  }
  5.1774 +  if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
  5.1775 +    if (ptn2->non_escaping_allocation()) {
  5.1776 +      return _pcmp_neq;
  5.1777 +    }
  5.1778 +  }
  5.1779 +  return NULL;
  5.1780 +}
  5.1781 +
// Connection Graph construction functions.
  5.1783 +
  5.1784 +void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
  5.1785 +  PointsToNode* ptadr = _nodes.at(n->_idx);
  5.1786 +  if (ptadr != NULL) {
  5.1787 +    assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
  5.1788 +    return;
  5.1789 +  }
  5.1790 +  Compile* C = _compile;
  5.1791 +  ptadr = new (C->comp_arena()) LocalVarNode(C, n, es);
  5.1792 +  _nodes.at_put(n->_idx, ptadr);
  5.1793 +}
  5.1794 +
  5.1795 +void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
  5.1796 +  PointsToNode* ptadr = _nodes.at(n->_idx);
  5.1797 +  if (ptadr != NULL) {
  5.1798 +    assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
  5.1799 +    return;
  5.1800 +  }
  5.1801 +  Compile* C = _compile;
  5.1802 +  ptadr = new (C->comp_arena()) JavaObjectNode(C, n, es);
  5.1803 +  _nodes.at_put(n->_idx, ptadr);
  5.1804 +}
  5.1805 +
  5.1806 +void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
  5.1807 +  PointsToNode* ptadr = _nodes.at(n->_idx);
  5.1808 +  if (ptadr != NULL) {
  5.1809 +    assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
  5.1810 +    return;
  5.1811 +  }
  5.1812 +  Compile* C = _compile;
  5.1813 +  bool is_oop = is_oop_field(n, offset);
  5.1814 +  FieldNode* field = new (C->comp_arena()) FieldNode(C, n, es, offset, is_oop);
  5.1815 +  _nodes.at_put(n->_idx, field);
  5.1816 +}
  5.1817 +
  5.1818 +void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
  5.1819 +                                    PointsToNode* src, PointsToNode* dst) {
  5.1820 +  assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
  5.1821 +  assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
  5.1822 +  PointsToNode* ptadr = _nodes.at(n->_idx);
  5.1823 +  if (ptadr != NULL) {
  5.1824 +    assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
  5.1825 +    return;
  5.1826 +  }
  5.1827 +  Compile* C = _compile;
  5.1828 +  ptadr = new (C->comp_arena()) ArraycopyNode(C, n, es);
  5.1829 +  _nodes.at_put(n->_idx, ptadr);
  5.1830 +  // Add edge from arraycopy node to source object.
  5.1831 +  (void)add_edge(ptadr, src);
  5.1832 +  src->set_arraycopy_src();
  5.1833 +  // Add edge from destination object to arraycopy node.
  5.1834 +  (void)add_edge(dst, ptadr);
  5.1835 +  dst->set_arraycopy_dst();
  5.1836 +}
  5.1837 +
// Decide whether the field addressed by AddP node 'n' at 'offset' holds an
// oop (object reference), based on the address type and, for raw/klass
// pointers, on the load/store opcodes that use the address.
bool ConnectionGraph::is_oop_field(Node* n, int offset) {
  const Type* adr_type = n->as_AddP()->bottom_type();
  BasicType bt = T_INT; // Default: not an oop unless proven otherwise.
  if (offset == Type::OffsetBot) {
    // Check only oop fields.
    if (!adr_type->isa_aryptr() ||
        (adr_type->isa_aryptr()->klass() == NULL) ||
         adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
      // OffsetBot is used to reference array's element. Ignore first AddP.
      if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
        bt = T_OBJECT;
      }
    }
  } else if (offset != oopDesc::klass_offset_in_bytes()) {
    if (adr_type->isa_instptr()) {
      // Instance field: ask the alias type for the field's declared type.
      ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
      if (field != NULL) {
        bt = field->layout_type();
      } else {
        // Ignore non field load (for example, klass load)
      }
    } else if (adr_type->isa_aryptr()) {
      if (offset == arrayOopDesc::length_offset_in_bytes()) {
        // Ignore array length load.
      } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
        // Ignore first AddP.
      } else {
        // Array element: use the array's element type.
        const Type* elemtype = adr_type->isa_aryptr()->elem();
        bt = elemtype->array_element_basic_type();
      }
    } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
      // Allocation initialization, ThreadLocal field access, unsafe access:
      // the address type gives no field info, so inspect the memory ops
      // using this address — an oop load/store marks it as an oop field.
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        int opcode = n->fast_out(i)->Opcode();
        if (opcode == Op_StoreP || opcode == Op_LoadP ||
            opcode == Op_StoreN || opcode == Op_LoadN) {
          bt = T_OBJECT;
        }
      }
    }
  }
  return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
}
  5.1881 +
  5.1882 +// Returns unique pointed java object or NULL.
  5.1883 +JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
  5.1884 +  assert(!_collecting, "should not call when contructed graph");
  5.1885 +  // If the node was created after the escape computation we can't answer.
  5.1886 +  uint idx = n->_idx;
  5.1887 +  if (idx >= nodes_size()) {
  5.1888 +    return NULL;
  5.1889 +  }
  5.1890 +  PointsToNode* ptn = ptnode_adr(idx);
  5.1891 +  if (ptn->is_JavaObject()) {
  5.1892 +    return ptn->as_JavaObject();
  5.1893 +  }
  5.1894 +  assert(ptn->is_LocalVar(), "sanity");
  5.1895 +  // Check all java objects it points to.
  5.1896 +  JavaObjectNode* jobj = NULL;
  5.1897 +  for (EdgeIterator i(ptn); i.has_next(); i.next()) {
  5.1898 +    PointsToNode* e = i.get();
  5.1899 +    if (e->is_JavaObject()) {
  5.1900 +      if (jobj == NULL) {
  5.1901 +        jobj = e->as_JavaObject();
  5.1902 +      } else if (jobj != e) {
  5.1903 +        return NULL;
  5.1904 +      }
  5.1905 +    }
  5.1906 +  }
  5.1907 +  return jobj;
  5.1908 +}
  5.1909 +
  5.1910 +// Return true if this node points only to non-escaping allocations.
  5.1911 +bool PointsToNode::non_escaping_allocation() {
  5.1912 +  if (is_JavaObject()) {
  5.1913 +    Node* n = ideal_node();
  5.1914 +    if (n->is_Allocate() || n->is_CallStaticJava()) {
  5.1915 +      return (escape_state() == PointsToNode::NoEscape);
  5.1916 +    } else {
  5.1917 +      return false;
  5.1918 +    }
  5.1919 +  }
  5.1920 +  assert(is_LocalVar(), "sanity");
  5.1921 +  // Check all java objects it points to.
  5.1922 +  for (EdgeIterator i(this); i.has_next(); i.next()) {
  5.1923 +    PointsToNode* e = i.get();
  5.1924 +    if (e->is_JavaObject()) {
  5.1925 +      Node* n = e->ideal_node();
  5.1926 +      if ((e->escape_state() != PointsToNode::NoEscape) ||
  5.1927 +          !(n->is_Allocate() || n->is_CallStaticJava())) {
  5.1928 +        return false;
  5.1929 +      }
  5.1930 +    }
  5.1931 +  }
  5.1932 +  return true;
  5.1933 +}
  5.1934 +
  5.1935 +// Return true if we know the node does not escape globally.
  5.1936 +bool ConnectionGraph::not_global_escape(Node *n) {
  5.1937 +  assert(!_collecting, "should not call during graph construction");
  5.1938 +  // If the node was created after the escape computation we can't answer.
  5.1939 +  uint idx = n->_idx;
  5.1940 +  if (idx >= nodes_size()) {
  5.1941 +    return false;
  5.1942 +  }
  5.1943 +  PointsToNode* ptn = ptnode_adr(idx);
  5.1944 +  PointsToNode::EscapeState es = ptn->escape_state();
  5.1945 +  // If we have already computed a value, return it.
  5.1946 +  if (es >= PointsToNode::GlobalEscape)
  5.1947 +    return false;
  5.1948 +  if (ptn->is_JavaObject()) {
  5.1949 +    return true; // (es < PointsToNode::GlobalEscape);
  5.1950 +  }
  5.1951 +  assert(ptn->is_LocalVar(), "sanity");
  5.1952 +  // Check all java objects it points to.
  5.1953 +  for (EdgeIterator i(ptn); i.has_next(); i.next()) {
  5.1954 +    if (i.get()->escape_state() >= PointsToNode::GlobalEscape)
  5.1955 +      return false;
  5.1956 +  }
  5.1957 +  return true;
  5.1958 +}
  5.1959 +
  5.1960 +
  5.1961 +// Helper functions
  5.1962 +
  5.1963 +// Return true if this node points to specified node or nodes it points to.
  5.1964 +bool PointsToNode::points_to(JavaObjectNode* ptn) const {
  5.1965 +  if (is_JavaObject()) {
  5.1966 +    return (this == ptn);
  5.1967 +  }
  5.1968 +  assert(is_LocalVar(), "sanity");
  5.1969 +  for (EdgeIterator i(this); i.has_next(); i.next()) {
  5.1970 +    if (i.get() == ptn)
  5.1971 +      return true;
  5.1972 +  }
  5.1973 +  return false;
  5.1974 +}
  5.1975 +
  5.1976 +// Return true if one node points to an other.
  5.1977 +bool PointsToNode::meet(PointsToNode* ptn) {
  5.1978 +  if (this == ptn) {
  5.1979 +    return true;
  5.1980 +  } else if (ptn->is_JavaObject()) {
  5.1981 +    return this->points_to(ptn->as_JavaObject());
  5.1982 +  } else if (this->is_JavaObject()) {
  5.1983 +    return ptn->points_to(this->as_JavaObject());
  5.1984 +  }
  5.1985 +  assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
  5.1986 +  int ptn_count =  ptn->edge_count();
  5.1987 +  for (EdgeIterator i(this); i.has_next(); i.next()) {
  5.1988 +    PointsToNode* this_e = i.get();
  5.1989 +    for (int j = 0; j < ptn_count; j++) {
  5.1990 +      if (this_e == ptn->edge(j))
  5.1991 +        return true;
  5.1992 +    }
  5.1993 +  }
  5.1994 +  return false;
  5.1995 +}
  5.1996 +
  5.1997 +#ifdef ASSERT
  5.1998 +// Return true if bases point to this java object.
  5.1999 +bool FieldNode::has_base(JavaObjectNode* jobj) const {
  5.2000 +  for (BaseIterator i(this); i.has_next(); i.next()) {
  5.2001 +    if (i.get() == jobj)
  5.2002 +      return true;
  5.2003 +  }
  5.2004 +  return false;
  5.2005 +}
  5.2006 +#endif
  5.2007 +
  5.2008  int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
  5.2009    const Type *adr_type = phase->type(adr);
  5.2010    if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
  5.2011 @@ -171,286 +1948,7 @@
  5.2012    return t_ptr->offset();
  5.2013  }
  5.2014  
  5.2015 -void ConnectionGraph::add_field_edge(uint from_i, uint to_i, int offset) {
  5.2016 -  // Don't add fields to NULL pointer.
  5.2017 -  if (is_null_ptr(from_i))
  5.2018 -    return;
  5.2019 -  PointsToNode *f = ptnode_adr(from_i);
  5.2020 -  PointsToNode *t = ptnode_adr(to_i);
  5.2021 -
  5.2022 -  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
  5.2023 -  assert(f->node_type() == PointsToNode::JavaObject, "invalid destination of Field edge");
  5.2024 -  assert(t->node_type() == PointsToNode::Field, "invalid destination of Field edge");
  5.2025 -  assert (t->offset() == -1 || t->offset() == offset, "conflicting field offsets");
  5.2026 -  t->set_offset(offset);
  5.2027 -
  5.2028 -  add_edge(f, to_i, PointsToNode::FieldEdge);
  5.2029 -}
  5.2030 -
  5.2031 -void ConnectionGraph::set_escape_state(uint ni, PointsToNode::EscapeState es) {
  5.2032 -  // Don't change non-escaping state of NULL pointer.
  5.2033 -  if (is_null_ptr(ni))
  5.2034 -    return;
  5.2035 -  PointsToNode *npt = ptnode_adr(ni);
  5.2036 -  PointsToNode::EscapeState old_es = npt->escape_state();
  5.2037 -  if (es > old_es)
  5.2038 -    npt->set_escape_state(es);
  5.2039 -}
  5.2040 -
  5.2041 -void ConnectionGraph::add_node(Node *n, PointsToNode::NodeType nt,
  5.2042 -                               PointsToNode::EscapeState es, bool done) {
  5.2043 -  PointsToNode* ptadr = ptnode_adr(n->_idx);
  5.2044 -  ptadr->_node = n;
  5.2045 -  ptadr->set_node_type(nt);
  5.2046 -
  5.2047 -  // inline set_escape_state(idx, es);
  5.2048 -  PointsToNode::EscapeState old_es = ptadr->escape_state();
  5.2049 -  if (es > old_es)
  5.2050 -    ptadr->set_escape_state(es);
  5.2051 -
  5.2052 -  if (done)
  5.2053 -    _processed.set(n->_idx);
  5.2054 -}
  5.2055 -
  5.2056 -PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n) {
  5.2057 -  uint idx = n->_idx;
  5.2058 -  PointsToNode::EscapeState es;
  5.2059 -
  5.2060 -  // If we are still collecting or there were no non-escaping allocations
  5.2061 -  // we don't know the answer yet
  5.2062 -  if (_collecting)
  5.2063 -    return PointsToNode::UnknownEscape;
  5.2064 -
  5.2065 -  // if the node was created after the escape computation, return
  5.2066 -  // UnknownEscape
  5.2067 -  if (idx >= nodes_size())
  5.2068 -    return PointsToNode::UnknownEscape;
  5.2069 -
  5.2070 -  es = ptnode_adr(idx)->escape_state();
  5.2071 -
  5.2072 -  // if we have already computed a value, return it
  5.2073 -  if (es != PointsToNode::UnknownEscape &&
  5.2074 -      ptnode_adr(idx)->node_type() == PointsToNode::JavaObject)
  5.2075 -    return es;
  5.2076 -
  5.2077 -  // PointsTo() calls n->uncast() which can return a new ideal node.
  5.2078 -  if (n->uncast()->_idx >= nodes_size())
  5.2079 -    return PointsToNode::UnknownEscape;
  5.2080 -
  5.2081 -  PointsToNode::EscapeState orig_es = es;
  5.2082 -
  5.2083 -  // compute max escape state of anything this node could point to
  5.2084 -  for(VectorSetI i(PointsTo(n)); i.test() && es != PointsToNode::GlobalEscape; ++i) {
  5.2085 -    uint pt = i.elem;
  5.2086 -    PointsToNode::EscapeState pes = ptnode_adr(pt)->escape_state();
  5.2087 -    if (pes > es)
  5.2088 -      es = pes;
  5.2089 -  }
  5.2090 -  if (orig_es != es) {
  5.2091 -    // cache the computed escape state
  5.2092 -    assert(es > orig_es, "should have computed an escape state");
  5.2093 -    set_escape_state(idx, es);
  5.2094 -  } // orig_es could be PointsToNode::UnknownEscape
  5.2095 -  return es;
  5.2096 -}
  5.2097 -
  5.2098 -VectorSet* ConnectionGraph::PointsTo(Node * n) {
  5.2099 -  pt_ptset.Reset();
  5.2100 -  pt_visited.Reset();
  5.2101 -  pt_worklist.clear();
  5.2102 -
  5.2103 -#ifdef ASSERT
  5.2104 -  Node *orig_n = n;
  5.2105 -#endif
  5.2106 -
  5.2107 -  n = n->uncast();
  5.2108 -  PointsToNode* npt = ptnode_adr(n->_idx);
  5.2109 -
  5.2110 -  // If we have a JavaObject, return just that object
  5.2111 -  if (npt->node_type() == PointsToNode::JavaObject) {
  5.2112 -    pt_ptset.set(n->_idx);
  5.2113 -    return &pt_ptset;
  5.2114 -  }
  5.2115 -#ifdef ASSERT
  5.2116 -  if (npt->_node == NULL) {
  5.2117 -    if (orig_n != n)
  5.2118 -      orig_n->dump();
  5.2119 -    n->dump();
  5.2120 -    assert(npt->_node != NULL, "unregistered node");
  5.2121 -  }
  5.2122 -#endif
  5.2123 -  pt_worklist.push(n->_idx);
  5.2124 -  while(pt_worklist.length() > 0) {
  5.2125 -    int ni = pt_worklist.pop();
  5.2126 -    if (pt_visited.test_set(ni))
  5.2127 -      continue;
  5.2128 -
  5.2129 -    PointsToNode* pn = ptnode_adr(ni);
  5.2130 -    // ensure that all inputs of a Phi have been processed
  5.2131 -    assert(!_collecting || !pn->_node->is_Phi() || _processed.test(ni),"");
  5.2132 -
  5.2133 -    int edges_processed = 0;
  5.2134 -    uint e_cnt = pn->edge_count();
  5.2135 -    for (uint e = 0; e < e_cnt; e++) {
  5.2136 -      uint etgt = pn->edge_target(e);
  5.2137 -      PointsToNode::EdgeType et = pn->edge_type(e);
  5.2138 -      if (et == PointsToNode::PointsToEdge) {
  5.2139 -        pt_ptset.set(etgt);
  5.2140 -        edges_processed++;
  5.2141 -      } else if (et == PointsToNode::DeferredEdge) {
  5.2142 -        pt_worklist.push(etgt);
  5.2143 -        edges_processed++;
  5.2144 -      } else {
  5.2145 -        assert(false,"neither PointsToEdge or DeferredEdge");
  5.2146 -      }
  5.2147 -    }
  5.2148 -    if (edges_processed == 0) {
  5.2149 -      // no deferred or pointsto edges found.  Assume the value was set
  5.2150 -      // outside this method.  Add the phantom object to the pointsto set.
  5.2151 -      pt_ptset.set(_phantom_object);
  5.2152 -    }
  5.2153 -  }
  5.2154 -  return &pt_ptset;
  5.2155 -}
  5.2156 -
  5.2157 -void ConnectionGraph::remove_deferred(uint ni, GrowableArray<uint>* deferred_edges, VectorSet* visited) {
  5.2158 -  // This method is most expensive during ConnectionGraph construction.
  5.2159 -  // Reuse vectorSet and an additional growable array for deferred edges.
  5.2160 -  deferred_edges->clear();
  5.2161 -  visited->Reset();
  5.2162 -
  5.2163 -  visited->set(ni);
  5.2164 -  PointsToNode *ptn = ptnode_adr(ni);
  5.2165 -  assert(ptn->node_type() == PointsToNode::LocalVar ||
  5.2166 -         ptn->node_type() == PointsToNode::Field, "sanity");
  5.2167 -  assert(ptn->edge_count() != 0, "should have at least phantom_object");
  5.2168 -
  5.2169 -  // Mark current edges as visited and move deferred edges to separate array.
  5.2170 -  for (uint i = 0; i < ptn->edge_count(); ) {
  5.2171 -    uint t = ptn->edge_target(i);
  5.2172 -#ifdef ASSERT
  5.2173 -    assert(!visited->test_set(t), "expecting no duplications");
  5.2174 -#else
  5.2175 -    visited->set(t);
  5.2176 -#endif
  5.2177 -    if (ptn->edge_type(i) == PointsToNode::DeferredEdge) {
  5.2178 -      ptn->remove_edge(t, PointsToNode::DeferredEdge);
  5.2179 -      deferred_edges->append(t);
  5.2180 -    } else {
  5.2181 -      i++;
  5.2182 -    }
  5.2183 -  }
  5.2184 -  for (int next = 0; next < deferred_edges->length(); ++next) {
  5.2185 -    uint t = deferred_edges->at(next);
  5.2186 -    PointsToNode *ptt = ptnode_adr(t);
  5.2187 -    uint e_cnt = ptt->edge_count();
  5.2188 -    assert(e_cnt != 0, "should have at least phantom_object");
  5.2189 -    for (uint e = 0; e < e_cnt; e++) {
  5.2190 -      uint etgt = ptt->edge_target(e);
  5.2191 -      if (visited->test_set(etgt))
  5.2192 -        continue;
  5.2193 -
  5.2194 -      PointsToNode::EdgeType et = ptt->edge_type(e);
  5.2195 -      if (et == PointsToNode::PointsToEdge) {
  5.2196 -        add_pointsto_edge(ni, etgt);
  5.2197 -      } else if (et == PointsToNode::DeferredEdge) {
  5.2198 -        deferred_edges->append(etgt);
  5.2199 -      } else {
  5.2200 -        assert(false,"invalid connection graph");
  5.2201 -      }
  5.2202 -    }
  5.2203 -  }
  5.2204 -  if (ptn->edge_count() == 0) {
  5.2205 -    // No pointsto edges found after deferred edges are removed.
  5.2206 -    // For example, in the next case where call is replaced
  5.2207 -    // with uncommon trap and as result array's load references
  5.2208 -    // itself through deferred edges:
  5.2209 -    //
  5.2210 -    // A a = b[i];
  5.2211 -    // if (c!=null) a = c.foo();
  5.2212 -    // b[i] = a;
  5.2213 -    //
  5.2214 -    // Assume the value was set outside this method and
  5.2215 -    // add edge to phantom object.
  5.2216 -    add_pointsto_edge(ni, _phantom_object);
  5.2217 -  }
  5.2218 -}
  5.2219 -
  5.2220 -
  5.2221 -//  Add an edge to node given by "to_i" from any field of adr_i whose offset
  5.2222 -//  matches "offset"  A deferred edge is added if to_i is a LocalVar, and
  5.2223 -//  a pointsto edge is added if it is a JavaObject
  5.2224 -
  5.2225 -void ConnectionGraph::add_edge_from_fields(uint adr_i, uint to_i, int offs) {
  5.2226 -  // No fields for NULL pointer.
  5.2227 -  if (is_null_ptr(adr_i)) {
  5.2228 -    return;
  5.2229 -  }
  5.2230 -  PointsToNode* an = ptnode_adr(adr_i);
  5.2231 -  PointsToNode* to = ptnode_adr(to_i);
  5.2232 -  bool deferred = (to->node_type() == PointsToNode::LocalVar);
  5.2233 -  bool escaped  = (to_i == _phantom_object) && (offs == Type::OffsetTop);
  5.2234 -  if (escaped) {
  5.2235 -    // Values in fields escaped during call.
  5.2236 -    assert(an->escape_state() >= PointsToNode::ArgEscape, "sanity");
  5.2237 -    offs = Type::OffsetBot;
  5.2238 -  }
  5.2239 -  for (uint fe = 0; fe < an->edge_count(); fe++) {
  5.2240 -    assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
  5.2241 -    int fi = an->edge_target(fe);
  5.2242 -    if (escaped) {
  5.2243 -      set_escape_state(fi, PointsToNode::GlobalEscape);
  5.2244 -    }
  5.2245 -    PointsToNode* pf = ptnode_adr(fi);
  5.2246 -    int po = pf->offset();
  5.2247 -    if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
  5.2248 -      if (deferred)
  5.2249 -        add_deferred_edge(fi, to_i);
  5.2250 -      else
  5.2251 -        add_pointsto_edge(fi, to_i);
  5.2252 -    }
  5.2253 -  }
  5.2254 -}
  5.2255 -
  5.2256 -// Add a deferred  edge from node given by "from_i" to any field of adr_i
  5.2257 -// whose offset matches "offset".
  5.2258 -void ConnectionGraph::add_deferred_edge_to_fields(uint from_i, uint adr_i, int offs) {
  5.2259 -  // No fields for NULL pointer.
  5.2260 -  if (is_null_ptr(adr_i)) {
  5.2261 -    return;
  5.2262 -  }
  5.2263 -  if (adr_i == _phantom_object) {
  5.2264 -    // Add only one edge for unknown object.
  5.2265 -    add_pointsto_edge(from_i, _phantom_object);
  5.2266 -    return;
  5.2267 -  }
  5.2268 -  PointsToNode* an = ptnode_adr(adr_i);
  5.2269 -  bool is_alloc = an->_node->is_Allocate();
  5.2270 -  for (uint fe = 0; fe < an->edge_count(); fe++) {
  5.2271 -    assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
  5.2272 -    int fi = an->edge_target(fe);
  5.2273 -    PointsToNode* pf = ptnode_adr(fi);
  5.2274 -    int offset = pf->offset();
  5.2275 -    if (!is_alloc) {
  5.2276 -      // Assume the field was set outside this method if it is not Allocation
  5.2277 -      add_pointsto_edge(fi, _phantom_object);
  5.2278 -    }
  5.2279 -    if (offset == offs || offset == Type::OffsetBot || offs == Type::OffsetBot) {
  5.2280 -      add_deferred_edge(from_i, fi);
  5.2281 -    }
  5.2282 -  }
  5.2283 -  // Some fields references (AddP) may still be missing
  5.2284 -  // until Connection Graph construction is complete.
  5.2285 -  // For example, loads from RAW pointers with offset 0
  5.2286 -  // which don't have AddP.
  5.2287 -  // A reference to phantom_object will be added if
  5.2288 -  // a field reference is still missing after completing
  5.2289 -  // Connection Graph (see remove_deferred()).
  5.2290 -}
  5.2291 -
  5.2292 -// Helper functions
  5.2293 -
  5.2294 -static Node* get_addp_base(Node *addp) {
  5.2295 +Node* ConnectionGraph::get_addp_base(Node *addp) {
  5.2296    assert(addp->is_AddP(), "must be AddP");
  5.2297    //
  5.2298    // AddP cases for Base and Address inputs:
  5.2299 @@ -513,30 +2011,30 @@
  5.2300    //       | |
  5.2301    //       AddP  ( base == address )
  5.2302    //
  5.2303 -  Node *base = addp->in(AddPNode::Base)->uncast();
  5.2304 -  if (base->is_top()) { // The AddP case #3 and #6.
  5.2305 -    base = addp->in(AddPNode::Address)->uncast();
  5.2306 +  Node *base = addp->in(AddPNode::Base);
  5.2307 +  if (base->uncast()->is_top()) { // The AddP case #3 and #6.
  5.2308 +    base = addp->in(AddPNode::Address);
  5.2309      while (base->is_AddP()) {
  5.2310        // Case #6 (unsafe access) may have several chained AddP nodes.
  5.2311 -      assert(base->in(AddPNode::Base)->is_top(), "expected unsafe access address only");
  5.2312 -      base = base->in(AddPNode::Address)->uncast();
  5.2313 +      assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
  5.2314 +      base = base->in(AddPNode::Address);
  5.2315      }
  5.2316 -    assert(base->Opcode() == Op_ConP || base->Opcode() == Op_ThreadLocal ||
  5.2317 -           base->Opcode() == Op_CastX2P || base->is_DecodeN() ||
  5.2318 -           (base->is_Mem() && base->bottom_type() == TypeRawPtr::NOTNULL) ||
  5.2319 -           (base->is_Proj() && base->in(0)->is_Allocate()), "sanity");
  5.2320 +    Node* uncast_base = base->uncast();
  5.2321 +    int opcode = uncast_base->Opcode();
  5.2322 +    assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
  5.2323 +           opcode == Op_CastX2P || uncast_base->is_DecodeN() ||
  5.2324 +           (uncast_base->is_Mem() && uncast_base->bottom_type() == TypeRawPtr::NOTNULL) ||
  5.2325 +           (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
  5.2326    }
  5.2327    return base;
  5.2328  }
  5.2329  
  5.2330 -static Node* find_second_addp(Node* addp, Node* n) {
  5.2331 +Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
  5.2332    assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
  5.2333 -
  5.2334    Node* addp2 = addp->raw_out(0);
  5.2335    if (addp->outcnt() == 1 && addp2->is_AddP() &&
  5.2336        addp2->in(AddPNode::Base) == n &&
  5.2337        addp2->in(AddPNode::Address) == addp) {
  5.2338 -
  5.2339      assert(addp->in(AddPNode::Base) == n, "expecting the same base");
  5.2340      //
  5.2341      // Find array's offset to push it on worklist first and
  5.2342 @@ -575,7 +2073,8 @@
  5.2343  // Adjust the type and inputs of an AddP which computes the
  5.2344  // address of a field of an instance
  5.2345  //
  5.2346 -bool ConnectionGraph::split_AddP(Node *addp, Node *base,  PhaseGVN  *igvn) {
  5.2347 +bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
  5.2348 +  PhaseGVN* igvn = _igvn;
  5.2349    const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
  5.2350    assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
  5.2351    const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
  5.2352 @@ -612,7 +2111,6 @@
  5.2353        !base_t->klass()->is_subtype_of(t->klass())) {
  5.2354       return false; // bail out
  5.2355    }
  5.2356 -
  5.2357    const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
  5.2358    // Do NOT remove the next line: ensure a new alias index is allocated
  5.2359    // for the instance type. Note: C++ will not remove it since the call
  5.2360 @@ -620,9 +2118,7 @@
  5.2361    int alias_idx = _compile->get_alias_index(tinst);
  5.2362    igvn->set_type(addp, tinst);
  5.2363    // record the allocation in the node map
  5.2364 -  assert(ptnode_adr(addp->_idx)->_node != NULL, "should be registered");
  5.2365 -  set_map(addp->_idx, get_map(base->_idx));
  5.2366 -
  5.2367 +  set_map(addp, get_map(base->_idx));
  5.2368    // Set addp's Base and Address to 'base'.
  5.2369    Node *abase = addp->in(AddPNode::Base);
  5.2370    Node *adr   = addp->in(AddPNode::Address);
  5.2371 @@ -657,8 +2153,9 @@
  5.2372  // created phi or an existing phi.  Sets create_new to indicate whether a new
  5.2373  // phi was created.  Cache the last newly created phi in the node map.
  5.2374  //
  5.2375 -PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn, bool &new_created) {
  5.2376 +PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, bool &new_created) {
  5.2377    Compile *C = _compile;
  5.2378 +  PhaseGVN* igvn = _igvn;
  5.2379    new_created = false;
  5.2380    int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
  5.2381    // nothing to do if orig_phi is bottom memory or matches alias_idx
  5.2382 @@ -698,12 +2195,7 @@
  5.2383    C->copy_node_notes_to(result, orig_phi);
  5.2384    igvn->set_type(result, result->bottom_type());
  5.2385    record_for_optimizer(result);
  5.2386 -
  5.2387 -  debug_only(Node* pn = ptnode_adr(orig_phi->_idx)->_node;)
  5.2388 -  assert(pn == NULL || pn == orig_phi, "wrong node");
  5.2389 -  set_map(orig_phi->_idx, result);
  5.2390 -  ptnode_adr(orig_phi->_idx)->_node = orig_phi;
  5.2391 -
  5.2392 +  set_map(orig_phi, result);
  5.2393    new_created = true;
  5.2394    return result;
  5.2395  }
  5.2396 @@ -712,27 +2204,25 @@
  5.2397  // Return a new version of Memory Phi "orig_phi" with the inputs having the
  5.2398  // specified alias index.
  5.2399  //
  5.2400 -PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn) {
  5.2401 -
  5.2402 +PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist) {
  5.2403    assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
  5.2404    Compile *C = _compile;
  5.2405 +  PhaseGVN* igvn = _igvn;
  5.2406    bool new_phi_created;
  5.2407 -  PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, igvn, new_phi_created);
  5.2408 +  PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
  5.2409    if (!new_phi_created) {
  5.2410      return result;
  5.2411    }
  5.2412 -
  5.2413    GrowableArray<PhiNode *>  phi_list;
  5.2414    GrowableArray<uint>  cur_input;
  5.2415 -
  5.2416    PhiNode *phi = orig_phi;
  5.2417    uint idx = 1;
  5.2418    bool finished = false;
  5.2419    while(!finished) {
  5.2420      while (idx < phi->req()) {
  5.2421 -      Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, igvn);
  5.2422 +      Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
  5.2423        if (mem != NULL && mem->is_Phi()) {
  5.2424 -        PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, igvn, new_phi_created);
  5.2425 +        PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
  5.2426          if (new_phi_created) {
  5.2427            // found an phi for which we created a new split, push current one on worklist and begin
  5.2428            // processing new one
  5.2429 @@ -775,19 +2265,18 @@
  5.2430    return result;
  5.2431  }
  5.2432  
  5.2433 -
  5.2434  //
  5.2435  // The next methods are derived from methods in MemNode.
  5.2436  //
  5.2437 -static Node *step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
  5.2438 +Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
  5.2439    Node *mem = mmem;
  5.2440    // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
  5.2441    // means an array I have not precisely typed yet.  Do not do any
  5.2442    // alias stuff with it any time soon.
  5.2443 -  if( toop->base() != Type::AnyPtr &&
  5.2444 +  if (toop->base() != Type::AnyPtr &&
  5.2445        !(toop->klass() != NULL &&
  5.2446          toop->klass()->is_java_lang_Object() &&
  5.2447 -        toop->offset() == Type::OffsetBot) ) {
  5.2448 +        toop->offset() == Type::OffsetBot)) {
  5.2449      mem = mmem->memory_at(alias_idx);
  5.2450      // Update input if it is progress over what we have now
  5.2451    }
  5.2452 @@ -797,9 +2286,9 @@
  5.2453  //
  5.2454  // Move memory users to their memory slices.
  5.2455  //
  5.2456 -void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *igvn) {
  5.2457 +void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis) {
  5.2458    Compile* C = _compile;
  5.2459 -
  5.2460 +  PhaseGVN* igvn = _igvn;
  5.2461    const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
  5.2462    assert(tp != NULL, "ptr type");
  5.2463    int alias_idx = C->get_alias_index(tp);
  5.2464 @@ -816,7 +2305,7 @@
  5.2465        }
  5.2466        // Replace previous general reference to mem node.
  5.2467        uint orig_uniq = C->unique();
  5.2468 -      Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
  5.2469 +      Node* m = find_inst_mem(n, general_idx, orig_phis);
  5.2470        assert(orig_uniq == C->unique(), "no new nodes");
  5.2471        mmem->set_memory_at(general_idx, m);
  5.2472        --imax;
  5.2473 @@ -836,7 +2325,7 @@
  5.2474        }
  5.2475        // Move to general memory slice.
  5.2476        uint orig_uniq = C->unique();
  5.2477 -      Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
  5.2478 +      Node* m = find_inst_mem(n, general_idx, orig_phis);
  5.2479        assert(orig_uniq == C->unique(), "no new nodes");
  5.2480        igvn->hash_delete(use);
  5.2481        imax -= use->replace_edge(n, m);
  5.2482 @@ -873,10 +2362,11 @@
  5.2483  // Search memory chain of "mem" to find a MemNode whose address
  5.2484  // is the specified alias index.
  5.2485  //
  5.2486 -Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *phase) {
  5.2487 +Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis) {
  5.2488    if (orig_mem == NULL)
  5.2489      return orig_mem;
  5.2490 -  Compile* C = phase->C;
  5.2491 +  Compile* C = _compile;
  5.2492 +  PhaseGVN* igvn = _igvn;
  5.2493    const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
  5.2494    bool is_instance = (toop != NULL) && toop->is_known_instance();
  5.2495    Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
  5.2496 @@ -887,7 +2377,7 @@
  5.2497      if (result == start_mem)
  5.2498        break;  // hit one of our sentinels
  5.2499      if (result->is_Mem()) {
  5.2500 -      const Type *at = phase->type(result->in(MemNode::Address));
  5.2501 +      const Type *at = igvn->type(result->in(MemNode::Address));
  5.2502        if (at == Type::TOP)
  5.2503          break; // Dead
  5.2504        assert (at->isa_ptr() != NULL, "pointer type required.");
  5.2505 @@ -909,7 +2399,7 @@
  5.2506          break;  // hit one of our sentinels
  5.2507        } else if (proj_in->is_Call()) {
  5.2508          CallNode *call = proj_in->as_Call();
  5.2509 -        if (!call->may_modify(toop, phase)) {
  5.2510 +        if (!call->may_modify(toop, igvn)) {
  5.2511            result = call->in(TypeFunc::Memory);
  5.2512          }
  5.2513        } else if (proj_in->is_Initialize()) {
  5.2514 @@ -928,7 +2418,7 @@
  5.2515        if (result == mmem->base_memory()) {
  5.2516          // Didn't find instance memory, search through general slice recursively.
  5.2517          result = mmem->memory_at(C->get_general_index(alias_idx));
  5.2518 -        result = find_inst_mem(result, alias_idx, orig_phis, phase);
  5.2519 +        result = find_inst_mem(result, alias_idx, orig_phis);
  5.2520          if (C->failing()) {
  5.2521            return NULL;
  5.2522          }
  5.2523 @@ -936,7 +2426,7 @@
  5.2524        }
  5.2525      } else if (result->is_Phi() &&
  5.2526                 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
  5.2527 -      Node *un = result->as_Phi()->unique_input(phase);
  5.2528 +      Node *un = result->as_Phi()->unique_input(igvn);
  5.2529        if (un != NULL) {
  5.2530          orig_phis.append_if_missing(result->as_Phi());
  5.2531          result = un;
  5.2532 @@ -944,7 +2434,7 @@
  5.2533          break;
  5.2534        }
  5.2535      } else if (result->is_ClearArray()) {
  5.2536 -      if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), phase)) {
  5.2537 +      if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
  5.2538          // Can not bypass initialization of the instance
  5.2539          // we are looking for.
  5.2540          break;
  5.2541 @@ -952,7 +2442,7 @@
  5.2542        // Otherwise skip it (the call updated 'result' value).
  5.2543      } else if (result->Opcode() == Op_SCMemProj) {
  5.2544        assert(result->in(0)->is_LoadStore(), "sanity");
  5.2545 -      const Type *at = phase->type(result->in(0)->in(MemNode::Address));
  5.2546 +      const Type *at = igvn->type(result->in(0)->in(MemNode::Address));
  5.2547        if (at != Type::TOP) {
  5.2548          assert (at->isa_ptr() != NULL, "pointer type required.");
  5.2549          int idx = C->get_alias_index(at->is_ptr());
  5.2550 @@ -972,7 +2462,7 @@
  5.2551        orig_phis.append_if_missing(mphi);
  5.2552      } else if (C->get_alias_index(t) != alias_idx) {
  5.2553        // Create a new Phi with the specified alias index type.
  5.2554 -      result = split_memory_phi(mphi, alias_idx, orig_phis, phase);
  5.2555 +      result = split_memory_phi(mphi, alias_idx, orig_phis);
  5.2556      }
  5.2557    }
  5.2558    // the result is either MemNode, PhiNode, InitializeNode.
  5.2559 @@ -1071,12 +2561,12 @@
  5.2560  void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist) {
  5.2561    GrowableArray<Node *>  memnode_worklist;
  5.2562    GrowableArray<PhiNode *>  orig_phis;
  5.2563 -
  5.2564    PhaseIterGVN  *igvn = _igvn;
  5.2565    uint new_index_start = (uint) _compile->num_alias_types();
  5.2566    Arena* arena = Thread::current()->resource_area();
  5.2567    VectorSet visited(arena);
  5.2568 -
  5.2569 +  ideal_nodes.clear(); // Reset for use with set_map/get_map.
  5.2570 +  uint unique_old = _compile->unique();
  5.2571  
  5.2572    //  Phase 1:  Process possible allocations from alloc_worklist.
  5.2573    //  Create instance types for the CheckCastPP for allocations where possible.
  5.2574 @@ -1088,17 +2578,15 @@
  5.2575    while (alloc_worklist.length() != 0) {
  5.2576      Node *n = alloc_worklist.pop();
  5.2577      uint ni = n->_idx;
  5.2578 -    const TypeOopPtr* tinst = NULL;
  5.2579      if (n->is_Call()) {
  5.2580        CallNode *alloc = n->as_Call();
  5.2581        // copy escape information to call node
  5.2582        PointsToNode* ptn = ptnode_adr(alloc->_idx);
  5.2583 -      PointsToNode::EscapeState es = escape_state(alloc);
  5.2584 +      PointsToNode::EscapeState es = ptn->escape_state();
  5.2585        // We have an allocation or call which returns a Java object,
  5.2586        // see if it is unescaped.
  5.2587        if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable())
  5.2588          continue;
  5.2589 -
  5.2590        // Find CheckCastPP for the allocate or for the return value of a call
  5.2591        n = alloc->result_cast();
  5.2592        if (n == NULL) {            // No uses except Initialize node
  5.2593 @@ -1145,20 +2633,18 @@
  5.2594          // so it could be eliminated.
  5.2595          alloc->as_Allocate()->_is_scalar_replaceable = true;
  5.2596        }
  5.2597 -      set_escape_state(n->_idx, es); // CheckCastPP escape state
  5.2598 +      set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state
  5.2599        // in order for an object to be scalar-replaceable, it must be:
  5.2600        //   - a direct allocation (not a call returning an object)
  5.2601        //   - non-escaping
  5.2602        //   - eligible to be a unique type
  5.2603        //   - not determined to be ineligible by escape analysis
  5.2604 -      assert(ptnode_adr(alloc->_idx)->_node != NULL &&
  5.2605 -             ptnode_adr(n->_idx)->_node != NULL, "should be registered");
  5.2606 -      set_map(alloc->_idx, n);
  5.2607 -      set_map(n->_idx, alloc);
  5.2608 +      set_map(alloc, n);
  5.2609 +      set_map(n, alloc);
  5.2610        const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
  5.2611        if (t == NULL)
  5.2612          continue;  // not a TypeOopPtr
  5.2613 -      tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni);
  5.2614 +      const TypeOopPtr* tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni);
  5.2615        igvn->hash_delete(n);
  5.2616        igvn->set_type(n,  tinst);
  5.2617        n->raise_bottom_type(tinst);
  5.2618 @@ -1168,9 +2654,10 @@
  5.2619  
  5.2620          // First, put on the worklist all Field edges from Connection Graph
  5.2621          // which is more accurate then putting immediate users from Ideal Graph.
  5.2622 -        for (uint e = 0; e < ptn->edge_count(); e++) {
  5.2623 -          Node *use = ptnode_adr(ptn->edge_target(e))->_node;
  5.2624 -          assert(ptn->edge_type(e) == PointsToNode::FieldEdge && use->is_AddP(),
  5.2625 +        for (EdgeIterator e(ptn); e.has_next(); e.next()) {
  5.2626 +          PointsToNode* tgt = e.get();
  5.2627 +          Node* use = tgt->ideal_node();
  5.2628 +          assert(tgt->is_Field() && use->is_AddP(),
  5.2629                   "only AddP nodes are Field edges in CG");
  5.2630            if (use->outcnt() > 0) { // Don't process dead nodes
  5.2631              Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
  5.2632 @@ -1202,16 +2689,18 @@
  5.2633          }
  5.2634        }
  5.2635      } else if (n->is_AddP()) {
  5.2636 -      VectorSet* ptset = PointsTo(get_addp_base(n));
  5.2637 -      assert(ptset->Size() == 1, "AddP address is unique");
  5.2638 -      uint elem = ptset->getelem(); // Allocation node's index
  5.2639 -      if (elem == _phantom_object) {
  5.2640 -        assert(false, "escaped allocation");
  5.2641 -        continue; // Assume the value was set outside this method.
  5.2642 +      JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
  5.2643 +      if (jobj == NULL || jobj == phantom_obj) {
  5.2644 +#ifdef ASSERT
  5.2645 +        ptnode_adr(get_addp_base(n)->_idx)->dump();
  5.2646 +        ptnode_adr(n->_idx)->dump();
  5.2647 +        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
  5.2648 +#endif
  5.2649 +        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
  5.2650 +        return;
  5.2651        }
  5.2652 -      Node *base = get_map(elem);  // CheckCastPP node
  5.2653 -      if (!split_AddP(n, base, igvn)) continue; // wrong type from dead path
  5.2654 -      tinst = igvn->type(base)->isa_oopptr();
  5.2655 +      Node *base = get_map(jobj->idx());  // CheckCastPP node
  5.2656 +      if (!split_AddP(n, base)) continue; // wrong type from dead path
  5.2657      } else if (n->is_Phi() ||
  5.2658                 n->is_CheckCastPP() ||
  5.2659                 n->is_EncodeP() ||
  5.2660 @@ -1221,18 +2710,20 @@
  5.2661          assert(n->is_Phi(), "loops only through Phi's");
  5.2662          continue;  // already processed
  5.2663        }
  5.2664 -      VectorSet* ptset = PointsTo(n);
  5.2665 -      if (ptset->Size() == 1) {
  5.2666 -        uint elem = ptset->getelem(); // Allocation node's index
  5.2667 -        if (elem == _phantom_object) {
  5.2668 -          assert(false, "escaped allocation");
  5.2669 -          continue; // Assume the value was set outside this method.
  5.2670 -        }
  5.2671 -        Node *val = get_map(elem);   // CheckCastPP node
  5.2672 +      JavaObjectNode* jobj = unique_java_object(n);
  5.2673 +      if (jobj == NULL || jobj == phantom_obj) {
  5.2674 +#ifdef ASSERT
  5.2675 +        ptnode_adr(n->_idx)->dump();
  5.2676 +        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
  5.2677 +#endif
  5.2678 +        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
  5.2679 +        return;
  5.2680 +      } else {
  5.2681 +        Node *val = get_map(jobj->idx());   // CheckCastPP node
  5.2682          TypeNode *tn = n->as_Type();
  5.2683 -        tinst = igvn->type(val)->isa_oopptr();
  5.2684 +        const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
  5.2685          assert(tinst != NULL && tinst->is_known_instance() &&
  5.2686 -               (uint)tinst->instance_id() == elem , "instance type expected.");
  5.2687 +               tinst->instance_id() == jobj->idx() , "instance type expected.");
  5.2688  
  5.2689          const Type *tn_type = igvn->type(tn);
  5.2690          const TypeOopPtr *tn_t;
  5.2691 @@ -1241,7 +2732,6 @@
  5.2692          } else {
  5.2693            tn_t = tn_type->isa_oopptr();
  5.2694          }
  5.2695 -
  5.2696          if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
  5.2697            if (tn_type->isa_narrowoop()) {
  5.2698              tn_type = tinst->make_narrowoop();
  5.2699 @@ -1314,13 +2804,13 @@
  5.2700    }
  5.2701    // New alias types were created in split_AddP().
  5.2702    uint new_index_end = (uint) _compile->num_alias_types();
  5.2703 +  assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");
  5.2704  
  5.2705    //  Phase 2:  Process MemNode's from memnode_worklist. compute new address type and
  5.2706    //            compute new values for Memory inputs  (the Memory inputs are not
  5.2707    //            actually updated until phase 4.)
  5.2708    if (memnode_worklist.length() == 0)
  5.2709      return;  // nothing to do
  5.2710 -
  5.2711    while (memnode_worklist.length() != 0) {
  5.2712      Node *n = memnode_worklist.pop();
  5.2713      if (visited.test_set(n->_idx))
  5.2714 @@ -1341,17 +2831,14 @@
  5.2715        assert (addr_t->isa_ptr() != NULL, "pointer type required.");
  5.2716        int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
  5.2717        assert ((uint)alias_idx < new_index_end, "wrong alias index");
  5.2718 -      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis, igvn);
  5.2719 +      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
  5.2720        if (_compile->failing()) {
  5.2721          return;
  5.2722        }
  5.2723        if (mem != n->in(MemNode::Memory)) {
  5.2724          // We delay the memory edge update since we need old one in
  5.2725          // MergeMem code below when instances memory slices are separated.
  5.2726 -        debug_only(Node* pn = ptnode_adr(n->_idx)->_node;)
  5.2727 -        assert(pn == NULL || pn == n, "wrong node");
  5.2728 -        set_map(n->_idx, mem);
  5.2729 -        ptnode_adr(n->_idx)->_node = n;
  5.2730 +        set_map(n, mem);
  5.2731        }
  5.2732        if (n->is_Load()) {
  5.2733          continue;  // don't push users
  5.2734 @@ -1442,7 +2929,7 @@
  5.2735          if((uint)_compile->get_general_index(ni) == i) {
  5.2736            Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
  5.2737            if (nmm->is_empty_memory(m)) {
  5.2738 -            Node* result = find_inst_mem(mem, ni, orig_phis, igvn);
  5.2739 +            Node* result = find_inst_mem(mem, ni, orig_phis);
  5.2740              if (_compile->failing()) {
  5.2741                return;
  5.2742              }
  5.2743 @@ -1458,7 +2945,7 @@
  5.2744        if (result == nmm->base_memory()) {
  5.2745          // Didn't find instance memory, search through general slice recursively.
  5.2746          result = nmm->memory_at(_compile->get_general_index(ni));
  5.2747 -        result = find_inst_mem(result, ni, orig_phis, igvn);
  5.2748 +        result = find_inst_mem(result, ni, orig_phis);
  5.2749          if (_compile->failing()) {
  5.2750            return;
  5.2751          }
  5.2752 @@ -1482,7 +2969,7 @@
  5.2753      igvn->hash_delete(phi);
  5.2754      for (uint i = 1; i < phi->req(); i++) {
  5.2755        Node *mem = phi->in(i);
  5.2756 -      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis, igvn);
  5.2757 +      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
  5.2758        if (_compile->failing()) {
  5.2759          return;
  5.2760        }
  5.2761 @@ -1496,39 +2983,36 @@
  5.2762  
  5.2763    // Update the memory inputs of MemNodes with the value we computed
  5.2764    // in Phase 2 and move stores memory users to corresponding memory slices.
  5.2765 -
  5.2766    // Disable memory split verification code until the fix for 6984348.
  5.2767    // Currently it produces false negative results since it does not cover all cases.
  5.2768  #if 0 // ifdef ASSERT
  5.2769    visited.Reset();
  5.2770    Node_Stack old_mems(arena, _compile->unique() >> 2);
  5.2771  #endif
  5.2772 -  for (uint i = 0; i < nodes_size(); i++) {
  5.2773 -    Node *nmem = get_map(i);
  5.2774 -    if (nmem != NULL) {
  5.2775 -      Node *n = ptnode_adr(i)->_node;
  5.2776 -      assert(n != NULL, "sanity");
  5.2777 -      if (n->is_Mem()) {
  5.2778 +  for (uint i = 0; i < ideal_nodes.size(); i++) {
  5.2779 +    Node*    n = ideal_nodes.at(i);
  5.2780 +    Node* nmem = get_map(n->_idx);
  5.2781 +    assert(nmem != NULL, "sanity");
  5.2782 +    if (n->is_Mem()) {
  5.2783  #if 0 // ifdef ASSERT
  5.2784 -        Node* old_mem = n->in(MemNode::Memory);
  5.2785 -        if (!visited.test_set(old_mem->_idx)) {
  5.2786 -          old_mems.push(old_mem, old_mem->outcnt());
  5.2787 -        }
  5.2788 +      Node* old_mem = n->in(MemNode::Memory);
  5.2789 +      if (!visited.test_set(old_mem->_idx)) {
  5.2790 +        old_mems.push(old_mem, old_mem->outcnt());
  5.2791 +      }
  5.2792  #endif
  5.2793 -        assert(n->in(MemNode::Memory) != nmem, "sanity");
  5.2794 -        if (!n->is_Load()) {
  5.2795 -          // Move memory users of a store first.
  5.2796 -          move_inst_mem(n, orig_phis, igvn);
  5.2797 -        }
  5.2798 -        // Now update memory input
  5.2799 -        igvn->hash_delete(n);
  5.2800 -        n->set_req(MemNode::Memory, nmem);
  5.2801 -        igvn->hash_insert(n);
  5.2802 -        record_for_optimizer(n);
  5.2803 -      } else {
  5.2804 -        assert(n->is_Allocate() || n->is_CheckCastPP() ||
  5.2805 -               n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
  5.2806 +      assert(n->in(MemNode::Memory) != nmem, "sanity");
  5.2807 +      if (!n->is_Load()) {
  5.2808 +        // Move memory users of a store first.
  5.2809 +        move_inst_mem(n, orig_phis);
  5.2810        }
  5.2811 +      // Now update memory input
  5.2812 +      igvn->hash_delete(n);
  5.2813 +      n->set_req(MemNode::Memory, nmem);
  5.2814 +      igvn->hash_insert(n);
  5.2815 +      record_for_optimizer(n);
  5.2816 +    } else {
  5.2817 +      assert(n->is_Allocate() || n->is_CheckCastPP() ||
  5.2818 +             n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
  5.2819      }
  5.2820    }
  5.2821  #if 0 // ifdef ASSERT
  5.2822 @@ -1542,1571 +3026,72 @@
  5.2823  #endif
  5.2824  }
  5.2825  
  5.2826 -bool ConnectionGraph::has_candidates(Compile *C) {
  5.2827 -  // EA brings benefits only when the code has allocations and/or locks which
  5.2828 -  // are represented by ideal Macro nodes.
  5.2829 -  int cnt = C->macro_count();
  5.2830 -  for( int i=0; i < cnt; i++ ) {
  5.2831 -    Node *n = C->macro_node(i);
  5.2832 -    if ( n->is_Allocate() )
  5.2833 -      return true;
  5.2834 -    if( n->is_Lock() ) {
  5.2835 -      Node* obj = n->as_Lock()->obj_node()->uncast();
  5.2836 -      if( !(obj->is_Parm() || obj->is_Con()) )
  5.2837 -        return true;
  5.2838 +#ifndef PRODUCT
  5.2839 +static const char *node_type_names[] = {
  5.2840 +  "UnknownType",
  5.2841 +  "JavaObject",
  5.2842 +  "LocalVar",
  5.2843 +  "Field",
  5.2844 +  "Arraycopy"
  5.2845 +};
  5.2846 +
  5.2847 +static const char *esc_names[] = {
  5.2848 +  "UnknownEscape",
  5.2849 +  "NoEscape",
  5.2850 +  "ArgEscape",
  5.2851 +  "GlobalEscape"
  5.2852 +};
  5.2853 +
  5.2854 +void PointsToNode::dump(bool print_state) const {
  5.2855 +  NodeType nt = node_type();
  5.2856 +  tty->print("%s ", node_type_names[(int) nt]);
  5.2857 +  if (print_state) {
  5.2858 +    EscapeState es = escape_state();
  5.2859 +    EscapeState fields_es = fields_escape_state();
  5.2860 +    tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
  5.2861 +    if (nt == PointsToNode::JavaObject && !this->scalar_replaceable())
  5.2862 +      tty->print("NSR");
  5.2863 +  }
  5.2864 +  if (is_Field()) {
  5.2865 +    FieldNode* f = (FieldNode*)this;
  5.2866 +    tty->print("(");
  5.2867 +    for (BaseIterator i(f); i.has_next(); i.next()) {
  5.2868 +      PointsToNode* b = i.get();
  5.2869 +      tty->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : ""));
  5.2870      }
  5.2871 +    tty->print(" )");
  5.2872    }
  5.2873 -  return false;
  5.2874 +  tty->print("[");
  5.2875 +  for (EdgeIterator i(this); i.has_next(); i.next()) {
  5.2876 +    PointsToNode* e = i.get();
  5.2877 +    tty->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
  5.2878 +  }
  5.2879 +  tty->print(" [");
  5.2880 +  for (UseIterator i(this); i.has_next(); i.next()) {
  5.2881 +    PointsToNode* u = i.get();
  5.2882 +    bool is_base = false;
  5.2883 +    if (PointsToNode::is_base_use(u)) {
  5.2884 +      is_base = true;
  5.2885 +      u = PointsToNode::get_use_node(u)->as_Field();
  5.2886 +    }
  5.2887 +    tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
  5.2888 +  }
  5.2889 +  tty->print(" ]]  ");
  5.2890 +  if (_node == NULL)
  5.2891 +    tty->print_cr("<null>");
  5.2892 +  else
  5.2893 +    _node->dump();
  5.2894  }
  5.2895  
  5.2896 -void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  5.2897 -  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
  5.2898 -  // to create space for them in ConnectionGraph::_nodes[].
  5.2899 -  Node* oop_null = igvn->zerocon(T_OBJECT);
  5.2900 -  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  5.2901 -
  5.2902 -  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
  5.2903 -  // Perform escape analysis
  5.2904 -  if (congraph->compute_escape()) {
  5.2905 -    // There are non escaping objects.
  5.2906 -    C->set_congraph(congraph);
  5.2907 -  }
  5.2908 -
  5.2909 -  // Cleanup.
  5.2910 -  if (oop_null->outcnt() == 0)
  5.2911 -    igvn->hash_delete(oop_null);
  5.2912 -  if (noop_null->outcnt() == 0)
  5.2913 -    igvn->hash_delete(noop_null);
  5.2914 -}
  5.2915 -
  5.2916 -bool ConnectionGraph::compute_escape() {
  5.2917 -  Compile* C = _compile;
  5.2918 -
  5.2919 -  // 1. Populate Connection Graph (CG) with Ideal nodes.
  5.2920 -
  5.2921 -  Unique_Node_List worklist_init;
  5.2922 -  worklist_init.map(C->unique(), NULL);  // preallocate space
  5.2923 -
  5.2924 -  // Initialize worklist
  5.2925 -  if (C->root() != NULL) {
  5.2926 -    worklist_init.push(C->root());
  5.2927 -  }
  5.2928 -
  5.2929 -  GrowableArray<Node*> alloc_worklist;
  5.2930 -  GrowableArray<Node*> addp_worklist;
  5.2931 -  GrowableArray<Node*> ptr_cmp_worklist;
  5.2932 -  GrowableArray<Node*> storestore_worklist;
  5.2933 -  PhaseGVN* igvn = _igvn;
  5.2934 -
  5.2935 -  // Push all useful nodes onto CG list and set their type.
  5.2936 -  for( uint next = 0; next < worklist_init.size(); ++next ) {
  5.2937 -    Node* n = worklist_init.at(next);
  5.2938 -    record_for_escape_analysis(n, igvn);
  5.2939 -    // Only allocations and java static calls results are checked
  5.2940 -    // for an escape status. See process_call_result() below.
  5.2941 -    if (n->is_Allocate() || n->is_CallStaticJava() &&
  5.2942 -        ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject) {
  5.2943 -      alloc_worklist.append(n);
  5.2944 -    } else if(n->is_AddP()) {
  5.2945 -      // Collect address nodes. Use them during stage 3 below
  5.2946 -      // to build initial connection graph field edges.
  5.2947 -      addp_worklist.append(n);
  5.2948 -    } else if (n->is_MergeMem()) {
  5.2949 -      // Collect all MergeMem nodes to add memory slices for
  5.2950 -      // scalar replaceable objects in split_unique_types().
  5.2951 -      _mergemem_worklist.append(n->as_MergeMem());
  5.2952 -    } else if (OptimizePtrCompare && n->is_Cmp() &&
  5.2953 -               (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
  5.2954 -      // Compare pointers nodes
  5.2955 -      ptr_cmp_worklist.append(n);
  5.2956 -    } else if (n->is_MemBarStoreStore()) {
  5.2957 -      // Collect all MemBarStoreStore nodes so that depending on the
  5.2958 -      // escape status of the associated Allocate node some of them
  5.2959 -      // may be eliminated.
  5.2960 -      storestore_worklist.append(n);
  5.2961 -    }
  5.2962 -    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  5.2963 -      Node* m = n->fast_out(i);   // Get user
  5.2964 -      worklist_init.push(m);
  5.2965 -    }
  5.2966 -  }
  5.2967 -
  5.2968 -  if (alloc_worklist.length() == 0) {
  5.2969 -    _collecting = false;
  5.2970 -    return false; // Nothing to do.
  5.2971 -  }
  5.2972 -
  5.2973 -  // 2. First pass to create simple CG edges (doesn't require to walk CG).
  5.2974 -  uint delayed_size = _delayed_worklist.size();
  5.2975 -  for( uint next = 0; next < delayed_size; ++next ) {
  5.2976 -    Node* n = _delayed_worklist.at(next);
  5.2977 -    build_connection_graph(n, igvn);
  5.2978 -  }
  5.2979 -
  5.2980 -  // 3. Pass to create initial fields edges (JavaObject -F-> AddP)
  5.2981 -  //    to reduce number of iterations during stage 4 below.
  5.2982 -  uint addp_length = addp_worklist.length();
  5.2983 -  for( uint next = 0; next < addp_length; ++next ) {
  5.2984 -    Node* n = addp_worklist.at(next);
  5.2985 -    Node* base = get_addp_base(n);
  5.2986 -    if (base->is_Proj() && base->in(0)->is_Call())
  5.2987 -      base = base->in(0);
  5.2988 -    PointsToNode::NodeType nt = ptnode_adr(base->_idx)->node_type();
  5.2989 -    if (nt == PointsToNode::JavaObject) {
  5.2990 -      build_connection_graph(n, igvn);
  5.2991 -    }
  5.2992 -  }
  5.2993 -
  5.2994 -  GrowableArray<int> cg_worklist;
  5.2995 -  cg_worklist.append(_phantom_object);
  5.2996 -  GrowableArray<uint>  worklist;
  5.2997 -
  5.2998 -  // 4. Build Connection Graph which need
  5.2999 -  //    to walk the connection graph.
  5.3000 -  _progress = false;
  5.3001 -  for (uint ni = 0; ni < nodes_size(); ni++) {
  5.3002 -    PointsToNode* ptn = ptnode_adr(ni);
  5.3003 -    Node *n = ptn->_node;
  5.3004 -    if (n != NULL) { // Call, AddP, LoadP, StoreP
  5.3005 -      build_connection_graph(n, igvn);
  5.3006 -      if (ptn->node_type() != PointsToNode::UnknownType)
  5.3007 -        cg_worklist.append(n->_idx); // Collect CG nodes
  5.3008 -      if (!_processed.test(n->_idx))
  5.3009 -        worklist.append(n->_idx); // Collect C/A/L/S nodes
  5.3010 -    }
  5.3011 -  }
  5.3012 -
  5.3013 -  // After IGVN user nodes may have smaller _idx than
  5.3014 -  // their inputs so they will be processed first in
  5.3015 -  // previous loop. Because of that not all Graph
  5.3016 -  // edges will be created. Walk over interesting
  5.3017 -  // nodes again until no new edges are created.
  5.3018 -  //
  5.3019 -  // Normally only 1-3 passes needed to build
  5.3020 -  // Connection Graph depending on graph complexity.
  5.3021 -  // Observed 8 passes in jvm2008 compiler.compiler.
  5.3022 -  // Set limit to 20 to catch situation when something
  5.3023 -  // did go wrong and recompile the method without EA.
  5.3024 -  // Also limit build time to 30 sec (60 in debug VM).
  5.3025 -
  5.3026 -#define CG_BUILD_ITER_LIMIT 20
  5.3027 -
  5.3028 -#ifdef ASSERT
  5.3029 -#define CG_BUILD_TIME_LIMIT 60.0
  5.3030 -#else
  5.3031 -#define CG_BUILD_TIME_LIMIT 30.0
  5.3032 -#endif
  5.3033 -
  5.3034 -  uint length = worklist.length();
  5.3035 -  int iterations = 0;
  5.3036 -  elapsedTimer time;
  5.3037 -  while(_progress &&
  5.3038 -        (iterations++   < CG_BUILD_ITER_LIMIT) &&
  5.3039 -        (time.seconds() < CG_BUILD_TIME_LIMIT)) {
  5.3040 -    time.start();
  5.3041 -    _progress = false;
  5.3042 -    for( uint next = 0; next < length; ++next ) {
  5.3043 -      int ni = worklist.at(next);
  5.3044 -      PointsToNode* ptn = ptnode_adr(ni);
  5.3045 -      Node* n = ptn->_node;
  5.3046 -      assert(n != NULL, "should be known node");
  5.3047 -      build_connection_graph(n, igvn);
  5.3048 -    }
  5.3049 -    time.stop();
  5.3050 -  }
  5.3051 -  if ((iterations     >= CG_BUILD_ITER_LIMIT) ||
  5.3052 -      (time.seconds() >= CG_BUILD_TIME_LIMIT)) {
  5.3053 -    assert(false, err_msg("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
  5.3054 -           time.seconds(), iterations, nodes_size(), length));
  5.3055 -    // Possible infinite build_connection_graph loop,
  5.3056 -    // bailout (no changes to ideal graph were made).
  5.3057 -    _collecting = false;
  5.3058 -    return false;
  5.3059 -  }
  5.3060 -#undef CG_BUILD_ITER_LIMIT
  5.3061 -#undef CG_BUILD_TIME_LIMIT
  5.3062 -
  5.3063 -  // 5. Propagate escaped states.
  5.3064 -  worklist.clear();
  5.3065 -
  5.3066 -  // mark all nodes reachable from GlobalEscape nodes
  5.3067 -  (void)propagate_escape_state(&cg_worklist, &worklist, PointsToNode::GlobalEscape);
  5.3068 -
  5.3069 -  // mark all nodes reachable from ArgEscape nodes
  5.3070 -  bool has_non_escaping_obj = propagate_escape_state(&cg_worklist, &worklist, PointsToNode::ArgEscape);
  5.3071 -
  5.3072 -  Arena* arena = Thread::current()->resource_area();
  5.3073 -  VectorSet visited(arena);
  5.3074 -
  5.3075 -  // 6. Find fields initializing values for not escaped allocations
  5.3076 -  uint alloc_length = alloc_worklist.length();
  5.3077 -  for (uint next = 0; next < alloc_length; ++next) {
  5.3078 -    Node* n = alloc_worklist.at(next);
  5.3079 -    PointsToNode::EscapeState es = ptnode_adr(n->_idx)->escape_state();
  5.3080 -    if (es == PointsToNode::NoEscape) {
  5.3081 -      has_non_escaping_obj = true;
  5.3082 -      if (n->is_Allocate()) {
  5.3083 -        find_init_values(n, &visited, igvn);
  5.3084 -        // The object allocated by this Allocate node will never be
  5.3085 -        // seen by an other thread. Mark it so that when it is
  5.3086 -        // expanded no MemBarStoreStore is added.
  5.3087 -        n->as_Allocate()->initialization()->set_does_not_escape();
  5.3088 -      }
  5.3089 -    } else if ((es == PointsToNode::ArgEscape) && n->is_Allocate()) {
  5.3090 -      // Same as above. Mark this Allocate node so that when it is
  5.3091 -      // expanded no MemBarStoreStore is added.
  5.3092 -      n->as_Allocate()->initialization()->set_does_not_escape();
  5.3093 -    }
  5.3094 -  }
  5.3095 -
  5.3096 -  uint cg_length = cg_worklist.length();
  5.3097 -
  5.3098 -  // Skip the rest of code if all objects escaped.
  5.3099 -  if (!has_non_escaping_obj) {
  5.3100 -    cg_length = 0;
  5.3101 -    addp_length = 0;
  5.3102 -  }
  5.3103 -
  5.3104 -  for (uint next = 0; next < cg_length; ++next) {
  5.3105 -    int ni = cg_worklist.at(next);
  5.3106 -    PointsToNode* ptn = ptnode_adr(ni);
  5.3107 -    PointsToNode::NodeType nt = ptn->node_type();
  5.3108 -    if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
  5.3109 -      if (ptn->edge_count() == 0) {
  5.3110 -        // No values were found. Assume the value was set
  5.3111 -        // outside this method - add edge to phantom object.
  5.3112 -        add_pointsto_edge(ni, _phantom_object);
  5.3113 -      }
  5.3114 -    }
  5.3115 -  }
  5.3116 -
  5.3117 -  // 7. Remove deferred edges from the graph.
  5.3118 -  for (uint next = 0; next < cg_length; ++next) {
  5.3119 -    int ni = cg_worklist.at(next);
  5.3120 -    PointsToNode* ptn = ptnode_adr(ni);
  5.3121 -    PointsToNode::NodeType nt = ptn->node_type();
  5.3122 -    if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
  5.3123 -      remove_deferred(ni, &worklist, &visited);
  5.3124 -    }
  5.3125 -  }
  5.3126 -
  5.3127 -  // 8. Adjust escape state of nonescaping objects.
  5.3128 -  for (uint next = 0; next < addp_length; ++next) {
  5.3129 -    Node* n = addp_worklist.at(next);
  5.3130 -    adjust_escape_state(n);
  5.3131 -  }
  5.3132 -
  5.3133 -  // push all NoEscape nodes on the worklist
  5.3134 -  worklist.clear();
  5.3135 -  for( uint next = 0; next < cg_length; ++next ) {
  5.3136 -    int nk = cg_worklist.at(next);
  5.3137 -    if (ptnode_adr(nk)->escape_state() == PointsToNode::NoEscape &&
  5.3138 -        !is_null_ptr(nk))
  5.3139 -      worklist.push(nk);
  5.3140 -  }
  5.3141 -
  5.3142 -  alloc_worklist.clear();
  5.3143 -  // Propagate scalar_replaceable value.
  5.3144 -  while(worklist.length() > 0) {
  5.3145 -    uint nk = worklist.pop();
  5.3146 -    PointsToNode* ptn = ptnode_adr(nk);
  5.3147 -    Node* n = ptn->_node;
  5.3148 -    bool scalar_replaceable = ptn->scalar_replaceable();
  5.3149 -    if (n->is_Allocate() && scalar_replaceable) {
  5.3150 -      // Push scalar replaceable allocations on alloc_worklist
  5.3151 -      // for processing in split_unique_types(). Note,
  5.3152 -      // following code may change scalar_replaceable value.
  5.3153 -      alloc_worklist.append(n);
  5.3154 -    }
  5.3155 -    uint e_cnt = ptn->edge_count();
  5.3156 -    for (uint ei = 0; ei < e_cnt; ei++) {
  5.3157 -      uint npi = ptn->edge_target(ei);
  5.3158 -      if (is_null_ptr(npi))
  5.3159 -        continue;
  5.3160 -      PointsToNode *np = ptnode_adr(npi);
  5.3161 -      if (np->escape_state() < PointsToNode::NoEscape) {
  5.3162 -        set_escape_state(npi, PointsToNode::NoEscape);
  5.3163 -        if (!scalar_replaceable) {
  5.3164 -          np->set_scalar_replaceable(false);
  5.3165 -        }
  5.3166 -        worklist.push(npi);
  5.3167 -      } else if (np->scalar_replaceable() && !scalar_replaceable) {
  5.3168 -        np->set_scalar_replaceable(false);
  5.3169 -        worklist.push(npi);
  5.3170 -      }
  5.3171 -    }
  5.3172 -  }
  5.3173 -
  5.3174 -  _collecting = false;
  5.3175 -  assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build");
  5.3176 -
  5.3177 -  assert(ptnode_adr(_oop_null)->escape_state() == PointsToNode::NoEscape &&
  5.3178 -         ptnode_adr(_oop_null)->edge_count() == 0, "sanity");
  5.3179 -  if (UseCompressedOops) {
  5.3180 -    assert(ptnode_adr(_noop_null)->escape_state() == PointsToNode::NoEscape &&
  5.3181 -           ptnode_adr(_noop_null)->edge_count() == 0, "sanity");
  5.3182 -  }
  5.3183 -
  5.3184 -  if (EliminateLocks && has_non_escaping_obj) {
  5.3185 -    // Mark locks before changing ideal graph.
  5.3186 -    int cnt = C->macro_count();
  5.3187 -    for( int i=0; i < cnt; i++ ) {
  5.3188 -      Node *n = C->macro_node(i);
  5.3189 -      if (n->is_AbstractLock()) { // Lock and Unlock nodes
  5.3190 -        AbstractLockNode* alock = n->as_AbstractLock();
  5.3191 -        if (!alock->is_non_esc_obj()) {
  5.3192 -          PointsToNode::EscapeState es = escape_state(alock->obj_node());
  5.3193 -          assert(es != PointsToNode::UnknownEscape, "should know");
  5.3194 -          if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
  5.3195 -            assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
  5.3196 -            // The lock could be marked eliminated by lock coarsening
  5.3197 -            // code during first IGVN before EA. Replace coarsened flag
  5.3198 -            // to eliminate all associated locks/unlocks.
  5.3199 -            alock->set_non_esc_obj();
  5.3200 -          }
  5.3201 -        }
  5.3202 -      }
  5.3203 -    }
  5.3204 -  }
  5.3205 -
  5.3206 -  if (OptimizePtrCompare && has_non_escaping_obj) {
  5.3207 -    // Add ConI(#CC_GT) and ConI(#CC_EQ).
  5.3208 -    _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
  5.3209 -    _pcmp_eq = igvn->makecon(TypeInt::CC_EQ);
  5.3210 -    // Optimize objects compare.
  5.3211 -    while (ptr_cmp_worklist.length() != 0) {
  5.3212 -      Node *n = ptr_cmp_worklist.pop();
  5.3213 -      Node *res = optimize_ptr_compare(n);
  5.3214 -      if (res != NULL) {
  5.3215 -#ifndef PRODUCT
  5.3216 -        if (PrintOptimizePtrCompare) {
  5.3217 -          tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
  5.3218 -          if (Verbose) {
  5.3219 -            n->dump(1);
  5.3220 -          }
  5.3221 -        }
  5.3222 -#endif
  5.3223 -        _igvn->replace_node(n, res);
  5.3224 -      }
  5.3225 -    }
  5.3226 -    // cleanup
  5.3227 -    if (_pcmp_neq->outcnt() == 0)
  5.3228 -      igvn->hash_delete(_pcmp_neq);
  5.3229 -    if (_pcmp_eq->outcnt()  == 0)
  5.3230 -      igvn->hash_delete(_pcmp_eq);
  5.3231 -  }
  5.3232 -
  5.3233 -  // For MemBarStoreStore nodes added in library_call.cpp, check
  5.3234 -  // escape status of associated AllocateNode and optimize out
  5.3235 -  // MemBarStoreStore node if the allocated object never escapes.
  5.3236 -  while (storestore_worklist.length() != 0) {
  5.3237 -    Node *n = storestore_worklist.pop();
  5.3238 -    MemBarStoreStoreNode *storestore = n ->as_MemBarStoreStore();
  5.3239 -    Node *alloc = storestore->in(MemBarNode::Precedent)->in(0);
  5.3240 -    assert (alloc->is_Allocate(), "storestore should point to AllocateNode");
  5.3241 -    PointsToNode::EscapeState es = ptnode_adr(alloc->_idx)->escape_state();
  5.3242 -    if (es == PointsToNode::NoEscape || es == PointsToNode::ArgEscape) {
  5.3243 -      MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
  5.3244 -      mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
  5.3245 -      mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
  5.3246 -
  5.3247 -      _igvn->register_new_node_with_optimizer(mb);
  5.3248 -      _igvn->replace_node(storestore, mb);
  5.3249 -    }
  5.3250 -  }
  5.3251 -
  5.3252 -#ifndef PRODUCT
  5.3253 -  if (PrintEscapeAnalysis) {
  5.3254 -    dump(); // Dump ConnectionGraph
  5.3255 -  }
  5.3256 -#endif
  5.3257 -
  5.3258 -  bool has_scalar_replaceable_candidates = false;
  5.3259 -  alloc_length = alloc_worklist.length();
  5.3260 -  for (uint next = 0; next < alloc_length; ++next) {
  5.3261 -    Node* n = alloc_worklist.at(next);
  5.3262 -    PointsToNode* ptn = ptnode_adr(n->_idx);
  5.3263 -    assert(ptn->escape_state() == PointsToNode::NoEscape, "sanity");
  5.3264 -    if (ptn->scalar_replaceable()) {
  5.3265 -      has_scalar_replaceable_candidates = true;
  5.3266 -      break;
  5.3267 -    }
  5.3268 -  }
  5.3269 -
  5.3270 -  if ( has_scalar_replaceable_candidates &&
  5.3271 -       C->AliasLevel() >= 3 && EliminateAllocations ) {
  5.3272 -
  5.3273 -    // Now use the escape information to create unique types for
  5.3274 -    // scalar replaceable objects.
  5.3275 -    split_unique_types(alloc_worklist);
  5.3276 -
  5.3277 -    if (C->failing())  return false;
  5.3278 -
  5.3279 -    C->print_method("After Escape Analysis", 2);
  5.3280 -
  5.3281 -#ifdef ASSERT
  5.3282 -  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
  5.3283 -    tty->print("=== No allocations eliminated for ");
  5.3284 -    C->method()->print_short_name();
  5.3285 -    if(!EliminateAllocations) {
  5.3286 -      tty->print(" since EliminateAllocations is off ===");
  5.3287 -    } else if(!has_scalar_replaceable_candidates) {
  5.3288 -      tty->print(" since there are no scalar replaceable candidates ===");
  5.3289 -    } else if(C->AliasLevel() < 3) {
  5.3290 -      tty->print(" since AliasLevel < 3 ===");
  5.3291 -    }
  5.3292 -    tty->cr();
  5.3293 -#endif
  5.3294 -  }
  5.3295 -  return has_non_escaping_obj;
  5.3296 -}
  5.3297 -
// Find fields initializing values for allocations.
//
// For a non-escaping Allocate node, walk all of its field (AddP) edges and
// make sure every oop field has a recorded initializing value: if the
// allocation's Initialize node did not capture a store to the field, add an
// edge to the NULL pointer node, since the Connection Graph does not
// otherwise record the default initialization by NULL.
//
//   alloc   - Allocate node; its PointsTo node must be NoEscape (asserted)
//   visited - scratch bit-set, Reset() here; marks field offsets already done
//   phase   - passed through to InitializeNode::find_captured_store()
void ConnectionGraph::find_init_values(Node* alloc, VectorSet* visited, PhaseTransform* phase) {
  assert(alloc->is_Allocate(), "Should be called for Allocate nodes only");
  PointsToNode* pta = ptnode_adr(alloc->_idx);
  assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
  InitializeNode* ini = alloc->as_Allocate()->initialization();

  Compile* C = _compile;
  visited->Reset();
  // Check if an oop field's initializing value is recorded and add
  // a corresponding NULL field's value if it is not recorded.
  // Connection Graph does not record a default initialization by NULL
  // captured by Initialize node.
  //
  uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
  uint ae_cnt = pta->edge_count();
  bool visited_bottom_offset = false;
  for (uint ei = 0; ei < ae_cnt; ei++) {
    uint nidx = pta->edge_target(ei); // Field (AddP)
    PointsToNode* ptn = ptnode_adr(nidx);
    assert(ptn->_node->is_AddP(), "Should be AddP nodes only");
    int offset = ptn->offset();
    if (offset == Type::OffsetBot) {
      // Unknown offset: a reference to some array element.
      // Handle the OffsetBot case only once per allocation.
      if (!visited_bottom_offset) {
        visited_bottom_offset = true;
        // Check only oop fields.
        const Type* adr_type = ptn->_node->as_AddP()->bottom_type();
        if (!adr_type->isa_aryptr() ||
            (adr_type->isa_aryptr()->klass() == NULL) ||
             adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
          // OffsetBot is used to reference array's element,
          // always add reference to NULL since we don't
          // know which element is referenced.
          add_edge_from_fields(alloc->_idx, null_idx, offset);
        }
      }
    } else if (offset != oopDesc::klass_offset_in_bytes() &&
               !visited->test_set(offset)) {

      // Check only oop fields.
      const Type* adr_type = ptn->_node->as_AddP()->bottom_type();
      BasicType basic_field_type = T_INT;
      if (adr_type->isa_instptr()) {
        ciField* field = C->alias_type(adr_type->isa_instptr())->field();
        if (field != NULL) {
          basic_field_type = field->layout_type();
        } else {
          // Ignore non field load (for example, klass load)
        }
      } else if (adr_type->isa_aryptr()) {
        if (offset != arrayOopDesc::length_offset_in_bytes()) {
          const Type* elemtype = adr_type->isa_aryptr()->elem();
          basic_field_type = elemtype->array_element_basic_type();
        } else {
          // Ignore array length load
        }
#ifdef ASSERT
      } else {
        // Raw pointers are used for initializing stores so skip it
        // since it should be recorded already
        Node* base = get_addp_base(ptn->_node);
        assert(adr_type->isa_rawptr() && base->is_Proj() &&
               (base->in(0) == alloc),"unexpected pointer type");
#endif
      }
      if (basic_field_type == T_OBJECT ||
          basic_field_type == T_NARROWOOP ||
          basic_field_type == T_ARRAY) {
        Node* value = NULL;
        if (ini != NULL) {
          // Look for a store to this field captured by the Initialize node.
          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
          Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
          if (store != NULL && store->is_Store()) {
            value = store->in(MemNode::ValueIn);
          } else {
            // There could be initializing stores which follow allocation.
            // For example, a volatile field store is not collected
            // by Initialize node.
            //
            // Need to check for dependent loads to separate such stores from
            // stores which follow loads. For now, add initial value NULL so
            // that compare pointers optimization works correctly.
          }
        }
        if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
          // A field's initializing value was not recorded. Add NULL.
          add_edge_from_fields(alloc->_idx, null_idx, offset);
        }
      }
    }
  }
}
  5.3390 -
  5.3391 -// Adjust escape state after Connection Graph is built.
  5.3392 -void ConnectionGraph::adjust_escape_state(Node* n) {
  5.3393 -  PointsToNode* ptn = ptnode_adr(n->_idx);
  5.3394 -  assert(n->is_AddP(), "Should be called for AddP nodes only");
  5.3395 -  // Search for objects which are not scalar replaceable
  5.3396 -  // and mark them to propagate the state to referenced objects.
  5.3397 -  //
  5.3398 -
  5.3399 -  int offset = ptn->offset();
  5.3400 -  Node* base = get_addp_base(n);
  5.3401 -  VectorSet* ptset = PointsTo(base);
  5.3402 -  int ptset_size = ptset->Size();
  5.3403 -
  5.3404 -  // An object is not scalar replaceable if the field which may point
  5.3405 -  // to it has unknown offset (unknown element of an array of objects).
  5.3406 -  //
  5.3407 -
  5.3408 -  if (offset == Type::OffsetBot) {
  5.3409 -    uint e_cnt = ptn->edge_count();
  5.3410 -    for (uint ei = 0; ei < e_cnt; ei++) {
  5.3411 -      uint npi = ptn->edge_target(ei);
  5.3412 -      ptnode_adr(npi)->set_scalar_replaceable(false);
  5.3413 -    }
  5.3414 -  }
  5.3415 -
  5.3416 -  // Currently an object is not scalar replaceable if a LoadStore node
  5.3417 -  // access its field since the field value is unknown after it.
  5.3418 -  //
  5.3419 -  bool has_LoadStore = false;
  5.3420 -  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  5.3421 -    Node *use = n->fast_out(i);
  5.3422 -    if (use->is_LoadStore()) {
  5.3423 -      has_LoadStore = true;
  5.3424 -      break;
  5.3425 -    }
  5.3426 -  }
  5.3427 -  // An object is not scalar replaceable if the address points
  5.3428 -  // to unknown field (unknown element for arrays, offset is OffsetBot).
  5.3429 -  //
  5.3430 -  // Or the address may point to more then one object. This may produce
  5.3431 -  // the false positive result (set not scalar replaceable)
  5.3432 -  // since the flow-insensitive escape analysis can't separate
  5.3433 -  // the case when stores overwrite the field's value from the case
  5.3434 -  // when stores happened on different control branches.
  5.3435 -  //
  5.3436 -  // Note: it will disable scalar replacement in some cases:
  5.3437 -  //
  5.3438 -  //    Point p[] = new Point[1];
  5.3439 -  //    p[0] = new Point(); // Will be not scalar replaced
  5.3440 -  //
  5.3441 -  // but it will save us from incorrect optimizations in next cases:
  5.3442 -  //
  5.3443 -  //    Point p[] = new Point[1];
  5.3444 -  //    if ( x ) p[0] = new Point(); // Will be not scalar replaced
  5.3445 -  //
  5.3446 -  if (ptset_size > 1 || ptset_size != 0 &&
  5.3447 -      (has_LoadStore || offset == Type::OffsetBot)) {
  5.3448 -    for( VectorSetI j(ptset); j.test(); ++j ) {
  5.3449 -      ptnode_adr(j.elem)->set_scalar_replaceable(false);
  5.3450 -    }
  5.3451 -  }
  5.3452 -}
  5.3453 -
  5.3454 -// Propagate escape states to referenced nodes.
  5.3455 -bool ConnectionGraph::propagate_escape_state(GrowableArray<int>* cg_worklist,
  5.3456 -                                             GrowableArray<uint>* worklist,
  5.3457 -                                             PointsToNode::EscapeState esc_state) {
  5.3458 -  bool has_java_obj = false;
  5.3459 -
  5.3460 -  // push all nodes with the same escape state on the worklist
  5.3461 -  uint cg_length = cg_worklist->length();
  5.3462 -  for (uint next = 0; next < cg_length; ++next) {
  5.3463 -    int nk = cg_worklist->at(next);
  5.3464 -    if (ptnode_adr(nk)->escape_state() == esc_state)
  5.3465 -      worklist->push(nk);
  5.3466 -  }
  5.3467 -  // mark all reachable nodes
  5.3468 -  while (worklist->length() > 0) {
  5.3469 -    int pt = worklist->pop();
  5.3470 -    PointsToNode* ptn = ptnode_adr(pt);
  5.3471 -    if (ptn->node_type() == PointsToNode::JavaObject &&
  5.3472 -        !is_null_ptr(pt)) {
  5.3473 -      has_java_obj = true;
  5.3474 -      if (esc_state > PointsToNode::NoEscape) {
  5.3475 -        // fields values are unknown if object escapes
  5.3476 -        add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
  5.3477 -      }
  5.3478 -    }
  5.3479 -    uint e_cnt = ptn->edge_count();
  5.3480 -    for (uint ei = 0; ei < e_cnt; ei++) {
  5.3481 -      uint npi = ptn->edge_target(ei);
  5.3482 -      if (is_null_ptr(npi))
  5.3483 -        continue;
  5.3484 -      PointsToNode *np = ptnode_adr(npi);
  5.3485 -      if (np->escape_state() < esc_state) {
  5.3486 -        set_escape_state(npi, esc_state);
  5.3487 -        worklist->push(npi);
  5.3488 -      }
  5.3489 -    }
  5.3490 -  }
  5.3491 -  // Has not escaping java objects
  5.3492 -  return has_java_obj && (esc_state < PointsToNode::GlobalEscape);
  5.3493 -}
  5.3494 -
  5.3495 -// Optimize objects compare.
  5.3496 -Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
  5.3497 -  assert(OptimizePtrCompare, "sanity");
  5.3498 -  // Clone returned Set since PointsTo() returns pointer
  5.3499 -  // to the same structure ConnectionGraph.pt_ptset.
  5.3500 -  VectorSet ptset1 = *PointsTo(n->in(1));
  5.3501 -  VectorSet ptset2 = *PointsTo(n->in(2));
  5.3502 -
  5.3503 -  // Check simple cases first.
  5.3504 -  if (ptset1.Size() == 1) {
  5.3505 -    uint pt1 = ptset1.getelem();
  5.3506 -    PointsToNode* ptn1 = ptnode_adr(pt1);
  5.3507 -    if (ptn1->escape_state() == PointsToNode::NoEscape) {
  5.3508 -      if (ptset2.Size() == 1 && ptset2.getelem() == pt1) {
  5.3509 -        // Comparing the same not escaping object.
  5.3510 -        return _pcmp_eq;
  5.3511 -      }
  5.3512 -      Node* obj = ptn1->_node;
  5.3513 -      // Comparing not escaping allocation.
  5.3514 -      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
  5.3515 -          !ptset2.test(pt1)) {
  5.3516 -        return _pcmp_neq; // This includes nullness check.
  5.3517 -      }
  5.3518 -    }
  5.3519 -  } else if (ptset2.Size() == 1) {
  5.3520 -    uint pt2 = ptset2.getelem();
  5.3521 -    PointsToNode* ptn2 = ptnode_adr(pt2);
  5.3522 -    if (ptn2->escape_state() == PointsToNode::NoEscape) {
  5.3523 -      Node* obj = ptn2->_node;
  5.3524 -      // Comparing not escaping allocation.
  5.3525 -      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
  5.3526 -          !ptset1.test(pt2)) {
  5.3527 -        return _pcmp_neq; // This includes nullness check.
  5.3528 -      }
  5.3529 -    }
  5.3530 -  }
  5.3531 -
  5.3532 -  if (!ptset1.disjoint(ptset2)) {
  5.3533 -    return NULL; // Sets are not disjoint
  5.3534 -  }
  5.3535 -
  5.3536 -  // Sets are disjoint.
  5.3537 -  bool set1_has_unknown_ptr = ptset1.test(_phantom_object) != 0;
  5.3538 -  bool set2_has_unknown_ptr = ptset2.test(_phantom_object) != 0;
  5.3539 -  bool set1_has_null_ptr   = (ptset1.test(_oop_null) | ptset1.test(_noop_null)) != 0;
  5.3540 -  bool set2_has_null_ptr   = (ptset2.test(_oop_null) | ptset2.test(_noop_null)) != 0;
  5.3541 -
  5.3542 -  if (set1_has_unknown_ptr && set2_has_null_ptr ||
  5.3543 -      set2_has_unknown_ptr && set1_has_null_ptr) {
  5.3544 -    // Check nullness of unknown object.
  5.3545 -    return NULL;
  5.3546 -  }
  5.3547 -
  5.3548 -  // Disjointness by itself is not sufficient since
  5.3549 -  // alias analysis is not complete for escaped objects.
  5.3550 -  // Disjoint sets are definitely unrelated only when
  5.3551 -  // at least one set has only not escaping objects.
  5.3552 -  if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
  5.3553 -    bool has_only_non_escaping_alloc = true;
  5.3554 -    for (VectorSetI i(&ptset1); i.test(); ++i) {
  5.3555 -      uint pt = i.elem;
  5.3556 -      PointsToNode* ptn = ptnode_adr(pt);
  5.3557 -      Node* obj = ptn->_node;
  5.3558 -      if (ptn->escape_state() != PointsToNode::NoEscape ||
  5.3559 -          !(obj->is_Allocate() || obj->is_CallStaticJava())) {
  5.3560 -        has_only_non_escaping_alloc = false;
  5.3561 -        break;
  5.3562 -      }
  5.3563 -    }
  5.3564 -    if (has_only_non_escaping_alloc) {
  5.3565 -      return _pcmp_neq;
  5.3566 -    }
  5.3567 -  }
  5.3568 -  if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
  5.3569 -    bool has_only_non_escaping_alloc = true;
  5.3570 -    for (VectorSetI i(&ptset2); i.test(); ++i) {
  5.3571 -      uint pt = i.elem;
  5.3572 -      PointsToNode* ptn = ptnode_adr(pt);
  5.3573 -      Node* obj = ptn->_node;
  5.3574 -      if (ptn->escape_state() != PointsToNode::NoEscape ||
  5.3575 -          !(obj->is_Allocate() || obj->is_CallStaticJava())) {
  5.3576 -        has_only_non_escaping_alloc = false;
  5.3577 -        break;
  5.3578 -      }
  5.3579 -    }
  5.3580 -    if (has_only_non_escaping_alloc) {
  5.3581 -      return _pcmp_neq;
  5.3582 -    }
  5.3583 -  }
  5.3584 -  return NULL;
  5.3585 -}
  5.3586 -
// Adjust escape states of the arguments of a call node.
//
// Dispatches on the call opcode:
//  - CallLeafNoFP / CallLeaf: runtime stub calls (incl. arraycopy) - pointer
//    arguments become ArgEscape; for arraycopy, destination fields reference
//    the unknown (phantom) object and source fields conservatively escape.
//  - CallStaticJava with bytecode escape info: per-argument states from the
//    BCEscapeAnalyzer.
//  - anything else: worst case, all oop arguments GlobalEscape.
void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {
    bool is_arraycopy = false;
    switch (call->Opcode()) {
#ifdef ASSERT
    case Op_Allocate:
    case Op_AllocateArray:
    case Op_Lock:
    case Op_Unlock:
      // These calls are handled elsewhere; reaching here is a bug.
      assert(false, "should be done already");
      break;
#endif
    case Op_CallLeafNoFP:
      // Recognize arraycopy stubs by name; then share the CallLeaf handling.
      is_arraycopy = (call->as_CallLeaf()->_name != NULL &&
                      strstr(call->as_CallLeaf()->_name, "arraycopy") != 0);
      // fall through
    case Op_CallLeaf:
    {
      // Stub calls, objects do not escape but they are not scale replaceable.
      // Adjust escape state for outgoing arguments.
      const TypeTuple * d = call->tf()->domain();
      bool src_has_oops = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        Node *arg = call->in(i)->uncast();
        const Type *aat = phase->type(arg);
        PointsToNode::EscapeState arg_esc = ptnode_adr(arg->_idx)->escape_state();
        if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr() &&
            (is_arraycopy || arg_esc < PointsToNode::ArgEscape)) {
#ifdef ASSERT
          // Only arraycopy and G1 write-barrier stubs are expected here.
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != NULL, "expecting an Ptr");
          if (!(is_arraycopy ||
                call->as_CallLeaf()->_name != NULL &&
                (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre")  == 0 ||
                 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ))
          ) {
            call->dump();
            assert(false, "EA: unexpected CallLeaf");
          }
#endif
          if (arg_esc < PointsToNode::ArgEscape) {
            set_escape_state(arg->_idx, PointsToNode::ArgEscape);
            Node* arg_base = arg;
            if (arg->is_AddP()) {
              //
              // The inline_native_clone() case when the arraycopy stub is called
              // after the allocation before Initialize and CheckCastPP nodes.
              // Or normal arraycopy for object arrays case.
              //
              // Set AddP's base (Allocate) as not scalar replaceable since
              // pointer to the base (with offset) is passed as argument.
              //
              arg_base = get_addp_base(arg);
              set_escape_state(arg_base->_idx, PointsToNode::ArgEscape);
            }
          }

          // Does this argument carry oops (object array or instance)?
          bool arg_has_oops = aat->isa_oopptr() &&
                              (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
                               (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
          if (i == TypeFunc::Parms) {
            src_has_oops = arg_has_oops;
          }
          //
          // src or dst could be j.l.Object when other is basic type array:
          //
          //   arraycopy(char[],0,Object*,0,size);
          //   arraycopy(Object*,0,char[],0,size);
          //
          // Do nothing special in such cases.
          //
          if (is_arraycopy && (i > TypeFunc::Parms) &&
              src_has_oops && arg_has_oops) {
            // Destination object's fields reference an unknown object.
            Node* arg_base = arg;
            if (arg->is_AddP()) {
              arg_base = get_addp_base(arg);
            }
            for (VectorSetI s(PointsTo(arg_base)); s.test(); ++s) {
              uint ps = s.elem;
              set_escape_state(ps, PointsToNode::ArgEscape);
              add_edge_from_fields(ps, _phantom_object, Type::OffsetBot);
            }
            // Conservatively all values in source object fields globally escape
            // since we don't know if values in destination object fields
            // escape (it could be traced but it is too expensive).
            Node* src = call->in(TypeFunc::Parms)->uncast();
            Node* src_base = src;
            if (src->is_AddP()) {
              src_base  = get_addp_base(src);
            }
            for (VectorSetI s(PointsTo(src_base)); s.test(); ++s) {
              uint ps = s.elem;
              set_escape_state(ps, PointsToNode::ArgEscape);
              // Use OffsetTop to indicate fields global escape.
              add_edge_from_fields(ps, _phantom_object, Type::OffsetTop);
            }
          }
        }
      }
      break;
    }

    case Op_CallStaticJava:
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record the call's escape affects
    {
      ciMethod *meth = call->as_CallJava()->method();
      BCEscapeAnalyzer *call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
      // fall-through if not a Java method or no analyzer information
      if (call_analyzer != NULL) {
        const TypeTuple * d = call->tf()->domain();
        bool copy_dependencies = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          int k = i - TypeFunc::Parms;
          Node *arg = call->in(i)->uncast();

          if (at->isa_oopptr() != NULL &&
              ptnode_adr(arg->_idx)->escape_state() < PointsToNode::GlobalEscape) {

            bool global_escapes = false;
            bool fields_escapes = false;
            if (!call_analyzer->is_arg_stack(k)) {
              // The argument global escapes, mark everything it could point to
              set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
              global_escapes = true;
            } else {
              if (!call_analyzer->is_arg_local(k)) {
                // The argument itself doesn't escape, but any fields might
                fields_escapes = true;
              }
              set_escape_state(arg->_idx, PointsToNode::ArgEscape);
              copy_dependencies = true;
            }

            // Propagate the chosen state to everything the argument points to.
            for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
              uint pt = j.elem;
              if (global_escapes) {
                // The argument global escapes, mark everything it could point to
                set_escape_state(pt, PointsToNode::GlobalEscape);
                add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
              } else {
                set_escape_state(pt, PointsToNode::ArgEscape);
                if (fields_escapes) {
                  // The argument itself doesn't escape, but any fields might.
                  // Use OffsetTop to indicate such case.
                  add_edge_from_fields(pt, _phantom_object, Type::OffsetTop);
                }
              }
            }
          }
        }
        // The escape analysis result depends on the callee's bytecode;
        // register that dependency with the compilation.
        if (copy_dependencies)
          call_analyzer->copy_dependencies(_compile->dependencies());
        break;
      }
    }

    default:
    // Fall-through here if not a Java method or no analyzer information
    // or some other type of call, assume the worst case: all arguments
    // globally escape.
    {
      // adjust escape state for  outgoing arguments
      const TypeTuple * d = call->tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != NULL) {
          Node *arg = call->in(i)->uncast();
          set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
          for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
            uint pt = j.elem;
            set_escape_state(pt, PointsToNode::GlobalEscape);
            add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
          }
        }
      }
    }
  }
}
// Record the escape state of a call's returned value (the result
// projection 'resproj'):
//  - Allocate/AllocateArray: NoEscape unless the klass forces escape
//    (Thread subclass, finalizer, non-instance/non-array under
//    StressReflectiveCode); arrays with non-constant or too-large length
//    are additionally marked not scalar replaceable.
//  - CallStaticJava: use the BCEscapeAnalyzer to model the return value,
//    possibly linking it to returned arguments.
//  - anything else: worst case, the returned pointer globally escapes.
// Marks resproj as processed when its state is fully determined.
void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *phase) {
  CallNode   *call = resproj->in(0)->as_Call();
  uint    call_idx = call->_idx;
  uint resproj_idx = resproj->_idx;

  switch (call->Opcode()) {
    case Op_Allocate:
    {
      Node *k = call->in(AllocateNode::KlassNode);
      const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
      assert(kt != NULL, "TypeKlassPtr  required.");
      ciKlass* cik = kt->klass();

      PointsToNode::EscapeState es;
      uint edge_to;
      // Thread subclasses and finalizable objects escape by construction.
      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
         !cik->is_instance_klass() || // StressReflectiveCode
          cik->as_instance_klass()->has_finalizer()) {
        es = PointsToNode::GlobalEscape;
        edge_to = _phantom_object; // Could not be worse
      } else {
        es = PointsToNode::NoEscape;
        edge_to = call_idx;
        assert(ptnode_adr(call_idx)->scalar_replaceable(), "sanity");
      }
      set_escape_state(call_idx, es);
      add_pointsto_edge(resproj_idx, edge_to);
      _processed.set(resproj_idx);
      break;
    }

    case Op_AllocateArray:
    {

      Node *k = call->in(AllocateNode::KlassNode);
      const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
      assert(kt != NULL, "TypeKlassPtr  required.");
      ciKlass* cik = kt->klass();

      PointsToNode::EscapeState es;
      uint edge_to;
      if (!cik->is_array_klass()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
        edge_to = _phantom_object;
      } else {
        es = PointsToNode::NoEscape;
        edge_to = call_idx;
        assert(ptnode_adr(call_idx)->scalar_replaceable(), "sanity");
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is not constant or too big.
          ptnode_adr(call_idx)->set_scalar_replaceable(false);
        }
      }
      set_escape_state(call_idx, es);
      add_pointsto_edge(resproj_idx, edge_to);
      _processed.set(resproj_idx);
      break;
    }

    case Op_CallStaticJava:
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes
    {
      bool done = true;
      const TypeTuple *r = call->tf()->range();
      const Type* ret_type = NULL;

      if (r->cnt() > TypeFunc::Parms)
        ret_type = r->field_at(TypeFunc::Parms);

      // Note:  we use isa_ptr() instead of isa_oopptr()  here because the
      //        _multianewarray functions return a TypeRawPtr.
      if (ret_type == NULL || ret_type->isa_ptr() == NULL) {
        _processed.set(resproj_idx);
        break;  // doesn't return a pointer type
      }
      ciMethod *meth = call->as_CallJava()->method();
      const TypeTuple * d = call->tf()->domain();
      if (meth == NULL) {
        // not a Java method, assume global escape
        set_escape_state(call_idx, PointsToNode::GlobalEscape);
        add_pointsto_edge(resproj_idx, _phantom_object);
      } else {
        BCEscapeAnalyzer *call_analyzer = meth->get_bcea();
        bool copy_dependencies = false;

        if (call_analyzer->is_return_allocated()) {
          // Returns a newly allocated unescaped object, simply
          // update dependency information.
          // Mark it as NoEscape so that objects referenced by
          // it's fields will be marked as NoEscape at least.
          set_escape_state(call_idx, PointsToNode::NoEscape);
          ptnode_adr(call_idx)->set_scalar_replaceable(false);
          // Fields values are unknown
          add_edge_from_fields(call_idx, _phantom_object, Type::OffsetBot);
          add_pointsto_edge(resproj_idx, call_idx);
          copy_dependencies = true;
        } else {
          // determine whether any arguments are returned
          set_escape_state(call_idx, PointsToNode::ArgEscape);
          bool ret_arg = false;
          for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
            const Type* at = d->field_at(i);
            if (at->isa_oopptr() != NULL) {
              Node *arg = call->in(i)->uncast();

              if (call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
                ret_arg = true;
                PointsToNode *arg_esp = ptnode_adr(arg->_idx);
                // A not-yet-typed argument means we must revisit this call.
                if (arg_esp->node_type() == PointsToNode::UnknownType)
                  done = false;
                else if (arg_esp->node_type() == PointsToNode::JavaObject)
                  add_pointsto_edge(resproj_idx, arg->_idx);
                else
                  add_deferred_edge(resproj_idx, arg->_idx);
              }
            }
          }
          if (done) {
            copy_dependencies = true;
            // is_return_local() is true when only arguments are returned.
            if (!ret_arg || !call_analyzer->is_return_local()) {
              // Returns unknown object.
              add_pointsto_edge(resproj_idx, _phantom_object);
            }
          }
        }
        // Escape info came from the callee's bytecode; record the dependency.
        if (copy_dependencies)
          call_analyzer->copy_dependencies(_compile->dependencies());
      }
      if (done)
        _processed.set(resproj_idx);
      break;
    }

    default:
    // Some other type of call, assume the worst case that the
    // returned value, if any, globally escapes.
    {
      const TypeTuple *r = call->tf()->range();
      if (r->cnt() > TypeFunc::Parms) {
        const Type* ret_type = r->field_at(TypeFunc::Parms);

        // Note:  we use isa_ptr() instead of isa_oopptr()  here because the
        //        _multianewarray functions return a TypeRawPtr.
        if (ret_type->isa_ptr() != NULL) {
          set_escape_state(call_idx, PointsToNode::GlobalEscape);
          add_pointsto_edge(resproj_idx, _phantom_object);
        }
      }
      _processed.set(resproj_idx);
    }
  }
}
  5.3923 -
  5.3924 -// Populate Connection Graph with Ideal nodes and create simple
  5.3925 -// connection graph edges (do not need to check the node_type of inputs
  5.3926 -// or to call PointsTo() to walk the connection graph).
  5.3927 -void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase) {
  5.3928 -  if (_processed.test(n->_idx))
  5.3929 -    return; // No need to redefine node's state.
  5.3930 -
  5.3931 -  if (n->is_Call()) {
  5.3932 -    // Arguments to allocation and locking don't escape.
  5.3933 -    if (n->is_Allocate()) {
  5.3934 -      add_node(n, PointsToNode::JavaObject, PointsToNode::UnknownEscape, true);
  5.3935 -      record_for_optimizer(n);
  5.3936 -    } else if (n->is_Lock() || n->is_Unlock()) {
  5.3937 -      // Put Lock and Unlock nodes on IGVN worklist to process them during
  5.3938 -      // the first IGVN optimization when escape information is still available.
  5.3939 -      record_for_optimizer(n);
  5.3940 -      _processed.set(n->_idx);
  5.3941 -    } else {
  5.3942 -      // Don't mark as processed since call's arguments have to be processed.
  5.3943 -      PointsToNode::NodeType nt = PointsToNode::UnknownType;
  5.3944 -      PointsToNode::EscapeState es = PointsToNode::UnknownEscape;
  5.3945 -
  5.3946 -      // Check if a call returns an object.
  5.3947 -      const TypeTuple *r = n->as_Call()->tf()->range();
  5.3948 -      if (r->cnt() > TypeFunc::Parms &&
  5.3949 -          r->field_at(TypeFunc::Parms)->isa_ptr() &&
  5.3950 -          n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
  5.3951 -        nt = PointsToNode::JavaObject;
  5.3952 -        if (!n->is_CallStaticJava()) {
  5.3953 -          // Since the called mathod is statically unknown assume
  5.3954 -          // the worst case that the returned value globally escapes.
  5.3955 -          es = PointsToNode::GlobalEscape;
  5.3956 -        }
  5.3957 -      }
  5.3958 -      add_node(n, nt, es, false);
  5.3959 -    }
  5.3960 -    return;
  5.3961 -  }
  5.3962 -
  5.3963 -  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  5.3964 -  // ThreadLocal has RawPrt type.
  5.3965 -  switch (n->Opcode()) {
  5.3966 -    case Op_AddP:
  5.3967 -    {
  5.3968 -      add_node(n, PointsToNode::Field, PointsToNode::UnknownEscape, false);
  5.3969 -      break;
  5.3970 -    }
  5.3971 -    case Op_CastX2P:
  5.3972 -    { // "Unsafe" memory access.
  5.3973 -      add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
  5.3974 -      break;
  5.3975 -    }
  5.3976 -    case Op_CastPP:
  5.3977 -    case Op_CheckCastPP:
  5.3978 -    case Op_EncodeP:
  5.3979 -    case Op_DecodeN:
  5.3980 -    {
  5.3981 -      add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  5.3982 -      int ti = n->in(1)->_idx;
  5.3983 -      PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
  5.3984 -      if (nt == PointsToNode::UnknownType) {
  5.3985 -        _delayed_worklist.push(n); // Process it later.
  5.3986 -        break;
  5.3987 -      } else if (nt == PointsToNode::JavaObject) {
  5.3988 -        add_pointsto_edge(n->_idx, ti);
  5.3989 -      } else {
  5.3990 -        add_deferred_edge(n->_idx, ti);
  5.3991 -      }
  5.3992 -      _processed.set(n->_idx);
  5.3993 -      break;
  5.3994 -    }
  5.3995 -    case Op_ConP:
  5.3996 -    {
  5.3997 -      // assume all pointer constants globally escape except for null
  5.3998 -      PointsToNode::EscapeState es;
  5.3999 -      if (phase->type(n) == TypePtr::NULL_PTR)
  5.4000 -        es = PointsToNode::NoEscape;
  5.4001 -      else
  5.4002 -        es = PointsToNode::GlobalEscape;
  5.4003 -
  5.4004 -      add_node(n, PointsToNode::JavaObject, es, true);
  5.4005 -      break;
  5.4006 -    }
  5.4007 -    case Op_ConN:
  5.4008 -    {
  5.4009 -      // assume all narrow oop constants globally escape except for null
  5.4010 -      PointsToNode::EscapeState es;
  5.4011 -      if (phase->type(n) == TypeNarrowOop::NULL_PTR)
  5.4012 -        es = PointsToNode::NoEscape;
  5.4013 -      else
  5.4014 -        es = PointsToNode::GlobalEscape;
  5.4015 -
  5.4016 -      add_node(n, PointsToNode::JavaObject, es, true);
  5.4017 -      break;
  5.4018 -    }
  5.4019 -    case Op_CreateEx:
  5.4020 -    {
  5.4021 -      // assume that all exception objects globally escape
  5.4022 -      add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
  5.4023 -      break;
  5.4024 -    }
  5.4025 -    case Op_LoadKlass:
  5.4026 -    case Op_LoadNKlass:
  5.4027 -    {
  5.4028 -      add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
  5.4029 -      break;
  5.4030 -    }
  5.4031 -    case Op_LoadP:
  5.4032 -    case Op_LoadN:
  5.4033 -    {
  5.4034 -      const Type *t = phase->type(n);
  5.4035 -      if (t->make_ptr() == NULL) {
  5.4036 -        _processed.set(n->_idx);
  5.4037 -        return;
  5.4038 -      }
  5.4039 -      add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  5.4040 -      break;
  5.4041 -    }
  5.4042 -    case Op_Parm:
  5.4043 -    {
  5.4044 -      _processed.set(n->_idx); // No need to redefine it state.
  5.4045 -      uint con = n->as_Proj()->_con;
  5.4046 -      if (con < TypeFunc::Parms)
  5.4047 -        return;
  5.4048 -      const Type *t = n->in(0)->as_Start()->_domain->field_at(con);
  5.4049 -      if (t->isa_ptr() == NULL)
  5.4050 -        return;
  5.4051 -      // We have to assume all input parameters globally escape
  5.4052 -      // (Note: passing 'false' since _processed is already set).
  5.4053 -      add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, false);
  5.4054 -      break;
  5.4055 -    }
  5.4056 -    case Op_PartialSubtypeCheck:
  5.4057 -    { // Produces Null or notNull and is used in CmpP.
  5.4058 -      add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
  5.4059 -      break;
  5.4060 -    }
  5.4061 -    case Op_Phi:
  5.4062 -    {
  5.4063 -      const Type *t = n->as_Phi()->type();
  5.4064 -      if (t->make_ptr() == NULL) {
  5.4065 -        // nothing to do if not an oop or narrow oop
  5.4066 -        _processed.set(n->_idx);
  5.4067 -        return;
  5.4068 -      }
  5.4069 -      add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  5.4070 -      uint i;
  5.4071 -      for (i = 1; i < n->req() ; i++) {
  5.4072 -        Node* in = n->in(i);
  5.4073 -        if (in == NULL)
  5.4074 -          continue;  // ignore NULL
  5.4075 -        in = in->uncast();
  5.4076 -        if (in->is_top() || in == n)
  5.4077 -          continue;  // ignore top or inputs which go back this node
  5.4078 -        int ti = in->_idx;
  5.4079 -        PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
  5.4080 -        if (nt == PointsToNode::UnknownType) {
  5.4081 -          break;
  5.4082 -        } else if (nt == PointsToNode::JavaObject) {
  5.4083 -          add_pointsto_edge(n->_idx, ti);
  5.4084 -        } else {
  5.4085 -          add_deferred_edge(n->_idx, ti);
  5.4086 -        }
  5.4087 -      }
  5.4088 -      if (i >= n->req())
  5.4089 -        _processed.set(n->_idx);
  5.4090 -      else
  5.4091 -        _delayed_worklist.push(n);
  5.4092 -      break;
  5.4093 -    }
  5.4094 -    case Op_Proj:
  5.4095 -    {
  5.4096 -      // we are only interested in the oop result projection from a call
  5.4097 -      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
  5.4098 -        const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
  5.4099 -        assert(r->cnt() > TypeFunc::Parms, "sanity");
  5.4100 -        if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
  5.4101 -          add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  5.4102 -          int ti = n->in(0)->_idx;
  5.4103 -          // The call may not be registered yet (since not all its inputs are registered)
  5.4104 -          // if this is the projection from backbranch edge of Phi.
  5.4105 -          if (ptnode_adr(ti)->node_type() != PointsToNode::UnknownType) {
  5.4106 -            process_call_result(n->as_Proj(), phase);
  5.4107 -          }
  5.4108 -          if (!_processed.test(n->_idx)) {
  5.4109 -            // The call's result may need to be processed later if the call
  5.4110 -            // returns it's argument and the argument is not processed yet.
  5.4111 -            _delayed_worklist.push(n);
  5.4112 -          }
  5.4113 -          break;
  5.4114 -        }
  5.4115 -      }
  5.4116 -      _processed.set(n->_idx);
  5.4117 -      break;
  5.4118 -    }
  5.4119 -    case Op_Return:
  5.4120 -    {
  5.4121 -      if( n->req() > TypeFunc::Parms &&
  5.4122 -          phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
  5.4123 -        // Treat Return value as LocalVar with GlobalEscape escape state.
  5.4124 -        add_node(n, PointsToNode::LocalVar, PointsToNode::GlobalEscape, false);
  5.4125 -        int ti = n->in(TypeFunc::Parms)->_idx;
  5.4126 -        PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
  5.4127 -        if (nt == PointsToNode::UnknownType) {
  5.4128 -          _delayed_worklist.push(n); // Process it later.
  5.4129 -          break;
  5.4130 -        } else if (nt == PointsToNode::JavaObject) {
  5.4131 -          add_pointsto_edge(n->_idx, ti);
  5.4132 -        } else {
  5.4133 -          add_deferred_edge(n->_idx, ti);
  5.4134 -        }
  5.4135 -      }
  5.4136 -      _processed.set(n->_idx);
  5.4137 -      break;
  5.4138 -    }
  5.4139 -    case Op_StoreP:
  5.4140 -    case Op_StoreN:
  5.4141 -    {
  5.4142 -      const Type *adr_type = phase->type(n->in(MemNode::Address));
  5.4143 -      adr_type = adr_type->make_ptr();
  5.4144 -      if (adr_type->isa_oopptr()) {
  5.4145 -        add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
  5.4146 -      } else {
  5.4147 -        Node* adr = n->in(MemNode::Address);
  5.4148 -        if (adr->is_AddP() && phase->type(adr) == TypeRawPtr::NOTNULL &&
  5.4149 -            adr->in(AddPNode::Address)->is_Proj() &&
  5.4150 -            adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
  5.4151 -          add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
  5.4152 -          // We are computing a raw address for a store captured
  5.4153 -          // by an Initialize compute an appropriate address type.
  5.4154 -          int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
  5.4155 -          assert(offs != Type::OffsetBot, "offset must be a constant");
  5.4156 -        } else {
  5.4157 -          _processed.set(n->_idx);
  5.4158 -          return;
  5.4159 -        }
  5.4160 -      }
  5.4161 -      break;
  5.4162 -    }
  5.4163 -    case Op_StorePConditional:
  5.4164 -    case Op_CompareAndSwapP:
  5.4165 -    case Op_CompareAndSwapN:
  5.4166 -    {
  5.4167 -      const Type *adr_type = phase->type(n->in(MemNode::Address));
  5.4168 -      adr_type = adr_type->make_ptr();
  5.4169 -      if (adr_type->isa_oopptr()) {
  5.4170 -        add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
  5.4171 -      } else {
  5.4172 -        _processed.set(n->_idx);
  5.4173 -        return;
  5.4174 -      }
  5.4175 -      break;
  5.4176 -    }
  5.4177 -    case Op_AryEq:
  5.4178 -    case Op_StrComp:
  5.4179 -    case Op_StrEquals:
  5.4180 -    case Op_StrIndexOf:
  5.4181 -    {
  5.4182 -      // char[] arrays passed to string intrinsics are not scalar replaceable.
  5.4183 -      add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
  5.4184 -      break;
  5.4185 -    }
  5.4186 -    case Op_ThreadLocal:
  5.4187 -    {
  5.4188 -      add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
  5.4189 -      break;
  5.4190 -    }
  5.4191 -    default:
  5.4192 -      ;
  5.4193 -      // nothing to do
  5.4194 -  }
  5.4195 -  return;
  5.4196 -}
  5.4197 -
  5.4198 -void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
  5.4199 -  uint n_idx = n->_idx;
  5.4200 -  assert(ptnode_adr(n_idx)->_node != NULL, "node should be registered");
  5.4201 -
  5.4202 -  // Don't set processed bit for AddP, LoadP, StoreP since
  5.4203 -  // they may need more then one pass to process.
  5.4204 -  // Also don't mark as processed Call nodes since their
  5.4205 -  // arguments may need more then one pass to process.
  5.4206 -  if (_processed.test(n_idx))
  5.4207 -    return; // No need to redefine node's state.
  5.4208 -
  5.4209 -  if (n->is_Call()) {
  5.4210 -    CallNode *call = n->as_Call();
  5.4211 -    process_call_arguments(call, phase);
  5.4212 -    return;
  5.4213 -  }
  5.4214 -
  5.4215 -  switch (n->Opcode()) {
  5.4216 -    case Op_AddP:
  5.4217 -    {
  5.4218 -      Node *base = get_addp_base(n);
  5.4219 -      int offset = address_offset(n, phase);
  5.4220 -      // Create a field edge to this node from everything base could point to.
  5.4221 -      for( VectorSetI i(PointsTo(base)); i.test(); ++i ) {
  5.4222 -        uint pt = i.elem;
  5.4223 -        add_field_edge(pt, n_idx, offset);
  5.4224 -      }
  5.4225 -      break;
  5.4226 -    }
  5.4227 -    case Op_CastX2P:
  5.4228 -    {
  5.4229 -      assert(false, "Op_CastX2P");
  5.4230 -      break;
  5.4231 -    }
  5.4232 -    case Op_CastPP:
  5.4233 -    case Op_CheckCastPP:
  5.4234 -    case Op_EncodeP:
  5.4235 -    case Op_DecodeN:
  5.4236 -    {
  5.4237 -      int ti = n->in(1)->_idx;
  5.4238 -      assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "all nodes should be registered");
  5.4239 -      if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
  5.4240 -        add_pointsto_edge(n_idx, ti);
  5.4241 -      } else {
  5.4242 -        add_deferred_edge(n_idx, ti);
  5.4243 -      }
  5.4244 -      _processed.set(n_idx);
  5.4245 -      break;
  5.4246 -    }
  5.4247 -    case Op_ConP:
  5.4248 -    {
  5.4249 -      assert(false, "Op_ConP");
  5.4250 -      break;
  5.4251 -    }
  5.4252 -    case Op_ConN:
  5.4253 -    {
  5.4254 -      assert(false, "Op_ConN");
  5.4255 -      break;
  5.4256 -    }
  5.4257 -    case Op_CreateEx:
  5.4258 -    {
  5.4259 -      assert(false, "Op_CreateEx");
  5.4260 -      break;
  5.4261 -    }
  5.4262 -    case Op_LoadKlass:
  5.4263 -    case Op_LoadNKlass:
  5.4264 -    {
  5.4265 -      assert(false, "Op_LoadKlass");
  5.4266 -      break;
  5.4267 -    }
  5.4268 -    case Op_LoadP:
  5.4269 -    case Op_LoadN:
  5.4270 -    {
  5.4271 -      const Type *t = phase->type(n);
  5.4272 -#ifdef ASSERT
  5.4273 -      if (t->make_ptr() == NULL)
  5.4274 -        assert(false, "Op_LoadP");
  5.4275 -#endif
  5.4276 -
  5.4277 -      Node* adr = n->in(MemNode::Address)->uncast();
  5.4278 -      Node* adr_base;
  5.4279 -      if (adr->is_AddP()) {
  5.4280 -        adr_base = get_addp_base(adr);
  5.4281 -      } else {
  5.4282 -        adr_base = adr;
  5.4283 -      }
  5.4284 -
  5.4285 -      // For everything "adr_base" could point to, create a deferred edge from
  5.4286 -      // this node to each field with the same offset.
  5.4287 -      int offset = address_offset(adr, phase);
  5.4288 -      for( VectorSetI i(PointsTo(adr_base)); i.test(); ++i ) {
  5.4289 -        uint pt = i.elem;
  5.4290 -        if (adr->is_AddP()) {
  5.4291 -          // Add field edge if it is missing.
  5.4292 -          add_field_edge(pt, adr->_idx, offset);
  5.4293 -        }
  5.4294 -        add_deferred_edge_to_fields(n_idx, pt, offset);
  5.4295 -      }
  5.4296 -      break;
  5.4297 -    }
  5.4298 -    case Op_Parm:
  5.4299 -    {
  5.4300 -      assert(false, "Op_Parm");
  5.4301 -      break;
  5.4302 -    }
  5.4303 -    case Op_PartialSubtypeCheck:
  5.4304 -    {
  5.4305 -      assert(false, "Op_PartialSubtypeCheck");
  5.4306 -      break;
  5.4307 -    }
  5.4308 -    case Op_Phi:
  5.4309 -    {
  5.4310 -#ifdef ASSERT
  5.4311 -      const Type *t = n->as_Phi()->type();
  5.4312 -      if (t->make_ptr() == NULL)
  5.4313 -        assert(false, "Op_Phi");
  5.4314 -#endif
  5.4315 -      for (uint i = 1; i < n->req() ; i++) {
  5.4316 -        Node* in = n->in(i);
  5.4317 -        if (in == NULL)
  5.4318 -          continue;  // ignore NULL
  5.4319 -        in = in->uncast();
  5.4320 -        if (in->is_top() || in == n)
  5.4321 -          continue;  // ignore top or inputs which go back this node
  5.4322 -        int ti = in->_idx;
  5.4323 -        PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
  5.4324 -        assert(nt != PointsToNode::UnknownType, "all nodes should be known");
  5.4325 -        if (nt == PointsToNode::JavaObject) {
  5.4326 -          add_pointsto_edge(n_idx, ti);
  5.4327 -        } else {
  5.4328 -          add_deferred_edge(n_idx, ti);
  5.4329 -        }
  5.4330 -      }
  5.4331 -      _processed.set(n_idx);
  5.4332 -      break;
  5.4333 -    }
  5.4334 -    case Op_Proj:
  5.4335 -    {
  5.4336 -      // we are only interested in the oop result projection from a call
  5.4337 -      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
  5.4338 -        assert(ptnode_adr(n->in(0)->_idx)->node_type() != PointsToNode::UnknownType,
  5.4339 -               "all nodes should be registered");
  5.4340 -        const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
  5.4341 -        assert(r->cnt() > TypeFunc::Parms, "sanity");
  5.4342 -        if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
  5.4343 -          process_call_result(n->as_Proj(), phase);
  5.4344 -          assert(_processed.test(n_idx), "all call results should be processed");
  5.4345 -          break;
  5.4346 -        }
  5.4347 -      }
  5.4348 -      assert(false, "Op_Proj");
  5.4349 -      break;
  5.4350 -    }
  5.4351 -    case Op_Return:
  5.4352 -    {
  5.4353 -#ifdef ASSERT
  5.4354 -      if( n->req() <= TypeFunc::Parms ||
  5.4355 -          !phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
  5.4356 -        assert(false, "Op_Return");
  5.4357 -      }
  5.4358 -#endif
  5.4359 -      int ti = n->in(TypeFunc::Parms)->_idx;
  5.4360 -      assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "node should be registered");
  5.4361 -      if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
  5.4362 -        add_pointsto_edge(n_idx, ti);
  5.4363 -      } else {
  5.4364 -        add_deferred_edge(n_idx, ti);
  5.4365 -      }
  5.4366 -      _processed.set(n_idx);
  5.4367 -      break;
  5.4368 -    }
  5.4369 -    case Op_StoreP:
  5.4370 -    case Op_StoreN:
  5.4371 -    case Op_StorePConditional:
  5.4372 -    case Op_CompareAndSwapP:
  5.4373 -    case Op_CompareAndSwapN:
  5.4374 -    {
  5.4375 -      Node *adr = n->in(MemNode::Address);
  5.4376 -      const Type *adr_type = phase->type(adr)->make_ptr();
  5.4377 -#ifdef ASSERT
  5.4378 -      if (!adr_type->isa_oopptr())
  5.4379 -        assert(phase->type(adr) == TypeRawPtr::NOTNULL, "Op_StoreP");
  5.4380 -#endif
  5.4381 -
  5.4382 -      assert(adr->is_AddP(), "expecting an AddP");
  5.4383 -      Node *adr_base = get_addp_base(adr);
  5.4384 -      Node *val = n->in(MemNode::ValueIn)->uncast();
  5.4385 -      int offset = address_offset(adr, phase);
  5.4386 -      // For everything "adr_base" could point to, create a deferred edge
  5.4387 -      // to "val" from each field with the same offset.
  5.4388 -      for( VectorSetI i(PointsTo(adr_base)); i.test(); ++i ) {
  5.4389 -        uint pt = i.elem;
  5.4390 -        // Add field edge if it is missing.
  5.4391 -        add_field_edge(pt, adr->_idx, offset);
  5.4392 -        add_edge_from_fields(pt, val->_idx, offset);
  5.4393 -      }
  5.4394 -      break;
  5.4395 -    }
  5.4396 -    case Op_AryEq:
  5.4397 -    case Op_StrComp:
  5.4398 -    case Op_StrEquals:
  5.4399 -    case Op_StrIndexOf:
  5.4400 -    {
  5.4401 -      // char[] arrays passed to string intrinsic do not escape but
  5.4402 -      // they are not scalar replaceable. Adjust escape state for them.
  5.4403 -      // Start from in(2) edge since in(1) is memory edge.
  5.4404 -      for (uint i = 2; i < n->req(); i++) {
  5.4405 -        Node* adr = n->in(i)->uncast();
  5.4406 -        const Type *at = phase->type(adr);
  5.4407 -        if (!adr->is_top() && at->isa_ptr()) {
  5.4408 -          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
  5.4409 -                 at->isa_ptr() != NULL, "expecting an Ptr");
  5.4410 -          if (adr->is_AddP()) {
  5.4411 -            adr = get_addp_base(adr);
  5.4412 -          }
  5.4413 -          // Mark as ArgEscape everything "adr" could point to.
  5.4414 -          set_escape_state(adr->_idx, PointsToNode::ArgEscape);
  5.4415 -        }
  5.4416 -      }
  5.4417 -      _processed.set(n_idx);
  5.4418 -      break;
  5.4419 -    }
  5.4420 -    case Op_ThreadLocal:
  5.4421 -    {
  5.4422 -      assert(false, "Op_ThreadLocal");
  5.4423 -      break;
  5.4424 -    }
  5.4425 -    default:
  5.4426 -      // This method should be called only for EA specific nodes.
  5.4427 -      ShouldNotReachHere();
  5.4428 -  }
  5.4429 -}
  5.4430 -
  5.4431 -#ifndef PRODUCT
  5.4432 -void ConnectionGraph::dump() {
  5.4433 +void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
  5.4434    bool first = true;
  5.4435 -
  5.4436 -  uint size = nodes_size();
  5.4437 -  for (uint ni = 0; ni < size; ni++) {
  5.4438 -    PointsToNode *ptn = ptnode_adr(ni);
  5.4439 -    PointsToNode::NodeType ptn_type = ptn->node_type();
  5.4440 -
  5.4441 -    if (ptn_type != PointsToNode::JavaObject || ptn->_node == NULL)
  5.4442 +  int ptnodes_length = ptnodes_worklist.length();
  5.4443 +  for (int i = 0; i < ptnodes_length; i++) {
  5.4444 +    PointsToNode *ptn = ptnodes_worklist.at(i);
  5.4445 +    if (ptn == NULL || !ptn->is_JavaObject())
  5.4446        continue;
  5.4447 -    PointsToNode::EscapeState es = escape_state(ptn->_node);
  5.4448 -    if (ptn->_node->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
  5.4449 +    PointsToNode::EscapeState es = ptn->escape_state();
  5.4450 +    if (ptn->ideal_node()->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
  5.4451        if (first) {
  5.4452          tty->cr();
  5.4453          tty->print("======== Connection graph for ");
  5.4454 @@ -3114,22 +3099,14 @@
  5.4455          tty->cr();
  5.4456          first = false;
  5.4457        }
  5.4458 -      tty->print("%6d ", ni);
  5.4459        ptn->dump();
  5.4460 -      // Print all locals which reference this allocation
  5.4461 -      for (uint li = ni; li < size; li++) {
  5.4462 -        PointsToNode *ptn_loc = ptnode_adr(li);
  5.4463 -        PointsToNode::NodeType ptn_loc_type = ptn_loc->node_type();
  5.4464 -        if ( ptn_loc_type == PointsToNode::LocalVar && ptn_loc->_node != NULL &&
  5.4465 -             ptn_loc->edge_count() == 1 && ptn_loc->edge_target(0) == ni ) {
  5.4466 -          ptnode_adr(li)->dump(false);
  5.4467 -        }
  5.4468 -      }
  5.4469 -      if (Verbose) {
  5.4470 -        // Print all fields which reference this allocation
  5.4471 -        for (uint i = 0; i < ptn->edge_count(); i++) {
  5.4472 -          uint ei = ptn->edge_target(i);
  5.4473 -          ptnode_adr(ei)->dump(false);
  5.4474 +      // Print all locals and fields which reference this allocation
  5.4475 +      for (UseIterator j(ptn); j.has_next(); j.next()) {
  5.4476 +        PointsToNode* use = j.get();
  5.4477 +        if (use->is_LocalVar()) {
  5.4478 +          use->dump(Verbose);
  5.4479 +        } else if (Verbose) {
  5.4480 +          use->dump();
  5.4481          }
  5.4482        }
  5.4483        tty->cr();
     6.1 --- a/src/share/vm/opto/escape.hpp	Fri Mar 09 13:34:45 2012 -0800
     6.2 +++ b/src/share/vm/opto/escape.hpp	Mon Mar 12 10:46:47 2012 -0700
     6.3 @@ -1,5 +1,5 @@
     6.4  /*
     6.5 - * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
     6.6 + * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
     6.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     6.8   *
     6.9   * This code is free software; you can redistribute it and/or modify it
    6.10 @@ -115,18 +115,36 @@
    6.11  class  CallNode;
    6.12  class  PhiNode;
    6.13  class  PhaseTransform;
    6.14 +class  PointsToNode;
    6.15  class  Type;
    6.16  class  TypePtr;
    6.17  class  VectorSet;
    6.18  
    6.19 -class PointsToNode {
    6.20 -friend class ConnectionGraph;
    6.21 +class JavaObjectNode;
    6.22 +class LocalVarNode;
    6.23 +class FieldNode;
    6.24 +class ArraycopyNode;
    6.25 +
    6.26 +// ConnectionGraph nodes
    6.27 +class PointsToNode : public ResourceObj {
    6.28 +  GrowableArray<PointsToNode*> _edges; // List of nodes this node points to
    6.29 +  GrowableArray<PointsToNode*> _uses;  // List of nodes which point to this node
    6.30 +
    6.31 +  const u1           _type;  // NodeType
    6.32 +  u1                _flags;  // NodeFlags
    6.33 +  u1               _escape;  // EscapeState of object
    6.34 +  u1        _fields_escape;  // EscapeState of object's fields
    6.35 +
    6.36 +  Node* const        _node;  // Ideal node corresponding to this PointsTo node.
    6.37 +  const int           _idx;  // Cached ideal node's _idx
    6.38 +
    6.39  public:
    6.40    typedef enum {
    6.41      UnknownType = 0,
    6.42      JavaObject  = 1,
    6.43      LocalVar    = 2,
    6.44 -    Field       = 3
    6.45 +    Field       = 3,
    6.46 +    Arraycopy   = 4
    6.47    } NodeType;
    6.48  
    6.49    typedef enum {
    6.50 @@ -140,178 +158,387 @@
    6.51    } EscapeState;
    6.52  
    6.53    typedef enum {
    6.54 -    UnknownEdge   = 0,
    6.55 -    PointsToEdge  = 1,
    6.56 -    DeferredEdge  = 2,
    6.57 -    FieldEdge     = 3
    6.58 -  } EdgeType;
    6.59 +    ScalarReplaceable = 1,  // Not escaped object could be replaced with scalar
    6.60 +    PointsToUnknown   = 2,  // Has edge to phantom_object
    6.61 +    ArraycopySrc      = 4,  // Has edge from Arraycopy node
    6.62 +    ArraycopyDst      = 8   // Has edge to Arraycopy node
    6.63 +  } NodeFlags;
    6.64  
    6.65 -private:
    6.66 -  enum {
    6.67 -    EdgeMask = 3,
    6.68 -    EdgeShift = 2,
    6.69  
    6.70 -    INITIAL_EDGE_COUNT = 4
    6.71 -  };
    6.72 -
    6.73 -  NodeType             _type;
    6.74 -  EscapeState          _escape;
    6.75 -  GrowableArray<uint>* _edges; // outgoing edges
    6.76 -  Node* _node;                 // Ideal node corresponding to this PointsTo node.
    6.77 -  int   _offset;               // Object fields offsets.
    6.78 -  bool  _scalar_replaceable;   // Not escaped object could be replaced with scalar
    6.79 -  bool  _has_unknown_ptr;      // Has edge to phantom_object
    6.80 -
    6.81 -public:
    6.82 -  PointsToNode():
    6.83 -    _type(UnknownType),
    6.84 -    _escape(UnknownEscape),
    6.85 -    _edges(NULL),
    6.86 -    _node(NULL),
    6.87 -    _offset(-1),
    6.88 -    _has_unknown_ptr(false),
    6.89 -    _scalar_replaceable(true) {}
    6.90 -
    6.91 -
    6.92 -  EscapeState escape_state() const { return _escape; }
    6.93 -  NodeType node_type() const { return _type;}
    6.94 -  int offset() { return _offset;}
    6.95 -  bool scalar_replaceable() { return _scalar_replaceable;}
    6.96 -  bool has_unknown_ptr()    { return _has_unknown_ptr;}
    6.97 -
    6.98 -  void set_offset(int offs) { _offset = offs;}
    6.99 -  void set_escape_state(EscapeState state) { _escape = state; }
   6.100 -  void set_node_type(NodeType ntype) {
   6.101 -    assert(_type == UnknownType || _type == ntype, "Can't change node type");
   6.102 -    _type = ntype;
   6.103 -  }
   6.104 -  void set_scalar_replaceable(bool v) { _scalar_replaceable = v; }
   6.105 -  void set_has_unknown_ptr()          { _has_unknown_ptr = true; }
   6.106 -
   6.107 -  // count of outgoing edges
   6.108 -  uint edge_count() const { return (_edges == NULL) ? 0 : _edges->length(); }
   6.109 -
   6.110 -  // node index of target of outgoing edge "e"
   6.111 -  uint edge_target(uint e) const {
   6.112 -    assert(_edges != NULL, "valid edge index");
   6.113 -    return (_edges->at(e) >> EdgeShift);
   6.114 -  }
   6.115 -  // type of outgoing edge "e"
   6.116 -  EdgeType edge_type(uint e) const {
   6.117 -    assert(_edges != NULL, "valid edge index");
   6.118 -    return (EdgeType) (_edges->at(e) & EdgeMask);
   6.119 +  PointsToNode(Compile *C, Node* n, EscapeState es, NodeType type):
   6.120 +    _edges(C->comp_arena(), 2, 0, NULL),
   6.121 +    _uses (C->comp_arena(), 2, 0, NULL),
   6.122 +    _node(n),
   6.123 +    _idx(n->_idx),
   6.124 +    _type((u1)type),
   6.125 +    _escape((u1)es),
   6.126 +    _fields_escape((u1)es),
   6.127 +    _flags(ScalarReplaceable) {
   6.128 +    assert(n != NULL && es != UnknownEscape, "sanity");
   6.129    }
   6.130  
   6.131 -  // add a edge of the specified type pointing to the specified target
   6.132 -  void add_edge(uint targIdx, EdgeType et);
   6.133 +  Node* ideal_node()   const { return _node; }
   6.134 +  int          idx()   const { return _idx; }
   6.135  
   6.136 -  // remove an edge of the specified type pointing to the specified target
   6.137 -  void remove_edge(uint targIdx, EdgeType et);
   6.138 +  bool is_JavaObject() const { return _type == (u1)JavaObject; }
   6.139 +  bool is_LocalVar()   const { return _type == (u1)LocalVar; }
   6.140 +  bool is_Field()      const { return _type == (u1)Field; }
   6.141 +  bool is_Arraycopy()  const { return _type == (u1)Arraycopy; }
   6.142 +
   6.143 +  JavaObjectNode* as_JavaObject() { assert(is_JavaObject(),""); return (JavaObjectNode*)this; }
   6.144 +  LocalVarNode*   as_LocalVar()   { assert(is_LocalVar(),"");   return (LocalVarNode*)this; }
   6.145 +  FieldNode*      as_Field()      { assert(is_Field(),"");      return (FieldNode*)this; }
   6.146 +  ArraycopyNode*  as_Arraycopy()  { assert(is_Arraycopy(),"");  return (ArraycopyNode*)this; }
   6.147 +
   6.148 +  EscapeState escape_state() const { return (EscapeState)_escape; }
   6.149 +  void    set_escape_state(EscapeState state) { _escape = (u1)state; }
   6.150 +
   6.151 +  EscapeState fields_escape_state() const { return (EscapeState)_fields_escape; }
   6.152 +  void    set_fields_escape_state(EscapeState state) { _fields_escape = (u1)state; }
   6.153 +
   6.154 +  bool     has_unknown_ptr() const { return (_flags & PointsToUnknown) != 0; }
   6.155 +  void set_has_unknown_ptr()       { _flags |= PointsToUnknown; }
   6.156 +
   6.157 +  bool     arraycopy_src() const { return (_flags & ArraycopySrc) != 0; }
   6.158 +  void set_arraycopy_src()       { _flags |= ArraycopySrc; }
   6.159 +  bool     arraycopy_dst() const { return (_flags & ArraycopyDst) != 0; }
   6.160 +  void set_arraycopy_dst()       { _flags |= ArraycopyDst; }
   6.161 +
   6.162 +  bool     scalar_replaceable() const { return (_flags & ScalarReplaceable) != 0;}
   6.163 +  void set_scalar_replaceable(bool v) {
   6.164 +    if (v)
   6.165 +      _flags |= ScalarReplaceable;
   6.166 +    else
   6.167 +      _flags &= ~ScalarReplaceable;
   6.168 +  }
   6.169 +
   6.170 +  int edge_count()              const { return _edges.length(); }
   6.171 +  PointsToNode* edge(int e)     const { return _edges.at(e); }
   6.172 +  bool add_edge(PointsToNode* edge)    { return _edges.append_if_missing(edge); }
   6.173 +
   6.174 +  int use_count()             const { return _uses.length(); }
   6.175 +  PointsToNode* use(int e)    const { return _uses.at(e); }
   6.176 +  bool add_use(PointsToNode* use)    { return _uses.append_if_missing(use); }
   6.177 +
   6.178 +  // Mark base edge use to distinguish from stored value edge.
   6.179 +  bool add_base_use(FieldNode* use) { return _uses.append_if_missing((PointsToNode*)((intptr_t)use + 1)); }
   6.180 +  static bool is_base_use(PointsToNode* use) { return (((intptr_t)use) & 1); }
   6.181 +  static PointsToNode* get_use_node(PointsToNode* use) { return (PointsToNode*)(((intptr_t)use) & ~1); }
   6.182 +
   6.183 +  // Return true if this node points to specified node or nodes it points to.
   6.184 +  bool points_to(JavaObjectNode* ptn) const;
   6.185 +
   6.186 +  // Return true if this node points only to non-escaping allocations.
   6.187 +  bool non_escaping_allocation();
   6.188 +
   6.189 +  // Return true if one node points to an other.
   6.190 +  bool meet(PointsToNode* ptn);
   6.191  
   6.192  #ifndef PRODUCT
   6.193 +  NodeType node_type() const { return (NodeType)_type;}
   6.194    void dump(bool print_state=true) const;
   6.195  #endif
   6.196  
   6.197  };
   6.198  
   6.199 +class LocalVarNode: public PointsToNode {
   6.200 +public:
   6.201 +  LocalVarNode(Compile *C, Node* n, EscapeState es):
   6.202 +    PointsToNode(C, n, es, LocalVar) {}
   6.203 +};
   6.204 +
   6.205 +class JavaObjectNode: public PointsToNode {
   6.206 +public:
   6.207 +  JavaObjectNode(Compile *C, Node* n, EscapeState es):
   6.208 +    PointsToNode(C, n, es, JavaObject) {
   6.209 +      if (es > NoEscape)
   6.210 +        set_scalar_replaceable(false);
   6.211 +    }
   6.212 +};
   6.213 +
   6.214 +class FieldNode: public PointsToNode {
   6.215 +  GrowableArray<PointsToNode*> _bases; // List of JavaObject nodes which point to this node
   6.216 +  const int   _offset; // Field's offset.
   6.217 +  const bool  _is_oop; // Field points to object
   6.218 +        bool  _has_unknown_base; // Has phantom_object base
   6.219 +public:
   6.220 +  FieldNode(Compile *C, Node* n, EscapeState es, int offs, bool is_oop):
   6.221 +    PointsToNode(C, n, es, Field),
   6.222 +    _offset(offs), _is_oop(is_oop),
   6.223 +    _has_unknown_base(false) {}
   6.224 +
   6.225 +  int      offset()              const { return _offset;}
   6.226 +  bool     is_oop()              const { return _is_oop;}
   6.227 +  bool     has_unknown_base()    const { return _has_unknown_base; }
   6.228 +  void set_has_unknown_base()          { _has_unknown_base = true; }
   6.229 +
   6.230 +  int base_count()              const { return _bases.length(); }
   6.231 +  PointsToNode* base(int e)     const { return _bases.at(e); }
   6.232 +  bool add_base(PointsToNode* base)    { return _bases.append_if_missing(base); }
   6.233 +#ifdef ASSERT
   6.234 +  // Return true if bases points to this java object.
   6.235 +  bool has_base(JavaObjectNode* ptn) const;
   6.236 +#endif
   6.237 +
   6.238 +};
   6.239 +
   6.240 +class ArraycopyNode: public PointsToNode {
   6.241 +public:
   6.242 +  ArraycopyNode(Compile *C, Node* n, EscapeState es):
   6.243 +    PointsToNode(C, n, es, Arraycopy) {}
   6.244 +};
   6.245 +
   6.246 +// Iterators for PointsTo node's edges:
   6.247 +//   for (EdgeIterator i(n); i.has_next(); i.next()) {
   6.248 +//     PointsToNode* u = i.get();
   6.249 +class PointsToIterator: public StackObj {
   6.250 +protected:
   6.251 +  const PointsToNode* node;
   6.252 +  const int cnt;
   6.253 +  int i;
   6.254 +public:
   6.255 +  inline PointsToIterator(const PointsToNode* n, int cnt) : node(n), cnt(cnt), i(0) { }
   6.256 +  inline bool has_next() const { return i < cnt; }
   6.257 +  inline void next() { i++; }
   6.258 +  PointsToNode* get() const { ShouldNotCallThis(); return NULL; }
   6.259 +};
   6.260 +
   6.261 +class EdgeIterator: public PointsToIterator {
   6.262 +public:
   6.263 +  inline EdgeIterator(const PointsToNode* n) : PointsToIterator(n, n->edge_count()) { }
   6.264 +  inline PointsToNode* get() const { return node->edge(i); }
   6.265 +};
   6.266 +
   6.267 +class UseIterator: public PointsToIterator {
   6.268 +public:
   6.269 +  inline UseIterator(const PointsToNode* n) : PointsToIterator(n, n->use_count()) { }
   6.270 +  inline PointsToNode* get() const { return node->use(i); }
   6.271 +};
   6.272 +
   6.273 +class BaseIterator: public PointsToIterator {
   6.274 +public:
   6.275 +  inline BaseIterator(const FieldNode* n) : PointsToIterator(n, n->base_count()) { }
   6.276 +  inline PointsToNode* get() const { return ((PointsToNode*)node)->as_Field()->base(i); }
   6.277 +};
   6.278 +
   6.279 +
   6.280  class ConnectionGraph: public ResourceObj {
   6.281  private:
   6.282 -  GrowableArray<PointsToNode>  _nodes; // Connection graph nodes indexed
   6.283 -                                       // by ideal node index.
   6.284 +  GrowableArray<PointsToNode*>  _nodes; // Map from ideal nodes to
   6.285 +                                        // ConnectionGraph nodes.
   6.286  
   6.287 -  Unique_Node_List  _delayed_worklist; // Nodes to be processed before
   6.288 -                                       // the call build_connection_graph().
   6.289 +  GrowableArray<PointsToNode*>  _worklist; // Nodes to be processed
   6.290  
   6.291 -  GrowableArray<MergeMemNode *>  _mergemem_worklist; // List of all MergeMem nodes
   6.292 +  bool            _collecting; // Indicates whether escape information
   6.293 +                               // is still being collected. If false,
   6.294 +                               // no new nodes will be processed.
   6.295  
   6.296 -  VectorSet                _processed; // Records which nodes have been
   6.297 -                                       // processed.
   6.298 +  bool               _verify;  // verify graph
   6.299  
   6.300 -  bool                    _collecting; // Indicates whether escape information
   6.301 -                                       // is still being collected. If false,
   6.302 -                                       // no new nodes will be processed.
   6.303 +  JavaObjectNode* phantom_obj; // Unknown object
   6.304 +  JavaObjectNode*    null_obj;
   6.305 +  Node*             _pcmp_neq; // ConI(#CC_GT)
   6.306 +  Node*              _pcmp_eq; // ConI(#CC_EQ)
   6.307  
   6.308 -  bool                    _progress;   // Indicates whether new Graph's edges
   6.309 -                                       // were created.
   6.310 +  Compile*           _compile; // Compile object for current compilation
   6.311 +  PhaseIterGVN*         _igvn; // Value numbering
   6.312  
   6.313 -  uint                _phantom_object; // Index of globally escaping object
   6.314 -                                       // that pointer values loaded from
   6.315 -                                       // a field which has not been set
   6.316 -                                       // are assumed to point to.
   6.317 -  uint                      _oop_null; // ConP(#NULL)->_idx
   6.318 -  uint                     _noop_null; // ConN(#NULL)->_idx
   6.319 -  Node*                     _pcmp_neq; // ConI(#CC_GT)
   6.320 -  Node*                      _pcmp_eq; // ConI(#CC_EQ)
   6.321 -
   6.322 -  Compile *                  _compile; // Compile object for current compilation
   6.323 -  PhaseIterGVN *                _igvn; // Value numbering
   6.324 +  Unique_Node_List ideal_nodes; // Used by CG construction and types splitting.
   6.325  
   6.326    // Address of an element in _nodes.  Used when the element is to be modified
   6.327 -  PointsToNode *ptnode_adr(uint idx) const {
   6.328 +  PointsToNode* ptnode_adr(int idx) const {
   6.329      // There should be no new ideal nodes during ConnectionGraph build,
   6.330 -    // growableArray::adr_at() will throw assert otherwise.
   6.331 -    return _nodes.adr_at(idx);
   6.332 +    // growableArray::at() will throw assert otherwise.
   6.333 +    return _nodes.at(idx);
   6.334    }
   6.335    uint nodes_size() const { return _nodes.length(); }
   6.336  
   6.337 -  bool is_null_ptr(uint idx) const { return (idx == _noop_null || idx == _oop_null); }
   6.338 +  // Add nodes to ConnectionGraph.
   6.339 +  void add_local_var(Node* n, PointsToNode::EscapeState es);
   6.340 +  void add_java_object(Node* n, PointsToNode::EscapeState es);
   6.341 +  void add_field(Node* n, PointsToNode::EscapeState es, int offset);
   6.342 +  void add_arraycopy(Node* n, PointsToNode::EscapeState es, PointsToNode* src, PointsToNode* dst);
   6.343  
   6.344 -  // Add node to ConnectionGraph.
   6.345 -  void add_node(Node *n, PointsToNode::NodeType nt, PointsToNode::EscapeState es, bool done);
   6.346 +  // Compute the escape state for arguments to a call.
   6.347 +  void process_call_arguments(CallNode *call);
   6.348 +
   6.349 +  // Add PointsToNode node corresponding to a call
   6.350 +  void add_call_node(CallNode* call);
   6.351 +
   6.352 +  // Map ideal node to existing PointsTo node (usually phantom_object).
   6.353 +  void map_ideal_node(Node *n, PointsToNode* ptn) {
   6.354 +    assert(ptn != NULL, "only existing PointsTo node");
   6.355 +    _nodes.at_put(n->_idx, ptn);
   6.356 +  }
   6.357 +
   6.358 +  // Create PointsToNode node and add it to Connection Graph.
   6.359 +  void add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist);
   6.360 +
   6.361 +  // Add final simple edges to graph.
   6.362 +  void add_final_edges(Node *n);
   6.363 +
   6.364 +  // Finish Graph construction.
   6.365 +  bool complete_connection_graph(GrowableArray<PointsToNode*>&   ptnodes_worklist,
   6.366 +                                 GrowableArray<JavaObjectNode*>& non_escaped_worklist,
   6.367 +                                 GrowableArray<JavaObjectNode*>& java_objects_worklist,
   6.368 +                                 GrowableArray<FieldNode*>&      oop_fields_worklist);
   6.369 +
   6.370 +#ifdef ASSERT
   6.371 +  void verify_connection_graph(GrowableArray<PointsToNode*>&   ptnodes_worklist,
   6.372 +                               GrowableArray<JavaObjectNode*>& non_escaped_worklist,
   6.373 +                               GrowableArray<JavaObjectNode*>& java_objects_worklist,
   6.374 +                               GrowableArray<Node*>& addp_worklist);
   6.375 +#endif
   6.376 +
   6.377 +  // Add all references to this JavaObject node.
   6.378 +  int add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist);
   6.379 +
   6.380 +  // Put node on worklist if it is (or was) not there.
   6.381 +  void add_to_worklist(PointsToNode* pt) {
   6.382 +    _worklist.push(pt);
   6.383 +    return;
   6.384 +  }
   6.385 +
   6.386 +  // Put on worklist all uses of this node.
   6.387 +  void add_uses_to_worklist(PointsToNode* pt) {
   6.388 +    for (UseIterator i(pt); i.has_next(); i.next())
   6.389 +      _worklist.push(i.get());
   6.390 +  }
   6.391 +
   6.392 +  // Put on worklist all field's uses and related field nodes.
   6.393 +  void add_field_uses_to_worklist(FieldNode* field);
   6.394 +
   6.395 +  // Put on worklist all related field nodes.
   6.396 +  void add_fields_to_worklist(FieldNode* field, PointsToNode* base);
   6.397 +
   6.398 +  // Find fields which have unknown value.
   6.399 +  int find_field_value(FieldNode* field);
   6.400 +
   6.401 +  // Find fields initializing values for allocations.
   6.402 +  int find_init_values(JavaObjectNode* ptn, PointsToNode* init_val, PhaseTransform* phase);
   6.403 +
   6.404 +  // Set the escape state of an object and its fields.
   6.405 +  void set_escape_state(PointsToNode* ptn, PointsToNode::EscapeState esc) {
   6.406 +    // Don't change non-escaping state of NULL pointer.
   6.407 +    if (ptn != null_obj) {
   6.408 +      if (ptn->escape_state() < esc)
   6.409 +        ptn->set_escape_state(esc);
   6.410 +      if (ptn->fields_escape_state() < esc)
   6.411 +        ptn->set_fields_escape_state(esc);
   6.412 +    }
   6.413 +  }
   6.414 +  void set_fields_escape_state(PointsToNode* ptn, PointsToNode::EscapeState esc) {
   6.415 +    // Don't change non-escaping state of NULL pointer.
   6.416 +    if (ptn != null_obj) {
   6.417 +      if (ptn->fields_escape_state() < esc)
   6.418 +        ptn->set_fields_escape_state(esc);
   6.419 +    }
   6.420 +  }
   6.421 +
   6.422 +  // Propagate GlobalEscape and ArgEscape escape states to all nodes
   6.423 +  // and check that we still have non-escaping java objects.
   6.424 +  bool find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
   6.425 +                                GrowableArray<JavaObjectNode*>& non_escaped_worklist);
   6.426 +
   6.427 +  // Adjust scalar_replaceable state after Connection Graph is built.
   6.428 +  void adjust_scalar_replaceable_state(JavaObjectNode* jobj);
   6.429 +
   6.430 +  // Optimize ideal graph.
   6.431 +  void optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
   6.432 +                            GrowableArray<Node*>& storestore_worklist);
   6.433 +  // Optimize objects compare.
   6.434 +  Node* optimize_ptr_compare(Node* n);
   6.435 +
   6.436 +  // Returns unique corresponding java object or NULL.
   6.437 +  JavaObjectNode* unique_java_object(Node *n);
   6.438 +
   6.439 +  // Add an edge of the specified type pointing to the specified target.
   6.440 +  bool add_edge(PointsToNode* from, PointsToNode* to) {
   6.441 +    assert(!from->is_Field() || from->as_Field()->is_oop(), "sanity");
   6.442 +
   6.443 +    if (to == phantom_obj) {
   6.444 +      if (from->has_unknown_ptr()) {
   6.445 +        return false; // already points to phantom_obj
   6.446 +      }
   6.447 +      from->set_has_unknown_ptr();
   6.448 +    }
   6.449 +
   6.450 +    bool is_new = from->add_edge(to);
   6.451 +    assert(to != phantom_obj || is_new, "sanity");
   6.452 +    if (is_new) { // New edge?
   6.453 +      assert(!_verify, "graph is incomplete");
   6.454 +      is_new = to->add_use(from);
   6.455 +      assert(is_new, "use should be also new");
   6.456 +    }
   6.457 +    return is_new;
   6.458 +  }
   6.459 +
   6.460 +  // Add an edge from Field node to its base and back.
   6.461 +  bool add_base(FieldNode* from, PointsToNode* to) {
   6.462 +    assert(!to->is_Arraycopy(), "sanity");
   6.463 +    if (to == phantom_obj) {
   6.464 +      if (from->has_unknown_base()) {
   6.465 +        return false; // already has phantom_obj base
   6.466 +      }
   6.467 +      from->set_has_unknown_base();
   6.468 +    }
   6.469 +    bool is_new = from->add_base(to);
   6.470 +    assert(to != phantom_obj || is_new, "sanity");
   6.471 +    if (is_new) {      // New edge?
   6.472 +      assert(!_verify, "graph is incomplete");
   6.473 +      if (to == null_obj)
   6.474 +        return is_new; // Don't add fields to NULL pointer.
   6.475 +      if (to->is_JavaObject()) {
   6.476 +        is_new = to->add_edge(from);
   6.477 +      } else {
   6.478 +        is_new = to->add_base_use(from);
   6.479 +      }
   6.480 +      assert(is_new, "use should be also new");
   6.481 +    }
   6.482 +    return is_new;
   6.483 +  }
   6.484 +
   6.485 +  // Add LocalVar node and edge if possible
   6.486 +  void add_local_var_and_edge(Node* n, PointsToNode::EscapeState es, Node* to,
   6.487 +                              Unique_Node_List *delayed_worklist) {
   6.488 +    PointsToNode* ptn = ptnode_adr(to->_idx);
   6.489 +    if (delayed_worklist != NULL) { // First iteration of CG construction
   6.490 +      add_local_var(n, es);
   6.491 +      if (ptn == NULL) {
   6.492 +        delayed_worklist->push(n);
   6.493 +        return; // Process it later.
   6.494 +      }
   6.495 +    } else {
   6.496 +      assert(ptn != NULL, "node should be registered");
   6.497 +    }
   6.498 +    add_edge(ptnode_adr(n->_idx), ptn);
   6.499 +  }
   6.500 +
   6.501 +  // Helper functions
   6.502 +  bool   is_oop_field(Node* n, int offset);
   6.503 +  static Node* get_addp_base(Node *addp);
   6.504 +  static Node* find_second_addp(Node* addp, Node* n);
   6.505  
   6.506    // offset of a field reference
   6.507    int address_offset(Node* adr, PhaseTransform *phase);
   6.508  
   6.509 -  // compute the escape state for arguments to a call
   6.510 -  void process_call_arguments(CallNode *call, PhaseTransform *phase);
   6.511  
   6.512 -  // compute the escape state for the return value of a call
   6.513 -  void process_call_result(ProjNode *resproj, PhaseTransform *phase);
   6.514 +  // Propagate unique types created for unescaped allocated objects
   6.515 +  // through the graph
   6.516 +  void split_unique_types(GrowableArray<Node *>  &alloc_worklist);
   6.517  
   6.518 -  // Populate Connection Graph with Ideal nodes.
   6.519 -  void record_for_escape_analysis(Node *n, PhaseTransform *phase);
   6.520 +  // Helper methods for unique types split.
   6.521 +  bool split_AddP(Node *addp, Node *base);
   6.522  
   6.523 -  // Build Connection Graph and set nodes escape state.
   6.524 -  void build_connection_graph(Node *n, PhaseTransform *phase);
   6.525 +  PhiNode *create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, bool &new_created);
   6.526 +  PhiNode *split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist);
   6.527  
   6.528 -  // walk the connection graph starting at the node corresponding to "n" and
   6.529 -  // add the index of everything it could point to, to "ptset".  This may cause
   6.530 -  // Phi's encountered to get (re)processed  (which requires "phase".)
   6.531 -  VectorSet* PointsTo(Node * n);
   6.532 +  void  move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis);
   6.533 +  Node* find_inst_mem(Node* mem, int alias_idx,GrowableArray<PhiNode *>  &orig_phi_worklist);
   6.534 +  Node* step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop);
   6.535  
   6.536 -  // Reused structures for PointsTo().
   6.537 -  VectorSet            pt_ptset;
   6.538 -  VectorSet            pt_visited;
   6.539 -  GrowableArray<uint>  pt_worklist;
   6.540  
   6.541 -  //  Edge manipulation.  The "from_i" and "to_i" arguments are the
   6.542 -  //  node indices of the source and destination of the edge
   6.543 -  void add_pointsto_edge(uint from_i, uint to_i);
   6.544 -  void add_deferred_edge(uint from_i, uint to_i);
   6.545 -  void add_field_edge(uint from_i, uint to_i, int offs);
   6.546 -
   6.547 -  // Add an edge of the specified type pointing to the specified target.
   6.548 -  // Set _progress if new edge is added.
   6.549 -  void add_edge(PointsToNode *f, uint to_i, PointsToNode::EdgeType et) {
   6.550 -    uint e_cnt = f->edge_count();
   6.551 -    f->add_edge(to_i, et);
   6.552 -    _progress |= (f->edge_count() != e_cnt);
   6.553 -  }
   6.554 -
   6.555 -  // Add an edge to node given by "to_i" from any field of adr_i whose offset
   6.556 -  // matches "offset"  A deferred edge is added if to_i is a LocalVar, and
   6.557 -  // a pointsto edge is added if it is a JavaObject
   6.558 -  void add_edge_from_fields(uint adr, uint to_i, int offs);
   6.559 -
   6.560 -  // Add a deferred  edge from node given by "from_i" to any field
   6.561 -  // of adr_i whose offset matches "offset"
   6.562 -  void add_deferred_edge_to_fields(uint from_i, uint adr, int offs);
   6.563 -
   6.564 -
   6.565 -  // Remove outgoing deferred edges from the node referenced by "ni".
   6.566 -  // Any outgoing edges from the target of the deferred edge are copied
   6.567 -  // to "ni".
   6.568 -  void remove_deferred(uint ni, GrowableArray<uint>* deferred_edges, VectorSet* visited);
   6.569 +  GrowableArray<MergeMemNode*>  _mergemem_worklist; // List of all MergeMem nodes
   6.570  
   6.571    Node_Array _node_map; // used for bookeeping during type splitting
   6.572                          // Used for the following purposes:
   6.573 @@ -320,21 +547,18 @@
   6.574                          // MemNode       - new memory input for this node
   6.575                          // ChecCastPP    - allocation that this is a cast of
   6.576                          // allocation    - CheckCastPP of the allocation
   6.577 -  bool split_AddP(Node *addp, Node *base,  PhaseGVN  *igvn);
   6.578 -  PhiNode *create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn, bool &new_created);
   6.579 -  PhiNode *split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn);
   6.580 -  void  move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *igvn);
   6.581 -  Node *find_inst_mem(Node *mem, int alias_idx,GrowableArray<PhiNode *>  &orig_phi_worklist,  PhaseGVN  *igvn);
   6.582 -
   6.583 -  // Propagate unique types created for unescaped allocated objects
   6.584 -  // through the graph
   6.585 -  void split_unique_types(GrowableArray<Node *>  &alloc_worklist);
   6.586  
   6.587    // manage entries in _node_map
   6.588 -  void  set_map(int idx, Node *n)        { _node_map.map(idx, n); }
   6.589 -  Node *get_map(int idx)                 { return _node_map[idx]; }
   6.590 -  PhiNode *get_map_phi(int idx) {
   6.591 -    Node *phi = _node_map[idx];
   6.592 +
   6.593 +  void  set_map(Node* from, Node* to)  {
   6.594 +    ideal_nodes.push(from);
   6.595 +    _node_map.map(from->_idx, to);
   6.596 +  }
   6.597 +
   6.598 +  Node* get_map(int idx) { return _node_map[idx]; }
   6.599 +
   6.600 +  PhiNode* get_map_phi(int idx) {
   6.601 +    Node* phi = _node_map[idx];
   6.602      return (phi == NULL) ? NULL : phi->as_Phi();
   6.603    }
   6.604  
   6.605 @@ -344,23 +568,6 @@
   6.606      _igvn->add_users_to_worklist(n);
   6.607    }
   6.608  
   6.609 -  // Set the escape state of a node
   6.610 -  void set_escape_state(uint ni, PointsToNode::EscapeState es);
   6.611 -
   6.612 -  // Find fields initializing values for allocations.
   6.613 -  void find_init_values(Node* n, VectorSet* visited, PhaseTransform* phase);
   6.614 -
   6.615 -  // Adjust escape state after Connection Graph is built.
   6.616 -  void adjust_escape_state(Node* n);
   6.617 -
   6.618 -  // Propagate escape states to referenced nodes.
   6.619 -  bool propagate_escape_state(GrowableArray<int>* cg_worklist,
   6.620 -                              GrowableArray<uint>* worklist,
   6.621 -                              PointsToNode::EscapeState esc_state);
   6.622 -
   6.623 -  // Optimize objects compare.
   6.624 -  Node* optimize_ptr_compare(Node* n);
   6.625 -
   6.626    // Compute the escape information
   6.627    bool compute_escape();
   6.628  
   6.629 @@ -373,11 +580,10 @@
   6.630    // Perform escape analysis
   6.631    static void do_analysis(Compile *C, PhaseIterGVN *igvn);
   6.632  
   6.633 -  // escape state of a node
   6.634 -  PointsToNode::EscapeState escape_state(Node *n);
   6.635 +  bool not_global_escape(Node *n);
   6.636  
   6.637  #ifndef PRODUCT
   6.638 -  void dump();
   6.639 +  void dump(GrowableArray<PointsToNode*>& ptnodes_worklist);
   6.640  #endif
   6.641  };
   6.642  
     7.1 --- a/src/share/vm/opto/phase.cpp	Fri Mar 09 13:34:45 2012 -0800
     7.2 +++ b/src/share/vm/opto/phase.cpp	Mon Mar 12 10:46:47 2012 -0700
     7.3 @@ -1,5 +1,5 @@
     7.4  /*
     7.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
     7.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
     7.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     7.8   *
     7.9   * This code is free software; you can redistribute it and/or modify it
    7.10 @@ -39,8 +39,9 @@
    7.11  
    7.12  // The next timers used for LogCompilation
    7.13  elapsedTimer Phase::_t_parser;
    7.14 -elapsedTimer Phase::_t_escapeAnalysis;
    7.15  elapsedTimer Phase::_t_optimizer;
    7.16 +elapsedTimer   Phase::_t_escapeAnalysis;
    7.17 +elapsedTimer     Phase::_t_connectionGraph;
    7.18  elapsedTimer   Phase::_t_idealLoop;
    7.19  elapsedTimer   Phase::_t_ccp;
    7.20  elapsedTimer Phase::_t_matcher;
    7.21 @@ -51,6 +52,7 @@
    7.22  elapsedTimer Phase::_t_graphReshaping;
    7.23  elapsedTimer Phase::_t_scheduler;
    7.24  elapsedTimer Phase::_t_blockOrdering;
    7.25 +elapsedTimer Phase::_t_macroEliminate;
    7.26  elapsedTimer Phase::_t_macroExpand;
    7.27  elapsedTimer Phase::_t_peephole;
    7.28  elapsedTimer Phase::_t_codeGeneration;
    7.29 @@ -104,6 +106,8 @@
    7.30      if (DoEscapeAnalysis) {
    7.31        // EA is part of Optimizer.
    7.32        tty->print_cr ("      escape analysis: %3.3f sec", Phase::_t_escapeAnalysis.seconds());
    7.33 +      tty->print_cr ("        connection graph: %3.3f sec", Phase::_t_connectionGraph.seconds());
    7.34 +      tty->print_cr ("      macroEliminate : %3.3f sec", Phase::_t_macroEliminate.seconds());
    7.35      }
    7.36      tty->print_cr ("      iterGVN        : %3.3f sec", Phase::_t_iterGVN.seconds());
    7.37      tty->print_cr ("      idealLoop      : %3.3f sec", Phase::_t_idealLoop.seconds());
    7.38 @@ -112,9 +116,10 @@
    7.39      tty->print_cr ("      iterGVN2       : %3.3f sec", Phase::_t_iterGVN2.seconds());
    7.40      tty->print_cr ("      macroExpand    : %3.3f sec", Phase::_t_macroExpand.seconds());
    7.41      tty->print_cr ("      graphReshape   : %3.3f sec", Phase::_t_graphReshaping.seconds());
    7.42 -    double optimizer_subtotal = Phase::_t_iterGVN.seconds() +
    7.43 +    double optimizer_subtotal = Phase::_t_iterGVN.seconds() + Phase::_t_iterGVN2.seconds() +
    7.44 +      Phase::_t_escapeAnalysis.seconds() + Phase::_t_macroEliminate.seconds() +
    7.45        Phase::_t_idealLoop.seconds() + Phase::_t_ccp.seconds() +
    7.46 -      Phase::_t_graphReshaping.seconds();
    7.47 +      Phase::_t_macroExpand.seconds() + Phase::_t_graphReshaping.seconds();
    7.48      double percent_of_optimizer = ((optimizer_subtotal == 0.0) ? 0.0 : (optimizer_subtotal / Phase::_t_optimizer.seconds() * 100.0));
    7.49      tty->print_cr ("      subtotal       : %3.3f sec,  %3.2f %%", optimizer_subtotal, percent_of_optimizer);
    7.50    }
     8.1 --- a/src/share/vm/opto/phase.hpp	Fri Mar 09 13:34:45 2012 -0800
     8.2 +++ b/src/share/vm/opto/phase.hpp	Mon Mar 12 10:46:47 2012 -0700
     8.3 @@ -1,5 +1,5 @@
     8.4  /*
     8.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
     8.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
     8.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     8.8   *
     8.9   * This code is free software; you can redistribute it and/or modify it
    8.10 @@ -72,8 +72,12 @@
    8.11  
    8.12  // The next timers used for LogCompilation
    8.13    static elapsedTimer _t_parser;
    8.14 -  static elapsedTimer _t_escapeAnalysis;
    8.15    static elapsedTimer _t_optimizer;
    8.16 +public:
    8.17 +  // ConnectionGraph can't be Phase since it is used after EA done.
    8.18 +  static elapsedTimer   _t_escapeAnalysis;
    8.19 +  static elapsedTimer     _t_connectionGraph;
    8.20 +protected:
    8.21    static elapsedTimer   _t_idealLoop;
    8.22    static elapsedTimer   _t_ccp;
    8.23    static elapsedTimer _t_matcher;
    8.24 @@ -84,6 +88,7 @@
    8.25    static elapsedTimer _t_graphReshaping;
    8.26    static elapsedTimer _t_scheduler;
    8.27    static elapsedTimer _t_blockOrdering;
    8.28 +  static elapsedTimer _t_macroEliminate;
    8.29    static elapsedTimer _t_macroExpand;
    8.30    static elapsedTimer _t_peephole;
    8.31    static elapsedTimer _t_codeGeneration;
     9.1 --- a/src/share/vm/utilities/growableArray.hpp	Fri Mar 09 13:34:45 2012 -0800
     9.2 +++ b/src/share/vm/utilities/growableArray.hpp	Mon Mar 12 10:46:47 2012 -0700
     9.3 @@ -1,5 +1,5 @@
     9.4  /*
     9.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
     9.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
     9.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     9.8   *
     9.9   * This code is free software; you can redistribute it and/or modify it
    9.10 @@ -198,8 +198,11 @@
    9.11      return idx;
    9.12    }
    9.13  
    9.14 -  void append_if_missing(const E& elem) {
    9.15 -    if (!contains(elem)) append(elem);
    9.16 +  bool append_if_missing(const E& elem) {
    9.17 +    // Returns TRUE if elem is added.
    9.18 +    bool missed = !contains(elem);
    9.19 +    if (missed) append(elem);
    9.20 +    return missed;
    9.21    }
    9.22  
    9.23    E at(int i) const {
    9.24 @@ -292,12 +295,22 @@
    9.25      ShouldNotReachHere();
    9.26    }
    9.27  
    9.28 +  // The order is preserved.
    9.29    void remove_at(int index) {
    9.30      assert(0 <= index && index < _len, "illegal index");
    9.31      for (int j = index + 1; j < _len; j++) _data[j-1] = _data[j];
    9.32      _len--;
    9.33    }
    9.34  
    9.35 +  // The order is changed.
    9.36 +  void delete_at(int index) {
    9.37 +    assert(0 <= index && index < _len, "illegal index");
    9.38 +    if (index < --_len) {
    9.39 +      // Replace removed element with last one.
    9.40 +      _data[index] = _data[_len];
    9.41 +    }
    9.42 +  }
    9.43 +
    9.44    // inserts the given element before the element at index i
    9.45    void insert_before(const int idx, const E& elem) {
    9.46      check_nesting();

mercurial