src/share/vm/opto/escape.cpp

changeset 3651
ee138854b3a6
parent 3604
9a72c7ece7fb
child 3657
ed4c92f54c2d
     1.1 --- a/src/share/vm/opto/escape.cpp	Fri Mar 09 13:34:45 2012 -0800
     1.2 +++ b/src/share/vm/opto/escape.cpp	Mon Mar 12 10:46:47 2012 -0700
     1.3 @@ -24,6 +24,7 @@
     1.4  
     1.5  #include "precompiled.hpp"
     1.6  #include "ci/bcEscapeAnalyzer.hpp"
     1.7 +#include "compiler/compileLog.hpp"
     1.8  #include "libadt/vectset.hpp"
     1.9  #include "memory/allocation.hpp"
    1.10  #include "opto/c2compiler.hpp"
    1.11 @@ -34,125 +35,1901 @@
    1.12  #include "opto/phaseX.hpp"
    1.13  #include "opto/rootnode.hpp"
    1.14  
    1.15 -void PointsToNode::add_edge(uint targIdx, PointsToNode::EdgeType et) {
    1.16 -  uint v = (targIdx << EdgeShift) + ((uint) et);
    1.17 -  if (_edges == NULL) {
    1.18 -     Arena *a = Compile::current()->comp_arena();
    1.19 -    _edges = new(a) GrowableArray<uint>(a, INITIAL_EDGE_COUNT, 0, 0);
    1.20 -  }
    1.21 -  _edges->append_if_missing(v);
    1.22 -}
    1.23 -
    1.24 -void PointsToNode::remove_edge(uint targIdx, PointsToNode::EdgeType et) {
    1.25 -  uint v = (targIdx << EdgeShift) + ((uint) et);
    1.26 -
    1.27 -  _edges->remove(v);
    1.28 -}
    1.29 -
    1.30 -#ifndef PRODUCT
    1.31 -static const char *node_type_names[] = {
    1.32 -  "UnknownType",
    1.33 -  "JavaObject",
    1.34 -  "LocalVar",
    1.35 -  "Field"
    1.36 -};
    1.37 -
    1.38 -static const char *esc_names[] = {
    1.39 -  "UnknownEscape",
    1.40 -  "NoEscape",
    1.41 -  "ArgEscape",
    1.42 -  "GlobalEscape"
    1.43 -};
    1.44 -
    1.45 -static const char *edge_type_suffix[] = {
    1.46 - "?", // UnknownEdge
    1.47 - "P", // PointsToEdge
    1.48 - "D", // DeferredEdge
    1.49 - "F"  // FieldEdge
    1.50 -};
    1.51 -
    1.52 -void PointsToNode::dump(bool print_state) const {
    1.53 -  NodeType nt = node_type();
    1.54 -  tty->print("%s ", node_type_names[(int) nt]);
    1.55 -  if (print_state) {
    1.56 -    EscapeState es = escape_state();
    1.57 -    tty->print("%s %s ", esc_names[(int) es], _scalar_replaceable ? "":"NSR");
    1.58 -  }
    1.59 -  tty->print("[[");
    1.60 -  for (uint i = 0; i < edge_count(); i++) {
    1.61 -    tty->print(" %d%s", edge_target(i), edge_type_suffix[(int) edge_type(i)]);
    1.62 -  }
    1.63 -  tty->print("]]  ");
    1.64 -  if (_node == NULL)
    1.65 -    tty->print_cr("<null>");
    1.66 -  else
    1.67 -    _node->dump();
    1.68 -}
    1.69 -#endif
    1.70 -
    1.71  ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
    1.72 -  _nodes(C->comp_arena(), C->unique(), C->unique(), PointsToNode()),
    1.73 -  _processed(C->comp_arena()),
    1.74 -  pt_ptset(C->comp_arena()),
    1.75 -  pt_visited(C->comp_arena()),
    1.76 -  pt_worklist(C->comp_arena(), 4, 0, 0),
    1.77 +  _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
    1.78    _collecting(true),
    1.79 -  _progress(false),
    1.80 +  _verify(false),
    1.81    _compile(C),
    1.82    _igvn(igvn),
    1.83    _node_map(C->comp_arena()) {
    1.84 -
    1.85 -  _phantom_object = C->top()->_idx,
    1.86 -  add_node(C->top(), PointsToNode::JavaObject, PointsToNode::GlobalEscape,true);
    1.87 -
    1.88 +  // Add unknown java object.
    1.89 +  add_java_object(C->top(), PointsToNode::GlobalEscape);
    1.90 +  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
    1.91    // Add ConP(#NULL) and ConN(#NULL) nodes.
    1.92    Node* oop_null = igvn->zerocon(T_OBJECT);
    1.93 -  _oop_null = oop_null->_idx;
    1.94 -  assert(_oop_null < nodes_size(), "should be created already");
    1.95 -  add_node(oop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);
    1.96 -
    1.97 +  assert(oop_null->_idx < nodes_size(), "should be created already");
    1.98 +  add_java_object(oop_null, PointsToNode::NoEscape);
    1.99 +  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
   1.100    if (UseCompressedOops) {
   1.101      Node* noop_null = igvn->zerocon(T_NARROWOOP);
   1.102 -    _noop_null = noop_null->_idx;
   1.103 -    assert(_noop_null < nodes_size(), "should be created already");
   1.104 -    add_node(noop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);
   1.105 -  } else {
   1.106 -    _noop_null = _oop_null; // Should be initialized
   1.107 +    assert(noop_null->_idx < nodes_size(), "should be created already");
   1.108 +    map_ideal_node(noop_null, null_obj);
   1.109    }
   1.110    _pcmp_neq = NULL; // Should be initialized
   1.111    _pcmp_eq  = NULL;
   1.112  }
   1.113  
   1.114 -void ConnectionGraph::add_pointsto_edge(uint from_i, uint to_i) {
   1.115 -  PointsToNode *f = ptnode_adr(from_i);
   1.116 -  PointsToNode *t = ptnode_adr(to_i);
   1.117 -
   1.118 -  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
   1.119 -  assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of PointsTo edge");
   1.120 -  assert(t->node_type() == PointsToNode::JavaObject, "invalid destination of PointsTo edge");
   1.121 -  if (to_i == _phantom_object) { // Quick test for most common object
   1.122 -    if (f->has_unknown_ptr()) {
   1.123 -      return;
   1.124 -    } else {
   1.125 -      f->set_has_unknown_ptr();
   1.126 +bool ConnectionGraph::has_candidates(Compile *C) {
   1.127 +  // EA brings benefits only when the code has allocations and/or locks which
   1.128 +  // are represented by ideal Macro nodes.
   1.129 +  int cnt = C->macro_count();
   1.130 +  for( int i=0; i < cnt; i++ ) {
   1.131 +    Node *n = C->macro_node(i);
   1.132 +    if ( n->is_Allocate() )
   1.133 +      return true;
   1.134 +    if( n->is_Lock() ) {
   1.135 +      Node* obj = n->as_Lock()->obj_node()->uncast();
   1.136 +      if( !(obj->is_Parm() || obj->is_Con()) )
   1.137 +        return true;
   1.138      }
   1.139    }
   1.140 -  add_edge(f, to_i, PointsToNode::PointsToEdge);
   1.141 +  return false;
   1.142  }
   1.143  
   1.144 -void ConnectionGraph::add_deferred_edge(uint from_i, uint to_i) {
   1.145 -  PointsToNode *f = ptnode_adr(from_i);
   1.146 -  PointsToNode *t = ptnode_adr(to_i);
   1.147 +void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
   1.148 +  Compile::TracePhase t2("escapeAnalysis", &Phase::_t_escapeAnalysis, true);
   1.149 +  ResourceMark rm;
   1.150  
   1.151 -  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
   1.152 -  assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of Deferred edge");
   1.153 -  assert(t->node_type() == PointsToNode::LocalVar || t->node_type() == PointsToNode::Field, "invalid destination of Deferred edge");
   1.154 -  // don't add a self-referential edge, this can occur during removal of
   1.155 -  // deferred edges
   1.156 -  if (from_i != to_i)
   1.157 -    add_edge(f, to_i, PointsToNode::DeferredEdge);
   1.158 +  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
   1.159 +  // to create space for them in ConnectionGraph::_nodes[].
   1.160 +  Node* oop_null = igvn->zerocon(T_OBJECT);
   1.161 +  Node* noop_null = igvn->zerocon(T_NARROWOOP);
   1.162 +  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
   1.163 +  // Perform escape analysis
   1.164 +  if (congraph->compute_escape()) {
   1.165 +    // There are non escaping objects.
   1.166 +    C->set_congraph(congraph);
   1.167 +  }
   1.168 +  // Cleanup.
   1.169 +  if (oop_null->outcnt() == 0)
   1.170 +    igvn->hash_delete(oop_null);
   1.171 +  if (noop_null->outcnt() == 0)
   1.172 +    igvn->hash_delete(noop_null);
   1.173  }
   1.174  
   1.175 +bool ConnectionGraph::compute_escape() {
   1.176 +  Compile* C = _compile;
   1.177 +  PhaseGVN* igvn = _igvn;
   1.178 +
   1.179 +  // Worklists used by EA.
   1.180 +  Unique_Node_List delayed_worklist;
   1.181 +  GrowableArray<Node*> alloc_worklist;
   1.182 +  GrowableArray<Node*> ptr_cmp_worklist;
   1.183 +  GrowableArray<Node*> storestore_worklist;
   1.184 +  GrowableArray<PointsToNode*>   ptnodes_worklist;
   1.185 +  GrowableArray<JavaObjectNode*> java_objects_worklist;
   1.186 +  GrowableArray<JavaObjectNode*> non_escaped_worklist;
   1.187 +  GrowableArray<FieldNode*>      oop_fields_worklist;
   1.188 +  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
   1.189 +
   1.190 +  { Compile::TracePhase t3("connectionGraph", &Phase::_t_connectionGraph, true);
   1.191 +
   1.192 +  // 1. Populate Connection Graph (CG) with PointsTo nodes.
   1.193 +  ideal_nodes.map(C->unique(), NULL);  // preallocate space
   1.194 +  // Initialize worklist
   1.195 +  if (C->root() != NULL) {
   1.196 +    ideal_nodes.push(C->root());
   1.197 +  }
   1.198 +  for( uint next = 0; next < ideal_nodes.size(); ++next ) {
   1.199 +    Node* n = ideal_nodes.at(next);
   1.200 +    // Create PointsTo nodes and add them to Connection Graph. Called
   1.201 +    // only once per ideal node since ideal_nodes is Unique_Node list.
   1.202 +    add_node_to_connection_graph(n, &delayed_worklist);
   1.203 +    PointsToNode* ptn = ptnode_adr(n->_idx);
   1.204 +    if (ptn != NULL) {
   1.205 +      ptnodes_worklist.append(ptn);
   1.206 +      if (ptn->is_JavaObject()) {
   1.207 +        java_objects_worklist.append(ptn->as_JavaObject());
   1.208 +        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
   1.209 +            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
   1.210 +          // Only allocations and java static calls results are interesting.
   1.211 +          non_escaped_worklist.append(ptn->as_JavaObject());
   1.212 +        }
   1.213 +      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
   1.214 +        oop_fields_worklist.append(ptn->as_Field());
   1.215 +      }
   1.216 +    }
   1.217 +    if (n->is_MergeMem()) {
   1.218 +      // Collect all MergeMem nodes to add memory slices for
   1.219 +      // scalar replaceable objects in split_unique_types().
   1.220 +      _mergemem_worklist.append(n->as_MergeMem());
   1.221 +    } else if (OptimizePtrCompare && n->is_Cmp() &&
   1.222 +               (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
   1.223 +      // Collect compare pointers nodes.
   1.224 +      ptr_cmp_worklist.append(n);
   1.225 +    } else if (n->is_MemBarStoreStore()) {
   1.226 +      // Collect all MemBarStoreStore nodes so that depending on the
   1.227 +      // escape status of the associated Allocate node some of them
   1.228 +      // may be eliminated.
   1.229 +      storestore_worklist.append(n);
   1.230 +#ifdef ASSERT
   1.231 +    } else if(n->is_AddP()) {
   1.232 +      // Collect address nodes for graph verification.
   1.233 +      addp_worklist.append(n);
   1.234 +#endif
   1.235 +    }
   1.236 +    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
   1.237 +      Node* m = n->fast_out(i);   // Get user
   1.238 +      ideal_nodes.push(m);
   1.239 +    }
   1.240 +  }
   1.241 +  if (non_escaped_worklist.length() == 0) {
   1.242 +    _collecting = false;
   1.243 +    return false; // Nothing to do.
   1.244 +  }
   1.245 +  // Add final simple edges to graph.
   1.246 +  while(delayed_worklist.size() > 0) {
   1.247 +    Node* n = delayed_worklist.pop();
   1.248 +    add_final_edges(n);
   1.249 +  }
   1.250 +  int ptnodes_length = ptnodes_worklist.length();
   1.251 +
   1.252 +#ifdef ASSERT
   1.253 +  if (VerifyConnectionGraph) {
   1.254 +    // Verify that no new simple edges could be created and all
   1.255 +    // local vars has edges.
   1.256 +    _verify = true;
   1.257 +    for (int next = 0; next < ptnodes_length; ++next) {
   1.258 +      PointsToNode* ptn = ptnodes_worklist.at(next);
   1.259 +      add_final_edges(ptn->ideal_node());
   1.260 +      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
   1.261 +        ptn->dump();
   1.262 +        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
   1.263 +      }
   1.264 +    }
   1.265 +    _verify = false;
   1.266 +  }
   1.267 +#endif
   1.268 +
   1.269 +  // 2. Finish Graph construction by propagating references to all
   1.270 +  //    java objects through graph.
   1.271 +  if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,
   1.272 +                                 java_objects_worklist, oop_fields_worklist)) {
   1.273 +    // All objects escaped or hit time or iterations limits.
   1.274 +    _collecting = false;
   1.275 +    return false;
   1.276 +  }
   1.277 +
   1.278 +  // 3. Adjust scalar_replaceable state of nonescaping objects and push
   1.279 +  //    scalar replaceable allocations on alloc_worklist for processing
   1.280 +  //    in split_unique_types().
   1.281 +  int non_escaped_length = non_escaped_worklist.length();
   1.282 +  for (int next = 0; next < non_escaped_length; next++) {
   1.283 +    JavaObjectNode* ptn = non_escaped_worklist.at(next);
   1.284 +    if (ptn->escape_state() == PointsToNode::NoEscape &&
   1.285 +        ptn->scalar_replaceable()) {
   1.286 +      adjust_scalar_replaceable_state(ptn);
   1.287 +      if (ptn->scalar_replaceable()) {
   1.288 +        alloc_worklist.append(ptn->ideal_node());
   1.289 +      }
   1.290 +    }
   1.291 +  }
   1.292 +
   1.293 +#ifdef ASSERT
   1.294 +  if (VerifyConnectionGraph) {
   1.295 +    // Verify that graph is complete - no new edges could be added or needed.
   1.296 +    verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
   1.297 +                            java_objects_worklist, addp_worklist);
   1.298 +  }
   1.299 +  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
   1.300 +  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
   1.301 +         null_obj->edge_count() == 0 &&
   1.302 +         !null_obj->arraycopy_src() &&
   1.303 +         !null_obj->arraycopy_dst(), "sanity");
   1.304 +#endif
   1.305 +
   1.306 +  _collecting = false;
   1.307 +
   1.308 +  } // TracePhase t3("connectionGraph")
   1.309 +
   1.310 +  // 4. Optimize ideal graph based on EA information.
   1.311 +  bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
   1.312 +  if (has_non_escaping_obj) {
   1.313 +    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
   1.314 +  }
   1.315 +
   1.316 +#ifndef PRODUCT
   1.317 +  if (PrintEscapeAnalysis) {
   1.318 +    dump(ptnodes_worklist); // Dump ConnectionGraph
   1.319 +  }
   1.320 +#endif
   1.321 +
   1.322 +  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
   1.323 +#ifdef ASSERT
   1.324 +  if (VerifyConnectionGraph) {
   1.325 +    int alloc_length = alloc_worklist.length();
   1.326 +    for (int next = 0; next < alloc_length; ++next) {
   1.327 +      Node* n = alloc_worklist.at(next);
   1.328 +      PointsToNode* ptn = ptnode_adr(n->_idx);
   1.329 +      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
   1.330 +    }
   1.331 +  }
   1.332 +#endif
   1.333 +
    1.334 +  // 5. Separate memory graph for scalar replaceable allocations.
   1.335 +  if (has_scalar_replaceable_candidates &&
   1.336 +      C->AliasLevel() >= 3 && EliminateAllocations) {
   1.337 +    // Now use the escape information to create unique types for
   1.338 +    // scalar replaceable objects.
   1.339 +    split_unique_types(alloc_worklist);
   1.340 +    if (C->failing())  return false;
   1.341 +    C->print_method("After Escape Analysis", 2);
   1.342 +
   1.343 +#ifdef ASSERT
   1.344 +  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
   1.345 +    tty->print("=== No allocations eliminated for ");
   1.346 +    C->method()->print_short_name();
   1.347 +    if(!EliminateAllocations) {
   1.348 +      tty->print(" since EliminateAllocations is off ===");
   1.349 +    } else if(!has_scalar_replaceable_candidates) {
   1.350 +      tty->print(" since there are no scalar replaceable candidates ===");
   1.351 +    } else if(C->AliasLevel() < 3) {
   1.352 +      tty->print(" since AliasLevel < 3 ===");
   1.353 +    }
   1.354 +    tty->cr();
   1.355 +#endif
   1.356 +  }
   1.357 +  return has_non_escaping_obj;
   1.358 +}
   1.359 +
   1.360 +// Populate Connection Graph with PointsTo nodes and create simple
   1.361 +// connection graph edges.
   1.362 +void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
   1.363 +  assert(!_verify, "this method sould not be called for verification");
   1.364 +  PhaseGVN* igvn = _igvn;
   1.365 +  uint n_idx = n->_idx;
   1.366 +  PointsToNode* n_ptn = ptnode_adr(n_idx);
   1.367 +  if (n_ptn != NULL)
   1.368 +    return; // No need to redefine PointsTo node during first iteration.
   1.369 +
   1.370 +  if (n->is_Call()) {
   1.371 +    // Arguments to allocation and locking don't escape.
   1.372 +    if (n->is_AbstractLock()) {
   1.373 +      // Put Lock and Unlock nodes on IGVN worklist to process them during
   1.374 +      // first IGVN optimization when escape information is still available.
   1.375 +      record_for_optimizer(n);
   1.376 +    } else if (n->is_Allocate()) {
   1.377 +      add_call_node(n->as_Call());
   1.378 +      record_for_optimizer(n);
   1.379 +    } else {
   1.380 +      if (n->is_CallStaticJava()) {
   1.381 +        const char* name = n->as_CallStaticJava()->_name;
   1.382 +        if (name != NULL && strcmp(name, "uncommon_trap") == 0)
   1.383 +          return; // Skip uncommon traps
   1.384 +      }
   1.385 +      // Don't mark as processed since call's arguments have to be processed.
   1.386 +      delayed_worklist->push(n);
   1.387 +      // Check if a call returns an object.
   1.388 +      if (n->as_Call()->returns_pointer() &&
   1.389 +          n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
   1.390 +        add_call_node(n->as_Call());
   1.391 +      }
   1.392 +    }
   1.393 +    return;
   1.394 +  }
   1.395 +  // Put this check here to process call arguments since some call nodes
   1.396 +  // point to phantom_obj.
   1.397 +  if (n_ptn == phantom_obj || n_ptn == null_obj)
   1.398 +    return; // Skip predefined nodes.
   1.399 +
   1.400 +  int opcode = n->Opcode();
   1.401 +  switch (opcode) {
   1.402 +    case Op_AddP: {
   1.403 +      Node* base = get_addp_base(n);
   1.404 +      PointsToNode* ptn_base = ptnode_adr(base->_idx);
   1.405 +      // Field nodes are created for all field types. They are used in
   1.406 +      // adjust_scalar_replaceable_state() and split_unique_types().
   1.407 +      // Note, non-oop fields will have only base edges in Connection
   1.408 +      // Graph because such fields are not used for oop loads and stores.
   1.409 +      int offset = address_offset(n, igvn);
   1.410 +      add_field(n, PointsToNode::NoEscape, offset);
   1.411 +      if (ptn_base == NULL) {
   1.412 +        delayed_worklist->push(n); // Process it later.
   1.413 +      } else {
   1.414 +        n_ptn = ptnode_adr(n_idx);
   1.415 +        add_base(n_ptn->as_Field(), ptn_base);
   1.416 +      }
   1.417 +      break;
   1.418 +    }
   1.419 +    case Op_CastX2P: {
   1.420 +      map_ideal_node(n, phantom_obj);
   1.421 +      break;
   1.422 +    }
   1.423 +    case Op_CastPP:
   1.424 +    case Op_CheckCastPP:
   1.425 +    case Op_EncodeP:
   1.426 +    case Op_DecodeN: {
   1.427 +      add_local_var_and_edge(n, PointsToNode::NoEscape,
   1.428 +                             n->in(1), delayed_worklist);
   1.429 +      break;
   1.430 +    }
   1.431 +    case Op_CMoveP: {
   1.432 +      add_local_var(n, PointsToNode::NoEscape);
   1.433 +      // Do not add edges during first iteration because some could be
   1.434 +      // not defined yet.
   1.435 +      delayed_worklist->push(n);
   1.436 +      break;
   1.437 +    }
   1.438 +    case Op_ConP:
   1.439 +    case Op_ConN: {
   1.440 +      // assume all oop constants globally escape except for null
   1.441 +      PointsToNode::EscapeState es;
   1.442 +      if (igvn->type(n) == TypePtr::NULL_PTR ||
   1.443 +          igvn->type(n) == TypeNarrowOop::NULL_PTR) {
   1.444 +        es = PointsToNode::NoEscape;
   1.445 +      } else {
   1.446 +        es = PointsToNode::GlobalEscape;
   1.447 +      }
   1.448 +      add_java_object(n, es);
   1.449 +      break;
   1.450 +    }
   1.451 +    case Op_CreateEx: {
   1.452 +      // assume that all exception objects globally escape
   1.453 +      add_java_object(n, PointsToNode::GlobalEscape);
   1.454 +      break;
   1.455 +    }
   1.456 +    case Op_LoadKlass:
   1.457 +    case Op_LoadNKlass: {
   1.458 +      // Unknown class is loaded
   1.459 +      map_ideal_node(n, phantom_obj);
   1.460 +      break;
   1.461 +    }
   1.462 +    case Op_LoadP:
   1.463 +    case Op_LoadN:
   1.464 +    case Op_LoadPLocked: {
   1.465 +      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
    1.466 +      // ThreadLocal has RawPtr type.
   1.467 +      const Type* t = igvn->type(n);
   1.468 +      if (t->make_ptr() != NULL) {
   1.469 +        Node* adr = n->in(MemNode::Address);
   1.470 +#ifdef ASSERT
   1.471 +        if (!adr->is_AddP()) {
   1.472 +          assert(igvn->type(adr)->isa_rawptr(), "sanity");
   1.473 +        } else {
   1.474 +          assert((ptnode_adr(adr->_idx) == NULL ||
   1.475 +                  ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
   1.476 +        }
   1.477 +#endif
   1.478 +        add_local_var_and_edge(n, PointsToNode::NoEscape,
   1.479 +                               adr, delayed_worklist);
   1.480 +      }
   1.481 +      break;
   1.482 +    }
   1.483 +    case Op_Parm: {
   1.484 +      map_ideal_node(n, phantom_obj);
   1.485 +      break;
   1.486 +    }
   1.487 +    case Op_PartialSubtypeCheck: {
    1.488 +      // Produces Null or notNull and is used only in CmpP so
   1.489 +      // phantom_obj could be used.
   1.490 +      map_ideal_node(n, phantom_obj); // Result is unknown
   1.491 +      break;
   1.492 +    }
   1.493 +    case Op_Phi: {
   1.494 +      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
    1.495 +      // ThreadLocal has RawPtr type.
   1.496 +      const Type* t = n->as_Phi()->type();
   1.497 +      if (t->make_ptr() != NULL) {
   1.498 +        add_local_var(n, PointsToNode::NoEscape);
   1.499 +        // Do not add edges during first iteration because some could be
   1.500 +        // not defined yet.
   1.501 +        delayed_worklist->push(n);
   1.502 +      }
   1.503 +      break;
   1.504 +    }
   1.505 +    case Op_Proj: {
   1.506 +      // we are only interested in the oop result projection from a call
   1.507 +      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
   1.508 +          n->in(0)->as_Call()->returns_pointer()) {
   1.509 +        add_local_var_and_edge(n, PointsToNode::NoEscape,
   1.510 +                               n->in(0), delayed_worklist);
   1.511 +      }
   1.512 +      break;
   1.513 +    }
   1.514 +    case Op_Rethrow: // Exception object escapes
   1.515 +    case Op_Return: {
   1.516 +      if (n->req() > TypeFunc::Parms &&
   1.517 +          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
   1.518 +        // Treat Return value as LocalVar with GlobalEscape escape state.
   1.519 +        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
   1.520 +                               n->in(TypeFunc::Parms), delayed_worklist);
   1.521 +      }
   1.522 +      break;
   1.523 +    }
   1.524 +    case Op_StoreP:
   1.525 +    case Op_StoreN:
   1.526 +    case Op_StorePConditional:
   1.527 +    case Op_CompareAndSwapP:
   1.528 +    case Op_CompareAndSwapN: {
   1.529 +      Node* adr = n->in(MemNode::Address);
   1.530 +      const Type *adr_type = igvn->type(adr);
   1.531 +      adr_type = adr_type->make_ptr();
   1.532 +      if (adr_type->isa_oopptr() ||
   1.533 +          (opcode == Op_StoreP || opcode == Op_StoreN) &&
   1.534 +                        (adr_type == TypeRawPtr::NOTNULL &&
   1.535 +                         adr->in(AddPNode::Address)->is_Proj() &&
   1.536 +                         adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
   1.537 +        delayed_worklist->push(n); // Process it later.
   1.538 +#ifdef ASSERT
   1.539 +        assert(adr->is_AddP(), "expecting an AddP");
   1.540 +        if (adr_type == TypeRawPtr::NOTNULL) {
   1.541 +          // Verify a raw address for a store captured by Initialize node.
   1.542 +          int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
   1.543 +          assert(offs != Type::OffsetBot, "offset must be a constant");
   1.544 +        }
   1.545 +      } else {
    1.546 +        // Ignore copying the displaced header to the BoxNode (OSR compilation).
   1.547 +        if (adr->is_BoxLock())
   1.548 +          break;
   1.549 +
   1.550 +        if (!adr->is_AddP()) {
   1.551 +          n->dump(1);
   1.552 +          assert(adr->is_AddP(), "expecting an AddP");
   1.553 +        }
   1.554 +        // Ignore G1 barrier's stores.
   1.555 +        if (!UseG1GC || (opcode != Op_StoreP) ||
   1.556 +            (adr_type != TypeRawPtr::BOTTOM)) {
   1.557 +          n->dump(1);
   1.558 +          assert(false, "not G1 barrier raw StoreP");
   1.559 +        }
   1.560 +#endif
   1.561 +      }
   1.562 +      break;
   1.563 +    }
   1.564 +    case Op_AryEq:
   1.565 +    case Op_StrComp:
   1.566 +    case Op_StrEquals:
   1.567 +    case Op_StrIndexOf: {
   1.568 +      add_local_var(n, PointsToNode::ArgEscape);
   1.569 +      delayed_worklist->push(n); // Process it later.
   1.570 +      break;
   1.571 +    }
   1.572 +    case Op_ThreadLocal: {
   1.573 +      add_java_object(n, PointsToNode::ArgEscape);
   1.574 +      break;
   1.575 +    }
   1.576 +    default:
   1.577 +      ; // Do nothing for nodes not related to EA.
   1.578 +  }
   1.579 +  return;
   1.580 +}
   1.581 +
   1.582 +#ifdef ASSERT
   1.583 +#define ELSE_FAIL(name)                               \
   1.584 +      /* Should not be called for not pointer type. */  \
   1.585 +      n->dump(1);                                       \
   1.586 +      assert(false, name);                              \
   1.587 +      break;
   1.588 +#else
   1.589 +#define ELSE_FAIL(name) \
   1.590 +      break;
   1.591 +#endif
   1.592 +
   1.593 +// Add final simple edges to graph.
   1.594 +void ConnectionGraph::add_final_edges(Node *n) {
   1.595 +  PointsToNode* n_ptn = ptnode_adr(n->_idx);
   1.596 +#ifdef ASSERT
   1.597 +  if (_verify && n_ptn->is_JavaObject())
   1.598 +    return; // This method does not change graph for JavaObject.
   1.599 +#endif
   1.600 +
   1.601 +  if (n->is_Call()) {
   1.602 +    process_call_arguments(n->as_Call());
   1.603 +    return;
   1.604 +  }
   1.605 +  assert(n->is_Store() || n->is_LoadStore() ||
   1.606 +         (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
   1.607 +         "node should be registered already");
   1.608 +  int opcode = n->Opcode();
   1.609 +  switch (opcode) {
   1.610 +    case Op_AddP: {
   1.611 +      Node* base = get_addp_base(n);
   1.612 +      PointsToNode* ptn_base = ptnode_adr(base->_idx);
   1.613 +      assert(ptn_base != NULL, "field's base should be registered");
   1.614 +      add_base(n_ptn->as_Field(), ptn_base);
   1.615 +      break;
   1.616 +    }
   1.617 +    case Op_CastPP:
   1.618 +    case Op_CheckCastPP:
   1.619 +    case Op_EncodeP:
   1.620 +    case Op_DecodeN: {
   1.621 +      add_local_var_and_edge(n, PointsToNode::NoEscape,
   1.622 +                             n->in(1), NULL);
   1.623 +      break;
   1.624 +    }
   1.625 +    case Op_CMoveP: {
   1.626 +      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
   1.627 +        Node* in = n->in(i);
   1.628 +        if (in == NULL)
   1.629 +          continue;  // ignore NULL
   1.630 +        Node* uncast_in = in->uncast();
   1.631 +        if (uncast_in->is_top() || uncast_in == n)
   1.632 +          continue;  // ignore top or inputs which go back this node
   1.633 +        PointsToNode* ptn = ptnode_adr(in->_idx);
   1.634 +        assert(ptn != NULL, "node should be registered");
   1.635 +        add_edge(n_ptn, ptn);
   1.636 +      }
   1.637 +      break;
   1.638 +    }
   1.639 +    case Op_LoadP:
   1.640 +    case Op_LoadN:
   1.641 +    case Op_LoadPLocked: {
   1.642 +      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
    1.643 +      // ThreadLocal has RawPtr type.
   1.644 +      const Type* t = _igvn->type(n);
   1.645 +      if (t->make_ptr() != NULL) {
   1.646 +        Node* adr = n->in(MemNode::Address);
   1.647 +        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
   1.648 +        break;
   1.649 +      }
   1.650 +      ELSE_FAIL("Op_LoadP");
   1.651 +    }
   1.652 +    case Op_Phi: {
   1.653 +      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
    1.654 +      // ThreadLocal has RawPtr type.
   1.655 +      const Type* t = n->as_Phi()->type();
   1.656 +      if (t->make_ptr() != NULL) {
   1.657 +        for (uint i = 1; i < n->req(); i++) {
   1.658 +          Node* in = n->in(i);
   1.659 +          if (in == NULL)
   1.660 +            continue;  // ignore NULL
   1.661 +          Node* uncast_in = in->uncast();
   1.662 +          if (uncast_in->is_top() || uncast_in == n)
   1.663 +            continue;  // ignore top or inputs which go back this node
   1.664 +          PointsToNode* ptn = ptnode_adr(in->_idx);
   1.665 +          assert(ptn != NULL, "node should be registered");
   1.666 +          add_edge(n_ptn, ptn);
   1.667 +        }
   1.668 +        break;
   1.669 +      }
   1.670 +      ELSE_FAIL("Op_Phi");
   1.671 +    }
   1.672 +    case Op_Proj: {
   1.673 +      // we are only interested in the oop result projection from a call
   1.674 +      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
   1.675 +          n->in(0)->as_Call()->returns_pointer()) {
   1.676 +        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
   1.677 +        break;
   1.678 +      }
   1.679 +      ELSE_FAIL("Op_Proj");
   1.680 +    }
   1.681 +    case Op_Rethrow: // Exception object escapes
   1.682 +    case Op_Return: {
   1.683 +      if (n->req() > TypeFunc::Parms &&
   1.684 +          _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
   1.685 +        // Treat Return value as LocalVar with GlobalEscape escape state.
   1.686 +        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
   1.687 +                               n->in(TypeFunc::Parms), NULL);
   1.688 +        break;
   1.689 +      }
   1.690 +      ELSE_FAIL("Op_Return");
   1.691 +    }
   1.692 +    case Op_StoreP:
   1.693 +    case Op_StoreN:
   1.694 +    case Op_StorePConditional:
   1.695 +    case Op_CompareAndSwapP:
   1.696 +    case Op_CompareAndSwapN: {
   1.697 +      Node* adr = n->in(MemNode::Address);
   1.698 +      const Type *adr_type = _igvn->type(adr);
   1.699 +      adr_type = adr_type->make_ptr();
   1.700 +      if (adr_type->isa_oopptr() ||
   1.701 +          (opcode == Op_StoreP || opcode == Op_StoreN) &&
   1.702 +                        (adr_type == TypeRawPtr::NOTNULL &&
   1.703 +                         adr->in(AddPNode::Address)->is_Proj() &&
   1.704 +                         adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
   1.705 +        // Point Address to Value
   1.706 +        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
   1.707 +        assert(adr_ptn != NULL &&
   1.708 +               adr_ptn->as_Field()->is_oop(), "node should be registered");
   1.709 +        Node *val = n->in(MemNode::ValueIn);
   1.710 +        PointsToNode* ptn = ptnode_adr(val->_idx);
   1.711 +        assert(ptn != NULL, "node should be registered");
   1.712 +        add_edge(adr_ptn, ptn);
   1.713 +        break;
   1.714 +      }
   1.715 +      ELSE_FAIL("Op_StoreP");
   1.716 +    }
   1.717 +    case Op_AryEq:
   1.718 +    case Op_StrComp:
   1.719 +    case Op_StrEquals:
   1.720 +    case Op_StrIndexOf: {
   1.721 +      // char[] arrays passed to string intrinsic do not escape but
   1.722 +      // they are not scalar replaceable. Adjust escape state for them.
   1.723 +      // Start from in(2) edge since in(1) is memory edge.
   1.724 +      for (uint i = 2; i < n->req(); i++) {
   1.725 +        Node* adr = n->in(i);
   1.726 +        const Type* at = _igvn->type(adr);
   1.727 +        if (!adr->is_top() && at->isa_ptr()) {
   1.728 +          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
   1.729 +                 at->isa_ptr() != NULL, "expecting a pointer");
   1.730 +          if (adr->is_AddP()) {
   1.731 +            adr = get_addp_base(adr);
   1.732 +          }
   1.733 +          PointsToNode* ptn = ptnode_adr(adr->_idx);
   1.734 +          assert(ptn != NULL, "node should be registered");
   1.735 +          add_edge(n_ptn, ptn);
   1.736 +        }
   1.737 +      }
   1.738 +      break;
   1.739 +    }
   1.740 +    default: {
   1.741 +      // This method should be called only for EA specific nodes which may
   1.742 +      // miss some edges when they were created.
   1.743 +#ifdef ASSERT
   1.744 +      n->dump(1);
   1.745 +#endif
   1.746 +      guarantee(false, "unknown node");
   1.747 +    }
   1.748 +  }
   1.749 +  return;
   1.750 +}
   1.751 +
// Create a points-to graph node for a call which returns an oop and
// record the escape state (and scalar-replaceability) of the returned value.
void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer(), "only for call which returns pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    // New object allocation: escape state is decided by the allocated klass.
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != NULL, "TypeKlassPtr  required.");
    ciKlass* cik = kt->klass();
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    if (call->is_AllocateArray()) {
      if (!cik->is_array_klass()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is not constant or too big.
          scalar_replaceable = false;
        }
      }
    } else {  // Allocate instance
      // Thread subclasses, non-instance klasses (StressReflectiveCode) and
      // objects with finalizers are conservatively treated as escaping.
      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
         !cik->is_instance_klass() || // StressReflectiveCode
          cik->as_instance_klass()->has_finalizer()) {
        es = PointsToNode::GlobalEscape;
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      ptn->set_scalar_replaceable(false);
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes could be different types:
    //
    // 1. CallDynamicJavaNode (what happened during call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //    - mapped to NoEscape JavaObject node if non-escaping object allocated
    //      during call is returned;
    //    - mapped to ArgEscape LocalVar node pointed to object arguments
    //      which are returned and does not escape during call;
    //
    //    - oop arguments escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes.
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == NULL) {
      // Runtime stub (no Java method), e.g. multianewarray.
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
      // Returns a newly allocated unescaped object.
      add_java_object(call, PointsToNode::NoEscape);
      ptnode_adr(call_idx)->set_scalar_replaceable(false);
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated unescaped object, simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
        add_java_object(call, PointsToNode::NoEscape);
        ptnode_adr(call_idx)->set_scalar_replaceable(false);
      } else {
        // Determine whether any arguments are returned.
        const TypeTuple* d = call->tf()->domain();
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          if (d->field_at(i)->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
            ret_arg = true;
            break;
          }
        }
        if (ret_arg) {
          // The result may be one of the arguments: model it as a LocalVar;
          // edges to the actual arguments are added in process_call_arguments().
          add_local_var(call, PointsToNode::ArgEscape);
        } else {
          // Returns unknown object.
          map_ideal_node(call, phantom_obj);
        }
      }
    }
  } else {
    // An other type of call, assume the worst case:
    // returned value is unknown and globally escapes.
    assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
    map_ideal_node(call, phantom_obj);
  }
}
   1.850 +
   1.851 +void ConnectionGraph::process_call_arguments(CallNode *call) {
   1.852 +    bool is_arraycopy = false;
   1.853 +    switch (call->Opcode()) {
   1.854 +#ifdef ASSERT
   1.855 +    case Op_Allocate:
   1.856 +    case Op_AllocateArray:
   1.857 +    case Op_Lock:
   1.858 +    case Op_Unlock:
   1.859 +      assert(false, "should be done already");
   1.860 +      break;
   1.861 +#endif
   1.862 +    case Op_CallLeafNoFP:
   1.863 +      is_arraycopy = (call->as_CallLeaf()->_name != NULL &&
   1.864 +                      strstr(call->as_CallLeaf()->_name, "arraycopy") != 0);
   1.865 +      // fall through
   1.866 +    case Op_CallLeaf: {
   1.867 +      // Stub calls, objects do not escape but they are not scale replaceable.
   1.868 +      // Adjust escape state for outgoing arguments.
   1.869 +      const TypeTuple * d = call->tf()->domain();
   1.870 +      bool src_has_oops = false;
   1.871 +      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
   1.872 +        const Type* at = d->field_at(i);
   1.873 +        Node *arg = call->in(i);
   1.874 +        const Type *aat = _igvn->type(arg);
   1.875 +        if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
   1.876 +          continue;
   1.877 +        if (arg->is_AddP()) {
   1.878 +          //
   1.879 +          // The inline_native_clone() case when the arraycopy stub is called
   1.880 +          // after the allocation before Initialize and CheckCastPP nodes.
   1.881 +          // Or normal arraycopy for object arrays case.
   1.882 +          //
   1.883 +          // Set AddP's base (Allocate) as not scalar replaceable since
   1.884 +          // pointer to the base (with offset) is passed as argument.
   1.885 +          //
   1.886 +          arg = get_addp_base(arg);
   1.887 +        }
   1.888 +        PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
   1.889 +        assert(arg_ptn != NULL, "should be registered");
   1.890 +        PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
   1.891 +        if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
   1.892 +          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
   1.893 +                 aat->isa_ptr() != NULL, "expecting an Ptr");
   1.894 +          bool arg_has_oops = aat->isa_oopptr() &&
   1.895 +                              (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
   1.896 +                               (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
   1.897 +          if (i == TypeFunc::Parms) {
   1.898 +            src_has_oops = arg_has_oops;
   1.899 +          }
   1.900 +          //
   1.901 +          // src or dst could be j.l.Object when other is basic type array:
   1.902 +          //
   1.903 +          //   arraycopy(char[],0,Object*,0,size);
   1.904 +          //   arraycopy(Object*,0,char[],0,size);
   1.905 +          //
   1.906 +          // Don't add edges in such cases.
   1.907 +          //
   1.908 +          bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
   1.909 +                                       arg_has_oops && (i > TypeFunc::Parms);
   1.910 +#ifdef ASSERT
   1.911 +          if (!(is_arraycopy ||
   1.912 +                call->as_CallLeaf()->_name != NULL &&
   1.913 +                (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre")  == 0 ||
   1.914 +                 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ))
   1.915 +          ) {
   1.916 +            call->dump();
   1.917 +            assert(false, "EA: unexpected CallLeaf");
   1.918 +          }
   1.919 +#endif
   1.920 +          // Always process arraycopy's destination object since
   1.921 +          // we need to add all possible edges to references in
   1.922 +          // source object.
   1.923 +          if (arg_esc >= PointsToNode::ArgEscape &&
   1.924 +              !arg_is_arraycopy_dest) {
   1.925 +            continue;
   1.926 +          }
   1.927 +          set_escape_state(arg_ptn, PointsToNode::ArgEscape);
   1.928 +          if (arg_is_arraycopy_dest) {
   1.929 +            Node* src = call->in(TypeFunc::Parms);
   1.930 +            if (src->is_AddP()) {
   1.931 +              src = get_addp_base(src);
   1.932 +            }
   1.933 +            PointsToNode* src_ptn = ptnode_adr(src->_idx);
   1.934 +            assert(src_ptn != NULL, "should be registered");
   1.935 +            if (arg_ptn != src_ptn) {
   1.936 +              // Special arraycopy edge:
   1.937 +              // A destination object's field can't have the source object
   1.938 +              // as base since objects escape states are not related.
   1.939 +              // Only escape state of destination object's fields affects
   1.940 +              // escape state of fields in source object.
   1.941 +              add_arraycopy(call, PointsToNode::ArgEscape, src_ptn, arg_ptn);
   1.942 +            }
   1.943 +          }
   1.944 +        }
   1.945 +      }
   1.946 +      break;
   1.947 +    }
   1.948 +    case Op_CallStaticJava: {
   1.949 +      // For a static call, we know exactly what method is being called.
   1.950 +      // Use bytecode estimator to record the call's escape affects
   1.951 +#ifdef ASSERT
   1.952 +      const char* name = call->as_CallStaticJava()->_name;
   1.953 +      assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
   1.954 +#endif
   1.955 +      ciMethod* meth = call->as_CallJava()->method();
   1.956 +      BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
   1.957 +      // fall-through if not a Java method or no analyzer information
   1.958 +      if (call_analyzer != NULL) {
   1.959 +        PointsToNode* call_ptn = ptnode_adr(call->_idx);
   1.960 +        const TypeTuple* d = call->tf()->domain();
   1.961 +        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
   1.962 +          const Type* at = d->field_at(i);
   1.963 +          int k = i - TypeFunc::Parms;
   1.964 +          Node* arg = call->in(i);
   1.965 +          PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
   1.966 +          if (at->isa_ptr() != NULL &&
   1.967 +              call_analyzer->is_arg_returned(k)) {
   1.968 +            // The call returns arguments.
   1.969 +            if (call_ptn != NULL) { // Is call's result used?
   1.970 +              assert(call_ptn->is_LocalVar(), "node should be registered");
   1.971 +              assert(arg_ptn != NULL, "node should be registered");
   1.972 +              add_edge(call_ptn, arg_ptn);
   1.973 +            }
   1.974 +          }
   1.975 +          if (at->isa_oopptr() != NULL &&
   1.976 +              arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
   1.977 +            if (!call_analyzer->is_arg_stack(k)) {
   1.978 +              // The argument global escapes
   1.979 +              set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
   1.980 +            } else {
   1.981 +              set_escape_state(arg_ptn, PointsToNode::ArgEscape);
   1.982 +              if (!call_analyzer->is_arg_local(k)) {
   1.983 +                // The argument itself doesn't escape, but any fields might
   1.984 +                set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
   1.985 +              }
   1.986 +            }
   1.987 +          }
   1.988 +        }
   1.989 +        if (call_ptn != NULL && call_ptn->is_LocalVar()) {
   1.990 +          // The call returns arguments.
   1.991 +          assert(call_ptn->edge_count() > 0, "sanity");
   1.992 +          if (!call_analyzer->is_return_local()) {
   1.993 +            // Returns also unknown object.
   1.994 +            add_edge(call_ptn, phantom_obj);
   1.995 +          }
   1.996 +        }
   1.997 +        break;
   1.998 +      }
   1.999 +    }
  1.1000 +    default: {
  1.1001 +      // Fall-through here if not a Java method or no analyzer information
  1.1002 +      // or some other type of call, assume the worst case: all arguments
  1.1003 +      // globally escape.
  1.1004 +      const TypeTuple* d = call->tf()->domain();
  1.1005 +      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  1.1006 +        const Type* at = d->field_at(i);
  1.1007 +        if (at->isa_oopptr() != NULL) {
  1.1008 +          Node* arg = call->in(i);
  1.1009 +          if (arg->is_AddP()) {
  1.1010 +            arg = get_addp_base(arg);
  1.1011 +          }
  1.1012 +          assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
  1.1013 +          set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
  1.1014 +        }
  1.1015 +      }
  1.1016 +    }
  1.1017 +  }
  1.1018 +}
  1.1019 +
  1.1020 +
// Finish Graph construction: iterate adding reference edges from JavaObject
// nodes until a fixed point is reached, bailing out if the iteration or time
// limits are exceeded. Returns false when escape analysis has nothing to do
// (no non-escaping objects remain) or when the build was bailed out.
bool ConnectionGraph::complete_connection_graph(
                         GrowableArray<PointsToNode*>&   ptnodes_worklist,
                         GrowableArray<JavaObjectNode*>& non_escaped_worklist,
                         GrowableArray<JavaObjectNode*>& java_objects_worklist,
                         GrowableArray<FieldNode*>&      oop_fields_worklist) {
  // Normally only 1-3 passes needed to build Connection Graph depending
  // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
  // Set limit to 20 to catch situation when something did go wrong and
  // bailout Escape Analysis.
  // Also limit build time to 30 sec (60 in debug VM).
#define CG_BUILD_ITER_LIMIT 20
#ifdef ASSERT
#define CG_BUILD_TIME_LIMIT 60.0
#else
#define CG_BUILD_TIME_LIMIT 30.0
#endif

  // Propagate GlobalEscape and ArgEscape escape states and check that
  // we still have non-escaping objects. The method pushes on _worklist
  // Field nodes which reference phantom_object.
  if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
    return false; // Nothing to do.
  }
  // Now propagate references to all JavaObject nodes.
  int java_objects_length = java_objects_worklist.length();
  elapsedTimer time;
  int new_edges = 1;
  int iterations = 0;
  do {
    // Inner loop: keep adding edges until no new edges appear or a
    // limit (iterations or wall time) is hit.
    while ((new_edges > 0) &&
          (iterations++   < CG_BUILD_ITER_LIMIT) &&
          (time.seconds() < CG_BUILD_TIME_LIMIT)) {
      time.start();
      new_edges = 0;
      // Propagate references to phantom_object for nodes pushed on _worklist
      // by find_non_escaped_objects() and find_field_value().
      new_edges += add_java_object_edges(phantom_obj, false);
      for (int next = 0; next < java_objects_length; ++next) {
        JavaObjectNode* ptn = java_objects_worklist.at(next);
        new_edges += add_java_object_edges(ptn, true);
      }
      if (new_edges > 0) {
        // Update escape states on each iteration if graph was updated.
        if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
          return false; // Nothing to do.
        }
      }
      time.stop();
    }
    if ((iterations     < CG_BUILD_ITER_LIMIT) &&
        (time.seconds() < CG_BUILD_TIME_LIMIT)) {
      time.start();
      // Find fields which have unknown value.
      int fields_length = oop_fields_worklist.length();
      for (int next = 0; next < fields_length; next++) {
        FieldNode* field = oop_fields_worklist.at(next);
        if (field->edge_count() == 0) {
          new_edges += find_field_value(field);
          // This code may add new edges to phantom_object.
          // Need another cycle to propagate references to phantom_object.
        }
      }
      time.stop();
    } else {
      new_edges = 0; // Bailout
    }
  } while (new_edges > 0);

  // Bailout if passed limits.
  if ((iterations     >= CG_BUILD_ITER_LIMIT) ||
      (time.seconds() >= CG_BUILD_TIME_LIMIT)) {
    Compile* C = _compile;
    if (C->log() != NULL) {
      C->log()->begin_elem("connectionGraph_bailout reason='reached ");
      C->log()->text("%s", (iterations >= CG_BUILD_ITER_LIMIT) ? "iterations" : "time");
      C->log()->end_elem(" limit'");
    }
    assert(false, err_msg("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
           time.seconds(), iterations, nodes_size(), ptnodes_worklist.length()));
    // Possible infinite build_connection_graph loop,
    // bailout (no changes to ideal graph were made).
    return false;
  }
#ifdef ASSERT
  if (Verbose && PrintEscapeAnalysis) {
    tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d",
                  iterations, nodes_size(), ptnodes_worklist.length());
  }
#endif

#undef CG_BUILD_ITER_LIMIT
#undef CG_BUILD_TIME_LIMIT

  // Find fields initialized by NULL for non-escaping Allocations.
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    PointsToNode::EscapeState es = ptn->escape_state();
    assert(es <= PointsToNode::ArgEscape, "sanity");
    if (es == PointsToNode::NoEscape) {
      if (find_init_values(ptn, null_obj, _igvn) > 0) {
        // Adding references to NULL object does not change escape states
        // since it does not escape. Also no fields are added to NULL object.
        add_java_object_edges(null_obj, false);
      }
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      // The object allocated by this Allocate node will never be
      // seen by an other thread. Mark it so that when it is
      // expanded no MemBarStoreStore is added.
      InitializeNode* ini = n->as_Allocate()->initialization();
      if (ini != NULL)
        ini->set_does_not_escape();
    }
  }
  return true; // Finished graph construction.
}
  1.1140 +
  1.1141 +// Propagate GlobalEscape and ArgEscape escape states to all nodes
  1.1142 +// and check that we still have non-escaping java objects.
  1.1143 +bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
  1.1144 +                                               GrowableArray<JavaObjectNode*>& non_escaped_worklist) {
  1.1145 +  GrowableArray<PointsToNode*> escape_worklist;
  1.1146 +  // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
  1.1147 +  int ptnodes_length = ptnodes_worklist.length();
  1.1148 +  for (int next = 0; next < ptnodes_length; ++next) {
  1.1149 +    PointsToNode* ptn = ptnodes_worklist.at(next);
  1.1150 +    if (ptn->escape_state() >= PointsToNode::ArgEscape ||
  1.1151 +        ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
  1.1152 +      escape_worklist.push(ptn);
  1.1153 +    }
  1.1154 +  }
  1.1155 +  // Set escape states to referenced nodes (edges list).
  1.1156 +  while (escape_worklist.length() > 0) {
  1.1157 +    PointsToNode* ptn = escape_worklist.pop();
  1.1158 +    PointsToNode::EscapeState es  = ptn->escape_state();
  1.1159 +    PointsToNode::EscapeState field_es = ptn->fields_escape_state();
  1.1160 +    if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
  1.1161 +        es >= PointsToNode::ArgEscape) {
  1.1162 +      // GlobalEscape or ArgEscape state of field means it has unknown value.
  1.1163 +      if (add_edge(ptn, phantom_obj)) {
  1.1164 +        // New edge was added
  1.1165 +        add_field_uses_to_worklist(ptn->as_Field());
  1.1166 +      }
  1.1167 +    }
  1.1168 +    for (EdgeIterator i(ptn); i.has_next(); i.next()) {
  1.1169 +      PointsToNode* e = i.get();
  1.1170 +      if (e->is_Arraycopy()) {
  1.1171 +        assert(ptn->arraycopy_dst(), "sanity");
  1.1172 +        // Propagate only fields escape state through arraycopy edge.
  1.1173 +        if (e->fields_escape_state() < field_es) {
  1.1174 +          set_fields_escape_state(e, field_es);
  1.1175 +          escape_worklist.push(e);
  1.1176 +        }
  1.1177 +      } else if (es >= field_es) {
  1.1178 +        // fields_escape_state is also set to 'es' if it is less than 'es'.
  1.1179 +        if (e->escape_state() < es) {
  1.1180 +          set_escape_state(e, es);
  1.1181 +          escape_worklist.push(e);
  1.1182 +        }
  1.1183 +      } else {
  1.1184 +        // Propagate field escape state.
  1.1185 +        bool es_changed = false;
  1.1186 +        if (e->fields_escape_state() < field_es) {
  1.1187 +          set_fields_escape_state(e, field_es);
  1.1188 +          es_changed = true;
  1.1189 +        }
  1.1190 +        if ((e->escape_state() < field_es) &&
  1.1191 +            e->is_Field() && ptn->is_JavaObject() &&
  1.1192 +            e->as_Field()->is_oop()) {
  1.1193 +          // Change escape state of referenced fileds.
  1.1194 +          set_escape_state(e, field_es);
  1.1195 +          es_changed = true;;
  1.1196 +        } else if (e->escape_state() < es) {
  1.1197 +          set_escape_state(e, es);
  1.1198 +          es_changed = true;;
  1.1199 +        }
  1.1200 +        if (es_changed) {
  1.1201 +          escape_worklist.push(e);
  1.1202 +        }
  1.1203 +      }
  1.1204 +    }
  1.1205 +  }
  1.1206 +  // Remove escaped objects from non_escaped list.
  1.1207 +  for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) {
  1.1208 +    JavaObjectNode* ptn = non_escaped_worklist.at(next);
  1.1209 +    if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
  1.1210 +      non_escaped_worklist.delete_at(next);
  1.1211 +    }
  1.1212 +    if (ptn->escape_state() == PointsToNode::NoEscape) {
  1.1213 +      // Find fields in non-escaped allocations which have unknown value.
  1.1214 +      find_init_values(ptn, phantom_obj, NULL);
  1.1215 +    }
  1.1216 +  }
  1.1217 +  return (non_escaped_worklist.length() > 0);
  1.1218 +}
  1.1219 +
// Add all references to JavaObject node by walking over all uses.
// Drains _worklist, adding an edge from each popped node to 'jobj';
// returns the number of new edges created.
int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
  int new_edges = 0;
  if (populate_worklist) {
    // Populate _worklist by uses of jobj's uses.
    for (UseIterator i(jobj); i.has_next(); i.next()) {
      PointsToNode* use = i.get();
      if (use->is_Arraycopy())
        continue;
      add_uses_to_worklist(use);
      if (use->is_Field() && use->as_Field()->is_oop()) {
        // Put on worklist all field's uses (loads) and
        // related field nodes (same base and offset).
        add_field_uses_to_worklist(use->as_Field());
      }
    }
  }
  while(_worklist.length() > 0) {
    PointsToNode* use = _worklist.pop();
    if (PointsToNode::is_base_use(use)) {
      // A tagged "base use" entry encodes a Field node for which jobj
      // is the base object rather than a stored value.
      // Add reference from jobj to field and from field to jobj (field's base).
      use = PointsToNode::get_use_node(use)->as_Field();
      if (add_base(use->as_Field(), jobj)) {
        new_edges++;
      }
      continue;
    }
    assert(!use->is_JavaObject(), "sanity");
    if (use->is_Arraycopy()) {
      if (jobj == null_obj) // NULL object does not have field edges
        continue;
      // Added edge from Arraycopy node to arraycopy's source java object
      if (add_edge(use, jobj)) {
        jobj->set_arraycopy_src();
        new_edges++;
      }
      // and stop here.
      continue;
    }
    if (!add_edge(use, jobj))
      continue; // No new edge added, there was such edge already.
    new_edges++;
    if (use->is_LocalVar()) {
      add_uses_to_worklist(use);
      if (use->arraycopy_dst()) {
        // This LocalVar flows into an arraycopy destination: connect
        // jobj to the Arraycopy nodes as well.
        for (EdgeIterator i(use); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_Arraycopy()) {
            if (jobj == null_obj) // NULL object does not have field edges
              continue;
            // Add edge from arraycopy's destination java object to Arraycopy node.
            if (add_edge(jobj, e)) {
              new_edges++;
              jobj->set_arraycopy_dst();
            }
          }
        }
      }
    } else {
      // Added new edge to stored in field values.
      // Put on worklist all field's uses (loads) and
      // related field nodes (same base and offset).
      add_field_uses_to_worklist(use->as_Field());
    }
  }
  return new_edges;
}
  1.1287 +
  1.1288 +// Put on worklist all related field nodes.
  1.1289 +void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
  1.1290 +  assert(field->is_oop(), "sanity");
  1.1291 +  int offset = field->offset();
  1.1292 +  add_uses_to_worklist(field);
  1.1293 +  // Loop over all bases of this field and push on worklist Field nodes
  1.1294 +  // with the same offset and base (since they may reference the same field).
  1.1295 +  for (BaseIterator i(field); i.has_next(); i.next()) {
  1.1296 +    PointsToNode* base = i.get();
  1.1297 +    add_fields_to_worklist(field, base);
  1.1298 +    // Check if the base was source object of arraycopy and go over arraycopy's
  1.1299 +    // destination objects since values stored to a field of source object are
  1.1300 +    // accessable by uses (loads) of fields of destination objects.
  1.1301 +    if (base->arraycopy_src()) {
  1.1302 +      for (UseIterator j(base); j.has_next(); j.next()) {
  1.1303 +        PointsToNode* arycp = j.get();
  1.1304 +        if (arycp->is_Arraycopy()) {
  1.1305 +          for (UseIterator k(arycp); k.has_next(); k.next()) {
  1.1306 +            PointsToNode* abase = k.get();
  1.1307 +            if (abase->arraycopy_dst() && abase != base) {
  1.1308 +              // Look for the same arracopy reference.
  1.1309 +              add_fields_to_worklist(field, abase);
  1.1310 +            }
  1.1311 +          }
  1.1312 +        }
  1.1313 +      }
  1.1314 +    }
  1.1315 +  }
  1.1316 +}
  1.1317 +
  1.1318 +// Put on worklist all related field nodes.
  1.1319 +void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
  1.1320 +  int offset = field->offset();
  1.1321 +  if (base->is_LocalVar()) {
  1.1322 +    for (UseIterator j(base); j.has_next(); j.next()) {
  1.1323 +      PointsToNode* f = j.get();
  1.1324 +      if (PointsToNode::is_base_use(f)) { // Field
  1.1325 +        f = PointsToNode::get_use_node(f);
  1.1326 +        if (f == field || !f->as_Field()->is_oop())
  1.1327 +          continue;
  1.1328 +        int offs = f->as_Field()->offset();
  1.1329 +        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
  1.1330 +          add_to_worklist(f);
  1.1331 +        }
  1.1332 +      }
  1.1333 +    }
  1.1334 +  } else {
  1.1335 +    assert(base->is_JavaObject(), "sanity");
  1.1336 +    if (// Skip phantom_object since it is only used to indicate that
  1.1337 +        // this field's content globally escapes.
  1.1338 +        (base != phantom_obj) &&
  1.1339 +        // NULL object node does not have fields.
  1.1340 +        (base != null_obj)) {
  1.1341 +      for (EdgeIterator i(base); i.has_next(); i.next()) {
  1.1342 +        PointsToNode* f = i.get();
  1.1343 +        // Skip arraycopy edge since store to destination object field
  1.1344 +        // does not update value in source object field.
  1.1345 +        if (f->is_Arraycopy()) {
  1.1346 +          assert(base->arraycopy_dst(), "sanity");
  1.1347 +          continue;
  1.1348 +        }
  1.1349 +        if (f == field || !f->as_Field()->is_oop())
  1.1350 +          continue;
  1.1351 +        int offs = f->as_Field()->offset();
  1.1352 +        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
  1.1353 +          add_to_worklist(f);
  1.1354 +        }
  1.1355 +      }
  1.1356 +    }
  1.1357 +  }
  1.1358 +}
  1.1359 +
  1.1360 +// Find fields which have unknown value.
  1.1361 +int ConnectionGraph::find_field_value(FieldNode* field) {
  1.1362 +  // Escaped fields should have init value already.
  1.1363 +  assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
  1.1364 +  int new_edges = 0;
  1.1365 +  for (BaseIterator i(field); i.has_next(); i.next()) {
  1.1366 +    PointsToNode* base = i.get();
  1.1367 +    if (base->is_JavaObject()) {
  1.1368 +      // Skip Allocate's fields which will be processed later.
  1.1369 +      if (base->ideal_node()->is_Allocate())
  1.1370 +        return 0;
  1.1371 +      assert(base == null_obj, "only NULL ptr base expected here");
  1.1372 +    }
  1.1373 +  }
  1.1374 +  if (add_edge(field, phantom_obj)) {
  1.1375 +    // New edge was added
  1.1376 +    new_edges++;
  1.1377 +    add_field_uses_to_worklist(field);
  1.1378 +  }
  1.1379 +  return new_edges;
  1.1380 +}
  1.1381 +
// Find fields initializing values for allocations.
// 'pta' is a non-escaped allocation; 'init_val' selects the mode:
// phantom_obj marks fields of runtime/Java-call results as unknown,
// null_obj adds the default NULL value for Allocate's unrecorded fields.
// Returns the number of new edges added to the graph.
int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) {
  assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
  int new_edges = 0;
  Node* alloc = pta->ideal_node();
  if (init_val == phantom_obj) {
    // Do nothing for Allocate nodes since its fields values are "known".
    if (alloc->is_Allocate())
      return 0;
    assert(alloc->as_CallStaticJava(), "sanity");
#ifdef ASSERT
    // Only the _multianewarray runtime stubs are expected among
    // method-less calls here.
    if (alloc->as_CallStaticJava()->method() == NULL) {
      const char* name = alloc->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
    }
#endif
    // Non-escaped allocation returned from Java or runtime call have
    // unknown values in fields.
    for (EdgeIterator i(pta); i.has_next(); i.next()) {
      PointsToNode* ptn = i.get();
      if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        if (add_edge(ptn, phantom_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(ptn->as_Field());
        }
      }
    }
    return new_edges;
  }
  assert(init_val == null_obj, "sanity");
  // Do nothing for Call nodes since its fields values are unknown.
  if (!alloc->is_Allocate())
    return 0;

  InitializeNode* ini = alloc->as_Allocate()->initialization();
  Compile* C = _compile;
  bool visited_bottom_offset = false;
  GrowableArray<int> offsets_worklist;

  // Check if an oop field's initializing value is recorded and add
  // a corresponding NULL to the field's value if it is not recorded.
  // Connection Graph does not record a default initialization by NULL
  // captured by Initialize node.
  //
  for (EdgeIterator i(pta); i.has_next(); i.next()) {
    PointsToNode* ptn = i.get(); // Field (AddP)
    if (!ptn->is_Field() || !ptn->as_Field()->is_oop())
      continue; // Not oop field
    int offset = ptn->as_Field()->offset();
    if (offset == Type::OffsetBot) {
      if (!visited_bottom_offset) {
        // OffsetBot is used to reference array's element,
        // always add reference to NULL to all Field nodes since we don't
        // know which element is referenced.
        if (add_edge(ptn, null_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(ptn->as_Field());
          visited_bottom_offset = true;
        }
      }
    } else {
      // Check only oop fields.
      const Type* adr_type = ptn->ideal_node()->as_AddP()->bottom_type();
      if (adr_type->isa_rawptr()) {
#ifdef ASSERT
        // Raw pointers are used for initializing stores so skip it
        // since it should be recorded already
        Node* base = get_addp_base(ptn->ideal_node());
        assert(adr_type->isa_rawptr() && base->is_Proj() &&
               (base->in(0) == alloc),"unexpected pointer type");
#endif
        continue;
      }
      // Process each field offset only once per allocation.
      if (!offsets_worklist.contains(offset)) {
        offsets_worklist.append(offset);
        Node* value = NULL;
        if (ini != NULL) {
          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
          Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
          if (store != NULL && store->is_Store()) {
            value = store->in(MemNode::ValueIn);
          } else {
            // There could be initializing stores which follow allocation.
            // For example, a volatile field store is not collected
            // by Initialize node.
            //
            // Need to check for dependent loads to separate such stores from
            // stores which follow loads. For now, add initial value NULL so
            // that compare pointers optimization works correctly.
          }
        }
        if (value == NULL) {
          // A field's initializing value was not recorded. Add NULL.
          if (add_edge(ptn, null_obj)) {
            // New edge was added
            new_edges++;
            add_field_uses_to_worklist(ptn->as_Field());
          }
        }
      }
    }
  }
  return new_edges;
}
  1.1488 +
// Adjust scalar_replaceable state after Connection Graph is built.
// 'jobj' is a candidate non-escaping allocation; each numbered rule
// below may clear its scalar_replaceable flag (and that of related
// objects/bases) and return early.
void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
  // Search for non-escaping objects which are not scalar replaceable
  // and mark them to propagate the state to referenced objects.

  // 1. An object is not scalar replaceable if the field into which it is
  // stored has unknown offset (stored into unknown element of an array).
  //
  for (UseIterator i(jobj); i.has_next(); i.next()) {
    PointsToNode* use = i.get();
    assert(!use->is_Arraycopy(), "sanity");
    if (use->is_Field()) {
      FieldNode* field = use->as_Field();
      assert(field->is_oop() && field->scalar_replaceable() &&
             field->fields_escape_state() == PointsToNode::NoEscape, "sanity");
      if (field->offset() == Type::OffsetBot) {
        jobj->set_scalar_replaceable(false);
        return;
      }
    }
    assert(use->is_Field() || use->is_LocalVar(), "sanity");
    // 2. An object is not scalar replaceable if it is merged with other objects.
    for (EdgeIterator j(use); j.has_next(); j.next()) {
      PointsToNode* ptn = j.get();
      if (ptn->is_JavaObject() && ptn != jobj) {
        // Mark all objects.
        jobj->set_scalar_replaceable(false);
         ptn->set_scalar_replaceable(false);
      }
    }
    if (!jobj->scalar_replaceable()) {
      return;
    }
  }

  for (EdgeIterator j(jobj); j.has_next(); j.next()) {
    // Non-escaping object node should point only to field nodes.
    FieldNode* field = j.get()->as_Field();
    int offset = field->as_Field()->offset();

    // 3. An object is not scalar replaceable if it has a field with unknown
    // offset (array's element is accessed in loop).
    if (offset == Type::OffsetBot) {
      jobj->set_scalar_replaceable(false);
      return;
    }
    // 4. Currently an object is not scalar replaceable if a LoadStore node
    // access its field since the field value is unknown after it.
    //
    Node* n = field->ideal_node();
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      if (n->fast_out(i)->is_LoadStore()) {
        jobj->set_scalar_replaceable(false);
        return;
      }
    }

    // 5. Or the address may point to more than one object. This may produce
    // the false positive result (set not scalar replaceable)
    // since the flow-insensitive escape analysis can't separate
    // the case when stores overwrite the field's value from the case
    // when stores happened on different control branches.
    //
    // Note: it will disable scalar replacement in some cases:
    //
    //    Point p[] = new Point[1];
    //    p[0] = new Point(); // Will be not scalar replaced
    //
    // but it will save us from incorrect optimizations in next cases:
    //
    //    Point p[] = new Point[1];
    //    if ( x ) p[0] = new Point(); // Will be not scalar replaced
    //
    if (field->base_count() > 1) {
      for (BaseIterator i(field); i.has_next(); i.next()) {
        PointsToNode* base = i.get();
        // Don't take into account LocalVar nodes which
        // may point to only one object which should be also
        // this field's base by now.
        if (base->is_JavaObject() && base != jobj) {
          // Mark all bases.
          jobj->set_scalar_replaceable(false);
          base->set_scalar_replaceable(false);
        }
      }
    }
  }
}
  1.1577 +
#ifdef ASSERT
// Debug-only check that the Connection Graph reached a fixed point:
// no new edges can be added, escape states are final, and every oop
// field knows its bases and has at least one initializing value.
void ConnectionGraph::verify_connection_graph(
                         GrowableArray<PointsToNode*>&   ptnodes_worklist,
                         GrowableArray<JavaObjectNode*>& non_escaped_worklist,
                         GrowableArray<JavaObjectNode*>& java_objects_worklist,
                         GrowableArray<Node*>& addp_worklist) {
  // Verify that graph is complete - no new edges could be added.
  int java_objects_length = java_objects_worklist.length();
  int non_escaped_length  = non_escaped_worklist.length();
  int new_edges = 0;
  for (int next = 0; next < java_objects_length; ++next) {
    JavaObjectNode* ptn = java_objects_worklist.at(next);
    new_edges += add_java_object_edges(ptn, true);
  }
  assert(new_edges == 0, "graph was not complete");
  // Verify that escape state is final: re-running the propagation must
  // not shrink the non-escaped set or leave work on _worklist.
  int length = non_escaped_worklist.length();
  find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist);
  assert((non_escaped_length == non_escaped_worklist.length()) &&
         (non_escaped_length == length) &&
         (_worklist.length() == 0), "escape state was not final");

  // Verify fields information.
  int addp_length = addp_worklist.length();
  for (int next = 0; next < addp_length; ++next ) {
    Node* n = addp_worklist.at(next);
    FieldNode* field = ptnode_adr(n->_idx)->as_Field();
    if (field->is_oop()) {
      // Verify that field has all bases
      Node* base = get_addp_base(n);
      PointsToNode* ptn = ptnode_adr(base->_idx);
      if (ptn->is_JavaObject()) {
        assert(field->has_base(ptn->as_JavaObject()), "sanity");
      } else {
        assert(ptn->is_LocalVar(), "sanity");
        // LocalVar base: every java object it points to must be
        // registered as a base of this field.
        for (EdgeIterator i(ptn); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_JavaObject()) {
            assert(field->has_base(e->as_JavaObject()), "sanity");
          }
        }
      }
      // Verify that all fields have initializing values.
      if (field->edge_count() == 0) {
        field->dump();
        assert(field->edge_count() > 0, "sanity");
      }
    }
  }
}
#endif
  1.1629 +
// Optimize ideal graph.
// Uses the final escape information to: mark locks on non-escaping
// objects for elimination, fold pointer compares to constants, and
// relax MemBarStoreStore barriers guarding non-escaping allocations.
void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
                                           GrowableArray<Node*>& storestore_worklist) {
  Compile* C = _compile;
  PhaseIterGVN* igvn = _igvn;
  if (EliminateLocks) {
    // Mark locks before changing ideal graph.
    int cnt = C->macro_count();
    for( int i=0; i < cnt; i++ ) {
      Node *n = C->macro_node(i);
      if (n->is_AbstractLock()) { // Lock and Unlock nodes
        AbstractLockNode* alock = n->as_AbstractLock();
        if (!alock->is_non_esc_obj()) {
          if (not_global_escape(alock->obj_node())) {
            assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
            // The lock could be marked eliminated by lock coarsening
            // code during first IGVN before EA. Replace coarsened flag
            // to eliminate all associated locks/unlocks.
            alock->set_non_esc_obj();
          }
        }
      }
    }
  }

  if (OptimizePtrCompare) {
    // Add ConI(#CC_GT) and ConI(#CC_EQ).
    _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
    _pcmp_eq = igvn->makecon(TypeInt::CC_EQ);
    // Optimize objects compare.
    while (ptr_cmp_worklist.length() != 0) {
      Node *n = ptr_cmp_worklist.pop();
      Node *res = optimize_ptr_compare(n);
      if (res != NULL) {
#ifndef PRODUCT
        if (PrintOptimizePtrCompare) {
          tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
          if (Verbose) {
            n->dump(1);
          }
        }
#endif
        igvn->replace_node(n, res);
      }
    }
    // cleanup: drop the result constants if no compare was folded to them.
    if (_pcmp_neq->outcnt() == 0)
      igvn->hash_delete(_pcmp_neq);
    if (_pcmp_eq->outcnt()  == 0)
      igvn->hash_delete(_pcmp_eq);
  }

  // For MemBarStoreStore nodes added in library_call.cpp, check
  // escape status of associated AllocateNode and optimize out
  // MemBarStoreStore node if the allocated object never escapes.
  while (storestore_worklist.length() != 0) {
    Node *n = storestore_worklist.pop();
    MemBarStoreStoreNode *storestore = n ->as_MemBarStoreStore();
    Node *alloc = storestore->in(MemBarNode::Precedent)->in(0);
    assert (alloc->is_Allocate(), "storestore should point to AllocateNode");
    if (not_global_escape(alloc)) {
      // Replace the barrier with a plain CPU-order membar keeping the
      // same control and memory inputs.
      MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
      mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
      mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
      igvn->register_new_node_with_optimizer(mb);
      igvn->replace_node(storestore, mb);
    }
  }
}
  1.1699 +
  1.1700 +// Optimize objects compare.
  1.1701 +Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
  1.1702 +  assert(OptimizePtrCompare, "sanity");
  1.1703 +  PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
  1.1704 +  PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
  1.1705 +  JavaObjectNode* jobj1 = unique_java_object(n->in(1));
  1.1706 +  JavaObjectNode* jobj2 = unique_java_object(n->in(2));
  1.1707 +  assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
  1.1708 +  assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
  1.1709 +
  1.1710 +  // Check simple cases first.
  1.1711 +  if (jobj1 != NULL) {
  1.1712 +    if (jobj1->escape_state() == PointsToNode::NoEscape) {
  1.1713 +      if (jobj1 == jobj2) {
  1.1714 +        // Comparing the same not escaping object.
  1.1715 +        return _pcmp_eq;
  1.1716 +      }
  1.1717 +      Node* obj = jobj1->ideal_node();
  1.1718 +      // Comparing not escaping allocation.
  1.1719 +      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
  1.1720 +          !ptn2->points_to(jobj1)) {
  1.1721 +        return _pcmp_neq; // This includes nullness check.
  1.1722 +      }
  1.1723 +    }
  1.1724 +  }
  1.1725 +  if (jobj2 != NULL) {
  1.1726 +    if (jobj2->escape_state() == PointsToNode::NoEscape) {
  1.1727 +      Node* obj = jobj2->ideal_node();
  1.1728 +      // Comparing not escaping allocation.
  1.1729 +      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
  1.1730 +          !ptn1->points_to(jobj2)) {
  1.1731 +        return _pcmp_neq; // This includes nullness check.
  1.1732 +      }
  1.1733 +    }
  1.1734 +  }
  1.1735 +  if (jobj1 != NULL && jobj1 != phantom_obj &&
  1.1736 +      jobj2 != NULL && jobj2 != phantom_obj &&
  1.1737 +      jobj1->ideal_node()->is_Con() &&
  1.1738 +      jobj2->ideal_node()->is_Con()) {
  1.1739 +    // Klass or String constants compare. Need to be careful with
  1.1740 +    // compressed pointers - compare types of ConN and ConP instead of nodes.
  1.1741 +    const Type* t1 = jobj1->ideal_node()->bottom_type()->make_ptr();
  1.1742 +    const Type* t2 = jobj2->ideal_node()->bottom_type()->make_ptr();
  1.1743 +    assert(t1 != NULL && t2 != NULL, "sanity");
  1.1744 +    if (t1->make_ptr() == t2->make_ptr()) {
  1.1745 +      return _pcmp_eq;
  1.1746 +    } else {
  1.1747 +      return _pcmp_neq;
  1.1748 +    }
  1.1749 +  }
  1.1750 +  if (ptn1->meet(ptn2)) {
  1.1751 +    return NULL; // Sets are not disjoint
  1.1752 +  }
  1.1753 +
  1.1754 +  // Sets are disjoint.
  1.1755 +  bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
  1.1756 +  bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
  1.1757 +  bool set1_has_null_ptr    = ptn1->points_to(null_obj);
  1.1758 +  bool set2_has_null_ptr    = ptn2->points_to(null_obj);
  1.1759 +  if (set1_has_unknown_ptr && set2_has_null_ptr ||
  1.1760 +      set2_has_unknown_ptr && set1_has_null_ptr) {
  1.1761 +    // Check nullness of unknown object.
  1.1762 +    return NULL;
  1.1763 +  }
  1.1764 +
  1.1765 +  // Disjointness by itself is not sufficient since
  1.1766 +  // alias analysis is not complete for escaped objects.
  1.1767 +  // Disjoint sets are definitely unrelated only when
  1.1768 +  // at least one set has only not escaping allocations.
  1.1769 +  if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
  1.1770 +    if (ptn1->non_escaping_allocation()) {
  1.1771 +      return _pcmp_neq;
  1.1772 +    }
  1.1773 +  }
  1.1774 +  if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
  1.1775 +    if (ptn2->non_escaping_allocation()) {
  1.1776 +      return _pcmp_neq;
  1.1777 +    }
  1.1778 +  }
  1.1779 +  return NULL;
  1.1780 +}
  1.1781 +
  1.1782 +// Connection Graph constuction functions.
  1.1783 +
  1.1784 +void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
  1.1785 +  PointsToNode* ptadr = _nodes.at(n->_idx);
  1.1786 +  if (ptadr != NULL) {
  1.1787 +    assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
  1.1788 +    return;
  1.1789 +  }
  1.1790 +  Compile* C = _compile;
  1.1791 +  ptadr = new (C->comp_arena()) LocalVarNode(C, n, es);
  1.1792 +  _nodes.at_put(n->_idx, ptadr);
  1.1793 +}
  1.1794 +
  1.1795 +void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
  1.1796 +  PointsToNode* ptadr = _nodes.at(n->_idx);
  1.1797 +  if (ptadr != NULL) {
  1.1798 +    assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
  1.1799 +    return;
  1.1800 +  }
  1.1801 +  Compile* C = _compile;
  1.1802 +  ptadr = new (C->comp_arena()) JavaObjectNode(C, n, es);
  1.1803 +  _nodes.at_put(n->_idx, ptadr);
  1.1804 +}
  1.1805 +
  1.1806 +void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
  1.1807 +  PointsToNode* ptadr = _nodes.at(n->_idx);
  1.1808 +  if (ptadr != NULL) {
  1.1809 +    assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
  1.1810 +    return;
  1.1811 +  }
  1.1812 +  Compile* C = _compile;
  1.1813 +  bool is_oop = is_oop_field(n, offset);
  1.1814 +  FieldNode* field = new (C->comp_arena()) FieldNode(C, n, es, offset, is_oop);
  1.1815 +  _nodes.at_put(n->_idx, field);
  1.1816 +}
  1.1817 +
  1.1818 +void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
  1.1819 +                                    PointsToNode* src, PointsToNode* dst) {
  1.1820 +  assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
  1.1821 +  assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
  1.1822 +  PointsToNode* ptadr = _nodes.at(n->_idx);
  1.1823 +  if (ptadr != NULL) {
  1.1824 +    assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
  1.1825 +    return;
  1.1826 +  }
  1.1827 +  Compile* C = _compile;
  1.1828 +  ptadr = new (C->comp_arena()) ArraycopyNode(C, n, es);
  1.1829 +  _nodes.at_put(n->_idx, ptadr);
  1.1830 +  // Add edge from arraycopy node to source object.
  1.1831 +  (void)add_edge(ptadr, src);
  1.1832 +  src->set_arraycopy_src();
  1.1833 +  // Add edge from destination object to arraycopy node.
  1.1834 +  (void)add_edge(dst, ptadr);
  1.1835 +  dst->set_arraycopy_dst();
  1.1836 +}
  1.1837 +
// Decide whether the field addressed by AddP node 'n' at 'offset' holds
// an oop, i.e. its basic type is T_OBJECT, T_NARROWOOP or T_ARRAY.
bool ConnectionGraph::is_oop_field(Node* n, int offset) {
  const Type* adr_type = n->as_AddP()->bottom_type();
  // Default to a non-oop type; refined below from the address type.
  BasicType bt = T_INT;
  if (offset == Type::OffsetBot) {
    // Check only oop fields.
    if (!adr_type->isa_aryptr() ||
        (adr_type->isa_aryptr()->klass() == NULL) ||
         adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
      // OffsetBot is used to reference array's element. Ignore first AddP.
      if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
        bt = T_OBJECT;
      }
    }
  } else if (offset != oopDesc::klass_offset_in_bytes()) {
    // The klass field is skipped by the condition above.
    if (adr_type->isa_instptr()) {
      ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
      if (field != NULL) {
        bt = field->layout_type();
      } else {
        // Ignore non field load (for example, klass load)
      }
    } else if (adr_type->isa_aryptr()) {
      if (offset == arrayOopDesc::length_offset_in_bytes()) {
        // Ignore array length load.
      } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
        // Ignore first AddP.
      } else {
        const Type* elemtype = adr_type->isa_aryptr()->elem();
        bt = elemtype->array_element_basic_type();
      }
    } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
      // Allocation initialization, ThreadLocal field access, unsafe access:
      // recognize an oop slot by scanning users for oop loads/stores.
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        int opcode = n->fast_out(i)->Opcode();
        if (opcode == Op_StoreP || opcode == Op_LoadP ||
            opcode == Op_StoreN || opcode == Op_LoadN) {
          bt = T_OBJECT;
        }
      }
    }
  }
  return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
}
  1.1881 +
  1.1882 +// Returns unique pointed java object or NULL.
  1.1883 +JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
  1.1884 +  assert(!_collecting, "should not call when contructed graph");
  1.1885 +  // If the node was created after the escape computation we can't answer.
  1.1886 +  uint idx = n->_idx;
  1.1887 +  if (idx >= nodes_size()) {
  1.1888 +    return NULL;
  1.1889 +  }
  1.1890 +  PointsToNode* ptn = ptnode_adr(idx);
  1.1891 +  if (ptn->is_JavaObject()) {
  1.1892 +    return ptn->as_JavaObject();
  1.1893 +  }
  1.1894 +  assert(ptn->is_LocalVar(), "sanity");
  1.1895 +  // Check all java objects it points to.
  1.1896 +  JavaObjectNode* jobj = NULL;
  1.1897 +  for (EdgeIterator i(ptn); i.has_next(); i.next()) {
  1.1898 +    PointsToNode* e = i.get();
  1.1899 +    if (e->is_JavaObject()) {
  1.1900 +      if (jobj == NULL) {
  1.1901 +        jobj = e->as_JavaObject();
  1.1902 +      } else if (jobj != e) {
  1.1903 +        return NULL;
  1.1904 +      }
  1.1905 +    }
  1.1906 +  }
  1.1907 +  return jobj;
  1.1908 +}
  1.1909 +
  1.1910 +// Return true if this node points only to non-escaping allocations.
  1.1911 +bool PointsToNode::non_escaping_allocation() {
  1.1912 +  if (is_JavaObject()) {
  1.1913 +    Node* n = ideal_node();
  1.1914 +    if (n->is_Allocate() || n->is_CallStaticJava()) {
  1.1915 +      return (escape_state() == PointsToNode::NoEscape);
  1.1916 +    } else {
  1.1917 +      return false;
  1.1918 +    }
  1.1919 +  }
  1.1920 +  assert(is_LocalVar(), "sanity");
  1.1921 +  // Check all java objects it points to.
  1.1922 +  for (EdgeIterator i(this); i.has_next(); i.next()) {
  1.1923 +    PointsToNode* e = i.get();
  1.1924 +    if (e->is_JavaObject()) {
  1.1925 +      Node* n = e->ideal_node();
  1.1926 +      if ((e->escape_state() != PointsToNode::NoEscape) ||
  1.1927 +          !(n->is_Allocate() || n->is_CallStaticJava())) {
  1.1928 +        return false;
  1.1929 +      }
  1.1930 +    }
  1.1931 +  }
  1.1932 +  return true;
  1.1933 +}
  1.1934 +
  1.1935 +// Return true if we know the node does not escape globally.
  1.1936 +bool ConnectionGraph::not_global_escape(Node *n) {
  1.1937 +  assert(!_collecting, "should not call during graph construction");
  1.1938 +  // If the node was created after the escape computation we can't answer.
  1.1939 +  uint idx = n->_idx;
  1.1940 +  if (idx >= nodes_size()) {
  1.1941 +    return false;
  1.1942 +  }
  1.1943 +  PointsToNode* ptn = ptnode_adr(idx);
  1.1944 +  PointsToNode::EscapeState es = ptn->escape_state();
  1.1945 +  // If we have already computed a value, return it.
  1.1946 +  if (es >= PointsToNode::GlobalEscape)
  1.1947 +    return false;
  1.1948 +  if (ptn->is_JavaObject()) {
  1.1949 +    return true; // (es < PointsToNode::GlobalEscape);
  1.1950 +  }
  1.1951 +  assert(ptn->is_LocalVar(), "sanity");
  1.1952 +  // Check all java objects it points to.
  1.1953 +  for (EdgeIterator i(ptn); i.has_next(); i.next()) {
  1.1954 +    if (i.get()->escape_state() >= PointsToNode::GlobalEscape)
  1.1955 +      return false;
  1.1956 +  }
  1.1957 +  return true;
  1.1958 +}
  1.1959 +
  1.1960 +
  1.1961 +// Helper functions
  1.1962 +
  1.1963 +// Return true if this node points to specified node or nodes it points to.
  1.1964 +bool PointsToNode::points_to(JavaObjectNode* ptn) const {
  1.1965 +  if (is_JavaObject()) {
  1.1966 +    return (this == ptn);
  1.1967 +  }
  1.1968 +  assert(is_LocalVar(), "sanity");
  1.1969 +  for (EdgeIterator i(this); i.has_next(); i.next()) {
  1.1970 +    if (i.get() == ptn)
  1.1971 +      return true;
  1.1972 +  }
  1.1973 +  return false;
  1.1974 +}
  1.1975 +
  1.1976 +// Return true if one node points to an other.
  1.1977 +bool PointsToNode::meet(PointsToNode* ptn) {
  1.1978 +  if (this == ptn) {
  1.1979 +    return true;
  1.1980 +  } else if (ptn->is_JavaObject()) {
  1.1981 +    return this->points_to(ptn->as_JavaObject());
  1.1982 +  } else if (this->is_JavaObject()) {
  1.1983 +    return ptn->points_to(this->as_JavaObject());
  1.1984 +  }
  1.1985 +  assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
  1.1986 +  int ptn_count =  ptn->edge_count();
  1.1987 +  for (EdgeIterator i(this); i.has_next(); i.next()) {
  1.1988 +    PointsToNode* this_e = i.get();
  1.1989 +    for (int j = 0; j < ptn_count; j++) {
  1.1990 +      if (this_e == ptn->edge(j))
  1.1991 +        return true;
  1.1992 +    }
  1.1993 +  }
  1.1994 +  return false;
  1.1995 +}
  1.1996 +
  1.1997 +#ifdef ASSERT
  1.1998 +// Return true if bases point to this java object.
  1.1999 +bool FieldNode::has_base(JavaObjectNode* jobj) const {
  1.2000 +  for (BaseIterator i(this); i.has_next(); i.next()) {
  1.2001 +    if (i.get() == jobj)
  1.2002 +      return true;
  1.2003 +  }
  1.2004 +  return false;
  1.2005 +}
  1.2006 +#endif
  1.2007 +
  1.2008  int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
  1.2009    const Type *adr_type = phase->type(adr);
  1.2010    if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
  1.2011 @@ -171,286 +1948,7 @@
  1.2012    return t_ptr->offset();
  1.2013  }
  1.2014  
  1.2015 -void ConnectionGraph::add_field_edge(uint from_i, uint to_i, int offset) {
  1.2016 -  // Don't add fields to NULL pointer.
  1.2017 -  if (is_null_ptr(from_i))
  1.2018 -    return;
  1.2019 -  PointsToNode *f = ptnode_adr(from_i);
  1.2020 -  PointsToNode *t = ptnode_adr(to_i);
  1.2021 -
  1.2022 -  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
  1.2023 -  assert(f->node_type() == PointsToNode::JavaObject, "invalid destination of Field edge");
  1.2024 -  assert(t->node_type() == PointsToNode::Field, "invalid destination of Field edge");
  1.2025 -  assert (t->offset() == -1 || t->offset() == offset, "conflicting field offsets");
  1.2026 -  t->set_offset(offset);
  1.2027 -
  1.2028 -  add_edge(f, to_i, PointsToNode::FieldEdge);
  1.2029 -}
  1.2030 -
  1.2031 -void ConnectionGraph::set_escape_state(uint ni, PointsToNode::EscapeState es) {
  1.2032 -  // Don't change non-escaping state of NULL pointer.
  1.2033 -  if (is_null_ptr(ni))
  1.2034 -    return;
  1.2035 -  PointsToNode *npt = ptnode_adr(ni);
  1.2036 -  PointsToNode::EscapeState old_es = npt->escape_state();
  1.2037 -  if (es > old_es)
  1.2038 -    npt->set_escape_state(es);
  1.2039 -}
  1.2040 -
  1.2041 -void ConnectionGraph::add_node(Node *n, PointsToNode::NodeType nt,
  1.2042 -                               PointsToNode::EscapeState es, bool done) {
  1.2043 -  PointsToNode* ptadr = ptnode_adr(n->_idx);
  1.2044 -  ptadr->_node = n;
  1.2045 -  ptadr->set_node_type(nt);
  1.2046 -
  1.2047 -  // inline set_escape_state(idx, es);
  1.2048 -  PointsToNode::EscapeState old_es = ptadr->escape_state();
  1.2049 -  if (es > old_es)
  1.2050 -    ptadr->set_escape_state(es);
  1.2051 -
  1.2052 -  if (done)
  1.2053 -    _processed.set(n->_idx);
  1.2054 -}
  1.2055 -
  1.2056 -PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n) {
  1.2057 -  uint idx = n->_idx;
  1.2058 -  PointsToNode::EscapeState es;
  1.2059 -
  1.2060 -  // If we are still collecting or there were no non-escaping allocations
  1.2061 -  // we don't know the answer yet
  1.2062 -  if (_collecting)
  1.2063 -    return PointsToNode::UnknownEscape;
  1.2064 -
  1.2065 -  // if the node was created after the escape computation, return
  1.2066 -  // UnknownEscape
  1.2067 -  if (idx >= nodes_size())
  1.2068 -    return PointsToNode::UnknownEscape;
  1.2069 -
  1.2070 -  es = ptnode_adr(idx)->escape_state();
  1.2071 -
  1.2072 -  // if we have already computed a value, return it
  1.2073 -  if (es != PointsToNode::UnknownEscape &&
  1.2074 -      ptnode_adr(idx)->node_type() == PointsToNode::JavaObject)
  1.2075 -    return es;
  1.2076 -
  1.2077 -  // PointsTo() calls n->uncast() which can return a new ideal node.
  1.2078 -  if (n->uncast()->_idx >= nodes_size())
  1.2079 -    return PointsToNode::UnknownEscape;
  1.2080 -
  1.2081 -  PointsToNode::EscapeState orig_es = es;
  1.2082 -
  1.2083 -  // compute max escape state of anything this node could point to
  1.2084 -  for(VectorSetI i(PointsTo(n)); i.test() && es != PointsToNode::GlobalEscape; ++i) {
  1.2085 -    uint pt = i.elem;
  1.2086 -    PointsToNode::EscapeState pes = ptnode_adr(pt)->escape_state();
  1.2087 -    if (pes > es)
  1.2088 -      es = pes;
  1.2089 -  }
  1.2090 -  if (orig_es != es) {
  1.2091 -    // cache the computed escape state
  1.2092 -    assert(es > orig_es, "should have computed an escape state");
  1.2093 -    set_escape_state(idx, es);
  1.2094 -  } // orig_es could be PointsToNode::UnknownEscape
  1.2095 -  return es;
  1.2096 -}
  1.2097 -
  1.2098 -VectorSet* ConnectionGraph::PointsTo(Node * n) {
  1.2099 -  pt_ptset.Reset();
  1.2100 -  pt_visited.Reset();
  1.2101 -  pt_worklist.clear();
  1.2102 -
  1.2103 -#ifdef ASSERT
  1.2104 -  Node *orig_n = n;
  1.2105 -#endif
  1.2106 -
  1.2107 -  n = n->uncast();
  1.2108 -  PointsToNode* npt = ptnode_adr(n->_idx);
  1.2109 -
  1.2110 -  // If we have a JavaObject, return just that object
  1.2111 -  if (npt->node_type() == PointsToNode::JavaObject) {
  1.2112 -    pt_ptset.set(n->_idx);
  1.2113 -    return &pt_ptset;
  1.2114 -  }
  1.2115 -#ifdef ASSERT
  1.2116 -  if (npt->_node == NULL) {
  1.2117 -    if (orig_n != n)
  1.2118 -      orig_n->dump();
  1.2119 -    n->dump();
  1.2120 -    assert(npt->_node != NULL, "unregistered node");
  1.2121 -  }
  1.2122 -#endif
  1.2123 -  pt_worklist.push(n->_idx);
  1.2124 -  while(pt_worklist.length() > 0) {
  1.2125 -    int ni = pt_worklist.pop();
  1.2126 -    if (pt_visited.test_set(ni))
  1.2127 -      continue;
  1.2128 -
  1.2129 -    PointsToNode* pn = ptnode_adr(ni);
  1.2130 -    // ensure that all inputs of a Phi have been processed
  1.2131 -    assert(!_collecting || !pn->_node->is_Phi() || _processed.test(ni),"");
  1.2132 -
  1.2133 -    int edges_processed = 0;
  1.2134 -    uint e_cnt = pn->edge_count();
  1.2135 -    for (uint e = 0; e < e_cnt; e++) {
  1.2136 -      uint etgt = pn->edge_target(e);
  1.2137 -      PointsToNode::EdgeType et = pn->edge_type(e);
  1.2138 -      if (et == PointsToNode::PointsToEdge) {
  1.2139 -        pt_ptset.set(etgt);
  1.2140 -        edges_processed++;
  1.2141 -      } else if (et == PointsToNode::DeferredEdge) {
  1.2142 -        pt_worklist.push(etgt);
  1.2143 -        edges_processed++;
  1.2144 -      } else {
  1.2145 -        assert(false,"neither PointsToEdge or DeferredEdge");
  1.2146 -      }
  1.2147 -    }
  1.2148 -    if (edges_processed == 0) {
  1.2149 -      // no deferred or pointsto edges found.  Assume the value was set
  1.2150 -      // outside this method.  Add the phantom object to the pointsto set.
  1.2151 -      pt_ptset.set(_phantom_object);
  1.2152 -    }
  1.2153 -  }
  1.2154 -  return &pt_ptset;
  1.2155 -}
  1.2156 -
  1.2157 -void ConnectionGraph::remove_deferred(uint ni, GrowableArray<uint>* deferred_edges, VectorSet* visited) {
  1.2158 -  // This method is most expensive during ConnectionGraph construction.
  1.2159 -  // Reuse vectorSet and an additional growable array for deferred edges.
  1.2160 -  deferred_edges->clear();
  1.2161 -  visited->Reset();
  1.2162 -
  1.2163 -  visited->set(ni);
  1.2164 -  PointsToNode *ptn = ptnode_adr(ni);
  1.2165 -  assert(ptn->node_type() == PointsToNode::LocalVar ||
  1.2166 -         ptn->node_type() == PointsToNode::Field, "sanity");
  1.2167 -  assert(ptn->edge_count() != 0, "should have at least phantom_object");
  1.2168 -
  1.2169 -  // Mark current edges as visited and move deferred edges to separate array.
  1.2170 -  for (uint i = 0; i < ptn->edge_count(); ) {
  1.2171 -    uint t = ptn->edge_target(i);
  1.2172 -#ifdef ASSERT
  1.2173 -    assert(!visited->test_set(t), "expecting no duplications");
  1.2174 -#else
  1.2175 -    visited->set(t);
  1.2176 -#endif
  1.2177 -    if (ptn->edge_type(i) == PointsToNode::DeferredEdge) {
  1.2178 -      ptn->remove_edge(t, PointsToNode::DeferredEdge);
  1.2179 -      deferred_edges->append(t);
  1.2180 -    } else {
  1.2181 -      i++;
  1.2182 -    }
  1.2183 -  }
  1.2184 -  for (int next = 0; next < deferred_edges->length(); ++next) {
  1.2185 -    uint t = deferred_edges->at(next);
  1.2186 -    PointsToNode *ptt = ptnode_adr(t);
  1.2187 -    uint e_cnt = ptt->edge_count();
  1.2188 -    assert(e_cnt != 0, "should have at least phantom_object");
  1.2189 -    for (uint e = 0; e < e_cnt; e++) {
  1.2190 -      uint etgt = ptt->edge_target(e);
  1.2191 -      if (visited->test_set(etgt))
  1.2192 -        continue;
  1.2193 -
  1.2194 -      PointsToNode::EdgeType et = ptt->edge_type(e);
  1.2195 -      if (et == PointsToNode::PointsToEdge) {
  1.2196 -        add_pointsto_edge(ni, etgt);
  1.2197 -      } else if (et == PointsToNode::DeferredEdge) {
  1.2198 -        deferred_edges->append(etgt);
  1.2199 -      } else {
  1.2200 -        assert(false,"invalid connection graph");
  1.2201 -      }
  1.2202 -    }
  1.2203 -  }
  1.2204 -  if (ptn->edge_count() == 0) {
  1.2205 -    // No pointsto edges found after deferred edges are removed.
  1.2206 -    // For example, in the next case where call is replaced
  1.2207 -    // with uncommon trap and as result array's load references
  1.2208 -    // itself through deferred edges:
  1.2209 -    //
  1.2210 -    // A a = b[i];
  1.2211 -    // if (c!=null) a = c.foo();
  1.2212 -    // b[i] = a;
  1.2213 -    //
  1.2214 -    // Assume the value was set outside this method and
  1.2215 -    // add edge to phantom object.
  1.2216 -    add_pointsto_edge(ni, _phantom_object);
  1.2217 -  }
  1.2218 -}
  1.2219 -
  1.2220 -
  1.2221 -//  Add an edge to node given by "to_i" from any field of adr_i whose offset
  1.2222 -//  matches "offset"  A deferred edge is added if to_i is a LocalVar, and
  1.2223 -//  a pointsto edge is added if it is a JavaObject
  1.2224 -
  1.2225 -void ConnectionGraph::add_edge_from_fields(uint adr_i, uint to_i, int offs) {
  1.2226 -  // No fields for NULL pointer.
  1.2227 -  if (is_null_ptr(adr_i)) {
  1.2228 -    return;
  1.2229 -  }
  1.2230 -  PointsToNode* an = ptnode_adr(adr_i);
  1.2231 -  PointsToNode* to = ptnode_adr(to_i);
  1.2232 -  bool deferred = (to->node_type() == PointsToNode::LocalVar);
  1.2233 -  bool escaped  = (to_i == _phantom_object) && (offs == Type::OffsetTop);
  1.2234 -  if (escaped) {
  1.2235 -    // Values in fields escaped during call.
  1.2236 -    assert(an->escape_state() >= PointsToNode::ArgEscape, "sanity");
  1.2237 -    offs = Type::OffsetBot;
  1.2238 -  }
  1.2239 -  for (uint fe = 0; fe < an->edge_count(); fe++) {
  1.2240 -    assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
  1.2241 -    int fi = an->edge_target(fe);
  1.2242 -    if (escaped) {
  1.2243 -      set_escape_state(fi, PointsToNode::GlobalEscape);
  1.2244 -    }
  1.2245 -    PointsToNode* pf = ptnode_adr(fi);
  1.2246 -    int po = pf->offset();
  1.2247 -    if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
  1.2248 -      if (deferred)
  1.2249 -        add_deferred_edge(fi, to_i);
  1.2250 -      else
  1.2251 -        add_pointsto_edge(fi, to_i);
  1.2252 -    }
  1.2253 -  }
  1.2254 -}
  1.2255 -
  1.2256 -// Add a deferred  edge from node given by "from_i" to any field of adr_i
  1.2257 -// whose offset matches "offset".
  1.2258 -void ConnectionGraph::add_deferred_edge_to_fields(uint from_i, uint adr_i, int offs) {
  1.2259 -  // No fields for NULL pointer.
  1.2260 -  if (is_null_ptr(adr_i)) {
  1.2261 -    return;
  1.2262 -  }
  1.2263 -  if (adr_i == _phantom_object) {
  1.2264 -    // Add only one edge for unknown object.
  1.2265 -    add_pointsto_edge(from_i, _phantom_object);
  1.2266 -    return;
  1.2267 -  }
  1.2268 -  PointsToNode* an = ptnode_adr(adr_i);
  1.2269 -  bool is_alloc = an->_node->is_Allocate();
  1.2270 -  for (uint fe = 0; fe < an->edge_count(); fe++) {
  1.2271 -    assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
  1.2272 -    int fi = an->edge_target(fe);
  1.2273 -    PointsToNode* pf = ptnode_adr(fi);
  1.2274 -    int offset = pf->offset();
  1.2275 -    if (!is_alloc) {
  1.2276 -      // Assume the field was set outside this method if it is not Allocation
  1.2277 -      add_pointsto_edge(fi, _phantom_object);
  1.2278 -    }
  1.2279 -    if (offset == offs || offset == Type::OffsetBot || offs == Type::OffsetBot) {
  1.2280 -      add_deferred_edge(from_i, fi);
  1.2281 -    }
  1.2282 -  }
  1.2283 -  // Some fields references (AddP) may still be missing
  1.2284 -  // until Connection Graph construction is complete.
  1.2285 -  // For example, loads from RAW pointers with offset 0
  1.2286 -  // which don't have AddP.
  1.2287 -  // A reference to phantom_object will be added if
  1.2288 -  // a field reference is still missing after completing
  1.2289 -  // Connection Graph (see remove_deferred()).
  1.2290 -}
  1.2291 -
  1.2292 -// Helper functions
  1.2293 -
  1.2294 -static Node* get_addp_base(Node *addp) {
  1.2295 +Node* ConnectionGraph::get_addp_base(Node *addp) {
  1.2296    assert(addp->is_AddP(), "must be AddP");
  1.2297    //
  1.2298    // AddP cases for Base and Address inputs:
  1.2299 @@ -513,30 +2011,30 @@
  1.2300    //       | |
  1.2301    //       AddP  ( base == address )
  1.2302    //
  1.2303 -  Node *base = addp->in(AddPNode::Base)->uncast();
  1.2304 -  if (base->is_top()) { // The AddP case #3 and #6.
  1.2305 -    base = addp->in(AddPNode::Address)->uncast();
  1.2306 +  Node *base = addp->in(AddPNode::Base);
  1.2307 +  if (base->uncast()->is_top()) { // The AddP case #3 and #6.
  1.2308 +    base = addp->in(AddPNode::Address);
  1.2309      while (base->is_AddP()) {
  1.2310        // Case #6 (unsafe access) may have several chained AddP nodes.
  1.2311 -      assert(base->in(AddPNode::Base)->is_top(), "expected unsafe access address only");
  1.2312 -      base = base->in(AddPNode::Address)->uncast();
  1.2313 +      assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
  1.2314 +      base = base->in(AddPNode::Address);
  1.2315      }
  1.2316 -    assert(base->Opcode() == Op_ConP || base->Opcode() == Op_ThreadLocal ||
  1.2317 -           base->Opcode() == Op_CastX2P || base->is_DecodeN() ||
  1.2318 -           (base->is_Mem() && base->bottom_type() == TypeRawPtr::NOTNULL) ||
  1.2319 -           (base->is_Proj() && base->in(0)->is_Allocate()), "sanity");
  1.2320 +    Node* uncast_base = base->uncast();
  1.2321 +    int opcode = uncast_base->Opcode();
  1.2322 +    assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
  1.2323 +           opcode == Op_CastX2P || uncast_base->is_DecodeN() ||
  1.2324 +           (uncast_base->is_Mem() && uncast_base->bottom_type() == TypeRawPtr::NOTNULL) ||
  1.2325 +           (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
  1.2326    }
  1.2327    return base;
  1.2328  }
  1.2329  
  1.2330 -static Node* find_second_addp(Node* addp, Node* n) {
  1.2331 +Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
  1.2332    assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
  1.2333 -
  1.2334    Node* addp2 = addp->raw_out(0);
  1.2335    if (addp->outcnt() == 1 && addp2->is_AddP() &&
  1.2336        addp2->in(AddPNode::Base) == n &&
  1.2337        addp2->in(AddPNode::Address) == addp) {
  1.2338 -
  1.2339      assert(addp->in(AddPNode::Base) == n, "expecting the same base");
  1.2340      //
  1.2341      // Find array's offset to push it on worklist first and
  1.2342 @@ -575,7 +2073,8 @@
  1.2343  // Adjust the type and inputs of an AddP which computes the
  1.2344  // address of a field of an instance
  1.2345  //
  1.2346 -bool ConnectionGraph::split_AddP(Node *addp, Node *base,  PhaseGVN  *igvn) {
  1.2347 +bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
  1.2348 +  PhaseGVN* igvn = _igvn;
  1.2349    const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
  1.2350    assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
  1.2351    const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
  1.2352 @@ -612,7 +2111,6 @@
  1.2353        !base_t->klass()->is_subtype_of(t->klass())) {
  1.2354       return false; // bail out
  1.2355    }
  1.2356 -
  1.2357    const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
  1.2358    // Do NOT remove the next line: ensure a new alias index is allocated
  1.2359    // for the instance type. Note: C++ will not remove it since the call
  1.2360 @@ -620,9 +2118,7 @@
  1.2361    int alias_idx = _compile->get_alias_index(tinst);
  1.2362    igvn->set_type(addp, tinst);
  1.2363    // record the allocation in the node map
  1.2364 -  assert(ptnode_adr(addp->_idx)->_node != NULL, "should be registered");
  1.2365 -  set_map(addp->_idx, get_map(base->_idx));
  1.2366 -
  1.2367 +  set_map(addp, get_map(base->_idx));
  1.2368    // Set addp's Base and Address to 'base'.
  1.2369    Node *abase = addp->in(AddPNode::Base);
  1.2370    Node *adr   = addp->in(AddPNode::Address);
  1.2371 @@ -657,8 +2153,9 @@
  1.2372  // created phi or an existing phi.  Sets create_new to indicate whether a new
  1.2373  // phi was created.  Cache the last newly created phi in the node map.
  1.2374  //
  1.2375 -PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn, bool &new_created) {
  1.2376 +PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, bool &new_created) {
  1.2377    Compile *C = _compile;
  1.2378 +  PhaseGVN* igvn = _igvn;
  1.2379    new_created = false;
  1.2380    int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
  1.2381    // nothing to do if orig_phi is bottom memory or matches alias_idx
  1.2382 @@ -698,12 +2195,7 @@
  1.2383    C->copy_node_notes_to(result, orig_phi);
  1.2384    igvn->set_type(result, result->bottom_type());
  1.2385    record_for_optimizer(result);
  1.2386 -
  1.2387 -  debug_only(Node* pn = ptnode_adr(orig_phi->_idx)->_node;)
  1.2388 -  assert(pn == NULL || pn == orig_phi, "wrong node");
  1.2389 -  set_map(orig_phi->_idx, result);
  1.2390 -  ptnode_adr(orig_phi->_idx)->_node = orig_phi;
  1.2391 -
  1.2392 +  set_map(orig_phi, result);
  1.2393    new_created = true;
  1.2394    return result;
  1.2395  }
  1.2396 @@ -712,27 +2204,25 @@
  1.2397  // Return a new version of Memory Phi "orig_phi" with the inputs having the
  1.2398  // specified alias index.
  1.2399  //
  1.2400 -PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn) {
  1.2401 -
  1.2402 +PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist) {
  1.2403    assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
  1.2404    Compile *C = _compile;
  1.2405 +  PhaseGVN* igvn = _igvn;
  1.2406    bool new_phi_created;
  1.2407 -  PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, igvn, new_phi_created);
  1.2408 +  PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
  1.2409    if (!new_phi_created) {
  1.2410      return result;
  1.2411    }
  1.2412 -
  1.2413    GrowableArray<PhiNode *>  phi_list;
  1.2414    GrowableArray<uint>  cur_input;
  1.2415 -
  1.2416    PhiNode *phi = orig_phi;
  1.2417    uint idx = 1;
  1.2418    bool finished = false;
  1.2419    while(!finished) {
  1.2420      while (idx < phi->req()) {
  1.2421 -      Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, igvn);
  1.2422 +      Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
  1.2423        if (mem != NULL && mem->is_Phi()) {
  1.2424 -        PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, igvn, new_phi_created);
  1.2425 +        PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
  1.2426          if (new_phi_created) {
  1.2427            // found an phi for which we created a new split, push current one on worklist and begin
  1.2428            // processing new one
  1.2429 @@ -775,19 +2265,18 @@
  1.2430    return result;
  1.2431  }
  1.2432  
  1.2433 -
  1.2434  //
  1.2435  // The next methods are derived from methods in MemNode.
  1.2436  //
  1.2437 -static Node *step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
  1.2438 +Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
  1.2439    Node *mem = mmem;
  1.2440    // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
  1.2441    // means an array I have not precisely typed yet.  Do not do any
  1.2442    // alias stuff with it any time soon.
  1.2443 -  if( toop->base() != Type::AnyPtr &&
  1.2444 +  if (toop->base() != Type::AnyPtr &&
  1.2445        !(toop->klass() != NULL &&
  1.2446          toop->klass()->is_java_lang_Object() &&
  1.2447 -        toop->offset() == Type::OffsetBot) ) {
  1.2448 +        toop->offset() == Type::OffsetBot)) {
  1.2449      mem = mmem->memory_at(alias_idx);
  1.2450      // Update input if it is progress over what we have now
  1.2451    }
  1.2452 @@ -797,9 +2286,9 @@
  1.2453  //
  1.2454  // Move memory users to their memory slices.
  1.2455  //
  1.2456 -void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *igvn) {
  1.2457 +void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis) {
  1.2458    Compile* C = _compile;
  1.2459 -
  1.2460 +  PhaseGVN* igvn = _igvn;
  1.2461    const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
  1.2462    assert(tp != NULL, "ptr type");
  1.2463    int alias_idx = C->get_alias_index(tp);
  1.2464 @@ -816,7 +2305,7 @@
  1.2465        }
  1.2466        // Replace previous general reference to mem node.
  1.2467        uint orig_uniq = C->unique();
  1.2468 -      Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
  1.2469 +      Node* m = find_inst_mem(n, general_idx, orig_phis);
  1.2470        assert(orig_uniq == C->unique(), "no new nodes");
  1.2471        mmem->set_memory_at(general_idx, m);
  1.2472        --imax;
  1.2473 @@ -836,7 +2325,7 @@
  1.2474        }
  1.2475        // Move to general memory slice.
  1.2476        uint orig_uniq = C->unique();
  1.2477 -      Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
  1.2478 +      Node* m = find_inst_mem(n, general_idx, orig_phis);
  1.2479        assert(orig_uniq == C->unique(), "no new nodes");
  1.2480        igvn->hash_delete(use);
  1.2481        imax -= use->replace_edge(n, m);
  1.2482 @@ -873,10 +2362,11 @@
  1.2483  // Search memory chain of "mem" to find a MemNode whose address
  1.2484  // is the specified alias index.
  1.2485  //
  1.2486 -Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *phase) {
  1.2487 +Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis) {
  1.2488    if (orig_mem == NULL)
  1.2489      return orig_mem;
  1.2490 -  Compile* C = phase->C;
  1.2491 +  Compile* C = _compile;
  1.2492 +  PhaseGVN* igvn = _igvn;
  1.2493    const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
  1.2494    bool is_instance = (toop != NULL) && toop->is_known_instance();
  1.2495    Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
  1.2496 @@ -887,7 +2377,7 @@
  1.2497      if (result == start_mem)
  1.2498        break;  // hit one of our sentinels
  1.2499      if (result->is_Mem()) {
  1.2500 -      const Type *at = phase->type(result->in(MemNode::Address));
  1.2501 +      const Type *at = igvn->type(result->in(MemNode::Address));
  1.2502        if (at == Type::TOP)
  1.2503          break; // Dead
  1.2504        assert (at->isa_ptr() != NULL, "pointer type required.");
  1.2505 @@ -909,7 +2399,7 @@
  1.2506          break;  // hit one of our sentinels
  1.2507        } else if (proj_in->is_Call()) {
  1.2508          CallNode *call = proj_in->as_Call();
  1.2509 -        if (!call->may_modify(toop, phase)) {
  1.2510 +        if (!call->may_modify(toop, igvn)) {
  1.2511            result = call->in(TypeFunc::Memory);
  1.2512          }
  1.2513        } else if (proj_in->is_Initialize()) {
  1.2514 @@ -928,7 +2418,7 @@
  1.2515        if (result == mmem->base_memory()) {
  1.2516          // Didn't find instance memory, search through general slice recursively.
  1.2517          result = mmem->memory_at(C->get_general_index(alias_idx));
  1.2518 -        result = find_inst_mem(result, alias_idx, orig_phis, phase);
  1.2519 +        result = find_inst_mem(result, alias_idx, orig_phis);
  1.2520          if (C->failing()) {
  1.2521            return NULL;
  1.2522          }
  1.2523 @@ -936,7 +2426,7 @@
  1.2524        }
  1.2525      } else if (result->is_Phi() &&
  1.2526                 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
  1.2527 -      Node *un = result->as_Phi()->unique_input(phase);
  1.2528 +      Node *un = result->as_Phi()->unique_input(igvn);
  1.2529        if (un != NULL) {
  1.2530          orig_phis.append_if_missing(result->as_Phi());
  1.2531          result = un;
  1.2532 @@ -944,7 +2434,7 @@
  1.2533          break;
  1.2534        }
  1.2535      } else if (result->is_ClearArray()) {
  1.2536 -      if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), phase)) {
  1.2537 +      if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
  1.2538          // Can not bypass initialization of the instance
  1.2539          // we are looking for.
  1.2540          break;
  1.2541 @@ -952,7 +2442,7 @@
  1.2542        // Otherwise skip it (the call updated 'result' value).
  1.2543      } else if (result->Opcode() == Op_SCMemProj) {
  1.2544        assert(result->in(0)->is_LoadStore(), "sanity");
  1.2545 -      const Type *at = phase->type(result->in(0)->in(MemNode::Address));
  1.2546 +      const Type *at = igvn->type(result->in(0)->in(MemNode::Address));
  1.2547        if (at != Type::TOP) {
  1.2548          assert (at->isa_ptr() != NULL, "pointer type required.");
  1.2549          int idx = C->get_alias_index(at->is_ptr());
  1.2550 @@ -972,7 +2462,7 @@
  1.2551        orig_phis.append_if_missing(mphi);
  1.2552      } else if (C->get_alias_index(t) != alias_idx) {
  1.2553        // Create a new Phi with the specified alias index type.
  1.2554 -      result = split_memory_phi(mphi, alias_idx, orig_phis, phase);
  1.2555 +      result = split_memory_phi(mphi, alias_idx, orig_phis);
  1.2556      }
  1.2557    }
  1.2558    // the result is either MemNode, PhiNode, InitializeNode.
  1.2559 @@ -1071,12 +2561,12 @@
  1.2560  void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist) {
  1.2561    GrowableArray<Node *>  memnode_worklist;
  1.2562    GrowableArray<PhiNode *>  orig_phis;
  1.2563 -
  1.2564    PhaseIterGVN  *igvn = _igvn;
  1.2565    uint new_index_start = (uint) _compile->num_alias_types();
  1.2566    Arena* arena = Thread::current()->resource_area();
  1.2567    VectorSet visited(arena);
  1.2568 -
  1.2569 +  ideal_nodes.clear(); // Reset for use with set_map/get_map.
  1.2570 +  uint unique_old = _compile->unique();
  1.2571  
  1.2572    //  Phase 1:  Process possible allocations from alloc_worklist.
  1.2573    //  Create instance types for the CheckCastPP for allocations where possible.
  1.2574 @@ -1088,17 +2578,15 @@
  1.2575    while (alloc_worklist.length() != 0) {
  1.2576      Node *n = alloc_worklist.pop();
  1.2577      uint ni = n->_idx;
  1.2578 -    const TypeOopPtr* tinst = NULL;
  1.2579      if (n->is_Call()) {
  1.2580        CallNode *alloc = n->as_Call();
  1.2581        // copy escape information to call node
  1.2582        PointsToNode* ptn = ptnode_adr(alloc->_idx);
  1.2583 -      PointsToNode::EscapeState es = escape_state(alloc);
  1.2584 +      PointsToNode::EscapeState es = ptn->escape_state();
  1.2585        // We have an allocation or call which returns a Java object,
  1.2586        // see if it is unescaped.
  1.2587        if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable())
  1.2588          continue;
  1.2589 -
  1.2590        // Find CheckCastPP for the allocate or for the return value of a call
  1.2591        n = alloc->result_cast();
  1.2592        if (n == NULL) {            // No uses except Initialize node
  1.2593 @@ -1145,20 +2633,18 @@
  1.2594          // so it could be eliminated.
  1.2595          alloc->as_Allocate()->_is_scalar_replaceable = true;
  1.2596        }
  1.2597 -      set_escape_state(n->_idx, es); // CheckCastPP escape state
  1.2598 +      set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state
  1.2599        // in order for an object to be scalar-replaceable, it must be:
  1.2600        //   - a direct allocation (not a call returning an object)
  1.2601        //   - non-escaping
  1.2602        //   - eligible to be a unique type
  1.2603        //   - not determined to be ineligible by escape analysis
  1.2604 -      assert(ptnode_adr(alloc->_idx)->_node != NULL &&
  1.2605 -             ptnode_adr(n->_idx)->_node != NULL, "should be registered");
  1.2606 -      set_map(alloc->_idx, n);
  1.2607 -      set_map(n->_idx, alloc);
  1.2608 +      set_map(alloc, n);
  1.2609 +      set_map(n, alloc);
  1.2610        const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
  1.2611        if (t == NULL)
  1.2612          continue;  // not a TypeOopPtr
  1.2613 -      tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni);
  1.2614 +      const TypeOopPtr* tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni);
  1.2615        igvn->hash_delete(n);
  1.2616        igvn->set_type(n,  tinst);
  1.2617        n->raise_bottom_type(tinst);
  1.2618 @@ -1168,9 +2654,10 @@
  1.2619  
  1.2620          // First, put on the worklist all Field edges from Connection Graph
  1.2621          // which is more accurate then putting immediate users from Ideal Graph.
  1.2622 -        for (uint e = 0; e < ptn->edge_count(); e++) {
  1.2623 -          Node *use = ptnode_adr(ptn->edge_target(e))->_node;
  1.2624 -          assert(ptn->edge_type(e) == PointsToNode::FieldEdge && use->is_AddP(),
  1.2625 +        for (EdgeIterator e(ptn); e.has_next(); e.next()) {
  1.2626 +          PointsToNode* tgt = e.get();
  1.2627 +          Node* use = tgt->ideal_node();
  1.2628 +          assert(tgt->is_Field() && use->is_AddP(),
  1.2629                   "only AddP nodes are Field edges in CG");
  1.2630            if (use->outcnt() > 0) { // Don't process dead nodes
  1.2631              Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
  1.2632 @@ -1202,16 +2689,18 @@
  1.2633          }
  1.2634        }
  1.2635      } else if (n->is_AddP()) {
  1.2636 -      VectorSet* ptset = PointsTo(get_addp_base(n));
  1.2637 -      assert(ptset->Size() == 1, "AddP address is unique");
  1.2638 -      uint elem = ptset->getelem(); // Allocation node's index
  1.2639 -      if (elem == _phantom_object) {
  1.2640 -        assert(false, "escaped allocation");
  1.2641 -        continue; // Assume the value was set outside this method.
  1.2642 +      JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
  1.2643 +      if (jobj == NULL || jobj == phantom_obj) {
  1.2644 +#ifdef ASSERT
  1.2645 +        ptnode_adr(get_addp_base(n)->_idx)->dump();
  1.2646 +        ptnode_adr(n->_idx)->dump();
  1.2647 +        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
  1.2648 +#endif
  1.2649 +        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
  1.2650 +        return;
  1.2651        }
  1.2652 -      Node *base = get_map(elem);  // CheckCastPP node
  1.2653 -      if (!split_AddP(n, base, igvn)) continue; // wrong type from dead path
  1.2654 -      tinst = igvn->type(base)->isa_oopptr();
  1.2655 +      Node *base = get_map(jobj->idx());  // CheckCastPP node
  1.2656 +      if (!split_AddP(n, base)) continue; // wrong type from dead path
  1.2657      } else if (n->is_Phi() ||
  1.2658                 n->is_CheckCastPP() ||
  1.2659                 n->is_EncodeP() ||
  1.2660 @@ -1221,18 +2710,20 @@
  1.2661          assert(n->is_Phi(), "loops only through Phi's");
  1.2662          continue;  // already processed
  1.2663        }
  1.2664 -      VectorSet* ptset = PointsTo(n);
  1.2665 -      if (ptset->Size() == 1) {
  1.2666 -        uint elem = ptset->getelem(); // Allocation node's index
  1.2667 -        if (elem == _phantom_object) {
  1.2668 -          assert(false, "escaped allocation");
  1.2669 -          continue; // Assume the value was set outside this method.
  1.2670 -        }
  1.2671 -        Node *val = get_map(elem);   // CheckCastPP node
  1.2672 +      JavaObjectNode* jobj = unique_java_object(n);
  1.2673 +      if (jobj == NULL || jobj == phantom_obj) {
  1.2674 +#ifdef ASSERT
  1.2675 +        ptnode_adr(n->_idx)->dump();
  1.2676 +        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
  1.2677 +#endif
  1.2678 +        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
  1.2679 +        return;
  1.2680 +      } else {
  1.2681 +        Node *val = get_map(jobj->idx());   // CheckCastPP node
  1.2682          TypeNode *tn = n->as_Type();
  1.2683 -        tinst = igvn->type(val)->isa_oopptr();
  1.2684 +        const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
  1.2685          assert(tinst != NULL && tinst->is_known_instance() &&
  1.2686 -               (uint)tinst->instance_id() == elem , "instance type expected.");
  1.2687 +               tinst->instance_id() == jobj->idx() , "instance type expected.");
  1.2688  
  1.2689          const Type *tn_type = igvn->type(tn);
  1.2690          const TypeOopPtr *tn_t;
  1.2691 @@ -1241,7 +2732,6 @@
  1.2692          } else {
  1.2693            tn_t = tn_type->isa_oopptr();
  1.2694          }
  1.2695 -
  1.2696          if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
  1.2697            if (tn_type->isa_narrowoop()) {
  1.2698              tn_type = tinst->make_narrowoop();
  1.2699 @@ -1314,13 +2804,13 @@
  1.2700    }
  1.2701    // New alias types were created in split_AddP().
  1.2702    uint new_index_end = (uint) _compile->num_alias_types();
  1.2703 +  assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");
  1.2704  
  1.2705    //  Phase 2:  Process MemNode's from memnode_worklist. compute new address type and
  1.2706    //            compute new values for Memory inputs  (the Memory inputs are not
  1.2707    //            actually updated until phase 4.)
  1.2708    if (memnode_worklist.length() == 0)
  1.2709      return;  // nothing to do
  1.2710 -
  1.2711    while (memnode_worklist.length() != 0) {
  1.2712      Node *n = memnode_worklist.pop();
  1.2713      if (visited.test_set(n->_idx))
  1.2714 @@ -1341,17 +2831,14 @@
  1.2715        assert (addr_t->isa_ptr() != NULL, "pointer type required.");
  1.2716        int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
  1.2717        assert ((uint)alias_idx < new_index_end, "wrong alias index");
  1.2718 -      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis, igvn);
  1.2719 +      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
  1.2720        if (_compile->failing()) {
  1.2721          return;
  1.2722        }
  1.2723        if (mem != n->in(MemNode::Memory)) {
  1.2724          // We delay the memory edge update since we need old one in
  1.2725          // MergeMem code below when instances memory slices are separated.
  1.2726 -        debug_only(Node* pn = ptnode_adr(n->_idx)->_node;)
  1.2727 -        assert(pn == NULL || pn == n, "wrong node");
  1.2728 -        set_map(n->_idx, mem);
  1.2729 -        ptnode_adr(n->_idx)->_node = n;
  1.2730 +        set_map(n, mem);
  1.2731        }
  1.2732        if (n->is_Load()) {
  1.2733          continue;  // don't push users
  1.2734 @@ -1442,7 +2929,7 @@
  1.2735          if((uint)_compile->get_general_index(ni) == i) {
  1.2736            Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
  1.2737            if (nmm->is_empty_memory(m)) {
  1.2738 -            Node* result = find_inst_mem(mem, ni, orig_phis, igvn);
  1.2739 +            Node* result = find_inst_mem(mem, ni, orig_phis);
  1.2740              if (_compile->failing()) {
  1.2741                return;
  1.2742              }
  1.2743 @@ -1458,7 +2945,7 @@
  1.2744        if (result == nmm->base_memory()) {
  1.2745          // Didn't find instance memory, search through general slice recursively.
  1.2746          result = nmm->memory_at(_compile->get_general_index(ni));
  1.2747 -        result = find_inst_mem(result, ni, orig_phis, igvn);
  1.2748 +        result = find_inst_mem(result, ni, orig_phis);
  1.2749          if (_compile->failing()) {
  1.2750            return;
  1.2751          }
  1.2752 @@ -1482,7 +2969,7 @@
  1.2753      igvn->hash_delete(phi);
  1.2754      for (uint i = 1; i < phi->req(); i++) {
  1.2755        Node *mem = phi->in(i);
  1.2756 -      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis, igvn);
  1.2757 +      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
  1.2758        if (_compile->failing()) {
  1.2759          return;
  1.2760        }
  1.2761 @@ -1496,39 +2983,36 @@
  1.2762  
  1.2763    // Update the memory inputs of MemNodes with the value we computed
  1.2764    // in Phase 2 and move stores memory users to corresponding memory slices.
  1.2765 -
  1.2766    // Disable memory split verification code until the fix for 6984348.
  1.2767    // Currently it produces false negative results since it does not cover all cases.
  1.2768  #if 0 // ifdef ASSERT
  1.2769    visited.Reset();
  1.2770    Node_Stack old_mems(arena, _compile->unique() >> 2);
  1.2771  #endif
  1.2772 -  for (uint i = 0; i < nodes_size(); i++) {
  1.2773 -    Node *nmem = get_map(i);
  1.2774 -    if (nmem != NULL) {
  1.2775 -      Node *n = ptnode_adr(i)->_node;
  1.2776 -      assert(n != NULL, "sanity");
  1.2777 -      if (n->is_Mem()) {
  1.2778 +  for (uint i = 0; i < ideal_nodes.size(); i++) {
  1.2779 +    Node*    n = ideal_nodes.at(i);
  1.2780 +    Node* nmem = get_map(n->_idx);
  1.2781 +    assert(nmem != NULL, "sanity");
  1.2782 +    if (n->is_Mem()) {
  1.2783  #if 0 // ifdef ASSERT
  1.2784 -        Node* old_mem = n->in(MemNode::Memory);
  1.2785 -        if (!visited.test_set(old_mem->_idx)) {
  1.2786 -          old_mems.push(old_mem, old_mem->outcnt());
  1.2787 -        }
  1.2788 +      Node* old_mem = n->in(MemNode::Memory);
  1.2789 +      if (!visited.test_set(old_mem->_idx)) {
  1.2790 +        old_mems.push(old_mem, old_mem->outcnt());
  1.2791 +      }
  1.2792  #endif
  1.2793 -        assert(n->in(MemNode::Memory) != nmem, "sanity");
  1.2794 -        if (!n->is_Load()) {
  1.2795 -          // Move memory users of a store first.
  1.2796 -          move_inst_mem(n, orig_phis, igvn);
  1.2797 -        }
  1.2798 -        // Now update memory input
  1.2799 -        igvn->hash_delete(n);
  1.2800 -        n->set_req(MemNode::Memory, nmem);
  1.2801 -        igvn->hash_insert(n);
  1.2802 -        record_for_optimizer(n);
  1.2803 -      } else {
  1.2804 -        assert(n->is_Allocate() || n->is_CheckCastPP() ||
  1.2805 -               n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
  1.2806 +      assert(n->in(MemNode::Memory) != nmem, "sanity");
  1.2807 +      if (!n->is_Load()) {
  1.2808 +        // Move memory users of a store first.
  1.2809 +        move_inst_mem(n, orig_phis);
  1.2810        }
  1.2811 +      // Now update memory input
  1.2812 +      igvn->hash_delete(n);
  1.2813 +      n->set_req(MemNode::Memory, nmem);
  1.2814 +      igvn->hash_insert(n);
  1.2815 +      record_for_optimizer(n);
  1.2816 +    } else {
  1.2817 +      assert(n->is_Allocate() || n->is_CheckCastPP() ||
  1.2818 +             n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
  1.2819      }
  1.2820    }
  1.2821  #if 0 // ifdef ASSERT
  1.2822 @@ -1542,1571 +3026,72 @@
  1.2823  #endif
  1.2824  }
  1.2825  
  1.2826 -bool ConnectionGraph::has_candidates(Compile *C) {
  1.2827 -  // EA brings benefits only when the code has allocations and/or locks which
  1.2828 -  // are represented by ideal Macro nodes.
  1.2829 -  int cnt = C->macro_count();
  1.2830 -  for( int i=0; i < cnt; i++ ) {
  1.2831 -    Node *n = C->macro_node(i);
  1.2832 -    if ( n->is_Allocate() )
  1.2833 -      return true;
  1.2834 -    if( n->is_Lock() ) {
  1.2835 -      Node* obj = n->as_Lock()->obj_node()->uncast();
  1.2836 -      if( !(obj->is_Parm() || obj->is_Con()) )
  1.2837 -        return true;
  1.2838 +#ifndef PRODUCT
  1.2839 +static const char *node_type_names[] = {
  1.2840 +  "UnknownType",
  1.2841 +  "JavaObject",
  1.2842 +  "LocalVar",
  1.2843 +  "Field",
  1.2844 +  "Arraycopy"
  1.2845 +};
  1.2846 +
  1.2847 +static const char *esc_names[] = {
  1.2848 +  "UnknownEscape",
  1.2849 +  "NoEscape",
  1.2850 +  "ArgEscape",
  1.2851 +  "GlobalEscape"
  1.2852 +};
  1.2853 +
  1.2854 +void PointsToNode::dump(bool print_state) const {
  1.2855 +  NodeType nt = node_type();
  1.2856 +  tty->print("%s ", node_type_names[(int) nt]);
  1.2857 +  if (print_state) {
  1.2858 +    EscapeState es = escape_state();
  1.2859 +    EscapeState fields_es = fields_escape_state();
  1.2860 +    tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
  1.2861 +    if (nt == PointsToNode::JavaObject && !this->scalar_replaceable())
  1.2862 +      tty->print("NSR");
  1.2863 +  }
  1.2864 +  if (is_Field()) {
  1.2865 +    FieldNode* f = (FieldNode*)this;
  1.2866 +    tty->print("(");
  1.2867 +    for (BaseIterator i(f); i.has_next(); i.next()) {
  1.2868 +      PointsToNode* b = i.get();
  1.2869 +      tty->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : ""));
  1.2870      }
  1.2871 +    tty->print(" )");
  1.2872    }
  1.2873 -  return false;
  1.2874 +  tty->print("[");
  1.2875 +  for (EdgeIterator i(this); i.has_next(); i.next()) {
  1.2876 +    PointsToNode* e = i.get();
  1.2877 +    tty->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
  1.2878 +  }
  1.2879 +  tty->print(" [");
  1.2880 +  for (UseIterator i(this); i.has_next(); i.next()) {
  1.2881 +    PointsToNode* u = i.get();
  1.2882 +    bool is_base = false;
  1.2883 +    if (PointsToNode::is_base_use(u)) {
  1.2884 +      is_base = true;
  1.2885 +      u = PointsToNode::get_use_node(u)->as_Field();
  1.2886 +    }
  1.2887 +    tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
  1.2888 +  }
  1.2889 +  tty->print(" ]]  ");
  1.2890 +  if (_node == NULL)
  1.2891 +    tty->print_cr("<null>");
  1.2892 +  else
  1.2893 +    _node->dump();
  1.2894  }
  1.2895  
  1.2896 -void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  1.2897 -  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
  1.2898 -  // to create space for them in ConnectionGraph::_nodes[].
  1.2899 -  Node* oop_null = igvn->zerocon(T_OBJECT);
  1.2900 -  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  1.2901 -
  1.2902 -  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
  1.2903 -  // Perform escape analysis
  1.2904 -  if (congraph->compute_escape()) {
  1.2905 -    // There are non escaping objects.
  1.2906 -    C->set_congraph(congraph);
  1.2907 -  }
  1.2908 -
  1.2909 -  // Cleanup.
  1.2910 -  if (oop_null->outcnt() == 0)
  1.2911 -    igvn->hash_delete(oop_null);
  1.2912 -  if (noop_null->outcnt() == 0)
  1.2913 -    igvn->hash_delete(noop_null);
  1.2914 -}
  1.2915 -
  1.2916 -bool ConnectionGraph::compute_escape() {
  1.2917 -  Compile* C = _compile;
  1.2918 -
  1.2919 -  // 1. Populate Connection Graph (CG) with Ideal nodes.
  1.2920 -
  1.2921 -  Unique_Node_List worklist_init;
  1.2922 -  worklist_init.map(C->unique(), NULL);  // preallocate space
  1.2923 -
  1.2924 -  // Initialize worklist
  1.2925 -  if (C->root() != NULL) {
  1.2926 -    worklist_init.push(C->root());
  1.2927 -  }
  1.2928 -
  1.2929 -  GrowableArray<Node*> alloc_worklist;
  1.2930 -  GrowableArray<Node*> addp_worklist;
  1.2931 -  GrowableArray<Node*> ptr_cmp_worklist;
  1.2932 -  GrowableArray<Node*> storestore_worklist;
  1.2933 -  PhaseGVN* igvn = _igvn;
  1.2934 -
  1.2935 -  // Push all useful nodes onto CG list and set their type.
  1.2936 -  for( uint next = 0; next < worklist_init.size(); ++next ) {
  1.2937 -    Node* n = worklist_init.at(next);
  1.2938 -    record_for_escape_analysis(n, igvn);
  1.2939 -    // Only allocations and java static calls results are checked
  1.2940 -    // for an escape status. See process_call_result() below.
  1.2941 -    if (n->is_Allocate() || n->is_CallStaticJava() &&
  1.2942 -        ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject) {
  1.2943 -      alloc_worklist.append(n);
  1.2944 -    } else if(n->is_AddP()) {
  1.2945 -      // Collect address nodes. Use them during stage 3 below
  1.2946 -      // to build initial connection graph field edges.
  1.2947 -      addp_worklist.append(n);
  1.2948 -    } else if (n->is_MergeMem()) {
  1.2949 -      // Collect all MergeMem nodes to add memory slices for
  1.2950 -      // scalar replaceable objects in split_unique_types().
  1.2951 -      _mergemem_worklist.append(n->as_MergeMem());
  1.2952 -    } else if (OptimizePtrCompare && n->is_Cmp() &&
  1.2953 -               (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
  1.2954 -      // Compare pointers nodes
  1.2955 -      ptr_cmp_worklist.append(n);
  1.2956 -    } else if (n->is_MemBarStoreStore()) {
  1.2957 -      // Collect all MemBarStoreStore nodes so that depending on the
  1.2958 -      // escape status of the associated Allocate node some of them
  1.2959 -      // may be eliminated.
  1.2960 -      storestore_worklist.append(n);
  1.2961 -    }
  1.2962 -    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1.2963 -      Node* m = n->fast_out(i);   // Get user
  1.2964 -      worklist_init.push(m);
  1.2965 -    }
  1.2966 -  }
  1.2967 -
  1.2968 -  if (alloc_worklist.length() == 0) {
  1.2969 -    _collecting = false;
  1.2970 -    return false; // Nothing to do.
  1.2971 -  }
  1.2972 -
  1.2973 -  // 2. First pass to create simple CG edges (doesn't require to walk CG).
  1.2974 -  uint delayed_size = _delayed_worklist.size();
  1.2975 -  for( uint next = 0; next < delayed_size; ++next ) {
  1.2976 -    Node* n = _delayed_worklist.at(next);
  1.2977 -    build_connection_graph(n, igvn);
  1.2978 -  }
  1.2979 -
  1.2980 -  // 3. Pass to create initial fields edges (JavaObject -F-> AddP)
  1.2981 -  //    to reduce number of iterations during stage 4 below.
  1.2982 -  uint addp_length = addp_worklist.length();
  1.2983 -  for( uint next = 0; next < addp_length; ++next ) {
  1.2984 -    Node* n = addp_worklist.at(next);
  1.2985 -    Node* base = get_addp_base(n);
  1.2986 -    if (base->is_Proj() && base->in(0)->is_Call())
  1.2987 -      base = base->in(0);
  1.2988 -    PointsToNode::NodeType nt = ptnode_adr(base->_idx)->node_type();
  1.2989 -    if (nt == PointsToNode::JavaObject) {
  1.2990 -      build_connection_graph(n, igvn);
  1.2991 -    }
  1.2992 -  }
  1.2993 -
  1.2994 -  GrowableArray<int> cg_worklist;
  1.2995 -  cg_worklist.append(_phantom_object);
  1.2996 -  GrowableArray<uint>  worklist;
  1.2997 -
  1.2998 -  // 4. Build Connection Graph which need
  1.2999 -  //    to walk the connection graph.
  1.3000 -  _progress = false;
  1.3001 -  for (uint ni = 0; ni < nodes_size(); ni++) {
  1.3002 -    PointsToNode* ptn = ptnode_adr(ni);
  1.3003 -    Node *n = ptn->_node;
  1.3004 -    if (n != NULL) { // Call, AddP, LoadP, StoreP
  1.3005 -      build_connection_graph(n, igvn);
  1.3006 -      if (ptn->node_type() != PointsToNode::UnknownType)
  1.3007 -        cg_worklist.append(n->_idx); // Collect CG nodes
  1.3008 -      if (!_processed.test(n->_idx))
  1.3009 -        worklist.append(n->_idx); // Collect C/A/L/S nodes
  1.3010 -    }
  1.3011 -  }
  1.3012 -
  1.3013 -  // After IGVN user nodes may have smaller _idx than
  1.3014 -  // their inputs so they will be processed first in
  1.3015 -  // previous loop. Because of that not all Graph
  1.3016 -  // edges will be created. Walk over interesting
  1.3017 -  // nodes again until no new edges are created.
  1.3018 -  //
  1.3019 -  // Normally only 1-3 passes needed to build
  1.3020 -  // Connection Graph depending on graph complexity.
  1.3021 -  // Observed 8 passes in jvm2008 compiler.compiler.
  1.3022 -  // Set limit to 20 to catch situation when something
  1.3023 -  // did go wrong and recompile the method without EA.
  1.3024 -  // Also limit build time to 30 sec (60 in debug VM).
  1.3025 -
  1.3026 -#define CG_BUILD_ITER_LIMIT 20
  1.3027 -
  1.3028 -#ifdef ASSERT
  1.3029 -#define CG_BUILD_TIME_LIMIT 60.0
  1.3030 -#else
  1.3031 -#define CG_BUILD_TIME_LIMIT 30.0
  1.3032 -#endif
  1.3033 -
  1.3034 -  uint length = worklist.length();
  1.3035 -  int iterations = 0;
  1.3036 -  elapsedTimer time;
  1.3037 -  while(_progress &&
  1.3038 -        (iterations++   < CG_BUILD_ITER_LIMIT) &&
  1.3039 -        (time.seconds() < CG_BUILD_TIME_LIMIT)) {
  1.3040 -    time.start();
  1.3041 -    _progress = false;
  1.3042 -    for( uint next = 0; next < length; ++next ) {
  1.3043 -      int ni = worklist.at(next);
  1.3044 -      PointsToNode* ptn = ptnode_adr(ni);
  1.3045 -      Node* n = ptn->_node;
  1.3046 -      assert(n != NULL, "should be known node");
  1.3047 -      build_connection_graph(n, igvn);
  1.3048 -    }
  1.3049 -    time.stop();
  1.3050 -  }
  1.3051 -  if ((iterations     >= CG_BUILD_ITER_LIMIT) ||
  1.3052 -      (time.seconds() >= CG_BUILD_TIME_LIMIT)) {
  1.3053 -    assert(false, err_msg("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
  1.3054 -           time.seconds(), iterations, nodes_size(), length));
  1.3055 -    // Possible infinite build_connection_graph loop,
  1.3056 -    // bailout (no changes to ideal graph were made).
  1.3057 -    _collecting = false;
  1.3058 -    return false;
  1.3059 -  }
  1.3060 -#undef CG_BUILD_ITER_LIMIT
  1.3061 -#undef CG_BUILD_TIME_LIMIT
  1.3062 -
  1.3063 -  // 5. Propagate escaped states.
  1.3064 -  worklist.clear();
  1.3065 -
  1.3066 -  // mark all nodes reachable from GlobalEscape nodes
  1.3067 -  (void)propagate_escape_state(&cg_worklist, &worklist, PointsToNode::GlobalEscape);
  1.3068 -
  1.3069 -  // mark all nodes reachable from ArgEscape nodes
  1.3070 -  bool has_non_escaping_obj = propagate_escape_state(&cg_worklist, &worklist, PointsToNode::ArgEscape);
  1.3071 -
  1.3072 -  Arena* arena = Thread::current()->resource_area();
  1.3073 -  VectorSet visited(arena);
  1.3074 -
  1.3075 -  // 6. Find fields initializing values for not escaped allocations
  1.3076 -  uint alloc_length = alloc_worklist.length();
  1.3077 -  for (uint next = 0; next < alloc_length; ++next) {
  1.3078 -    Node* n = alloc_worklist.at(next);
  1.3079 -    PointsToNode::EscapeState es = ptnode_adr(n->_idx)->escape_state();
  1.3080 -    if (es == PointsToNode::NoEscape) {
  1.3081 -      has_non_escaping_obj = true;
  1.3082 -      if (n->is_Allocate()) {
  1.3083 -        find_init_values(n, &visited, igvn);
  1.3084 -        // The object allocated by this Allocate node will never be
  1.3085 -        // seen by an other thread. Mark it so that when it is
  1.3086 -        // expanded no MemBarStoreStore is added.
  1.3087 -        n->as_Allocate()->initialization()->set_does_not_escape();
  1.3088 -      }
  1.3089 -    } else if ((es == PointsToNode::ArgEscape) && n->is_Allocate()) {
  1.3090 -      // Same as above. Mark this Allocate node so that when it is
  1.3091 -      // expanded no MemBarStoreStore is added.
  1.3092 -      n->as_Allocate()->initialization()->set_does_not_escape();
  1.3093 -    }
  1.3094 -  }
  1.3095 -
  1.3096 -  uint cg_length = cg_worklist.length();
  1.3097 -
  1.3098 -  // Skip the rest of code if all objects escaped.
  1.3099 -  if (!has_non_escaping_obj) {
  1.3100 -    cg_length = 0;
  1.3101 -    addp_length = 0;
  1.3102 -  }
  1.3103 -
  1.3104 -  for (uint next = 0; next < cg_length; ++next) {
  1.3105 -    int ni = cg_worklist.at(next);
  1.3106 -    PointsToNode* ptn = ptnode_adr(ni);
  1.3107 -    PointsToNode::NodeType nt = ptn->node_type();
  1.3108 -    if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
  1.3109 -      if (ptn->edge_count() == 0) {
  1.3110 -        // No values were found. Assume the value was set
  1.3111 -        // outside this method - add edge to phantom object.
  1.3112 -        add_pointsto_edge(ni, _phantom_object);
  1.3113 -      }
  1.3114 -    }
  1.3115 -  }
  1.3116 -
  1.3117 -  // 7. Remove deferred edges from the graph.
  1.3118 -  for (uint next = 0; next < cg_length; ++next) {
  1.3119 -    int ni = cg_worklist.at(next);
  1.3120 -    PointsToNode* ptn = ptnode_adr(ni);
  1.3121 -    PointsToNode::NodeType nt = ptn->node_type();
  1.3122 -    if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
  1.3123 -      remove_deferred(ni, &worklist, &visited);
  1.3124 -    }
  1.3125 -  }
  1.3126 -
  1.3127 -  // 8. Adjust escape state of nonescaping objects.
  1.3128 -  for (uint next = 0; next < addp_length; ++next) {
  1.3129 -    Node* n = addp_worklist.at(next);
  1.3130 -    adjust_escape_state(n);
  1.3131 -  }
  1.3132 -
  1.3133 -  // push all NoEscape nodes on the worklist
  1.3134 -  worklist.clear();
  1.3135 -  for( uint next = 0; next < cg_length; ++next ) {
  1.3136 -    int nk = cg_worklist.at(next);
  1.3137 -    if (ptnode_adr(nk)->escape_state() == PointsToNode::NoEscape &&
  1.3138 -        !is_null_ptr(nk))
  1.3139 -      worklist.push(nk);
  1.3140 -  }
  1.3141 -
  1.3142 -  alloc_worklist.clear();
  1.3143 -  // Propagate scalar_replaceable value.
  1.3144 -  while(worklist.length() > 0) {
  1.3145 -    uint nk = worklist.pop();
  1.3146 -    PointsToNode* ptn = ptnode_adr(nk);
  1.3147 -    Node* n = ptn->_node;
  1.3148 -    bool scalar_replaceable = ptn->scalar_replaceable();
  1.3149 -    if (n->is_Allocate() && scalar_replaceable) {
  1.3150 -      // Push scalar replaceable allocations on alloc_worklist
  1.3151 -      // for processing in split_unique_types(). Note,
  1.3152 -      // following code may change scalar_replaceable value.
  1.3153 -      alloc_worklist.append(n);
  1.3154 -    }
  1.3155 -    uint e_cnt = ptn->edge_count();
  1.3156 -    for (uint ei = 0; ei < e_cnt; ei++) {
  1.3157 -      uint npi = ptn->edge_target(ei);
  1.3158 -      if (is_null_ptr(npi))
  1.3159 -        continue;
  1.3160 -      PointsToNode *np = ptnode_adr(npi);
  1.3161 -      if (np->escape_state() < PointsToNode::NoEscape) {
  1.3162 -        set_escape_state(npi, PointsToNode::NoEscape);
  1.3163 -        if (!scalar_replaceable) {
  1.3164 -          np->set_scalar_replaceable(false);
  1.3165 -        }
  1.3166 -        worklist.push(npi);
  1.3167 -      } else if (np->scalar_replaceable() && !scalar_replaceable) {
  1.3168 -        np->set_scalar_replaceable(false);
  1.3169 -        worklist.push(npi);
  1.3170 -      }
  1.3171 -    }
  1.3172 -  }
  1.3173 -
  1.3174 -  _collecting = false;
  1.3175 -  assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build");
  1.3176 -
  1.3177 -  assert(ptnode_adr(_oop_null)->escape_state() == PointsToNode::NoEscape &&
  1.3178 -         ptnode_adr(_oop_null)->edge_count() == 0, "sanity");
  1.3179 -  if (UseCompressedOops) {
  1.3180 -    assert(ptnode_adr(_noop_null)->escape_state() == PointsToNode::NoEscape &&
  1.3181 -           ptnode_adr(_noop_null)->edge_count() == 0, "sanity");
  1.3182 -  }
  1.3183 -
  1.3184 -  if (EliminateLocks && has_non_escaping_obj) {
  1.3185 -    // Mark locks before changing ideal graph.
  1.3186 -    int cnt = C->macro_count();
  1.3187 -    for( int i=0; i < cnt; i++ ) {
  1.3188 -      Node *n = C->macro_node(i);
  1.3189 -      if (n->is_AbstractLock()) { // Lock and Unlock nodes
  1.3190 -        AbstractLockNode* alock = n->as_AbstractLock();
  1.3191 -        if (!alock->is_non_esc_obj()) {
  1.3192 -          PointsToNode::EscapeState es = escape_state(alock->obj_node());
  1.3193 -          assert(es != PointsToNode::UnknownEscape, "should know");
  1.3194 -          if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
  1.3195 -            assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
  1.3196 -            // The lock could be marked eliminated by lock coarsening
  1.3197 -            // code during first IGVN before EA. Replace coarsened flag
  1.3198 -            // to eliminate all associated locks/unlocks.
  1.3199 -            alock->set_non_esc_obj();
  1.3200 -          }
  1.3201 -        }
  1.3202 -      }
  1.3203 -    }
  1.3204 -  }
  1.3205 -
  1.3206 -  if (OptimizePtrCompare && has_non_escaping_obj) {
  1.3207 -    // Add ConI(#CC_GT) and ConI(#CC_EQ).
  1.3208 -    _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
  1.3209 -    _pcmp_eq = igvn->makecon(TypeInt::CC_EQ);
  1.3210 -    // Optimize objects compare.
  1.3211 -    while (ptr_cmp_worklist.length() != 0) {
  1.3212 -      Node *n = ptr_cmp_worklist.pop();
  1.3213 -      Node *res = optimize_ptr_compare(n);
  1.3214 -      if (res != NULL) {
  1.3215 -#ifndef PRODUCT
  1.3216 -        if (PrintOptimizePtrCompare) {
  1.3217 -          tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
  1.3218 -          if (Verbose) {
  1.3219 -            n->dump(1);
  1.3220 -          }
  1.3221 -        }
  1.3222 -#endif
  1.3223 -        _igvn->replace_node(n, res);
  1.3224 -      }
  1.3225 -    }
  1.3226 -    // cleanup
  1.3227 -    if (_pcmp_neq->outcnt() == 0)
  1.3228 -      igvn->hash_delete(_pcmp_neq);
  1.3229 -    if (_pcmp_eq->outcnt()  == 0)
  1.3230 -      igvn->hash_delete(_pcmp_eq);
  1.3231 -  }
  1.3232 -
  1.3233 -  // For MemBarStoreStore nodes added in library_call.cpp, check
  1.3234 -  // escape status of associated AllocateNode and optimize out
  1.3235 -  // MemBarStoreStore node if the allocated object never escapes.
  1.3236 -  while (storestore_worklist.length() != 0) {
  1.3237 -    Node *n = storestore_worklist.pop();
  1.3238 -    MemBarStoreStoreNode *storestore = n ->as_MemBarStoreStore();
  1.3239 -    Node *alloc = storestore->in(MemBarNode::Precedent)->in(0);
  1.3240 -    assert (alloc->is_Allocate(), "storestore should point to AllocateNode");
  1.3241 -    PointsToNode::EscapeState es = ptnode_adr(alloc->_idx)->escape_state();
  1.3242 -    if (es == PointsToNode::NoEscape || es == PointsToNode::ArgEscape) {
  1.3243 -      MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
  1.3244 -      mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
  1.3245 -      mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
  1.3246 -
  1.3247 -      _igvn->register_new_node_with_optimizer(mb);
  1.3248 -      _igvn->replace_node(storestore, mb);
  1.3249 -    }
  1.3250 -  }
  1.3251 -
  1.3252 -#ifndef PRODUCT
  1.3253 -  if (PrintEscapeAnalysis) {
  1.3254 -    dump(); // Dump ConnectionGraph
  1.3255 -  }
  1.3256 -#endif
  1.3257 -
  1.3258 -  bool has_scalar_replaceable_candidates = false;
  1.3259 -  alloc_length = alloc_worklist.length();
  1.3260 -  for (uint next = 0; next < alloc_length; ++next) {
  1.3261 -    Node* n = alloc_worklist.at(next);
  1.3262 -    PointsToNode* ptn = ptnode_adr(n->_idx);
  1.3263 -    assert(ptn->escape_state() == PointsToNode::NoEscape, "sanity");
  1.3264 -    if (ptn->scalar_replaceable()) {
  1.3265 -      has_scalar_replaceable_candidates = true;
  1.3266 -      break;
  1.3267 -    }
  1.3268 -  }
  1.3269 -
  1.3270 -  if ( has_scalar_replaceable_candidates &&
  1.3271 -       C->AliasLevel() >= 3 && EliminateAllocations ) {
  1.3272 -
  1.3273 -    // Now use the escape information to create unique types for
  1.3274 -    // scalar replaceable objects.
  1.3275 -    split_unique_types(alloc_worklist);
  1.3276 -
  1.3277 -    if (C->failing())  return false;
  1.3278 -
  1.3279 -    C->print_method("After Escape Analysis", 2);
  1.3280 -
  1.3281 -#ifdef ASSERT
  1.3282 -  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
  1.3283 -    tty->print("=== No allocations eliminated for ");
  1.3284 -    C->method()->print_short_name();
  1.3285 -    if(!EliminateAllocations) {
  1.3286 -      tty->print(" since EliminateAllocations is off ===");
  1.3287 -    } else if(!has_scalar_replaceable_candidates) {
  1.3288 -      tty->print(" since there are no scalar replaceable candidates ===");
  1.3289 -    } else if(C->AliasLevel() < 3) {
  1.3290 -      tty->print(" since AliasLevel < 3 ===");
  1.3291 -    }
  1.3292 -    tty->cr();
  1.3293 -#endif
  1.3294 -  }
  1.3295 -  return has_non_escaping_obj;
  1.3296 -}
  1.3297 -
  1.3298 -// Find fields initializing values for allocations.
  1.3299 -void ConnectionGraph::find_init_values(Node* alloc, VectorSet* visited, PhaseTransform* phase) {
  1.3300 -  assert(alloc->is_Allocate(), "Should be called for Allocate nodes only");
  1.3301 -  PointsToNode* pta = ptnode_adr(alloc->_idx);
  1.3302 -  assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
  1.3303 -  InitializeNode* ini = alloc->as_Allocate()->initialization();
  1.3304 -
  1.3305 -  Compile* C = _compile;
  1.3306 -  visited->Reset();
  1.3307 -  // Check if a oop field's initializing value is recorded and add
  1.3308 -  // a corresponding NULL field's value if it is not recorded.
  1.3309 -  // Connection Graph does not record a default initialization by NULL
  1.3310 -  // captured by Initialize node.
  1.3311 -  //
  1.3312 -  uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
  1.3313 -  uint ae_cnt = pta->edge_count();
  1.3314 -  bool visited_bottom_offset = false;
  1.3315 -  for (uint ei = 0; ei < ae_cnt; ei++) {
  1.3316 -    uint nidx = pta->edge_target(ei); // Field (AddP)
  1.3317 -    PointsToNode* ptn = ptnode_adr(nidx);
  1.3318 -    assert(ptn->_node->is_AddP(), "Should be AddP nodes only");
  1.3319 -    int offset = ptn->offset();
  1.3320 -    if (offset == Type::OffsetBot) {
  1.3321 -      if (!visited_bottom_offset) {
  1.3322 -        visited_bottom_offset = true;
  1.3323 -        // Check only oop fields.
  1.3324 -        const Type* adr_type = ptn->_node->as_AddP()->bottom_type();
  1.3325 -        if (!adr_type->isa_aryptr() ||
  1.3326 -            (adr_type->isa_aryptr()->klass() == NULL) ||
  1.3327 -             adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
  1.3328 -          // OffsetBot is used to reference array's element,
  1.3329 -          // always add reference to NULL since we don't
  1.3330 -          // known which element is referenced.
  1.3331 -          add_edge_from_fields(alloc->_idx, null_idx, offset);
  1.3332 -        }
  1.3333 -      }
  1.3334 -    } else if (offset != oopDesc::klass_offset_in_bytes() &&
  1.3335 -               !visited->test_set(offset)) {
  1.3336 -
  1.3337 -      // Check only oop fields.
  1.3338 -      const Type* adr_type = ptn->_node->as_AddP()->bottom_type();
  1.3339 -      BasicType basic_field_type = T_INT;
  1.3340 -      if (adr_type->isa_instptr()) {
  1.3341 -        ciField* field = C->alias_type(adr_type->isa_instptr())->field();
  1.3342 -        if (field != NULL) {
  1.3343 -          basic_field_type = field->layout_type();
  1.3344 -        } else {
  1.3345 -          // Ignore non field load (for example, klass load)
  1.3346 -        }
  1.3347 -      } else if (adr_type->isa_aryptr()) {
  1.3348 -        if (offset != arrayOopDesc::length_offset_in_bytes()) {
  1.3349 -          const Type* elemtype = adr_type->isa_aryptr()->elem();
  1.3350 -          basic_field_type = elemtype->array_element_basic_type();
  1.3351 -        } else {
  1.3352 -          // Ignore array length load
  1.3353 -        }
  1.3354 -#ifdef ASSERT
  1.3355 -      } else {
  1.3356 -        // Raw pointers are used for initializing stores so skip it
  1.3357 -        // since it should be recorded already
  1.3358 -        Node* base = get_addp_base(ptn->_node);
  1.3359 -        assert(adr_type->isa_rawptr() && base->is_Proj() &&
  1.3360 -               (base->in(0) == alloc),"unexpected pointer type");
  1.3361 -#endif
  1.3362 -      }
  1.3363 -      if (basic_field_type == T_OBJECT ||
  1.3364 -          basic_field_type == T_NARROWOOP ||
  1.3365 -          basic_field_type == T_ARRAY) {
  1.3366 -        Node* value = NULL;
  1.3367 -        if (ini != NULL) {
  1.3368 -          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
  1.3369 -          Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
  1.3370 -          if (store != NULL && store->is_Store()) {
  1.3371 -            value = store->in(MemNode::ValueIn);
  1.3372 -          } else {
  1.3373 -            // There could be initializing stores which follow allocation.
  1.3374 -            // For example, a volatile field store is not collected
  1.3375 -            // by Initialize node.
  1.3376 -            //
  1.3377 -            // Need to check for dependent loads to separate such stores from
  1.3378 -            // stores which follow loads. For now, add initial value NULL so
  1.3379 -            // that compare pointers optimization works correctly.
  1.3380 -          }
  1.3381 -        }
  1.3382 -        if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
  1.3383 -          // A field's initializing value was not recorded. Add NULL.
  1.3384 -          add_edge_from_fields(alloc->_idx, null_idx, offset);
  1.3385 -        }
  1.3386 -      }
  1.3387 -    }
  1.3388 -  }
  1.3389 -}
  1.3390 -
  1.3391 -// Adjust escape state after Connection Graph is built.
  1.3392 -void ConnectionGraph::adjust_escape_state(Node* n) {
  1.3393 -  PointsToNode* ptn = ptnode_adr(n->_idx);
  1.3394 -  assert(n->is_AddP(), "Should be called for AddP nodes only");
  1.3395 -  // Search for objects which are not scalar replaceable
  1.3396 -  // and mark them to propagate the state to referenced objects.
  1.3397 -  //
  1.3398 -
  1.3399 -  int offset = ptn->offset();
  1.3400 -  Node* base = get_addp_base(n);
  1.3401 -  VectorSet* ptset = PointsTo(base);
  1.3402 -  int ptset_size = ptset->Size();
  1.3403 -
  1.3404 -  // An object is not scalar replaceable if the field which may point
  1.3405 -  // to it has unknown offset (unknown element of an array of objects).
  1.3406 -  //
  1.3407 -
  1.3408 -  if (offset == Type::OffsetBot) {
  1.3409 -    uint e_cnt = ptn->edge_count();
  1.3410 -    for (uint ei = 0; ei < e_cnt; ei++) {
  1.3411 -      uint npi = ptn->edge_target(ei);
  1.3412 -      ptnode_adr(npi)->set_scalar_replaceable(false);
  1.3413 -    }
  1.3414 -  }
  1.3415 -
  1.3416 -  // Currently an object is not scalar replaceable if a LoadStore node
  1.3417 -  // access its field since the field value is unknown after it.
  1.3418 -  //
  1.3419 -  bool has_LoadStore = false;
  1.3420 -  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1.3421 -    Node *use = n->fast_out(i);
  1.3422 -    if (use->is_LoadStore()) {
  1.3423 -      has_LoadStore = true;
  1.3424 -      break;
  1.3425 -    }
  1.3426 -  }
  1.3427 -  // An object is not scalar replaceable if the address points
  1.3428 -  // to unknown field (unknown element for arrays, offset is OffsetBot).
  1.3429 -  //
  1.3430 -  // Or the address may point to more then one object. This may produce
  1.3431 -  // the false positive result (set not scalar replaceable)
  1.3432 -  // since the flow-insensitive escape analysis can't separate
  1.3433 -  // the case when stores overwrite the field's value from the case
  1.3434 -  // when stores happened on different control branches.
  1.3435 -  //
  1.3436 -  // Note: it will disable scalar replacement in some cases:
  1.3437 -  //
  1.3438 -  //    Point p[] = new Point[1];
  1.3439 -  //    p[0] = new Point(); // Will be not scalar replaced
  1.3440 -  //
  1.3441 -  // but it will save us from incorrect optimizations in next cases:
  1.3442 -  //
  1.3443 -  //    Point p[] = new Point[1];
  1.3444 -  //    if ( x ) p[0] = new Point(); // Will be not scalar replaced
  1.3445 -  //
  1.3446 -  if (ptset_size > 1 || ptset_size != 0 &&
  1.3447 -      (has_LoadStore || offset == Type::OffsetBot)) {
  1.3448 -    for( VectorSetI j(ptset); j.test(); ++j ) {
  1.3449 -      ptnode_adr(j.elem)->set_scalar_replaceable(false);
  1.3450 -    }
  1.3451 -  }
  1.3452 -}
  1.3453 -
  1.3454 -// Propagate escape states to referenced nodes.
  1.3455 -bool ConnectionGraph::propagate_escape_state(GrowableArray<int>* cg_worklist,
  1.3456 -                                             GrowableArray<uint>* worklist,
  1.3457 -                                             PointsToNode::EscapeState esc_state) {
  1.3458 -  bool has_java_obj = false;
  1.3459 -
  1.3460 -  // push all nodes with the same escape state on the worklist
  1.3461 -  uint cg_length = cg_worklist->length();
  1.3462 -  for (uint next = 0; next < cg_length; ++next) {
  1.3463 -    int nk = cg_worklist->at(next);
  1.3464 -    if (ptnode_adr(nk)->escape_state() == esc_state)
  1.3465 -      worklist->push(nk);
  1.3466 -  }
  1.3467 -  // mark all reachable nodes
  1.3468 -  while (worklist->length() > 0) {
  1.3469 -    int pt = worklist->pop();
  1.3470 -    PointsToNode* ptn = ptnode_adr(pt);
  1.3471 -    if (ptn->node_type() == PointsToNode::JavaObject &&
  1.3472 -        !is_null_ptr(pt)) {
  1.3473 -      has_java_obj = true;
  1.3474 -      if (esc_state > PointsToNode::NoEscape) {
  1.3475 -        // fields values are unknown if object escapes
  1.3476 -        add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
  1.3477 -      }
  1.3478 -    }
  1.3479 -    uint e_cnt = ptn->edge_count();
  1.3480 -    for (uint ei = 0; ei < e_cnt; ei++) {
  1.3481 -      uint npi = ptn->edge_target(ei);
  1.3482 -      if (is_null_ptr(npi))
  1.3483 -        continue;
  1.3484 -      PointsToNode *np = ptnode_adr(npi);
  1.3485 -      if (np->escape_state() < esc_state) {
  1.3486 -        set_escape_state(npi, esc_state);
  1.3487 -        worklist->push(npi);
  1.3488 -      }
  1.3489 -    }
  1.3490 -  }
  1.3491 -  // Has not escaping java objects
  1.3492 -  return has_java_obj && (esc_state < PointsToNode::GlobalEscape);
  1.3493 -}
  1.3494 -
  1.3495 -// Optimize objects compare.
  1.3496 -Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
  1.3497 -  assert(OptimizePtrCompare, "sanity");
  1.3498 -  // Clone returned Set since PointsTo() returns pointer
  1.3499 -  // to the same structure ConnectionGraph.pt_ptset.
  1.3500 -  VectorSet ptset1 = *PointsTo(n->in(1));
  1.3501 -  VectorSet ptset2 = *PointsTo(n->in(2));
  1.3502 -
  1.3503 -  // Check simple cases first.
  1.3504 -  if (ptset1.Size() == 1) {
  1.3505 -    uint pt1 = ptset1.getelem();
  1.3506 -    PointsToNode* ptn1 = ptnode_adr(pt1);
  1.3507 -    if (ptn1->escape_state() == PointsToNode::NoEscape) {
  1.3508 -      if (ptset2.Size() == 1 && ptset2.getelem() == pt1) {
  1.3509 -        // Comparing the same not escaping object.
  1.3510 -        return _pcmp_eq;
  1.3511 -      }
  1.3512 -      Node* obj = ptn1->_node;
  1.3513 -      // Comparing not escaping allocation.
  1.3514 -      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
  1.3515 -          !ptset2.test(pt1)) {
  1.3516 -        return _pcmp_neq; // This includes nullness check.
  1.3517 -      }
  1.3518 -    }
  1.3519 -  } else if (ptset2.Size() == 1) {
  1.3520 -    uint pt2 = ptset2.getelem();
  1.3521 -    PointsToNode* ptn2 = ptnode_adr(pt2);
  1.3522 -    if (ptn2->escape_state() == PointsToNode::NoEscape) {
  1.3523 -      Node* obj = ptn2->_node;
  1.3524 -      // Comparing not escaping allocation.
  1.3525 -      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
  1.3526 -          !ptset1.test(pt2)) {
  1.3527 -        return _pcmp_neq; // This includes nullness check.
  1.3528 -      }
  1.3529 -    }
  1.3530 -  }
  1.3531 -
  1.3532 -  if (!ptset1.disjoint(ptset2)) {
  1.3533 -    return NULL; // Sets are not disjoint
  1.3534 -  }
  1.3535 -
  1.3536 -  // Sets are disjoint.
  1.3537 -  bool set1_has_unknown_ptr = ptset1.test(_phantom_object) != 0;
  1.3538 -  bool set2_has_unknown_ptr = ptset2.test(_phantom_object) != 0;
  1.3539 -  bool set1_has_null_ptr   = (ptset1.test(_oop_null) | ptset1.test(_noop_null)) != 0;
  1.3540 -  bool set2_has_null_ptr   = (ptset2.test(_oop_null) | ptset2.test(_noop_null)) != 0;
  1.3541 -
  1.3542 -  if (set1_has_unknown_ptr && set2_has_null_ptr ||
  1.3543 -      set2_has_unknown_ptr && set1_has_null_ptr) {
  1.3544 -    // Check nullness of unknown object.
  1.3545 -    return NULL;
  1.3546 -  }
  1.3547 -
  1.3548 -  // Disjointness by itself is not sufficient since
  1.3549 -  // alias analysis is not complete for escaped objects.
  1.3550 -  // Disjoint sets are definitely unrelated only when
  1.3551 -  // at least one set has only not escaping objects.
  1.3552 -  if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
  1.3553 -    bool has_only_non_escaping_alloc = true;
  1.3554 -    for (VectorSetI i(&ptset1); i.test(); ++i) {
  1.3555 -      uint pt = i.elem;
  1.3556 -      PointsToNode* ptn = ptnode_adr(pt);
  1.3557 -      Node* obj = ptn->_node;
  1.3558 -      if (ptn->escape_state() != PointsToNode::NoEscape ||
  1.3559 -          !(obj->is_Allocate() || obj->is_CallStaticJava())) {
  1.3560 -        has_only_non_escaping_alloc = false;
  1.3561 -        break;
  1.3562 -      }
  1.3563 -    }
  1.3564 -    if (has_only_non_escaping_alloc) {
  1.3565 -      return _pcmp_neq;
  1.3566 -    }
  1.3567 -  }
  1.3568 -  if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
  1.3569 -    bool has_only_non_escaping_alloc = true;
  1.3570 -    for (VectorSetI i(&ptset2); i.test(); ++i) {
  1.3571 -      uint pt = i.elem;
  1.3572 -      PointsToNode* ptn = ptnode_adr(pt);
  1.3573 -      Node* obj = ptn->_node;
  1.3574 -      if (ptn->escape_state() != PointsToNode::NoEscape ||
  1.3575 -          !(obj->is_Allocate() || obj->is_CallStaticJava())) {
  1.3576 -        has_only_non_escaping_alloc = false;
  1.3577 -        break;
  1.3578 -      }
  1.3579 -    }
  1.3580 -    if (has_only_non_escaping_alloc) {
  1.3581 -      return _pcmp_neq;
  1.3582 -    }
  1.3583 -  }
  1.3584 -  return NULL;
  1.3585 -}
  1.3586 -
  1.3587 -void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {
  1.3588 -    bool is_arraycopy = false;
  1.3589 -    switch (call->Opcode()) {
  1.3590 -#ifdef ASSERT
  1.3591 -    case Op_Allocate:
  1.3592 -    case Op_AllocateArray:
  1.3593 -    case Op_Lock:
  1.3594 -    case Op_Unlock:
  1.3595 -      assert(false, "should be done already");
  1.3596 -      break;
  1.3597 -#endif
  1.3598 -    case Op_CallLeafNoFP:
  1.3599 -      is_arraycopy = (call->as_CallLeaf()->_name != NULL &&
  1.3600 -                      strstr(call->as_CallLeaf()->_name, "arraycopy") != 0);
  1.3601 -      // fall through
  1.3602 -    case Op_CallLeaf:
  1.3603 -    {
  1.3604 -      // Stub calls, objects do not escape but they are not scale replaceable.
  1.3605 -      // Adjust escape state for outgoing arguments.
  1.3606 -      const TypeTuple * d = call->tf()->domain();
  1.3607 -      bool src_has_oops = false;
  1.3608 -      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  1.3609 -        const Type* at = d->field_at(i);
  1.3610 -        Node *arg = call->in(i)->uncast();
  1.3611 -        const Type *aat = phase->type(arg);
  1.3612 -        PointsToNode::EscapeState arg_esc = ptnode_adr(arg->_idx)->escape_state();
  1.3613 -        if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr() &&
  1.3614 -            (is_arraycopy || arg_esc < PointsToNode::ArgEscape)) {
  1.3615 -#ifdef ASSERT
  1.3616 -          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
  1.3617 -                 aat->isa_ptr() != NULL, "expecting an Ptr");
  1.3618 -          if (!(is_arraycopy ||
  1.3619 -                call->as_CallLeaf()->_name != NULL &&
  1.3620 -                (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre")  == 0 ||
  1.3621 -                 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ))
  1.3622 -          ) {
  1.3623 -            call->dump();
  1.3624 -            assert(false, "EA: unexpected CallLeaf");
  1.3625 -          }
  1.3626 -#endif
  1.3627 -          if (arg_esc < PointsToNode::ArgEscape) {
  1.3628 -            set_escape_state(arg->_idx, PointsToNode::ArgEscape);
  1.3629 -            Node* arg_base = arg;
  1.3630 -            if (arg->is_AddP()) {
  1.3631 -              //
  1.3632 -              // The inline_native_clone() case when the arraycopy stub is called
  1.3633 -              // after the allocation before Initialize and CheckCastPP nodes.
  1.3634 -              // Or normal arraycopy for object arrays case.
  1.3635 -              //
  1.3636 -              // Set AddP's base (Allocate) as not scalar replaceable since
  1.3637 -              // pointer to the base (with offset) is passed as argument.
  1.3638 -              //
  1.3639 -              arg_base = get_addp_base(arg);
  1.3640 -              set_escape_state(arg_base->_idx, PointsToNode::ArgEscape);
  1.3641 -            }
  1.3642 -          }
  1.3643 -
  1.3644 -          bool arg_has_oops = aat->isa_oopptr() &&
  1.3645 -                              (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
  1.3646 -                               (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
  1.3647 -          if (i == TypeFunc::Parms) {
  1.3648 -            src_has_oops = arg_has_oops;
  1.3649 -          }
  1.3650 -          //
  1.3651 -          // src or dst could be j.l.Object when other is basic type array:
  1.3652 -          //
  1.3653 -          //   arraycopy(char[],0,Object*,0,size);
  1.3654 -          //   arraycopy(Object*,0,char[],0,size);
  1.3655 -          //
  1.3656 -          // Do nothing special in such cases.
  1.3657 -          //
  1.3658 -          if (is_arraycopy && (i > TypeFunc::Parms) &&
  1.3659 -              src_has_oops && arg_has_oops) {
  1.3660 -            // Destination object's fields reference an unknown object.
  1.3661 -            Node* arg_base = arg;
  1.3662 -            if (arg->is_AddP()) {
  1.3663 -              arg_base = get_addp_base(arg);
  1.3664 -            }
  1.3665 -            for (VectorSetI s(PointsTo(arg_base)); s.test(); ++s) {
  1.3666 -              uint ps = s.elem;
  1.3667 -              set_escape_state(ps, PointsToNode::ArgEscape);
  1.3668 -              add_edge_from_fields(ps, _phantom_object, Type::OffsetBot);
  1.3669 -            }
  1.3670 -            // Conservatively all values in source object fields globally escape
  1.3671 -            // since we don't know if values in destination object fields
  1.3672 -            // escape (it could be traced but it is too expensive).
  1.3673 -            Node* src = call->in(TypeFunc::Parms)->uncast();
  1.3674 -            Node* src_base = src;
  1.3675 -            if (src->is_AddP()) {
  1.3676 -              src_base  = get_addp_base(src);
  1.3677 -            }
  1.3678 -            for (VectorSetI s(PointsTo(src_base)); s.test(); ++s) {
  1.3679 -              uint ps = s.elem;
  1.3680 -              set_escape_state(ps, PointsToNode::ArgEscape);
  1.3681 -              // Use OffsetTop to indicate fields global escape.
  1.3682 -              add_edge_from_fields(ps, _phantom_object, Type::OffsetTop);
  1.3683 -            }
  1.3684 -          }
  1.3685 -        }
  1.3686 -      }
  1.3687 -      break;
  1.3688 -    }
  1.3689 -
  1.3690 -    case Op_CallStaticJava:
  1.3691 -    // For a static call, we know exactly what method is being called.
  1.3692 -    // Use bytecode estimator to record the call's escape affects
  1.3693 -    {
  1.3694 -      ciMethod *meth = call->as_CallJava()->method();
  1.3695 -      BCEscapeAnalyzer *call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
  1.3696 -      // fall-through if not a Java method or no analyzer information
  1.3697 -      if (call_analyzer != NULL) {
  1.3698 -        const TypeTuple * d = call->tf()->domain();
  1.3699 -        bool copy_dependencies = false;
  1.3700 -        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  1.3701 -          const Type* at = d->field_at(i);
  1.3702 -          int k = i - TypeFunc::Parms;
  1.3703 -          Node *arg = call->in(i)->uncast();
  1.3704 -
  1.3705 -          if (at->isa_oopptr() != NULL &&
  1.3706 -              ptnode_adr(arg->_idx)->escape_state() < PointsToNode::GlobalEscape) {
  1.3707 -
  1.3708 -            bool global_escapes = false;
  1.3709 -            bool fields_escapes = false;
  1.3710 -            if (!call_analyzer->is_arg_stack(k)) {
  1.3711 -              // The argument global escapes, mark everything it could point to
  1.3712 -              set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
  1.3713 -              global_escapes = true;
  1.3714 -            } else {
  1.3715 -              if (!call_analyzer->is_arg_local(k)) {
  1.3716 -                // The argument itself doesn't escape, but any fields might
  1.3717 -                fields_escapes = true;
  1.3718 -              }
  1.3719 -              set_escape_state(arg->_idx, PointsToNode::ArgEscape);
  1.3720 -              copy_dependencies = true;
  1.3721 -            }
  1.3722 -
  1.3723 -            for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
  1.3724 -              uint pt = j.elem;
  1.3725 -              if (global_escapes) {
  1.3726 -                // The argument global escapes, mark everything it could point to
  1.3727 -                set_escape_state(pt, PointsToNode::GlobalEscape);
  1.3728 -                add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
  1.3729 -              } else {
  1.3730 -                set_escape_state(pt, PointsToNode::ArgEscape);
  1.3731 -                if (fields_escapes) {
  1.3732 -                  // The argument itself doesn't escape, but any fields might.
  1.3733 -                  // Use OffsetTop to indicate such case.
  1.3734 -                  add_edge_from_fields(pt, _phantom_object, Type::OffsetTop);
  1.3735 -                }
  1.3736 -              }
  1.3737 -            }
  1.3738 -          }
  1.3739 -        }
  1.3740 -        if (copy_dependencies)
  1.3741 -          call_analyzer->copy_dependencies(_compile->dependencies());
  1.3742 -        break;
  1.3743 -      }
  1.3744 -    }
  1.3745 -
  1.3746 -    default:
  1.3747 -    // Fall-through here if not a Java method or no analyzer information
  1.3748 -    // or some other type of call, assume the worst case: all arguments
  1.3749 -    // globally escape.
  1.3750 -    {
  1.3751 -      // adjust escape state for  outgoing arguments
  1.3752 -      const TypeTuple * d = call->tf()->domain();
  1.3753 -      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  1.3754 -        const Type* at = d->field_at(i);
  1.3755 -        if (at->isa_oopptr() != NULL) {
  1.3756 -          Node *arg = call->in(i)->uncast();
  1.3757 -          set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
  1.3758 -          for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
  1.3759 -            uint pt = j.elem;
  1.3760 -            set_escape_state(pt, PointsToNode::GlobalEscape);
  1.3761 -            add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
  1.3762 -          }
  1.3763 -        }
  1.3764 -      }
  1.3765 -    }
  1.3766 -  }
  1.3767 -}
  1.3768 -void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *phase) {
  1.3769 -  CallNode   *call = resproj->in(0)->as_Call();
  1.3770 -  uint    call_idx = call->_idx;
  1.3771 -  uint resproj_idx = resproj->_idx;
  1.3772 -
  1.3773 -  switch (call->Opcode()) {
  1.3774 -    case Op_Allocate:
  1.3775 -    {
  1.3776 -      Node *k = call->in(AllocateNode::KlassNode);
  1.3777 -      const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
  1.3778 -      assert(kt != NULL, "TypeKlassPtr  required.");
  1.3779 -      ciKlass* cik = kt->klass();
  1.3780 -
  1.3781 -      PointsToNode::EscapeState es;
  1.3782 -      uint edge_to;
  1.3783 -      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
  1.3784 -         !cik->is_instance_klass() || // StressReflectiveCode
  1.3785 -          cik->as_instance_klass()->has_finalizer()) {
  1.3786 -        es = PointsToNode::GlobalEscape;
  1.3787 -        edge_to = _phantom_object; // Could not be worse
  1.3788 -      } else {
  1.3789 -        es = PointsToNode::NoEscape;
  1.3790 -        edge_to = call_idx;
  1.3791 -        assert(ptnode_adr(call_idx)->scalar_replaceable(), "sanity");
  1.3792 -      }
  1.3793 -      set_escape_state(call_idx, es);
  1.3794 -      add_pointsto_edge(resproj_idx, edge_to);
  1.3795 -      _processed.set(resproj_idx);
  1.3796 -      break;
  1.3797 -    }
  1.3798 -
  1.3799 -    case Op_AllocateArray:
  1.3800 -    {
  1.3801 -
  1.3802 -      Node *k = call->in(AllocateNode::KlassNode);
  1.3803 -      const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
  1.3804 -      assert(kt != NULL, "TypeKlassPtr  required.");
  1.3805 -      ciKlass* cik = kt->klass();
  1.3806 -
  1.3807 -      PointsToNode::EscapeState es;
  1.3808 -      uint edge_to;
  1.3809 -      if (!cik->is_array_klass()) { // StressReflectiveCode
  1.3810 -        es = PointsToNode::GlobalEscape;
  1.3811 -        edge_to = _phantom_object;
  1.3812 -      } else {
  1.3813 -        es = PointsToNode::NoEscape;
  1.3814 -        edge_to = call_idx;
  1.3815 -        assert(ptnode_adr(call_idx)->scalar_replaceable(), "sanity");
  1.3816 -        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
  1.3817 -        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
  1.3818 -          // Not scalar replaceable if the length is not constant or too big.
  1.3819 -          ptnode_adr(call_idx)->set_scalar_replaceable(false);
  1.3820 -        }
  1.3821 -      }
  1.3822 -      set_escape_state(call_idx, es);
  1.3823 -      add_pointsto_edge(resproj_idx, edge_to);
  1.3824 -      _processed.set(resproj_idx);
  1.3825 -      break;
  1.3826 -    }
  1.3827 -
  1.3828 -    case Op_CallStaticJava:
  1.3829 -    // For a static call, we know exactly what method is being called.
  1.3830 -    // Use bytecode estimator to record whether the call's return value escapes
  1.3831 -    {
  1.3832 -      bool done = true;
  1.3833 -      const TypeTuple *r = call->tf()->range();
  1.3834 -      const Type* ret_type = NULL;
  1.3835 -
  1.3836 -      if (r->cnt() > TypeFunc::Parms)
  1.3837 -        ret_type = r->field_at(TypeFunc::Parms);
  1.3838 -
  1.3839 -      // Note:  we use isa_ptr() instead of isa_oopptr()  here because the
  1.3840 -      //        _multianewarray functions return a TypeRawPtr.
  1.3841 -      if (ret_type == NULL || ret_type->isa_ptr() == NULL) {
  1.3842 -        _processed.set(resproj_idx);
  1.3843 -        break;  // doesn't return a pointer type
  1.3844 -      }
  1.3845 -      ciMethod *meth = call->as_CallJava()->method();
  1.3846 -      const TypeTuple * d = call->tf()->domain();
  1.3847 -      if (meth == NULL) {
  1.3848 -        // not a Java method, assume global escape
  1.3849 -        set_escape_state(call_idx, PointsToNode::GlobalEscape);
  1.3850 -        add_pointsto_edge(resproj_idx, _phantom_object);
  1.3851 -      } else {
  1.3852 -        BCEscapeAnalyzer *call_analyzer = meth->get_bcea();
  1.3853 -        bool copy_dependencies = false;
  1.3854 -
  1.3855 -        if (call_analyzer->is_return_allocated()) {
  1.3856 -          // Returns a newly allocated unescaped object, simply
  1.3857 -          // update dependency information.
  1.3858 -          // Mark it as NoEscape so that objects referenced by
  1.3859 -          // it's fields will be marked as NoEscape at least.
  1.3860 -          set_escape_state(call_idx, PointsToNode::NoEscape);
  1.3861 -          ptnode_adr(call_idx)->set_scalar_replaceable(false);
  1.3862 -          // Fields values are unknown
  1.3863 -          add_edge_from_fields(call_idx, _phantom_object, Type::OffsetBot);
  1.3864 -          add_pointsto_edge(resproj_idx, call_idx);
  1.3865 -          copy_dependencies = true;
  1.3866 -        } else {
  1.3867 -          // determine whether any arguments are returned
  1.3868 -          set_escape_state(call_idx, PointsToNode::ArgEscape);
  1.3869 -          bool ret_arg = false;
  1.3870 -          for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
  1.3871 -            const Type* at = d->field_at(i);
  1.3872 -            if (at->isa_oopptr() != NULL) {
  1.3873 -              Node *arg = call->in(i)->uncast();
  1.3874 -
  1.3875 -              if (call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
  1.3876 -                ret_arg = true;
  1.3877 -                PointsToNode *arg_esp = ptnode_adr(arg->_idx);
  1.3878 -                if (arg_esp->node_type() == PointsToNode::UnknownType)
  1.3879 -                  done = false;
  1.3880 -                else if (arg_esp->node_type() == PointsToNode::JavaObject)
  1.3881 -                  add_pointsto_edge(resproj_idx, arg->_idx);
  1.3882 -                else
  1.3883 -                  add_deferred_edge(resproj_idx, arg->_idx);
  1.3884 -              }
  1.3885 -            }
  1.3886 -          }
  1.3887 -          if (done) {
  1.3888 -            copy_dependencies = true;
  1.3889 -            // is_return_local() is true when only arguments are returned.
  1.3890 -            if (!ret_arg || !call_analyzer->is_return_local()) {
  1.3891 -              // Returns unknown object.
  1.3892 -              add_pointsto_edge(resproj_idx, _phantom_object);
  1.3893 -            }
  1.3894 -          }
  1.3895 -        }
  1.3896 -        if (copy_dependencies)
  1.3897 -          call_analyzer->copy_dependencies(_compile->dependencies());
  1.3898 -      }
  1.3899 -      if (done)
  1.3900 -        _processed.set(resproj_idx);
  1.3901 -      break;
  1.3902 -    }
  1.3903 -
  1.3904 -    default:
  1.3905 -    // Some other type of call, assume the worst case that the
  1.3906 -    // returned value, if any, globally escapes.
  1.3907 -    {
  1.3908 -      const TypeTuple *r = call->tf()->range();
  1.3909 -      if (r->cnt() > TypeFunc::Parms) {
  1.3910 -        const Type* ret_type = r->field_at(TypeFunc::Parms);
  1.3911 -
  1.3912 -        // Note:  we use isa_ptr() instead of isa_oopptr()  here because the
  1.3913 -        //        _multianewarray functions return a TypeRawPtr.
  1.3914 -        if (ret_type->isa_ptr() != NULL) {
  1.3915 -          set_escape_state(call_idx, PointsToNode::GlobalEscape);
  1.3916 -          add_pointsto_edge(resproj_idx, _phantom_object);
  1.3917 -        }
  1.3918 -      }
  1.3919 -      _processed.set(resproj_idx);
  1.3920 -    }
  1.3921 -  }
  1.3922 -}
  1.3923 -
  1.3924 -// Populate Connection Graph with Ideal nodes and create simple
  1.3925 -// connection graph edges (do not need to check the node_type of inputs
  1.3926 -// or to call PointsTo() to walk the connection graph).
  1.3927 -void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase) {
  1.3928 -  if (_processed.test(n->_idx))
  1.3929 -    return; // No need to redefine node's state.
  1.3930 -
  1.3931 -  if (n->is_Call()) {
  1.3932 -    // Arguments to allocation and locking don't escape.
  1.3933 -    if (n->is_Allocate()) {
  1.3934 -      add_node(n, PointsToNode::JavaObject, PointsToNode::UnknownEscape, true);
  1.3935 -      record_for_optimizer(n);
  1.3936 -    } else if (n->is_Lock() || n->is_Unlock()) {
  1.3937 -      // Put Lock and Unlock nodes on IGVN worklist to process them during
  1.3938 -      // the first IGVN optimization when escape information is still available.
  1.3939 -      record_for_optimizer(n);
  1.3940 -      _processed.set(n->_idx);
  1.3941 -    } else {
  1.3942 -      // Don't mark as processed since call's arguments have to be processed.
  1.3943 -      PointsToNode::NodeType nt = PointsToNode::UnknownType;
  1.3944 -      PointsToNode::EscapeState es = PointsToNode::UnknownEscape;
  1.3945 -
  1.3946 -      // Check if a call returns an object.
  1.3947 -      const TypeTuple *r = n->as_Call()->tf()->range();
  1.3948 -      if (r->cnt() > TypeFunc::Parms &&
  1.3949 -          r->field_at(TypeFunc::Parms)->isa_ptr() &&
  1.3950 -          n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
  1.3951 -        nt = PointsToNode::JavaObject;
  1.3952 -        if (!n->is_CallStaticJava()) {
  1.3953 -          // Since the called mathod is statically unknown assume
  1.3954 -          // the worst case that the returned value globally escapes.
  1.3955 -          es = PointsToNode::GlobalEscape;
  1.3956 -        }
  1.3957 -      }
  1.3958 -      add_node(n, nt, es, false);
  1.3959 -    }
  1.3960 -    return;
  1.3961 -  }
  1.3962 -
  1.3963 -  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  1.3964 -  // ThreadLocal has RawPrt type.
  1.3965 -  switch (n->Opcode()) {
  1.3966 -    case Op_AddP:
  1.3967 -    {
  1.3968 -      add_node(n, PointsToNode::Field, PointsToNode::UnknownEscape, false);
  1.3969 -      break;
  1.3970 -    }
  1.3971 -    case Op_CastX2P:
  1.3972 -    { // "Unsafe" memory access.
  1.3973 -      add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
  1.3974 -      break;
  1.3975 -    }
  1.3976 -    case Op_CastPP:
  1.3977 -    case Op_CheckCastPP:
  1.3978 -    case Op_EncodeP:
  1.3979 -    case Op_DecodeN:
  1.3980 -    {
  1.3981 -      add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  1.3982 -      int ti = n->in(1)->_idx;
  1.3983 -      PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
  1.3984 -      if (nt == PointsToNode::UnknownType) {
  1.3985 -        _delayed_worklist.push(n); // Process it later.
  1.3986 -        break;
  1.3987 -      } else if (nt == PointsToNode::JavaObject) {
  1.3988 -        add_pointsto_edge(n->_idx, ti);
  1.3989 -      } else {
  1.3990 -        add_deferred_edge(n->_idx, ti);
  1.3991 -      }
  1.3992 -      _processed.set(n->_idx);
  1.3993 -      break;
  1.3994 -    }
  1.3995 -    case Op_ConP:
  1.3996 -    {
  1.3997 -      // assume all pointer constants globally escape except for null
  1.3998 -      PointsToNode::EscapeState es;
  1.3999 -      if (phase->type(n) == TypePtr::NULL_PTR)
  1.4000 -        es = PointsToNode::NoEscape;
  1.4001 -      else
  1.4002 -        es = PointsToNode::GlobalEscape;
  1.4003 -
  1.4004 -      add_node(n, PointsToNode::JavaObject, es, true);
  1.4005 -      break;
  1.4006 -    }
  1.4007 -    case Op_ConN:
  1.4008 -    {
  1.4009 -      // assume all narrow oop constants globally escape except for null
  1.4010 -      PointsToNode::EscapeState es;
  1.4011 -      if (phase->type(n) == TypeNarrowOop::NULL_PTR)
  1.4012 -        es = PointsToNode::NoEscape;
  1.4013 -      else
  1.4014 -        es = PointsToNode::GlobalEscape;
  1.4015 -
  1.4016 -      add_node(n, PointsToNode::JavaObject, es, true);
  1.4017 -      break;
  1.4018 -    }
  1.4019 -    case Op_CreateEx:
  1.4020 -    {
  1.4021 -      // assume that all exception objects globally escape
  1.4022 -      add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
  1.4023 -      break;
  1.4024 -    }
  1.4025 -    case Op_LoadKlass:
  1.4026 -    case Op_LoadNKlass:
  1.4027 -    {
  1.4028 -      add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
  1.4029 -      break;
  1.4030 -    }
  1.4031 -    case Op_LoadP:
  1.4032 -    case Op_LoadN:
  1.4033 -    {
  1.4034 -      const Type *t = phase->type(n);
  1.4035 -      if (t->make_ptr() == NULL) {
  1.4036 -        _processed.set(n->_idx);
  1.4037 -        return;
  1.4038 -      }
  1.4039 -      add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  1.4040 -      break;
  1.4041 -    }
  1.4042 -    case Op_Parm:
  1.4043 -    {
  1.4044 -      _processed.set(n->_idx); // No need to redefine it state.
  1.4045 -      uint con = n->as_Proj()->_con;
  1.4046 -      if (con < TypeFunc::Parms)
  1.4047 -        return;
  1.4048 -      const Type *t = n->in(0)->as_Start()->_domain->field_at(con);
  1.4049 -      if (t->isa_ptr() == NULL)
  1.4050 -        return;
  1.4051 -      // We have to assume all input parameters globally escape
  1.4052 -      // (Note: passing 'false' since _processed is already set).
  1.4053 -      add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, false);
  1.4054 -      break;
  1.4055 -    }
  1.4056 -    case Op_PartialSubtypeCheck:
  1.4057 -    { // Produces Null or notNull and is used in CmpP.
  1.4058 -      add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
  1.4059 -      break;
  1.4060 -    }
  1.4061 -    case Op_Phi:
  1.4062 -    {
  1.4063 -      const Type *t = n->as_Phi()->type();
  1.4064 -      if (t->make_ptr() == NULL) {
  1.4065 -        // nothing to do if not an oop or narrow oop
  1.4066 -        _processed.set(n->_idx);
  1.4067 -        return;
  1.4068 -      }
  1.4069 -      add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  1.4070 -      uint i;
  1.4071 -      for (i = 1; i < n->req() ; i++) {
  1.4072 -        Node* in = n->in(i);
  1.4073 -        if (in == NULL)
  1.4074 -          continue;  // ignore NULL
  1.4075 -        in = in->uncast();
  1.4076 -        if (in->is_top() || in == n)
  1.4077 -          continue;  // ignore top or inputs which go back this node
  1.4078 -        int ti = in->_idx;
  1.4079 -        PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
  1.4080 -        if (nt == PointsToNode::UnknownType) {
  1.4081 -          break;
  1.4082 -        } else if (nt == PointsToNode::JavaObject) {
  1.4083 -          add_pointsto_edge(n->_idx, ti);
  1.4084 -        } else {
  1.4085 -          add_deferred_edge(n->_idx, ti);
  1.4086 -        }
  1.4087 -      }
  1.4088 -      if (i >= n->req())
  1.4089 -        _processed.set(n->_idx);
  1.4090 -      else
  1.4091 -        _delayed_worklist.push(n);
  1.4092 -      break;
  1.4093 -    }
  1.4094 -    case Op_Proj:
  1.4095 -    {
  1.4096 -      // we are only interested in the oop result projection from a call
  1.4097 -      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
  1.4098 -        const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
  1.4099 -        assert(r->cnt() > TypeFunc::Parms, "sanity");
  1.4100 -        if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
  1.4101 -          add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
  1.4102 -          int ti = n->in(0)->_idx;
  1.4103 -          // The call may not be registered yet (since not all its inputs are registered)
  1.4104 -          // if this is the projection from backbranch edge of Phi.
  1.4105 -          if (ptnode_adr(ti)->node_type() != PointsToNode::UnknownType) {
  1.4106 -            process_call_result(n->as_Proj(), phase);
  1.4107 -          }
  1.4108 -          if (!_processed.test(n->_idx)) {
  1.4109 -            // The call's result may need to be processed later if the call
  1.4110 -            // returns it's argument and the argument is not processed yet.
  1.4111 -            _delayed_worklist.push(n);
  1.4112 -          }
  1.4113 -          break;
  1.4114 -        }
  1.4115 -      }
  1.4116 -      _processed.set(n->_idx);
  1.4117 -      break;
  1.4118 -    }
  1.4119 -    case Op_Return:
  1.4120 -    {
  1.4121 -      if( n->req() > TypeFunc::Parms &&
  1.4122 -          phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
  1.4123 -        // Treat Return value as LocalVar with GlobalEscape escape state.
  1.4124 -        add_node(n, PointsToNode::LocalVar, PointsToNode::GlobalEscape, false);
  1.4125 -        int ti = n->in(TypeFunc::Parms)->_idx;
  1.4126 -        PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
  1.4127 -        if (nt == PointsToNode::UnknownType) {
  1.4128 -          _delayed_worklist.push(n); // Process it later.
  1.4129 -          break;
  1.4130 -        } else if (nt == PointsToNode::JavaObject) {
  1.4131 -          add_pointsto_edge(n->_idx, ti);
  1.4132 -        } else {
  1.4133 -          add_deferred_edge(n->_idx, ti);
  1.4134 -        }
  1.4135 -      }
  1.4136 -      _processed.set(n->_idx);
  1.4137 -      break;
  1.4138 -    }
  1.4139 -    case Op_StoreP:
  1.4140 -    case Op_StoreN:
  1.4141 -    {
  1.4142 -      const Type *adr_type = phase->type(n->in(MemNode::Address));
  1.4143 -      adr_type = adr_type->make_ptr();
  1.4144 -      if (adr_type->isa_oopptr()) {
  1.4145 -        add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
  1.4146 -      } else {
  1.4147 -        Node* adr = n->in(MemNode::Address);
  1.4148 -        if (adr->is_AddP() && phase->type(adr) == TypeRawPtr::NOTNULL &&
  1.4149 -            adr->in(AddPNode::Address)->is_Proj() &&
  1.4150 -            adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
  1.4151 -          add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
  1.4152 -          // We are computing a raw address for a store captured
  1.4153 -          // by an Initialize compute an appropriate address type.
  1.4154 -          int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
  1.4155 -          assert(offs != Type::OffsetBot, "offset must be a constant");
  1.4156 -        } else {
  1.4157 -          _processed.set(n->_idx);
  1.4158 -          return;
  1.4159 -        }
  1.4160 -      }
  1.4161 -      break;
  1.4162 -    }
  1.4163 -    case Op_StorePConditional:
  1.4164 -    case Op_CompareAndSwapP:
  1.4165 -    case Op_CompareAndSwapN:
  1.4166 -    {
  1.4167 -      const Type *adr_type = phase->type(n->in(MemNode::Address));
  1.4168 -      adr_type = adr_type->make_ptr();
  1.4169 -      if (adr_type->isa_oopptr()) {
  1.4170 -        add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
  1.4171 -      } else {
  1.4172 -        _processed.set(n->_idx);
  1.4173 -        return;
  1.4174 -      }
  1.4175 -      break;
  1.4176 -    }
  1.4177 -    case Op_AryEq:
  1.4178 -    case Op_StrComp:
  1.4179 -    case Op_StrEquals:
  1.4180 -    case Op_StrIndexOf:
  1.4181 -    {
  1.4182 -      // char[] arrays passed to string intrinsics are not scalar replaceable.
  1.4183 -      add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
  1.4184 -      break;
  1.4185 -    }
  1.4186 -    case Op_ThreadLocal:
  1.4187 -    {
  1.4188 -      add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
  1.4189 -      break;
  1.4190 -    }
  1.4191 -    default:
  1.4192 -      ;
  1.4193 -      // nothing to do
  1.4194 -  }
  1.4195 -  return;
  1.4196 -}
  1.4197 -
  1.4198 -void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
  1.4199 -  uint n_idx = n->_idx;
  1.4200 -  assert(ptnode_adr(n_idx)->_node != NULL, "node should be registered");
  1.4201 -
  1.4202 -  // Don't set processed bit for AddP, LoadP, StoreP since
  1.4203 -  // they may need more then one pass to process.
  1.4204 -  // Also don't mark as processed Call nodes since their
  1.4205 -  // arguments may need more then one pass to process.
  1.4206 -  if (_processed.test(n_idx))
  1.4207 -    return; // No need to redefine node's state.
  1.4208 -
  1.4209 -  if (n->is_Call()) {
  1.4210 -    CallNode *call = n->as_Call();
  1.4211 -    process_call_arguments(call, phase);
  1.4212 -    return;
  1.4213 -  }
  1.4214 -
  1.4215 -  switch (n->Opcode()) {
  1.4216 -    case Op_AddP:
  1.4217 -    {
  1.4218 -      Node *base = get_addp_base(n);
  1.4219 -      int offset = address_offset(n, phase);
  1.4220 -      // Create a field edge to this node from everything base could point to.
  1.4221 -      for( VectorSetI i(PointsTo(base)); i.test(); ++i ) {
  1.4222 -        uint pt = i.elem;
  1.4223 -        add_field_edge(pt, n_idx, offset);
  1.4224 -      }
  1.4225 -      break;
  1.4226 -    }
  1.4227 -    case Op_CastX2P:
  1.4228 -    {
  1.4229 -      assert(false, "Op_CastX2P");
  1.4230 -      break;
  1.4231 -    }
  1.4232 -    case Op_CastPP:
  1.4233 -    case Op_CheckCastPP:
  1.4234 -    case Op_EncodeP:
  1.4235 -    case Op_DecodeN:
  1.4236 -    {
  1.4237 -      int ti = n->in(1)->_idx;
  1.4238 -      assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "all nodes should be registered");
  1.4239 -      if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
  1.4240 -        add_pointsto_edge(n_idx, ti);
  1.4241 -      } else {
  1.4242 -        add_deferred_edge(n_idx, ti);
  1.4243 -      }
  1.4244 -      _processed.set(n_idx);
  1.4245 -      break;
  1.4246 -    }
  1.4247 -    case Op_ConP:
  1.4248 -    {
  1.4249 -      assert(false, "Op_ConP");
  1.4250 -      break;
  1.4251 -    }
  1.4252 -    case Op_ConN:
  1.4253 -    {
  1.4254 -      assert(false, "Op_ConN");
  1.4255 -      break;
  1.4256 -    }
  1.4257 -    case Op_CreateEx:
  1.4258 -    {
  1.4259 -      assert(false, "Op_CreateEx");
  1.4260 -      break;
  1.4261 -    }
  1.4262 -    case Op_LoadKlass:
  1.4263 -    case Op_LoadNKlass:
  1.4264 -    {
  1.4265 -      assert(false, "Op_LoadKlass");
  1.4266 -      break;
  1.4267 -    }
  1.4268 -    case Op_LoadP:
  1.4269 -    case Op_LoadN:
  1.4270 -    {
  1.4271 -      const Type *t = phase->type(n);
  1.4272 -#ifdef ASSERT
  1.4273 -      if (t->make_ptr() == NULL)
  1.4274 -        assert(false, "Op_LoadP");
  1.4275 -#endif
  1.4276 -
  1.4277 -      Node* adr = n->in(MemNode::Address)->uncast();
  1.4278 -      Node* adr_base;
  1.4279 -      if (adr->is_AddP()) {
  1.4280 -        adr_base = get_addp_base(adr);
  1.4281 -      } else {
  1.4282 -        adr_base = adr;
  1.4283 -      }
  1.4284 -
  1.4285 -      // For everything "adr_base" could point to, create a deferred edge from
  1.4286 -      // this node to each field with the same offset.
  1.4287 -      int offset = address_offset(adr, phase);
  1.4288 -      for( VectorSetI i(PointsTo(adr_base)); i.test(); ++i ) {
  1.4289 -        uint pt = i.elem;
  1.4290 -        if (adr->is_AddP()) {
  1.4291 -          // Add field edge if it is missing.
  1.4292 -          add_field_edge(pt, adr->_idx, offset);
  1.4293 -        }
  1.4294 -        add_deferred_edge_to_fields(n_idx, pt, offset);
  1.4295 -      }
  1.4296 -      break;
  1.4297 -    }
  1.4298 -    case Op_Parm:
  1.4299 -    {
  1.4300 -      assert(false, "Op_Parm");
  1.4301 -      break;
  1.4302 -    }
  1.4303 -    case Op_PartialSubtypeCheck:
  1.4304 -    {
  1.4305 -      assert(false, "Op_PartialSubtypeCheck");
  1.4306 -      break;
  1.4307 -    }
  1.4308 -    case Op_Phi:
  1.4309 -    {
  1.4310 -#ifdef ASSERT
  1.4311 -      const Type *t = n->as_Phi()->type();
  1.4312 -      if (t->make_ptr() == NULL)
  1.4313 -        assert(false, "Op_Phi");
  1.4314 -#endif
  1.4315 -      for (uint i = 1; i < n->req() ; i++) {
  1.4316 -        Node* in = n->in(i);
  1.4317 -        if (in == NULL)
  1.4318 -          continue;  // ignore NULL
  1.4319 -        in = in->uncast();
  1.4320 -        if (in->is_top() || in == n)
  1.4321 -          continue;  // ignore top or inputs which go back this node
  1.4322 -        int ti = in->_idx;
  1.4323 -        PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
  1.4324 -        assert(nt != PointsToNode::UnknownType, "all nodes should be known");
  1.4325 -        if (nt == PointsToNode::JavaObject) {
  1.4326 -          add_pointsto_edge(n_idx, ti);
  1.4327 -        } else {
  1.4328 -          add_deferred_edge(n_idx, ti);
  1.4329 -        }
  1.4330 -      }
  1.4331 -      _processed.set(n_idx);
  1.4332 -      break;
  1.4333 -    }
  1.4334 -    case Op_Proj:
  1.4335 -    {
  1.4336 -      // we are only interested in the oop result projection from a call
  1.4337 -      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
  1.4338 -        assert(ptnode_adr(n->in(0)->_idx)->node_type() != PointsToNode::UnknownType,
  1.4339 -               "all nodes should be registered");
  1.4340 -        const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
  1.4341 -        assert(r->cnt() > TypeFunc::Parms, "sanity");
  1.4342 -        if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
  1.4343 -          process_call_result(n->as_Proj(), phase);
  1.4344 -          assert(_processed.test(n_idx), "all call results should be processed");
  1.4345 -          break;
  1.4346 -        }
  1.4347 -      }
  1.4348 -      assert(false, "Op_Proj");
  1.4349 -      break;
  1.4350 -    }
  1.4351 -    case Op_Return:
  1.4352 -    {
  1.4353 -#ifdef ASSERT
  1.4354 -      if( n->req() <= TypeFunc::Parms ||
  1.4355 -          !phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
  1.4356 -        assert(false, "Op_Return");
  1.4357 -      }
  1.4358 -#endif
  1.4359 -      int ti = n->in(TypeFunc::Parms)->_idx;
  1.4360 -      assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "node should be registered");
  1.4361 -      if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
  1.4362 -        add_pointsto_edge(n_idx, ti);
  1.4363 -      } else {
  1.4364 -        add_deferred_edge(n_idx, ti);
  1.4365 -      }
  1.4366 -      _processed.set(n_idx);
  1.4367 -      break;
  1.4368 -    }
  1.4369 -    case Op_StoreP:
  1.4370 -    case Op_StoreN:
  1.4371 -    case Op_StorePConditional:
  1.4372 -    case Op_CompareAndSwapP:
  1.4373 -    case Op_CompareAndSwapN:
  1.4374 -    {
  1.4375 -      Node *adr = n->in(MemNode::Address);
  1.4376 -      const Type *adr_type = phase->type(adr)->make_ptr();
  1.4377 -#ifdef ASSERT
  1.4378 -      if (!adr_type->isa_oopptr())
  1.4379 -        assert(phase->type(adr) == TypeRawPtr::NOTNULL, "Op_StoreP");
  1.4380 -#endif
  1.4381 -
  1.4382 -      assert(adr->is_AddP(), "expecting an AddP");
  1.4383 -      Node *adr_base = get_addp_base(adr);
  1.4384 -      Node *val = n->in(MemNode::ValueIn)->uncast();
  1.4385 -      int offset = address_offset(adr, phase);
  1.4386 -      // For everything "adr_base" could point to, create a deferred edge
  1.4387 -      // to "val" from each field with the same offset.
  1.4388 -      for( VectorSetI i(PointsTo(adr_base)); i.test(); ++i ) {
  1.4389 -        uint pt = i.elem;
  1.4390 -        // Add field edge if it is missing.
  1.4391 -        add_field_edge(pt, adr->_idx, offset);
  1.4392 -        add_edge_from_fields(pt, val->_idx, offset);
  1.4393 -      }
  1.4394 -      break;
  1.4395 -    }
  1.4396 -    case Op_AryEq:
  1.4397 -    case Op_StrComp:
  1.4398 -    case Op_StrEquals:
  1.4399 -    case Op_StrIndexOf:
  1.4400 -    {
  1.4401 -      // char[] arrays passed to string intrinsic do not escape but
  1.4402 -      // they are not scalar replaceable. Adjust escape state for them.
  1.4403 -      // Start from in(2) edge since in(1) is memory edge.
  1.4404 -      for (uint i = 2; i < n->req(); i++) {
  1.4405 -        Node* adr = n->in(i)->uncast();
  1.4406 -        const Type *at = phase->type(adr);
  1.4407 -        if (!adr->is_top() && at->isa_ptr()) {
  1.4408 -          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
  1.4409 -                 at->isa_ptr() != NULL, "expecting an Ptr");
  1.4410 -          if (adr->is_AddP()) {
  1.4411 -            adr = get_addp_base(adr);
  1.4412 -          }
  1.4413 -          // Mark as ArgEscape everything "adr" could point to.
  1.4414 -          set_escape_state(adr->_idx, PointsToNode::ArgEscape);
  1.4415 -        }
  1.4416 -      }
  1.4417 -      _processed.set(n_idx);
  1.4418 -      break;
  1.4419 -    }
  1.4420 -    case Op_ThreadLocal:
  1.4421 -    {
  1.4422 -      assert(false, "Op_ThreadLocal");
  1.4423 -      break;
  1.4424 -    }
  1.4425 -    default:
  1.4426 -      // This method should be called only for EA specific nodes.
  1.4427 -      ShouldNotReachHere();
  1.4428 -  }
  1.4429 -}
  1.4430 -
  1.4431 -#ifndef PRODUCT
  1.4432 -void ConnectionGraph::dump() {
  1.4433 +void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
  1.4434    bool first = true;
  1.4435 -
  1.4436 -  uint size = nodes_size();
  1.4437 -  for (uint ni = 0; ni < size; ni++) {
  1.4438 -    PointsToNode *ptn = ptnode_adr(ni);
  1.4439 -    PointsToNode::NodeType ptn_type = ptn->node_type();
  1.4440 -
  1.4441 -    if (ptn_type != PointsToNode::JavaObject || ptn->_node == NULL)
  1.4442 +  int ptnodes_length = ptnodes_worklist.length();
  1.4443 +  for (int i = 0; i < ptnodes_length; i++) {
  1.4444 +    PointsToNode *ptn = ptnodes_worklist.at(i);
  1.4445 +    if (ptn == NULL || !ptn->is_JavaObject())
  1.4446        continue;
  1.4447 -    PointsToNode::EscapeState es = escape_state(ptn->_node);
  1.4448 -    if (ptn->_node->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
  1.4449 +    PointsToNode::EscapeState es = ptn->escape_state();
  1.4450 +    if (ptn->ideal_node()->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
  1.4451        if (first) {
  1.4452          tty->cr();
  1.4453          tty->print("======== Connection graph for ");
  1.4454 @@ -3114,22 +3099,14 @@
  1.4455          tty->cr();
  1.4456          first = false;
  1.4457        }
  1.4458 -      tty->print("%6d ", ni);
  1.4459        ptn->dump();
  1.4460 -      // Print all locals which reference this allocation
  1.4461 -      for (uint li = ni; li < size; li++) {
  1.4462 -        PointsToNode *ptn_loc = ptnode_adr(li);
  1.4463 -        PointsToNode::NodeType ptn_loc_type = ptn_loc->node_type();
  1.4464 -        if ( ptn_loc_type == PointsToNode::LocalVar && ptn_loc->_node != NULL &&
  1.4465 -             ptn_loc->edge_count() == 1 && ptn_loc->edge_target(0) == ni ) {
  1.4466 -          ptnode_adr(li)->dump(false);
  1.4467 -        }
  1.4468 -      }
  1.4469 -      if (Verbose) {
  1.4470 -        // Print all fields which reference this allocation
  1.4471 -        for (uint i = 0; i < ptn->edge_count(); i++) {
  1.4472 -          uint ei = ptn->edge_target(i);
  1.4473 -          ptnode_adr(ei)->dump(false);
  1.4474 +      // Print all locals and fields which reference this allocation
  1.4475 +      for (UseIterator j(ptn); j.has_next(); j.next()) {
  1.4476 +        PointsToNode* use = j.get();
  1.4477 +        if (use->is_LocalVar()) {
  1.4478 +          use->dump(Verbose);
  1.4479 +        } else if (Verbose) {
  1.4480 +          use->dump();
  1.4481          }
  1.4482        }
  1.4483        tty->cr();

mercurial