src/share/vm/opto/escape.cpp

changeset 3651:ee138854b3a6
parent    3604:9a72c7ece7fb
child     3657:ed4c92f54c2d
diff 3636:fde683df4c27 -> 3651:ee138854b3a6 (lines deleted from 3636 are marked "-")
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "ci/bcEscapeAnalyzer.hpp"
27 #include "compiler/compileLog.hpp"
28 #include "libadt/vectset.hpp"
29 #include "memory/allocation.hpp"
30 #include "opto/c2compiler.hpp"
31 #include "opto/callnode.hpp"
32 #include "opto/cfgnode.hpp"
33 #include "opto/compile.hpp"
34 #include "opto/escape.hpp"
35 #include "opto/phaseX.hpp"
36 #include "opto/rootnode.hpp"
37
- 37 void PointsToNode::add_edge(uint targIdx, PointsToNode::EdgeType et) {
- 38 uint v = (targIdx << EdgeShift) + ((uint) et);
- 39 if (_edges == NULL) {
- 40 Arena *a = Compile::current()->comp_arena();
- 41 _edges = new(a) GrowableArray<uint>(a, INITIAL_EDGE_COUNT, 0, 0);
- 42 }
- 43 _edges->append_if_missing(v);
- 44 }
- 45
- 46 void PointsToNode::remove_edge(uint targIdx, PointsToNode::EdgeType et) {
- 47 uint v = (targIdx << EdgeShift) + ((uint) et);
- 48
- 49 _edges->remove(v);
- 50 }
- 51
- 52 #ifndef PRODUCT
- 53 static const char *node_type_names[] = {
- 54 "UnknownType",
- 55 "JavaObject",
- 56 "LocalVar",
- 57 "Field"
- 58 };
- 59
- 60 static const char *esc_names[] = {
- 61 "UnknownEscape",
- 62 "NoEscape",
- 63 "ArgEscape",
- 64 "GlobalEscape"
- 65 };
- 66
- 67 static const char *edge_type_suffix[] = {
- 68 "?", // UnknownEdge
- 69 "P", // PointsToEdge
- 70 "D", // DeferredEdge
- 71 "F"  // FieldEdge
- 72 };
- 73
- 74 void PointsToNode::dump(bool print_state) const {
- 75 NodeType nt = node_type();
- 76 tty->print("%s ", node_type_names[(int) nt]);
- 77 if (print_state) {
- 78 EscapeState es = escape_state();
- 79 tty->print("%s %s ", esc_names[(int) es], _scalar_replaceable ? "":"NSR");
- 80 }
- 81 tty->print("[[");
- 82 for (uint i = 0; i < edge_count(); i++) {
- 83 tty->print(" %d%s", edge_target(i), edge_type_suffix[(int) edge_type(i)]);
- 84 }
- 85 tty->print("]] ");
- 86 if (_node == NULL)
- 87 tty->print_cr("<null>");
- 88 else
- 89 _node->dump();
- 90 }
- 91 #endif
- 92
- 93 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
- 94 _nodes(C->comp_arena(), C->unique(), C->unique(), PointsToNode()),
- 95 _processed(C->comp_arena()),
- 96 pt_ptset(C->comp_arena()),
- 97 pt_visited(C->comp_arena()),
- 98 pt_worklist(C->comp_arena(), 4, 0, 0),
- 99 _collecting(true),
- 100 _progress(false),
- 101 _compile(C),
- 102 _igvn(igvn),
- 103 _node_map(C->comp_arena()) {
- 104
- 105 _phantom_object = C->top()->_idx,
- 106 add_node(C->top(), PointsToNode::JavaObject, PointsToNode::GlobalEscape,true);
- 107
- 108 // Add ConP(#NULL) and ConN(#NULL) nodes.
- 109 Node* oop_null = igvn->zerocon(T_OBJECT);
- 110 _oop_null = oop_null->_idx;
- 111 assert(_oop_null < nodes_size(), "should be created already");
- 112 add_node(oop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);
- 113
- 114 if (UseCompressedOops) {
- 115 Node* noop_null = igvn->zerocon(T_NARROWOOP);
- 116 _noop_null = noop_null->_idx;
- 117 assert(_noop_null < nodes_size(), "should be created already");
- 118 add_node(noop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);
- 119 } else {
- 120 _noop_null = _oop_null; // Should be initialized
- 121 }
38 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
39 _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
40 _collecting(true),
41 _verify(false),
42 _compile(C),
43 _igvn(igvn),
44 _node_map(C->comp_arena()) {
45 // Add unknown java object.
46 add_java_object(C->top(), PointsToNode::GlobalEscape);
47 phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
48 // Add ConP(#NULL) and ConN(#NULL) nodes.
49 Node* oop_null = igvn->zerocon(T_OBJECT);
50 assert(oop_null->_idx < nodes_size(), "should be created already");
51 add_java_object(oop_null, PointsToNode::NoEscape);
52 null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
53 if (UseCompressedOops) {
54 Node* noop_null = igvn->zerocon(T_NARROWOOP);
55 assert(noop_null->_idx < nodes_size(), "should be created already");
56 map_ideal_node(noop_null, null_obj);
57 }
58 _pcmp_neq = NULL; // Should be initialized
59 _pcmp_eq  = NULL;
60 }
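// Editor's note: phantom_obj and null_obj above are the graph's two sentinel
// JavaObject nodes -- one stands for "some unknown object", the other for the
// null reference -- so unresolved pointers never need fresh nodes. A minimal,
// self-contained sketch of the same pattern (all names hypothetical, not VM
// code):
//
//   struct PTNode {
//     const char* label;
//     PTNode*     points_to[8]; // fixed fan-out keeps the sketch dependency-free
//     int         cnt;
//   };
//   static PTNode phantom_obj = { "phantom", {0}, 0 }; // any unknown object
//   static PTNode null_obj    = { "null",    {0}, 0 }; // the null reference
//   static void point_to_unknown(PTNode* p) {
//     if (p->cnt < 8) p->points_to[p->cnt++] = &phantom_obj;
//   }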
61
- 126 void ConnectionGraph::add_pointsto_edge(uint from_i, uint to_i) {
- 127 PointsToNode *f = ptnode_adr(from_i);
- 128 PointsToNode *t = ptnode_adr(to_i);
- 129
- 130 assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
- 131 assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of PointsTo edge");
- 132 assert(t->node_type() == PointsToNode::JavaObject, "invalid destination of PointsTo edge");
- 133 if (to_i == _phantom_object) { // Quick test for most common object
- 134 if (f->has_unknown_ptr()) {
- 135 return;
62 bool ConnectionGraph::has_candidates(Compile *C) {
63 // EA brings benefits only when the code has allocations and/or locks which
64 // are represented by ideal Macro nodes.
65 int cnt = C->macro_count();
66 for( int i=0; i < cnt; i++ ) {
67 Node *n = C->macro_node(i);
68 if ( n->is_Allocate() )
69 return true;
70 if( n->is_Lock() ) {
71 Node* obj = n->as_Lock()->obj_node()->uncast();
72 if( !(obj->is_Parm() || obj->is_Con()) )
73 return true;
74 }
75 }
76 return false;
77 }
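// Editor's note: has_candidates() is the cheap gate for the whole analysis; a
// driver would typically call it before do_analysis() below, roughly
// (hypothetical caller sketch):
//
//   if (ConnectionGraph::has_candidates(C)) {
//     ConnectionGraph::do_analysis(C, igvn); // sets C->congraph() on success
//   }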
78
79 void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
80 Compile::TracePhase t2("escapeAnalysis", &Phase::_t_escapeAnalysis, true);
81 ResourceMark rm;
82
83 // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
84 // to create space for them in ConnectionGraph::_nodes[].
85 Node* oop_null = igvn->zerocon(T_OBJECT);
86 Node* noop_null = igvn->zerocon(T_NARROWOOP);
87 ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
88 // Perform escape analysis
89 if (congraph->compute_escape()) {
90 // There are non-escaping objects.
91 C->set_congraph(congraph);
92 }
93 // Cleanup.
94 if (oop_null->outcnt() == 0)
95 igvn->hash_delete(oop_null);
96 if (noop_null->outcnt() == 0)
97 igvn->hash_delete(noop_null);
98 }
99
100 bool ConnectionGraph::compute_escape() {
101 Compile* C = _compile;
102 PhaseGVN* igvn = _igvn;
103
104 // Worklists used by EA.
105 Unique_Node_List delayed_worklist;
106 GrowableArray<Node*> alloc_worklist;
107 GrowableArray<Node*> ptr_cmp_worklist;
108 GrowableArray<Node*> storestore_worklist;
109 GrowableArray<PointsToNode*> ptnodes_worklist;
110 GrowableArray<JavaObjectNode*> java_objects_worklist;
111 GrowableArray<JavaObjectNode*> non_escaped_worklist;
112 GrowableArray<FieldNode*> oop_fields_worklist;
113 DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
114
115 { Compile::TracePhase t3("connectionGraph", &Phase::_t_connectionGraph, true);
116
117 // 1. Populate Connection Graph (CG) with PointsTo nodes.
118 ideal_nodes.map(C->unique(), NULL); // preallocate space
119 // Initialize worklist
120 if (C->root() != NULL) {
121 ideal_nodes.push(C->root());
122 }
123 for( uint next = 0; next < ideal_nodes.size(); ++next ) {
124 Node* n = ideal_nodes.at(next);
125 // Create PointsTo nodes and add them to Connection Graph. Called
126 // only once per ideal node since ideal_nodes is a Unique_Node list.
127 add_node_to_connection_graph(n, &delayed_worklist);
128 PointsToNode* ptn = ptnode_adr(n->_idx);
129 if (ptn != NULL) {
130 ptnodes_worklist.append(ptn);
131 if (ptn->is_JavaObject()) {
132 java_objects_worklist.append(ptn->as_JavaObject());
133 if ((n->is_Allocate() || n->is_CallStaticJava()) &&
134 (ptn->escape_state() < PointsToNode::GlobalEscape)) {
135 // Only the results of allocations and java static calls are interesting.
136 non_escaped_worklist.append(ptn->as_JavaObject());
137 }
138 } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
139 oop_fields_worklist.append(ptn->as_Field());
140 }
141 }
142 if (n->is_MergeMem()) {
143 // Collect all MergeMem nodes to add memory slices for
144 // scalar replaceable objects in split_unique_types().
145 _mergemem_worklist.append(n->as_MergeMem());
146 } else if (OptimizePtrCompare && n->is_Cmp() &&
147 (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
148 // Collect pointer-compare nodes.
149 ptr_cmp_worklist.append(n);
150 } else if (n->is_MemBarStoreStore()) {
151 // Collect all MemBarStoreStore nodes so that depending on the
152 // escape status of the associated Allocate node some of them
153 // may be eliminated.
154 storestore_worklist.append(n);
155 #ifdef ASSERT
156 } else if(n->is_AddP()) {
157 // Collect address nodes for graph verification.
158 addp_worklist.append(n);
159 #endif
160 }
161 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
162 Node* m = n->fast_out(i); // Get user
163 ideal_nodes.push(m);
164 }
165 }
166 if (non_escaped_worklist.length() == 0) {
167 _collecting = false;
168 return false; // Nothing to do.
169 }
170 // Add final simple edges to graph.
171 while(delayed_worklist.size() > 0) {
172 Node* n = delayed_worklist.pop();
173 add_final_edges(n);
174 }
175 int ptnodes_length = ptnodes_worklist.length();
176
177 #ifdef ASSERT
178 if (VerifyConnectionGraph) {
179 // Verify that no new simple edges could be created and all
180 // local vars have edges.
181 _verify = true;
182 for (int next = 0; next < ptnodes_length; ++next) {
183 PointsToNode* ptn = ptnodes_worklist.at(next);
184 add_final_edges(ptn->ideal_node());
185 if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
186 ptn->dump();
187 assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
188 }
189 }
190 _verify = false;
191 }
192 #endif
193
194 // 2. Finish Graph construction by propagating references to all
195 // java objects through graph.
196 if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,
197 java_objects_worklist, oop_fields_worklist)) {
198 // All objects escaped or hit time or iterations limits.
199 _collecting = false;
200 return false;
201 }
202
203 // 3. Adjust scalar_replaceable state of non-escaping objects and push
204 // scalar replaceable allocations on alloc_worklist for processing
205 // in split_unique_types().
206 int non_escaped_length = non_escaped_worklist.length();
207 for (int next = 0; next < non_escaped_length; next++) {
208 JavaObjectNode* ptn = non_escaped_worklist.at(next);
209 if (ptn->escape_state() == PointsToNode::NoEscape &&
210 ptn->scalar_replaceable()) {
211 adjust_scalar_replaceable_state(ptn);
212 if (ptn->scalar_replaceable()) {
213 alloc_worklist.append(ptn->ideal_node());
214 }
215 }
216 }
217
218 #ifdef ASSERT
219 if (VerifyConnectionGraph) {
220 // Verify that graph is complete - no new edges could be added or needed.
221 verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
222 java_objects_worklist, addp_worklist);
223 }
224 assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
225 assert(null_obj->escape_state() == PointsToNode::NoEscape &&
226 null_obj->edge_count() == 0 &&
227 !null_obj->arraycopy_src() &&
228 !null_obj->arraycopy_dst(), "sanity");
229 #endif
230
231 _collecting = false;
232
233 } // TracePhase t3("connectionGraph")
234
235 // 4. Optimize ideal graph based on EA information.
236 bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
237 if (has_non_escaping_obj) {
238 optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
239 }
240
241 #ifndef PRODUCT
242 if (PrintEscapeAnalysis) {
243 dump(ptnodes_worklist); // Dump ConnectionGraph
244 }
245 #endif
246
247 bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
248 #ifdef ASSERT
249 if (VerifyConnectionGraph) {
250 int alloc_length = alloc_worklist.length();
251 for (int next = 0; next < alloc_length; ++next) {
252 Node* n = alloc_worklist.at(next);
253 PointsToNode* ptn = ptnode_adr(n->_idx);
254 assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
255 }
256 }
257 #endif
258
259 // 5. Separate memory graph for scalar replaceable allocations.
260 if (has_scalar_replaceable_candidates &&
261 C->AliasLevel() >= 3 && EliminateAllocations) {
262 // Now use the escape information to create unique types for
263 // scalar replaceable objects.
264 split_unique_types(alloc_worklist);
265 if (C->failing()) return false;
266 C->print_method("After Escape Analysis", 2);
267
268 #ifdef ASSERT
269 } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
270 tty->print("=== No allocations eliminated for ");
271 C->method()->print_short_name();
272 if(!EliminateAllocations) {
273 tty->print(" since EliminateAllocations is off ===");
274 } else if(!has_scalar_replaceable_candidates) {
275 tty->print(" since there are no scalar replaceable candidates ===");
276 } else if(C->AliasLevel() < 3) {
277 tty->print(" since AliasLevel < 3 ===");
278 }
279 tty->cr();
280 #endif
281 }
282 return has_non_escaping_obj;
283 }
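// Editor's note: step 1 of compute_escape() above is a plain unique-worklist
// traversal -- each ideal node is processed exactly once because the
// Unique_Node_List rejects duplicates while users are appended during
// iteration. The shape of that loop in isolation (hypothetical,
// dependency-free sketch, not VM code):
//
//   struct N { int idx; N* out[4]; int outcnt; };
//   enum { MAX = 1024 };
//   static N*   list[MAX];
//   static bool seen[MAX];
//   static void walk(N* root) {
//     int size = 0;
//     seen[root->idx] = true; list[size++] = root;
//     for (int next = 0; next < size; next++) { // list grows while we iterate
//       N* n = list[next];
//       // visit(n) would create the PointsTo node for n here
//       for (int i = 0; i < n->outcnt; i++) {
//         N* m = n->out[i];
//         if (!seen[m->idx]) { seen[m->idx] = true; list[size++] = m; }
//       }
//     }
//   }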
284
285 // Populate Connection Graph with PointsTo nodes and create simple
286 // connection graph edges.
287 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
288 assert(!_verify, "this method should not be called for verification");
289 PhaseGVN* igvn = _igvn;
290 uint n_idx = n->_idx;
291 PointsToNode* n_ptn = ptnode_adr(n_idx);
292 if (n_ptn != NULL)
293 return; // No need to redefine PointsTo node during first iteration.
294
295 if (n->is_Call()) {
296 // Arguments to allocation and locking don't escape.
297 if (n->is_AbstractLock()) {
298 // Put Lock and Unlock nodes on IGVN worklist to process them during
299 // first IGVN optimization when escape information is still available.
300 record_for_optimizer(n);
301 } else if (n->is_Allocate()) {
302 add_call_node(n->as_Call());
303 record_for_optimizer(n);
304 } else {
305 if (n->is_CallStaticJava()) {
306 const char* name = n->as_CallStaticJava()->_name;
307 if (name != NULL && strcmp(name, "uncommon_trap") == 0)
308 return; // Skip uncommon traps
309 }
310 // Don't mark as processed since call's arguments have to be processed.
311 delayed_worklist->push(n);
312 // Check if a call returns an object.
313 if (n->as_Call()->returns_pointer() &&
314 n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
315 add_call_node(n->as_Call());
316 }
317 }
318 return;
319 }
320 // Put this check here to process call arguments since some call nodes
321 // point to phantom_obj.
322 if (n_ptn == phantom_obj || n_ptn == null_obj)
323 return; // Skip predefined nodes.
324
325 int opcode = n->Opcode();
326 switch (opcode) {
327 case Op_AddP: {
328 Node* base = get_addp_base(n);
329 PointsToNode* ptn_base = ptnode_adr(base->_idx);
330 // Field nodes are created for all field types. They are used in
331 // adjust_scalar_replaceable_state() and split_unique_types().
332 // Note, non-oop fields will have only base edges in Connection
333 // Graph because such fields are not used for oop loads and stores.
334 int offset = address_offset(n, igvn);
335 add_field(n, PointsToNode::NoEscape, offset);
336 if (ptn_base == NULL) {
337 delayed_worklist->push(n); // Process it later.
338 } else {
339 n_ptn = ptnode_adr(n_idx);
340 add_base(n_ptn->as_Field(), ptn_base);
341 }
342 break;
343 }
344 case Op_CastX2P: {
345 map_ideal_node(n, phantom_obj);
346 break;
347 }
348 case Op_CastPP:
349 case Op_CheckCastPP:
350 case Op_EncodeP:
351 case Op_DecodeN: {
352 add_local_var_and_edge(n, PointsToNode::NoEscape,
353 n->in(1), delayed_worklist);
354 break;
355 }
356 case Op_CMoveP: {
357 add_local_var(n, PointsToNode::NoEscape);
358 // Do not add edges during the first iteration because some inputs
359 // may not be defined yet.
360 delayed_worklist->push(n);
361 break;
362 }
363 case Op_ConP:
364 case Op_ConN: {
365 // assume all oop constants globally escape except for null
366 PointsToNode::EscapeState es;
367 if (igvn->type(n) == TypePtr::NULL_PTR ||
368 igvn->type(n) == TypeNarrowOop::NULL_PTR) {
369 es = PointsToNode::NoEscape;
370 } else {
371 es = PointsToNode::GlobalEscape;
372 }
373 add_java_object(n, es);
374 break;
375 }
376 case Op_CreateEx: {
377 // assume that all exception objects globally escape
378 add_java_object(n, PointsToNode::GlobalEscape);
379 break;
380 }
381 case Op_LoadKlass:
382 case Op_LoadNKlass: {
383 // Unknown class is loaded
384 map_ideal_node(n, phantom_obj);
385 break;
386 }
387 case Op_LoadP:
388 case Op_LoadN:
389 case Op_LoadPLocked: {
390 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
391 // ThreadLocal has RawPtr type.
392 const Type* t = igvn->type(n);
393 if (t->make_ptr() != NULL) {
394 Node* adr = n->in(MemNode::Address);
395 #ifdef ASSERT
396 if (!adr->is_AddP()) {
397 assert(igvn->type(adr)->isa_rawptr(), "sanity");
398 } else {
399 assert((ptnode_adr(adr->_idx) == NULL ||
400 ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
401 }
402 #endif
403 add_local_var_and_edge(n, PointsToNode::NoEscape,
404 adr, delayed_worklist);
405 }
406 break;
407 }
408 case Op_Parm: {
409 map_ideal_node(n, phantom_obj);
410 break;
411 }
412 case Op_PartialSubtypeCheck: {
413 // Produces Null or notNull and is used only in CmpP so
414 // phantom_obj could be used.
415 map_ideal_node(n, phantom_obj); // Result is unknown
416 break;
417 }
418 case Op_Phi: {
419 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
420 // ThreadLocal has RawPtr type.
421 const Type* t = n->as_Phi()->type();
422 if (t->make_ptr() != NULL) {
423 add_local_var(n, PointsToNode::NoEscape);
424 // Do not add edges during the first iteration because some inputs
425 // may not be defined yet.
426 delayed_worklist->push(n);
427 }
428 break;
429 }
430 case Op_Proj: {
431 // we are only interested in the oop result projection from a call
432 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
433 n->in(0)->as_Call()->returns_pointer()) {
434 add_local_var_and_edge(n, PointsToNode::NoEscape,
435 n->in(0), delayed_worklist);
436 }
437 break;
438 }
439 case Op_Rethrow: // Exception object escapes
440 case Op_Return: {
441 if (n->req() > TypeFunc::Parms &&
442 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
443 // Treat Return value as LocalVar with GlobalEscape escape state.
444 add_local_var_and_edge(n, PointsToNode::GlobalEscape,
445 n->in(TypeFunc::Parms), delayed_worklist);
446 }
447 break;
448 }
449 case Op_StoreP:
450 case Op_StoreN:
451 case Op_StorePConditional:
452 case Op_CompareAndSwapP:
453 case Op_CompareAndSwapN: {
454 Node* adr = n->in(MemNode::Address);
455 const Type *adr_type = igvn->type(adr);
456 adr_type = adr_type->make_ptr();
457 if (adr_type->isa_oopptr() ||
458 ((opcode == Op_StoreP || opcode == Op_StoreN) &&
459 (adr_type == TypeRawPtr::NOTNULL &&
460 adr->in(AddPNode::Address)->is_Proj() &&
461 adr->in(AddPNode::Address)->in(0)->is_Allocate()))) {
462 delayed_worklist->push(n); // Process it later.
463 #ifdef ASSERT
464 assert(adr->is_AddP(), "expecting an AddP");
465 if (adr_type == TypeRawPtr::NOTNULL) {
466 // Verify a raw address for a store captured by Initialize node.
467 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
468 assert(offs != Type::OffsetBot, "offset must be a constant");
469 }
470 } else {
471 // Ignore the copy of the displaced header to the BoxNode (OSR compilation).
472 if (adr->is_BoxLock())
473 break;
474
475 if (!adr->is_AddP()) {
476 n->dump(1);
477 assert(adr->is_AddP(), "expecting an AddP");
478 }
479 // Ignore G1 barrier's stores.
480 if (!UseG1GC || (opcode != Op_StoreP) ||
481 (adr_type != TypeRawPtr::BOTTOM)) {
482 n->dump(1);
483 assert(false, "not G1 barrier raw StoreP");
484 }
485 #endif
486 }
487 break;
488 }
489 case Op_AryEq:
490 case Op_StrComp:
491 case Op_StrEquals:
492 case Op_StrIndexOf: {
493 add_local_var(n, PointsToNode::ArgEscape);
494 delayed_worklist->push(n); // Process it later.
495 break;
496 }
497 case Op_ThreadLocal: {
498 add_java_object(n, PointsToNode::ArgEscape);
499 break;
500 }
501 default:
502 ; // Do nothing for nodes not related to EA.
503 }
504 return;
505 }
506
507 #ifdef ASSERT
508 #define ELSE_FAIL(name) \
509 /* Should not be called for a non-pointer type. */ \
510 n->dump(1); \
511 assert(false, name); \
512 break;
513 #else
514 #define ELSE_FAIL(name) \
515 break;
516 #endif
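// Editor's note: ELSE_FAIL is the usual debug/product split -- in a debug
// build an unexpected fall-through dumps the node and asserts, while a
// product build degrades to a bare break. For example, the Op_LoadP case
// below expands in a debug build to:
//
//   n->dump(1);
//   assert(false, "Op_LoadP");
//   break;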
517
518 // Add final simple edges to graph.
519 void ConnectionGraph::add_final_edges(Node *n) {
520 PointsToNode* n_ptn = ptnode_adr(n->_idx);
521 #ifdef ASSERT
522 if (_verify && n_ptn->is_JavaObject())
523 return; // This method does not change graph for JavaObject.
524 #endif
525
526 if (n->is_Call()) {
527 process_call_arguments(n->as_Call());
528 return;
529 }
530 assert(n->is_Store() || n->is_LoadStore() ||
531 ((n_ptn != NULL) && (n_ptn->ideal_node() != NULL)),
532 "node should be registered already");
533 int opcode = n->Opcode();
534 switch (opcode) {
535 case Op_AddP: {
536 Node* base = get_addp_base(n);
537 PointsToNode* ptn_base = ptnode_adr(base->_idx);
538 assert(ptn_base != NULL, "field's base should be registered");
539 add_base(n_ptn->as_Field(), ptn_base);
540 break;
541 }
542 case Op_CastPP:
543 case Op_CheckCastPP:
544 case Op_EncodeP:
545 case Op_DecodeN: {
546 add_local_var_and_edge(n, PointsToNode::NoEscape,
547 n->in(1), NULL);
548 break;
549 }
550 case Op_CMoveP: {
551 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
552 Node* in = n->in(i);
553 if (in == NULL)
554 continue; // ignore NULL
555 Node* uncast_in = in->uncast();
556 if (uncast_in->is_top() || uncast_in == n)
557 continue; // ignore top or inputs which go back this node
558 PointsToNode* ptn = ptnode_adr(in->_idx);
559 assert(ptn != NULL, "node should be registered");
560 add_edge(n_ptn, ptn);
561 }
562 break;
563 }
564 case Op_LoadP:
565 case Op_LoadN:
566 case Op_LoadPLocked: {
567 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
568 // ThreadLocal has RawPtr type.
569 const Type* t = _igvn->type(n);
570 if (t->make_ptr() != NULL) {
571 Node* adr = n->in(MemNode::Address);
572 add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
573 break;
574 }
575 ELSE_FAIL("Op_LoadP");
576 }
577 case Op_Phi: {
578 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
579 // ThreadLocal has RawPtr type.
580 const Type* t = n->as_Phi()->type();
581 if (t->make_ptr() != NULL) {
582 for (uint i = 1; i < n->req(); i++) {
583 Node* in = n->in(i);
584 if (in == NULL)
585 continue; // ignore NULL
586 Node* uncast_in = in->uncast();
587 if (uncast_in->is_top() || uncast_in == n)
588 continue; // ignore top or inputs which go back this node
589 PointsToNode* ptn = ptnode_adr(in->_idx);
590 assert(ptn != NULL, "node should be registered");
591 add_edge(n_ptn, ptn);
592 }
593 break;
594 }
595 ELSE_FAIL("Op_Phi");
596 }
597 case Op_Proj: {
598 // we are only interested in the oop result projection from a call
599 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
600 n->in(0)->as_Call()->returns_pointer()) {
601 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
602 break;
603 }
604 ELSE_FAIL("Op_Proj");
605 }
606 case Op_Rethrow: // Exception object escapes
607 case Op_Return: {
608 if (n->req() > TypeFunc::Parms &&
609 _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
610 // Treat Return value as LocalVar with GlobalEscape escape state.
611 add_local_var_and_edge(n, PointsToNode::GlobalEscape,
612 n->in(TypeFunc::Parms), NULL);
613 break;
614 }
615 ELSE_FAIL("Op_Return");
616 }
617 case Op_StoreP:
618 case Op_StoreN:
619 case Op_StorePConditional:
620 case Op_CompareAndSwapP:
621 case Op_CompareAndSwapN: {
622 Node* adr = n->in(MemNode::Address);
623 const Type *adr_type = _igvn->type(adr);
624 adr_type = adr_type->make_ptr();
625 if (adr_type->isa_oopptr() ||
626 ((opcode == Op_StoreP || opcode == Op_StoreN) &&
627 (adr_type == TypeRawPtr::NOTNULL &&
628 adr->in(AddPNode::Address)->is_Proj() &&
629 adr->in(AddPNode::Address)->in(0)->is_Allocate()))) {
630 // Point Address to Value
631 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
632 assert(adr_ptn != NULL &&
633 adr_ptn->as_Field()->is_oop(), "node should be registered");
634 Node *val = n->in(MemNode::ValueIn);
635 PointsToNode* ptn = ptnode_adr(val->_idx);
636 assert(ptn != NULL, "node should be registered");
637 add_edge(adr_ptn, ptn);
638 break;
639 }
640 ELSE_FAIL("Op_StoreP");
641 }
642 case Op_AryEq:
643 case Op_StrComp:
644 case Op_StrEquals:
645 case Op_StrIndexOf: {
646 // char[] arrays passed to string intrinsics do not escape but
647 // they are not scalar replaceable. Adjust escape state for them.
648 // Start from in(2) edge since in(1) is memory edge.
649 for (uint i = 2; i < n->req(); i++) {
650 Node* adr = n->in(i);
651 const Type* at = _igvn->type(adr);
652 if (!adr->is_top() && at->isa_ptr()) {
653 assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
654 at->isa_ptr() != NULL, "expecting a pointer");
655 if (adr->is_AddP()) {
656 adr = get_addp_base(adr);
657 }
658 PointsToNode* ptn = ptnode_adr(adr->_idx);
659 assert(ptn != NULL, "node should be registered");
660 add_edge(n_ptn, ptn);
661 }
662 }
663 break;
664 }
665 default: {
666 // This method should be called only for EA-specific nodes which may
667 // have missed some edges when they were created.
668 #ifdef ASSERT
669 n->dump(1);
670 #endif
671 guarantee(false, "unknown node");
672 }
673 }
674 return;
675 }
676
677 void ConnectionGraph::add_call_node(CallNode* call) {
678 assert(call->returns_pointer(), "only for call which returns pointer");
679 uint call_idx = call->_idx;
680 if (call->is_Allocate()) {
681 Node* k = call->in(AllocateNode::KlassNode);
682 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
683 assert(kt != NULL, "TypeKlassPtr required.");
684 ciKlass* cik = kt->klass();
685 PointsToNode::EscapeState es = PointsToNode::NoEscape;
686 bool scalar_replaceable = true;
687 if (call->is_AllocateArray()) {
688 if (!cik->is_array_klass()) { // StressReflectiveCode
689 es = PointsToNode::GlobalEscape;
690 } else {
691 int length = call->in(AllocateNode::ALength)->find_int_con(-1);
692 if (length < 0 || length > EliminateAllocationArraySizeLimit) {
693 // Not scalar replaceable if the length is not constant or too big.
694 scalar_replaceable = false;
695 }
696 }
697 } else { // Allocate instance
698 if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
699 !cik->is_instance_klass() || // StressReflectiveCode
700 cik->as_instance_klass()->has_finalizer()) {
701 es = PointsToNode::GlobalEscape;
702 }
703 }
704 add_java_object(call, es);
705 PointsToNode* ptn = ptnode_adr(call_idx);
706 if (!scalar_replaceable && ptn->scalar_replaceable()) {
707 ptn->set_scalar_replaceable(false);
708 }
709 } else if (call->is_CallStaticJava()) {
710 // Call nodes could be different types:
711 //
712 // 1. CallDynamicJavaNode (what happened during call is unknown):
713 //
714 // - mapped to GlobalEscape JavaObject node if oop is returned;
715 //
716 // - all oop arguments are escaping globally;
717 //
718 // 2. CallStaticJavaNode (execute bytecode analysis if possible):
719 //
720 // - the same as CallDynamicJavaNode if it can't do bytecode analysis;
721 //
722 // - mapped to GlobalEscape JavaObject node if unknown oop is returned;
723 // - mapped to NoEscape JavaObject node if non-escaping object allocated
724 // during call is returned;
725 // - mapped to ArgEscape LocalVar node pointing to object arguments
726 // which are returned and do not escape during the call;
727 //
728 // - oop arguments escaping status is defined by bytecode analysis;
729 //
730 // For a static call, we know exactly what method is being called.
731 // Use bytecode estimator to record whether the call's return value escapes.
732 ciMethod* meth = call->as_CallJava()->method();
733 if (meth == NULL) {
734 const char* name = call->as_CallStaticJava()->_name;
735 assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
736 // Returns a newly allocated unescaped object.
737 add_java_object(call, PointsToNode::NoEscape);
738 ptnode_adr(call_idx)->set_scalar_replaceable(false);
739 } else {
740 BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
741 call_analyzer->copy_dependencies(_compile->dependencies());
742 if (call_analyzer->is_return_allocated()) {
743 // Returns a newly allocated unescaped object, simply
744 // update dependency information.
745 // Mark it as NoEscape so that objects referenced by
746 // its fields will be marked as NoEscape at least.
747 add_java_object(call, PointsToNode::NoEscape);
748 ptnode_adr(call_idx)->set_scalar_replaceable(false);
749 } else {
750 // Determine whether any arguments are returned.
751 const TypeTuple* d = call->tf()->domain();
752 bool ret_arg = false;
753 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
754 if (d->field_at(i)->isa_ptr() != NULL &&
755 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
756 ret_arg = true;
757 break;
758 }
759 }
760 if (ret_arg) {
761 add_local_var(call, PointsToNode::ArgEscape);
762 } else {
763 // Returns unknown object.
764 map_ideal_node(call, phantom_obj);
765 }
766 }
767 }
768 } else {
769 // Another type of call, assume the worst case:
770 // returned value is unknown and globally escapes.
771 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
772 map_ideal_node(call, phantom_obj);
773 }
774 }
775
776 void ConnectionGraph::process_call_arguments(CallNode *call) {
777 bool is_arraycopy = false;
778 switch (call->Opcode()) {
779 #ifdef ASSERT
780 case Op_Allocate:
781 case Op_AllocateArray:
782 case Op_Lock:
783 case Op_Unlock:
784 assert(false, "should be done already");
785 break;
786 #endif
787 case Op_CallLeafNoFP:
788 is_arraycopy = (call->as_CallLeaf()->_name != NULL &&
789 strstr(call->as_CallLeaf()->_name, "arraycopy") != 0);
790 // fall through
791 case Op_CallLeaf: {
792 // Stub calls: objects do not escape but they are not scalar replaceable.
793 // Adjust escape state for outgoing arguments.
794 const TypeTuple * d = call->tf()->domain();
795 bool src_has_oops = false;
796 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
797 const Type* at = d->field_at(i);
798 Node *arg = call->in(i);
799 const Type *aat = _igvn->type(arg);
800 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
801 continue;
802 if (arg->is_AddP()) {
803 //
804 // The inline_native_clone() case when the arraycopy stub is called
805 // after the allocation before Initialize and CheckCastPP nodes.
806 // Or the normal arraycopy case for object arrays.
807 //
808 // Set AddP's base (Allocate) as not scalar replaceable since
809 // a pointer to the base (with offset) is passed as an argument.
810 //
811 arg = get_addp_base(arg);
812 }
813 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
814 assert(arg_ptn != NULL, "should be registered");
815 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
816 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
817 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
818 aat->isa_ptr() != NULL, "expecting a Ptr");
819 bool arg_has_oops = aat->isa_oopptr() &&
820 (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
821 (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
822 if (i == TypeFunc::Parms) {
823 src_has_oops = arg_has_oops;
824 }
825 //
826 // src or dst could be j.l.Object when other is basic type array:
827 //
828 // arraycopy(char[],0,Object*,0,size);
829 // arraycopy(Object*,0,char[],0,size);
830 //
831 // Don't add edges in such cases.
832 //
833 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
834 arg_has_oops && (i > TypeFunc::Parms);
835 #ifdef ASSERT
836 if (!(is_arraycopy ||
837 (call->as_CallLeaf()->_name != NULL &&
838 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 ||
839 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0)))
840 ) {
841 call->dump();
842 assert(false, "EA: unexpected CallLeaf");
843 }
844 #endif
845 // Always process arraycopy's destination object since
846 // we need to add all possible edges to references in
847 // source object.
848 if (arg_esc >= PointsToNode::ArgEscape &&
849 !arg_is_arraycopy_dest) {
850 continue;
851 }
852 set_escape_state(arg_ptn, PointsToNode::ArgEscape);
853 if (arg_is_arraycopy_dest) {
854 Node* src = call->in(TypeFunc::Parms);
855 if (src->is_AddP()) {
856 src = get_addp_base(src);
857 }
858 PointsToNode* src_ptn = ptnode_adr(src->_idx);
859 assert(src_ptn != NULL, "should be registered");
860 if (arg_ptn != src_ptn) {
861 // Special arraycopy edge:
862 // A destination object's field can't have the source object
863 // as base since the objects' escape states are not related.
864 // Only escape state of destination object's fields affects
865 // escape state of fields in source object.
866 add_arraycopy(call, PointsToNode::ArgEscape, src_ptn, arg_ptn);
867 }
868 }
869 }
870 }
871 break;
872 }
873 case Op_CallStaticJava: {
874 // For a static call, we know exactly what method is being called.
875 // Use bytecode estimator to record the call's escape effects
876 #ifdef ASSERT
877 const char* name = call->as_CallStaticJava()->_name;
878 assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
879 #endif
880 ciMethod* meth = call->as_CallJava()->method();
881 BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
882 // fall-through if not a Java method or no analyzer information
883 if (call_analyzer != NULL) {
884 PointsToNode* call_ptn = ptnode_adr(call->_idx);
885 const TypeTuple* d = call->tf()->domain();
886 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
887 const Type* at = d->field_at(i);
888 int k = i - TypeFunc::Parms;
889 Node* arg = call->in(i);
890 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
891 if (at->isa_ptr() != NULL &&
892 call_analyzer->is_arg_returned(k)) {
893 // The call returns arguments.
894 if (call_ptn != NULL) { // Is call's result used?
895 assert(call_ptn->is_LocalVar(), "node should be registered");
896 assert(arg_ptn != NULL, "node should be registered");
897 add_edge(call_ptn, arg_ptn);
898 }
899 }
900 if (at->isa_oopptr() != NULL &&
901 arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
902 if (!call_analyzer->is_arg_stack(k)) {
903 // The argument global escapes
904 set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
905 } else {
906 set_escape_state(arg_ptn, PointsToNode::ArgEscape);
907 if (!call_analyzer->is_arg_local(k)) {
908 // The argument itself doesn't escape, but any fields might
909 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
910 }
911 }
912 }
913 }
914 if (call_ptn != NULL && call_ptn->is_LocalVar()) {
915 // The call returns arguments.
916 assert(call_ptn->edge_count() > 0, "sanity");
917 if (!call_analyzer->is_return_local()) {
918 // It also returns an unknown object.
919 add_edge(call_ptn, phantom_obj);
920 }
921 }
922 break;
923 }
924 }
925 default: {
926 // Fall-through here if not a Java method or no analyzer information
927 // or some other type of call, assume the worst case: all arguments
928 // globally escape.
929 const TypeTuple* d = call->tf()->domain();
930 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
931 const Type* at = d->field_at(i);
932 if (at->isa_oopptr() != NULL) {
933 Node* arg = call->in(i);
934 if (arg->is_AddP()) {
935 arg = get_addp_base(arg);
936 }
937 assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
938 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
939 }
940 }
941 }
942 }
943 }
944
945
946 // Finish Graph construction.
947 bool ConnectionGraph::complete_connection_graph(
948 GrowableArray<PointsToNode*>& ptnodes_worklist,
949 GrowableArray<JavaObjectNode*>& non_escaped_worklist,
950 GrowableArray<JavaObjectNode*>& java_objects_worklist,
951 GrowableArray<FieldNode*>& oop_fields_worklist) {
952 // Normally only 1-3 passes needed to build Connection Graph depending
953 // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
954 // Set the limit to 20 to catch situations when something went wrong and
955 // bail out of Escape Analysis.
956 // Also limit build time to 30 sec (60 in debug VM).
957 #define CG_BUILD_ITER_LIMIT 20
958 #ifdef ASSERT
959 #define CG_BUILD_TIME_LIMIT 60.0
960 #else
961 #define CG_BUILD_TIME_LIMIT 30.0
962 #endif
963
964 // Propagate GlobalEscape and ArgEscape escape states and check that
965 // we still have non-escaping objects. The method pushes onto _worklist
966 // Field nodes which reference phantom_object.
967 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
968 return false; // Nothing to do.
969 }
970 // Now propagate references to all JavaObject nodes.
971 int java_objects_length = java_objects_worklist.length();
972 elapsedTimer time;
973 int new_edges = 1;
974 int iterations = 0;
975 do {
976 while ((new_edges > 0) &&
977 (iterations++ < CG_BUILD_ITER_LIMIT) &&
978 (time.seconds() < CG_BUILD_TIME_LIMIT)) {
979 time.start();
980 new_edges = 0;
981 // Propagate references to phantom_object for nodes pushed on _worklist
982 // by find_non_escaped_objects() and find_field_value().
983 new_edges += add_java_object_edges(phantom_obj, false);
984 for (int next = 0; next < java_objects_length; ++next) {
985 JavaObjectNode* ptn = java_objects_worklist.at(next);
986 new_edges += add_java_object_edges(ptn, true);
987 }
988 if (new_edges > 0) {
989 // Update escape states on each iteration if graph was updated.
990 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
991 return false; // Nothing to do.
992 }
993 }
994 time.stop();
995 }
996 if ((iterations < CG_BUILD_ITER_LIMIT) &&
997 (time.seconds() < CG_BUILD_TIME_LIMIT)) {
998 time.start();
999 // Find fields which have unknown value.
1000 int fields_length = oop_fields_worklist.length();
1001 for (int next = 0; next < fields_length; next++) {
1002 FieldNode* field = oop_fields_worklist.at(next);
1003 if (field->edge_count() == 0) {
1004 new_edges += find_field_value(field);
1005 // This code may add new edges to phantom_object.
1006 // Need another pass to propagate references to phantom_object.
1007 }
1008 }
1009 time.stop();
1010 } else {
1011 new_edges = 0; // Bailout
1012 }
1013 } while (new_edges > 0);
1014
1015 // Bailout if passed limits.
1016 if ((iterations >= CG_BUILD_ITER_LIMIT) ||
1017 (time.seconds() >= CG_BUILD_TIME_LIMIT)) {
1018 Compile* C = _compile;
1019 if (C->log() != NULL) {
1020 C->log()->begin_elem("connectionGraph_bailout reason='reached ");
1021 C->log()->text("%s", (iterations >= CG_BUILD_ITER_LIMIT) ? "iterations" : "time");
1022 C->log()->end_elem(" limit'");
1023 }
1024 assert(false, err_msg("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
1025 time.seconds(), iterations, nodes_size(), ptnodes_worklist.length()));
1026 // Possible infinite build_connection_graph loop,
1027 // bailout (no changes to ideal graph were made).
1028 return false;
1029 }
1030 #ifdef ASSERT
1031 if (Verbose && PrintEscapeAnalysis) {
1032 tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d",
1033 iterations, nodes_size(), ptnodes_worklist.length());
1034 }
1035 #endif
1036
1037 #undef CG_BUILD_ITER_LIMIT
1038 #undef CG_BUILD_TIME_LIMIT
1039
1040 // Find fields initialized by NULL for non-escaping Allocations.
1041 int non_escaped_length = non_escaped_worklist.length();
1042 for (int next = 0; next < non_escaped_length; next++) {
1043 JavaObjectNode* ptn = non_escaped_worklist.at(next);
1044 PointsToNode::EscapeState es = ptn->escape_state();
1045 assert(es <= PointsToNode::ArgEscape, "sanity");
1046 if (es == PointsToNode::NoEscape) {
1047 if (find_init_values(ptn, null_obj, _igvn) > 0) {
1048 // Adding references to NULL object does not change escape states
1049 // since it does not escape. Also no fields are added to NULL object.
1050 add_java_object_edges(null_obj, false);
1051 }
1052 }
1053 Node* n = ptn->ideal_node();
1054 if (n->is_Allocate()) {
1055 // The object allocated by this Allocate node will never be
1056 // seen by another thread. Mark it so that when it is
1057 // expanded no MemBarStoreStore is added.
1058 InitializeNode* ini = n->as_Allocate()->initialization();
1059 if (ini != NULL)
1060 ini->set_does_not_escape();
1061 }
1062 }
1063 return true; // Finished graph construction.
1064 }
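// Editor's note: complete_connection_graph() above is a bounded fixed-point
// iteration -- every pass only ever adds edges (the step is monotone), so the
// loop stops when a pass adds nothing, and the iteration/time caps act as a
// safety valve. The skeleton in isolation (hypothetical names, not VM code):
//
//   int new_edges = 1;
//   int iterations = 0;
//   while (new_edges > 0 &&
//          iterations++ < ITER_LIMIT &&
//          elapsed_seconds() < TIME_LIMIT) {
//     new_edges = propagate_once(); // >= 0; 0 means a fixed point was reached
//   }
//   bool converged = (new_edges == 0);
//   if (!converged) bail_out();     // no ideal-graph changes were made yet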
1065
1066 // Propagate GlobalEscape and ArgEscape escape states to all nodes
1067 // and check that we still have non-escaping java objects.
1068 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
1069 GrowableArray<JavaObjectNode*>& non_escaped_worklist) {
1070 GrowableArray<PointsToNode*> escape_worklist;
1071 // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
1072 int ptnodes_length = ptnodes_worklist.length();
1073 for (int next = 0; next < ptnodes_length; ++next) {
1074 PointsToNode* ptn = ptnodes_worklist.at(next);
1075 if (ptn->escape_state() >= PointsToNode::ArgEscape ||
1076 ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
1077 escape_worklist.push(ptn);
1078 }
1079 }
1080 // Set escape states to referenced nodes (edges list).
1081 while (escape_worklist.length() > 0) {
1082 PointsToNode* ptn = escape_worklist.pop();
1083 PointsToNode::EscapeState es = ptn->escape_state();
1084 PointsToNode::EscapeState field_es = ptn->fields_escape_state();
1085 if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
1086 es >= PointsToNode::ArgEscape) {
1087 // GlobalEscape or ArgEscape state of field means it has unknown value.
1088 if (add_edge(ptn, phantom_obj)) {
1089 // New edge was added
1090 add_field_uses_to_worklist(ptn->as_Field());
1091 }
1092 }
1093 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1094 PointsToNode* e = i.get();
1095 if (e->is_Arraycopy()) {
1096 assert(ptn->arraycopy_dst(), "sanity");
1097 // Propagate only fields escape state through arraycopy edge.
1098 if (e->fields_escape_state() < field_es) {
1099 set_fields_escape_state(e, field_es);
1100 escape_worklist.push(e);
1101 }
1102 } else if (es >= field_es) {
1103 // fields_escape_state is also set to 'es' if it is less than 'es'.
1104 if (e->escape_state() < es) {
1105 set_escape_state(e, es);
1106 escape_worklist.push(e);
1107 }
1108 } else {
1109 // Propagate field escape state.
1110 bool es_changed = false;
1111 if (e->fields_escape_state() < field_es) {
1112 set_fields_escape_state(e, field_es);
1113 es_changed = true;
1114 }
1115 if ((e->escape_state() < field_es) &&
1116 e->is_Field() && ptn->is_JavaObject() &&
1117 e->as_Field()->is_oop()) {
1118 // Change escape state of referenced fields.
1119 set_escape_state(e, field_es);
1120 es_changed = true;
1121 } else if (e->escape_state() < es) {
1122 set_escape_state(e, es);
1123 es_changed = true;
1124 }
1125 if (es_changed) {
1126 escape_worklist.push(e);
1127 }
1128 }
1129 }
1130 }
1131 // Remove escaped objects from non_escaped list.
1132 for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) {
1133 JavaObjectNode* ptn = non_escaped_worklist.at(next);
1134 if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
1135 non_escaped_worklist.delete_at(next);
1136 }
1137 if (ptn->escape_state() == PointsToNode::NoEscape) {
1138 // Find fields in non-escaped allocations which have unknown value.
1139 find_init_values(ptn, phantom_obj, NULL);
1140 }
1141 }
1142 return (non_escaped_worklist.length() > 0);
1143 }
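// Editor's note: escape states form a small lattice (NoEscape < ArgEscape <
// GlobalEscape) and find_non_escaped_objects() only ever raises a node's
// state, which is why the worklist pass above terminates. The core
// raise-and-repush step in isolation (hypothetical sketch, not VM code):
//
//   enum Esc { NoEscape = 1, ArgEscape = 2, GlobalEscape = 3 };
//   struct PT { Esc es; PT* edge[4]; int cnt; };
//   static void raise_states(PT* work[], int& top) {
//     while (top > 0) {
//       PT* p = work[--top];
//       for (int i = 0; i < p->cnt; i++) {
//         PT* e = p->edge[i];
//         if (e->es < p->es) { e->es = p->es; work[top++] = e; } // raise only
//       }
//     }
//   }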
1144
1145 // Add all references to JavaObject node by walking over all uses.
1146 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
1147 int new_edges = 0;
1148 if (populate_worklist) {
1149 // Populate _worklist by uses of jobj's uses.
1150 for (UseIterator i(jobj); i.has_next(); i.next()) {
1151 PointsToNode* use = i.get();
1152 if (use->is_Arraycopy())
1153 continue;
1154 add_uses_to_worklist(use);
1155 if (use->is_Field() && use->as_Field()->is_oop()) {
1156 // Put on worklist all field's uses (loads) and
1157 // related field nodes (same base and offset).
1158 add_field_uses_to_worklist(use->as_Field());
1159 }
1160 }
1161 }
1162 while(_worklist.length() > 0) {
1163 PointsToNode* use = _worklist.pop();
1164 if (PointsToNode::is_base_use(use)) {
1165 // Add reference from jobj to field and from field to jobj (field's base).
1166 use = PointsToNode::get_use_node(use)->as_Field();
1167 if (add_base(use->as_Field(), jobj)) {
1168 new_edges++;
1169 }
1170 continue;
1171 }
1172 assert(!use->is_JavaObject(), "sanity");
1173 if (use->is_Arraycopy()) {
1174 if (jobj == null_obj) // NULL object does not have field edges
1175 continue;
1176 // Add edge from Arraycopy node to arraycopy's source java object
1177 if (add_edge(use, jobj)) {
1178 jobj->set_arraycopy_src();
1179 new_edges++;
1180 }
1181 // and stop here.
1182 continue;
1183 }
1184 if (!add_edge(use, jobj))
1185 continue; // No new edge added, there was such edge already.
1186 new_edges++;
1187 if (use->is_LocalVar()) {
1188 add_uses_to_worklist(use);
1189 if (use->arraycopy_dst()) {
1190 for (EdgeIterator i(use); i.has_next(); i.next()) {
1191 PointsToNode* e = i.get();
1192 if (e->is_Arraycopy()) {
1193 if (jobj == null_obj) // NULL object does not have field edges
1194 continue;
1195 // Add edge from arraycopy's destination java object to Arraycopy node.
1196 if (add_edge(jobj, e)) {
1197 new_edges++;
1198 jobj->set_arraycopy_dst();
1199 }
1200 }
1201 }
1202 }
1203 } else {
1204 // Added a new edge for values stored in a field.
1205 // Put on worklist all field's uses (loads) and
1206 // related field nodes (same base and offset).
1207 add_field_uses_to_worklist(use->as_Field());
1208 }
1209 }
1210 return new_edges;
1211 }
1212
1213 // Put on worklist all related field nodes.
1214 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
1215 assert(field->is_oop(), "sanity");
1216 int offset = field->offset();
1217 add_uses_to_worklist(field);
1218 // Loop over all bases of this field and push on worklist Field nodes
1219 // with the same offset and base (since they may reference the same field).
1220 for (BaseIterator i(field); i.has_next(); i.next()) {
1221 PointsToNode* base = i.get();
1222 add_fields_to_worklist(field, base);
1223 // Check if the base was source object of arraycopy and go over arraycopy's
1224 // destination objects since values stored to a field of source object are
1225 // accessible by uses (loads) of fields of destination objects.
1226 if (base->arraycopy_src()) {
1227 for (UseIterator j(base); j.has_next(); j.next()) {
1228 PointsToNode* arycp = j.get();
1229 if (arycp->is_Arraycopy()) {
1230 for (UseIterator k(arycp); k.has_next(); k.next()) {
1231 PointsToNode* abase = k.get();
1232 if (abase->arraycopy_dst() && abase != base) {
1233 // Look for the same arraycopy reference.
1234 add_fields_to_worklist(field, abase);
1235 }
1236 }
1237 }
1238 }
1239 }
1240 }
1241 }
1242
1243 // Put on worklist all related field nodes.
1244 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
1245 int offset = field->offset();
1246 if (base->is_LocalVar()) {
1247 for (UseIterator j(base); j.has_next(); j.next()) {
1248 PointsToNode* f = j.get();
1249 if (PointsToNode::is_base_use(f)) { // Field
1250 f = PointsToNode::get_use_node(f);
1251 if (f == field || !f->as_Field()->is_oop())
1252 continue;
1253 int offs = f->as_Field()->offset();
1254 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
1255 add_to_worklist(f);
1256 }
1257 }
1258 }
1259 } else {
1260 assert(base->is_JavaObject(), "sanity");
1261 if (// Skip phantom_object since it is only used to indicate that
1262 // this field's content globally escapes.
1263 (base != phantom_obj) &&
1264 // NULL object node does not have fields.
1265 (base != null_obj)) {
1266 for (EdgeIterator i(base); i.has_next(); i.next()) {
1267 PointsToNode* f = i.get();
1268 // Skip arraycopy edge since store to destination object field
1269 // does not update value in source object field.
1270 if (f->is_Arraycopy()) {
1271 assert(base->arraycopy_dst(), "sanity");
1272 continue;
1273 }
1274 if (f == field || !f->as_Field()->is_oop())
1275 continue;
1276 int offs = f->as_Field()->offset();
1277 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
1278 add_to_worklist(f);
1279 }
1280 }
1281 }
1282 }
1283 }
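// Editor's note: both loops above treat two Field nodes as potential aliases
// when their offsets match, with Type::OffsetBot (unknown offset, e.g. an
// array element indexed in a loop) acting as a wildcard. The shared predicate,
// restated as a sketch (hypothetical helper, not VM code):
//
//   static bool fields_may_overlap(int offs1, int offs2) {
//     return offs1 == offs2 ||
//            offs1 == Type::OffsetBot || // unknown offset matches any field
//            offs2 == Type::OffsetBot;
//   }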
1284
1285 // Find fields which have unknown value.
1286 int ConnectionGraph::find_field_value(FieldNode* field) {
1287 // Escaped fields should have init value already.
1288 assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
1289 int new_edges = 0;
1290 for (BaseIterator i(field); i.has_next(); i.next()) {
1291 PointsToNode* base = i.get();
1292 if (base->is_JavaObject()) {
1293 // Skip Allocate's fields which will be processed later.
1294 if (base->ideal_node()->is_Allocate())
1295 return 0;
1296 assert(base == null_obj, "only NULL ptr base expected here");
1297 }
1298 }
1299 if (add_edge(field, phantom_obj)) {
1300 // New edge was added
1301 new_edges++;
1302 add_field_uses_to_worklist(field);
1303 }
1304 return new_edges;
1305 }
1306
1307 // Find fields initializing values for allocations.
1308 int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) {
1309 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1310 int new_edges = 0;
1311 Node* alloc = pta->ideal_node();
1312 if (init_val == phantom_obj) {
1313 // Do nothing for Allocate nodes since their field values are "known".
1314 if (alloc->is_Allocate())
1315 return 0;
1316 assert(alloc->as_CallStaticJava(), "sanity");
1317 #ifdef ASSERT
1318 if (alloc->as_CallStaticJava()->method() == NULL) {
1319 const char* name = alloc->as_CallStaticJava()->_name;
1320 assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
1321 }
1322 #endif
1323 // Non-escaped allocations returned from Java or runtime calls have
1324 // unknown values in fields.
1325 for (EdgeIterator i(pta); i.has_next(); i.next()) {
1326 PointsToNode* ptn = i.get();
1327 if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
1328 if (add_edge(ptn, phantom_obj)) {
1329 // New edge was added
1330 new_edges++;
1331 add_field_uses_to_worklist(ptn->as_Field());
1332 }
1333 }
1334 }
1335 return new_edges;
1336 }
1337 assert(init_val == null_obj, "sanity");
1338 // Do nothing for Call nodes since their field values are unknown.
1339 if (!alloc->is_Allocate())
1340 return 0;
1341
1342 InitializeNode* ini = alloc->as_Allocate()->initialization();
1343 Compile* C = _compile;
1344 bool visited_bottom_offset = false;
1345 GrowableArray<int> offsets_worklist;
1346
1347 // Check if an oop field's initializing value is recorded and add
1348 // a corresponding NULL as the field's value if it is not recorded.
1349 // Connection Graph does not record a default initialization by NULL
1350 // captured by Initialize node.
1351 //
1352 for (EdgeIterator i(pta); i.has_next(); i.next()) {
1353 PointsToNode* ptn = i.get(); // Field (AddP)
1354 if (!ptn->is_Field() || !ptn->as_Field()->is_oop())
1355 continue; // Not oop field
1356 int offset = ptn->as_Field()->offset();
1357 if (offset == Type::OffsetBot) {
1358 if (!visited_bottom_offset) {
1359 // OffsetBot is used to reference array's element,
1360 // always add a reference to NULL to all Field nodes since we don't
1361 // know which element is referenced.
1362 if (add_edge(ptn, null_obj)) {
1363 // New edge was added
1364 new_edges++;
1365 add_field_uses_to_worklist(ptn->as_Field());
1366 visited_bottom_offset = true;
1367 }
1368 }
1369 } else {
1370 // Check only oop fields.
1371 const Type* adr_type = ptn->ideal_node()->as_AddP()->bottom_type();
1372 if (adr_type->isa_rawptr()) {
1373 #ifdef ASSERT
1374 // Raw pointers are used for initializing stores so skip it
1375 // since it should be recorded already
1376 Node* base = get_addp_base(ptn->ideal_node());
1377 assert(adr_type->isa_rawptr() && base->is_Proj() &&
1378 (base->in(0) == alloc),"unexpected pointer type");
1379 #endif
1380 continue;
1381 }
1382 if (!offsets_worklist.contains(offset)) {
1383 offsets_worklist.append(offset);
1384 Node* value = NULL;
1385 if (ini != NULL) {
1386 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
1387 Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
1388 if (store != NULL && store->is_Store()) {
1389 value = store->in(MemNode::ValueIn);
1390 } else {
1391 // There could be initializing stores which follow allocation.
1392 // For example, a volatile field store is not collected
1393 // by Initialize node.
1394 //
1395 // Need to check for dependent loads to separate such stores from
1396 // stores which follow loads. For now, add initial value NULL so
1397 // that the compare-pointers optimization works correctly.
1398 }
1399 }
1400 if (value == NULL) {
1401 // A field's initializing value was not recorded. Add NULL.
1402 if (add_edge(ptn, null_obj)) {
1403 // New edge was added
1404 new_edges++;
1405 add_field_uses_to_worklist(ptn->as_Field());
1406 }
1407 }
1408 }
1409 }
1410 }
1411 return new_edges;
1412 }
1413
1414 // Adjust scalar_replaceable state after Connection Graph is built.
1415 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
1416 // Search for non-escaping objects which are not scalar replaceable
1417 // and mark them to propagate the state to referenced objects.
1418
1419 // 1. An object is not scalar replaceable if the field into which it is
1420 // stored has unknown offset (stored into unknown element of an array).
1421 //
1422 for (UseIterator i(jobj); i.has_next(); i.next()) {
1423 PointsToNode* use = i.get();
1424 assert(!use->is_Arraycopy(), "sanity");
1425 if (use->is_Field()) {
1426 FieldNode* field = use->as_Field();
1427 assert(field->is_oop() && field->scalar_replaceable() &&
1428 field->fields_escape_state() == PointsToNode::NoEscape, "sanity");
1429 if (field->offset() == Type::OffsetBot) {
1430 jobj->set_scalar_replaceable(false);
1431 return;
1432 }
1433 }
1434 assert(use->is_Field() || use->is_LocalVar(), "sanity");
1435 // 2. An object is not scalar replaceable if it is merged with other objects.
1436 for (EdgeIterator j(use); j.has_next(); j.next()) {
1437 PointsToNode* ptn = j.get();
1438 if (ptn->is_JavaObject() && ptn != jobj) {
1439 // Mark all objects.
1440 jobj->set_scalar_replaceable(false);
1441 ptn->set_scalar_replaceable(false);
1442 }
1443 }
1444 if (!jobj->scalar_replaceable()) {
1445 return;
1446 }
1447 }
1448
1449 for (EdgeIterator j(jobj); j.has_next(); j.next()) {
1450 // Non-escaping object node should point only to field nodes.
1451 FieldNode* field = j.get()->as_Field();
1452 int offset = field->as_Field()->offset();
1453
1454 // 3. An object is not scalar replaceable if it has a field with unknown
1455 // offset (array's element is accessed in loop).
1456 if (offset == Type::OffsetBot) {
1457 jobj->set_scalar_replaceable(false);
1458 return;
1459 }
1460 // 4. Currently an object is not scalar replaceable if a LoadStore node
1461 // accesses its field since the field value is unknown after it.
1462 //
1463 Node* n = field->ideal_node();
1464 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1465 if (n->fast_out(i)->is_LoadStore()) {
1466 jobj->set_scalar_replaceable(false);
1467 return;
1468 }
1469 }
1470
1471 // 5. Or the address may point to more than one object. This may produce
1472 // a false positive result (set not scalar replaceable)
1473 // since the flow-insensitive escape analysis can't separate
1474 // the case when stores overwrite the field's value from the case
1475 // when stores happened on different control branches.
1476 //
1477 // Note: it will disable scalar replacement in some cases:
1478 //
1479 // Point p[] = new Point[1];
1480 // p[0] = new Point(); // Will not be scalar replaced
1481 //
1482 // but it will save us from incorrect optimizations in cases like the next:
1483 //
1484 // Point p[] = new Point[1];
1485 // if ( x ) p[0] = new Point(); // Will not be scalar replaced
1486 //
1487 if (field->base_count() > 1) {
1488 for (BaseIterator i(field); i.has_next(); i.next()) {
1489 PointsToNode* base = i.get();
1490 // Don't take into account LocalVar nodes which
1491 // may point to only one object which should also be
1492 // this field's base by now.
1493 if (base->is_JavaObject() && base != jobj) {
1494 // Mark all bases.
1495 jobj->set_scalar_replaceable(false);
1496 base->set_scalar_replaceable(false);
1497 }
1498 }
1499 }
1500 }
1501 }
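// Editor's summary: conditions 1-5 above all funnel into the same outcome, so
// adjust_scalar_replaceable_state() is effectively the disjunction below
// (names are the editor's, not VM code):
//
//   bool not_scalar_replaceable =
//        stored_at_unknown_offset     // (1) stored into unknown array element
//     || merged_with_another_object   // (2) a LocalVar reaches two objects
//     || has_field_at_unknown_offset  // (3) a field with OffsetBot offset
//     || field_used_by_LoadStore      // (4) field value unknown afterwards
//     || field_has_multiple_bases;    // (5) address may point to two objects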
1502
1503 #ifdef ASSERT
1504 void ConnectionGraph::verify_connection_graph(
1505 GrowableArray<PointsToNode*>& ptnodes_worklist,
1506 GrowableArray<JavaObjectNode*>& non_escaped_worklist,
1507 GrowableArray<JavaObjectNode*>& java_objects_worklist,
1508 GrowableArray<Node*>& addp_worklist) {
1509 // Verify that the graph is complete - no new edges can be added.
1510 int java_objects_length = java_objects_worklist.length();
1511 int non_escaped_length = non_escaped_worklist.length();
1512 int new_edges = 0;
1513 for (int next = 0; next < java_objects_length; ++next) {
1514 JavaObjectNode* ptn = java_objects_worklist.at(next);
1515 new_edges += add_java_object_edges(ptn, true);
1516 }
1517 assert(new_edges == 0, "graph was not complete");
1518 // Verify that escape state is final.
1519 int length = non_escaped_worklist.length();
1520 find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist);
1521 assert((non_escaped_length == non_escaped_worklist.length()) &&
1522 (non_escaped_length == length) &&
1523 (_worklist.length() == 0), "escape state was not final");
1524
1525 // Verify fields information.
1526 int addp_length = addp_worklist.length();
1527 for (int next = 0; next < addp_length; ++next ) {
1528 Node* n = addp_worklist.at(next);
1529 FieldNode* field = ptnode_adr(n->_idx)->as_Field();
1530 if (field->is_oop()) {
1531 // Verify that field has all bases
1532 Node* base = get_addp_base(n);
1533 PointsToNode* ptn = ptnode_adr(base->_idx);
1534 if (ptn->is_JavaObject()) {
1535 assert(field->has_base(ptn->as_JavaObject()), "sanity");
1536 } else {
1537 assert(ptn->is_LocalVar(), "sanity");
1538 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1539 PointsToNode* e = i.get();
1540 if (e->is_JavaObject()) {
1541 assert(field->has_base(e->as_JavaObject()), "sanity");
1542 }
1543 }
1544 }
1545 // Verify that all fields have initializing values.
1546 if (field->edge_count() == 0) {
1547 field->dump();
1548 assert(field->edge_count() > 0, "sanity");
1549 }
1550 }
1551 }
1552 }
1553 #endif
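The core of this verification is a fixed-point property: one more propagation pass over a complete graph must add nothing. A standalone sketch of that property on toy transitive-closure edges (propagate is a stand-in, not the real add_java_object_edges):

    #include <set>
    #include <utility>
    #include <cassert>

    typedef std::set<std::pair<int, int> > Edges;

    // One propagation round: add transitive edges; return how many were new.
    static int propagate(Edges& e) {
      int added = 0;
      Edges snap = e;
      for (Edges::iterator i = snap.begin(); i != snap.end(); ++i) {
        for (Edges::iterator j = snap.begin(); j != snap.end(); ++j) {
          if (i->second == j->first) {
            added += e.insert(std::make_pair(i->first, j->second)).second;
          }
        }
      }
      return added;
    }

    int main() {
      Edges e;
      e.insert(std::make_pair(1, 2));
      e.insert(std::make_pair(2, 3));
      while (propagate(e) != 0) {}  // construction runs to a fixed point
      assert(propagate(e) == 0);    // what verify_connection_graph() asserts
      return 0;
    }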
1554
1555 // Optimize ideal graph.
1556 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
1557 GrowableArray<Node*>& storestore_worklist) {
1558 Compile* C = _compile;
1559 PhaseIterGVN* igvn = _igvn;
1560 if (EliminateLocks) {
1561 // Mark locks before changing ideal graph.
1562 int cnt = C->macro_count();
1563 for (int i = 0; i < cnt; i++) {
1564 Node *n = C->macro_node(i);
1565 if (n->is_AbstractLock()) { // Lock and Unlock nodes
1566 AbstractLockNode* alock = n->as_AbstractLock();
1567 if (!alock->is_non_esc_obj()) {
1568 if (not_global_escape(alock->obj_node())) {
1569 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
1570 // The lock could be marked eliminated by the lock coarsening
1571 // code during the first IGVN before EA. Replace the coarsened
1572 // flag so that all associated locks/unlocks are eliminated.
1573 alock->set_non_esc_obj();
1574 }
1575 }
1576 }
1577 }
1578 }
1579
1580 if (OptimizePtrCompare) {
1581 // Add ConI(#CC_GT) and ConI(#CC_EQ).
1582 _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
1583 _pcmp_eq = igvn->makecon(TypeInt::CC_EQ);
1584 // Optimize objects compare.
1585 while (ptr_cmp_worklist.length() != 0) {
1586 Node *n = ptr_cmp_worklist.pop();
1587 Node *res = optimize_ptr_compare(n);
1588 if (res != NULL) {
1589 #ifndef PRODUCT
1590 if (PrintOptimizePtrCompare) {
1591 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
1592 if (Verbose) {
1593 n->dump(1);
1594 }
1595 }
1596 #endif
1597 igvn->replace_node(n, res);
1598 }
1599 }
1600 // cleanup
1601 if (_pcmp_neq->outcnt() == 0)
1602 igvn->hash_delete(_pcmp_neq);
1603 if (_pcmp_eq->outcnt() == 0)
1604 igvn->hash_delete(_pcmp_eq);
1605 }
1606
1607 // For MemBarStoreStore nodes added in library_call.cpp, check the
1608 // escape status of the associated AllocateNode and optimize out
1609 // the MemBarStoreStore node if the allocated object never escapes.
1610 while (storestore_worklist.length() != 0) {
1611 Node *n = storestore_worklist.pop();
1612 MemBarStoreStoreNode *storestore = n->as_MemBarStoreStore();
1613 Node *alloc = storestore->in(MemBarNode::Precedent)->in(0);
1614 assert (alloc->is_Allocate(), "storestore should point to AllocateNode");
1615 if (not_global_escape(alloc)) {
1616 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
1617 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
1618 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
1619 igvn->register_new_node_with_optimizer(mb);
1620 igvn->replace_node(storestore, mb);
1621 }
1622 }
1623 }
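Both rewrites above gate on the same query: when the object never escapes globally, its locks can be elided and its publication barrier weakened to a compiler-only ordering. A tiny standalone sketch of the barrier decision (hypothetical enum and function, not the HotSpot API):

    #include <cassert>

    enum Barrier { StoreStoreBarrier, CPUOrderOnly };

    // An object visible to one thread only needs no publication fence;
    // ordering for the compiler itself (CPUOrderOnly) is sufficient.
    static Barrier publication_barrier(bool not_global_escape) {
      return not_global_escape ? CPUOrderOnly : StoreStoreBarrier;
    }

    int main() {
      assert(publication_barrier(true)  == CPUOrderOnly);
      assert(publication_barrier(false) == StoreStoreBarrier);
      return 0;
    }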
1624
1625 // Optimize objects compare.
1626 Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
1627 assert(OptimizePtrCompare, "sanity");
1628 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
1629 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
1630 JavaObjectNode* jobj1 = unique_java_object(n->in(1));
1631 JavaObjectNode* jobj2 = unique_java_object(n->in(2));
1632 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
1633 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
1634
1635 // Check simple cases first.
1636 if (jobj1 != NULL) {
1637 if (jobj1->escape_state() == PointsToNode::NoEscape) {
1638 if (jobj1 == jobj2) {
1639 // Comparing the same non-escaping object.
1640 return _pcmp_eq;
1641 }
1642 Node* obj = jobj1->ideal_node();
1643 // Comparing a non-escaping allocation.
1644 if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
1645 !ptn2->points_to(jobj1)) {
1646 return _pcmp_neq; // This includes the nullness check.
1647 }
1648 }
1649 }
1650 if (jobj2 != NULL) {
1651 if (jobj2->escape_state() == PointsToNode::NoEscape) {
1652 Node* obj = jobj2->ideal_node();
1653 // Comparing a non-escaping allocation.
1654 if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
1655 !ptn1->points_to(jobj2)) {
1656 return _pcmp_neq; // This includes the nullness check.
1657 }
1658 }
1659 }
1660 if (jobj1 != NULL && jobj1 != phantom_obj &&
1661 jobj2 != NULL && jobj2 != phantom_obj &&
1662 jobj1->ideal_node()->is_Con() &&
1663 jobj2->ideal_node()->is_Con()) {
1664 // Comparing klass or String constants. Be careful with compressed
1665 // pointers - compare the types of ConN and ConP instead of the nodes.
1666 const Type* t1 = jobj1->ideal_node()->bottom_type()->make_ptr();
1667 const Type* t2 = jobj2->ideal_node()->bottom_type()->make_ptr();
1668 assert(t1 != NULL && t2 != NULL, "sanity");
1669 if (t1->make_ptr() == t2->make_ptr()) {
1670 return _pcmp_eq;
136 } else { 1671 } else {
137 f->set_has_unknown_ptr(); 1672 return _pcmp_neq;
138 } 1673 }
139 } 1674 }
140 add_edge(f, to_i, PointsToNode::PointsToEdge); 1675 if (ptn1->meet(ptn2)) {
141 } 1676 return NULL; // Sets are not disjoint
142 1677 }
143 void ConnectionGraph::add_deferred_edge(uint from_i, uint to_i) { 1678
144 PointsToNode *f = ptnode_adr(from_i); 1679 // Sets are disjoint.
145 PointsToNode *t = ptnode_adr(to_i); 1680 bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
146 1681 bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
147 assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set"); 1682 bool set1_has_null_ptr = ptn1->points_to(null_obj);
148 assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of Deferred edge"); 1683 bool set2_has_null_ptr = ptn2->points_to(null_obj);
149 assert(t->node_type() == PointsToNode::LocalVar || t->node_type() == PointsToNode::Field, "invalid destination of Deferred edge"); 1684 if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
150 // don't add a self-referential edge, this can occur during removal of 1685 (set2_has_unknown_ptr && set1_has_null_ptr)) {
151 // deferred edges 1686 // Check nullness of unknown object.
152 if (from_i != to_i) 1687 return NULL;
153 add_edge(f, to_i, PointsToNode::DeferredEdge); 1688 }
154 } 1689
1690 // Disjointness by itself is not sufficient since
1691 // alias analysis is not complete for escaped objects.
1692 // Disjoint sets are definitely unrelated only when
1693 // at least one set has only non-escaping allocations.
1694 if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
1695 if (ptn1->non_escaping_allocation()) {
1696 return _pcmp_neq;
1697 }
1698 }
1699 if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
1700 if (ptn2->non_escaping_allocation()) {
1701 return _pcmp_neq;
1702 }
1703 }
1704 return NULL;
1705 }
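Summarizing the tail of the function: a pointer compare folds to NotEQ only when the two points-to sets are disjoint and at least one side is known to contain nothing but non-escaping allocations. A standalone model with std::set standing in for a points-to set (not HotSpot code):

    #include <set>
    #include <cassert>

    typedef std::set<int> PtSet;   // ids of allocations a value may reach

    static bool disjoint(const PtSet& a, const PtSet& b) {
      for (PtSet::const_iterator i = a.begin(); i != a.end(); ++i) {
        if (b.count(*i)) return false;
      }
      return true;
    }

    // Fold p != q only when the sets are disjoint AND one side holds
    // nothing but non-escaping allocations (alias information stays
    // conservative for escaped objects).
    static bool can_fold_to_neq(const PtSet& a, bool a_non_escaping,
                                const PtSet& b, bool b_non_escaping) {
      return disjoint(a, b) && (a_non_escaping || b_non_escaping);
    }

    int main() {
      PtSet p1; p1.insert(1);
      PtSet p2; p2.insert(1); p2.insert(2);
      PtSet p3; p3.insert(3);
      assert(!can_fold_to_neq(p1, true, p2, false));  // may alias: keep CmpP
      assert( can_fold_to_neq(p1, true, p3, false));  // provably unequal
      return 0;
    }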
1706
1707 // Connection Graph construction functions.
1708
1709 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
1710 PointsToNode* ptadr = _nodes.at(n->_idx);
1711 if (ptadr != NULL) {
1712 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
1713 return;
1714 }
1715 Compile* C = _compile;
1716 ptadr = new (C->comp_arena()) LocalVarNode(C, n, es);
1717 _nodes.at_put(n->_idx, ptadr);
1718 }
1719
1720 void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
1721 PointsToNode* ptadr = _nodes.at(n->_idx);
1722 if (ptadr != NULL) {
1723 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
1724 return;
1725 }
1726 Compile* C = _compile;
1727 ptadr = new (C->comp_arena()) JavaObjectNode(C, n, es);
1728 _nodes.at_put(n->_idx, ptadr);
1729 }
1730
1731 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
1732 PointsToNode* ptadr = _nodes.at(n->_idx);
1733 if (ptadr != NULL) {
1734 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
1735 return;
1736 }
1737 Compile* C = _compile;
1738 bool is_oop = is_oop_field(n, offset);
1739 FieldNode* field = new (C->comp_arena()) FieldNode(C, n, es, offset, is_oop);
1740 _nodes.at_put(n->_idx, field);
1741 }
1742
1743 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
1744 PointsToNode* src, PointsToNode* dst) {
1745 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
1746 assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
1747 PointsToNode* ptadr = _nodes.at(n->_idx);
1748 if (ptadr != NULL) {
1749 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
1750 return;
1751 }
1752 Compile* C = _compile;
1753 ptadr = new (C->comp_arena()) ArraycopyNode(C, n, es);
1754 _nodes.at_put(n->_idx, ptadr);
1755 // Add edge from arraycopy node to source object.
1756 (void)add_edge(ptadr, src);
1757 src->set_arraycopy_src();
1758 // Add edge from destination object to arraycopy node.
1759 (void)add_edge(dst, ptadr);
1760 dst->set_arraycopy_dst();
1761 }
1762
1763 bool ConnectionGraph::is_oop_field(Node* n, int offset) {
1764 const Type* adr_type = n->as_AddP()->bottom_type();
1765 BasicType bt = T_INT;
1766 if (offset == Type::OffsetBot) {
1767 // Check only oop fields.
1768 if (!adr_type->isa_aryptr() ||
1769 (adr_type->isa_aryptr()->klass() == NULL) ||
1770 adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
1771 // OffsetBot is used to reference array's element. Ignore first AddP.
1772 if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
1773 bt = T_OBJECT;
1774 }
1775 }
1776 } else if (offset != oopDesc::klass_offset_in_bytes()) {
1777 if (adr_type->isa_instptr()) {
1778 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
1779 if (field != NULL) {
1780 bt = field->layout_type();
1781 } else {
1782 // Ignore non-field load (for example, klass load)
1783 }
1784 } else if (adr_type->isa_aryptr()) {
1785 if (offset == arrayOopDesc::length_offset_in_bytes()) {
1786 // Ignore array length load.
1787 } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
1788 // Ignore first AddP.
1789 } else {
1790 const Type* elemtype = adr_type->isa_aryptr()->elem();
1791 bt = elemtype->array_element_basic_type();
1792 }
1793 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
1794 // Allocation initialization, ThreadLocal field access, unsafe access
1795 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1796 int opcode = n->fast_out(i)->Opcode();
1797 if (opcode == Op_StoreP || opcode == Op_LoadP ||
1798 opcode == Op_StoreN || opcode == Op_LoadN) {
1799 bt = T_OBJECT;
1800 }
1801 }
1802 }
1803 }
1804 return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
1805 }
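The classification above reduces to one question: is the addressed slot reference-typed? A trivial standalone restatement of the final test (toy BasicType enum mirroring the real one):

    #include <cassert>

    enum BasicType { T_INT, T_OBJECT, T_NARROWOOP, T_ARRAY };

    // Only reference-typed slots get field nodes and points-to edges;
    // primitive fields, array lengths and the klass word are ignored.
    static bool is_reference(BasicType bt) {
      return bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY;
    }

    int main() {
      assert(!is_reference(T_INT));       // primitive field
      assert(is_reference(T_NARROWOOP));  // compressed oop field
      return 0;
    }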
1806
1807 // Return the unique java object this node points to, or NULL.
1808 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
1809 assert(!_collecting, "should not call during graph construction");
1810 // If the node was created after the escape computation we can't answer.
1811 uint idx = n->_idx;
1812 if (idx >= nodes_size()) {
1813 return NULL;
1814 }
1815 PointsToNode* ptn = ptnode_adr(idx);
1816 if (ptn->is_JavaObject()) {
1817 return ptn->as_JavaObject();
1818 }
1819 assert(ptn->is_LocalVar(), "sanity");
1820 // Check all java objects it points to.
1821 JavaObjectNode* jobj = NULL;
1822 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1823 PointsToNode* e = i.get();
1824 if (e->is_JavaObject()) {
1825 if (jobj == NULL) {
1826 jobj = e->as_JavaObject();
1827 } else if (jobj != e) {
1828 return NULL;
1829 }
1830 }
1831 }
1832 return jobj;
1833 }
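A standalone model of the scan: a local yields a unique target only when every object edge reaches the same node (toy JavaObj type, not the real PointsToNode classes):

    #include <cstddef>
    #include <vector>
    #include <cassert>

    struct JavaObj { int id; };

    // Return the single object all edges agree on, or NULL when the
    // local merges more than one allocation.
    static JavaObj* unique_target(const std::vector<JavaObj*>& pointees) {
      JavaObj* unique = NULL;
      for (std::size_t i = 0; i < pointees.size(); i++) {
        if (unique == NULL) {
          unique = pointees[i];
        } else if (unique != pointees[i]) {
          return NULL;
        }
      }
      return unique;
    }

    int main() {
      JavaObj a = { 1 };
      JavaObj b = { 2 };
      std::vector<JavaObj*> v(2, &a);  // two edges, one object
      assert(unique_target(v) == &a);
      v.push_back(&b);                 // now merges two objects
      assert(unique_target(v) == NULL);
      return 0;
    }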
1834
1835 // Return true if this node points only to non-escaping allocations.
1836 bool PointsToNode::non_escaping_allocation() {
1837 if (is_JavaObject()) {
1838 Node* n = ideal_node();
1839 if (n->is_Allocate() || n->is_CallStaticJava()) {
1840 return (escape_state() == PointsToNode::NoEscape);
1841 } else {
1842 return false;
1843 }
1844 }
1845 assert(is_LocalVar(), "sanity");
1846 // Check all java objects it points to.
1847 for (EdgeIterator i(this); i.has_next(); i.next()) {
1848 PointsToNode* e = i.get();
1849 if (e->is_JavaObject()) {
1850 Node* n = e->ideal_node();
1851 if ((e->escape_state() != PointsToNode::NoEscape) ||
1852 !(n->is_Allocate() || n->is_CallStaticJava())) {
1853 return false;
1854 }
1855 }
1856 }
1857 return true;
1858 }
1859
1860 // Return true if we know the node does not escape globally.
1861 bool ConnectionGraph::not_global_escape(Node *n) {
1862 assert(!_collecting, "should not call during graph construction");
1863 // If the node was created after the escape computation we can't answer.
1864 uint idx = n->_idx;
1865 if (idx >= nodes_size()) {
1866 return false;
1867 }
1868 PointsToNode* ptn = ptnode_adr(idx);
1869 PointsToNode::EscapeState es = ptn->escape_state();
1870 // If the node already reached GlobalEscape, the answer is final.
1871 if (es >= PointsToNode::GlobalEscape)
1872 return false;
1873 if (ptn->is_JavaObject()) {
1874 return true; // (es < PointsToNode::GlobalEscape);
1875 }
1876 assert(ptn->is_LocalVar(), "sanity");
1877 // Check all java objects it points to.
1878 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1879 if (i.get()->escape_state() >= PointsToNode::GlobalEscape)
1880 return false;
1881 }
1882 return true;
1883 }
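In effect the query takes the maximum escape state over everything the node may point to; ArgEscape still allows the optimizations above, GlobalEscape forbids them. A standalone model:

    #include <cstddef>
    #include <vector>
    #include <cassert>

    enum Escape { NoEscape, ArgEscape, GlobalEscape };

    // A local does not escape globally only if none of its possible
    // targets reached GlobalEscape.
    static bool not_global(const std::vector<Escape>& pointees) {
      for (std::size_t i = 0; i < pointees.size(); i++) {
        if (pointees[i] >= GlobalEscape) return false;
      }
      return true;
    }

    int main() {
      std::vector<Escape> v;
      v.push_back(NoEscape);
      v.push_back(ArgEscape);   // escapes as an argument only
      assert(not_global(v));    // still eligible for lock elision
      v.push_back(GlobalEscape);
      assert(!not_global(v));
      return 0;
    }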
1884
1885
1886 // Helper functions
1887
1888 // Return true if this node is the specified java object or points to it.
1889 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
1890 if (is_JavaObject()) {
1891 return (this == ptn);
1892 }
1893 assert(is_LocalVar(), "sanity");
1894 for (EdgeIterator i(this); i.has_next(); i.next()) {
1895 if (i.get() == ptn)
1896 return true;
1897 }
1898 return false;
1899 }
1900
1901 // Return true if the points-to sets of the two nodes intersect.
1902 bool PointsToNode::meet(PointsToNode* ptn) {
1903 if (this == ptn) {
1904 return true;
1905 } else if (ptn->is_JavaObject()) {
1906 return this->points_to(ptn->as_JavaObject());
1907 } else if (this->is_JavaObject()) {
1908 return ptn->points_to(this->as_JavaObject());
1909 }
1910 assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
1911 int ptn_count = ptn->edge_count();
1912 for (EdgeIterator i(this); i.has_next(); i.next()) {
1913 PointsToNode* this_e = i.get();
1914 for (int j = 0; j < ptn_count; j++) {
1915 if (this_e == ptn->edge(j))
1916 return true;
1917 }
1918 }
1919 return false;
1920 }
1921
1922 #ifdef ASSERT
1923 // Return true if the specified java object is among this field's bases.
1924 bool FieldNode::has_base(JavaObjectNode* jobj) const {
1925 for (BaseIterator i(this); i.has_next(); i.next()) {
1926 if (i.get() == jobj)
1927 return true;
1928 }
1929 return false;
1930 }
1931 #endif
155 1932
156 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) { 1933 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
157 const Type *adr_type = phase->type(adr); 1934 const Type *adr_type = phase->type(adr);
158 if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && 1935 if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
159 adr->in(AddPNode::Address)->is_Proj() && 1936 adr->in(AddPNode::Address)->is_Proj() &&
169 const TypePtr *t_ptr = adr_type->isa_ptr(); 1946 const TypePtr *t_ptr = adr_type->isa_ptr();
170 assert(t_ptr != NULL, "must be a pointer type"); 1947 assert(t_ptr != NULL, "must be a pointer type");
171 return t_ptr->offset(); 1948 return t_ptr->offset();
172 } 1949 }
173 1950
174 void ConnectionGraph::add_field_edge(uint from_i, uint to_i, int offset) { 1951 Node* ConnectionGraph::get_addp_base(Node *addp) {
175 // Don't add fields to NULL pointer.
176 if (is_null_ptr(from_i))
177 return;
178 PointsToNode *f = ptnode_adr(from_i);
179 PointsToNode *t = ptnode_adr(to_i);
180
181 assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
182 assert(f->node_type() == PointsToNode::JavaObject, "invalid source of Field edge");
183 assert(t->node_type() == PointsToNode::Field, "invalid destination of Field edge");
184 assert (t->offset() == -1 || t->offset() == offset, "conflicting field offsets");
185 t->set_offset(offset);
186
187 add_edge(f, to_i, PointsToNode::FieldEdge);
188 }
189
190 void ConnectionGraph::set_escape_state(uint ni, PointsToNode::EscapeState es) {
191 // Don't change non-escaping state of NULL pointer.
192 if (is_null_ptr(ni))
193 return;
194 PointsToNode *npt = ptnode_adr(ni);
195 PointsToNode::EscapeState old_es = npt->escape_state();
196 if (es > old_es)
197 npt->set_escape_state(es);
198 }
199
200 void ConnectionGraph::add_node(Node *n, PointsToNode::NodeType nt,
201 PointsToNode::EscapeState es, bool done) {
202 PointsToNode* ptadr = ptnode_adr(n->_idx);
203 ptadr->_node = n;
204 ptadr->set_node_type(nt);
205
206 // inline set_escape_state(idx, es);
207 PointsToNode::EscapeState old_es = ptadr->escape_state();
208 if (es > old_es)
209 ptadr->set_escape_state(es);
210
211 if (done)
212 _processed.set(n->_idx);
213 }
214
215 PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n) {
216 uint idx = n->_idx;
217 PointsToNode::EscapeState es;
218
219 // If we are still collecting or there were no non-escaping allocations
220 // we don't know the answer yet
221 if (_collecting)
222 return PointsToNode::UnknownEscape;
223
224 // if the node was created after the escape computation, return
225 // UnknownEscape
226 if (idx >= nodes_size())
227 return PointsToNode::UnknownEscape;
228
229 es = ptnode_adr(idx)->escape_state();
230
231 // if we have already computed a value, return it
232 if (es != PointsToNode::UnknownEscape &&
233 ptnode_adr(idx)->node_type() == PointsToNode::JavaObject)
234 return es;
235
236 // PointsTo() calls n->uncast() which can return a new ideal node.
237 if (n->uncast()->_idx >= nodes_size())
238 return PointsToNode::UnknownEscape;
239
240 PointsToNode::EscapeState orig_es = es;
241
242 // compute max escape state of anything this node could point to
243 for(VectorSetI i(PointsTo(n)); i.test() && es != PointsToNode::GlobalEscape; ++i) {
244 uint pt = i.elem;
245 PointsToNode::EscapeState pes = ptnode_adr(pt)->escape_state();
246 if (pes > es)
247 es = pes;
248 }
249 if (orig_es != es) {
250 // cache the computed escape state
251 assert(es > orig_es, "should have computed an escape state");
252 set_escape_state(idx, es);
253 } // orig_es could be PointsToNode::UnknownEscape
254 return es;
255 }
256
257 VectorSet* ConnectionGraph::PointsTo(Node * n) {
258 pt_ptset.Reset();
259 pt_visited.Reset();
260 pt_worklist.clear();
261
262 #ifdef ASSERT
263 Node *orig_n = n;
264 #endif
265
266 n = n->uncast();
267 PointsToNode* npt = ptnode_adr(n->_idx);
268
269 // If we have a JavaObject, return just that object
270 if (npt->node_type() == PointsToNode::JavaObject) {
271 pt_ptset.set(n->_idx);
272 return &pt_ptset;
273 }
274 #ifdef ASSERT
275 if (npt->_node == NULL) {
276 if (orig_n != n)
277 orig_n->dump();
278 n->dump();
279 assert(npt->_node != NULL, "unregistered node");
280 }
281 #endif
282 pt_worklist.push(n->_idx);
283 while(pt_worklist.length() > 0) {
284 int ni = pt_worklist.pop();
285 if (pt_visited.test_set(ni))
286 continue;
287
288 PointsToNode* pn = ptnode_adr(ni);
289 // ensure that all inputs of a Phi have been processed
290 assert(!_collecting || !pn->_node->is_Phi() || _processed.test(ni),"");
291
292 int edges_processed = 0;
293 uint e_cnt = pn->edge_count();
294 for (uint e = 0; e < e_cnt; e++) {
295 uint etgt = pn->edge_target(e);
296 PointsToNode::EdgeType et = pn->edge_type(e);
297 if (et == PointsToNode::PointsToEdge) {
298 pt_ptset.set(etgt);
299 edges_processed++;
300 } else if (et == PointsToNode::DeferredEdge) {
301 pt_worklist.push(etgt);
302 edges_processed++;
303 } else {
304 assert(false, "neither PointsToEdge nor DeferredEdge");
305 }
306 }
307 if (edges_processed == 0) {
308 // no deferred or pointsto edges found. Assume the value was set
309 // outside this method. Add the phantom object to the pointsto set.
310 pt_ptset.set(_phantom_object);
311 }
312 }
313 return &pt_ptset;
314 }
315
316 void ConnectionGraph::remove_deferred(uint ni, GrowableArray<uint>* deferred_edges, VectorSet* visited) {
317 // This method is most expensive during ConnectionGraph construction.
318 // Reuse vectorSet and an additional growable array for deferred edges.
319 deferred_edges->clear();
320 visited->Reset();
321
322 visited->set(ni);
323 PointsToNode *ptn = ptnode_adr(ni);
324 assert(ptn->node_type() == PointsToNode::LocalVar ||
325 ptn->node_type() == PointsToNode::Field, "sanity");
326 assert(ptn->edge_count() != 0, "should have at least phantom_object");
327
328 // Mark current edges as visited and move deferred edges to separate array.
329 for (uint i = 0; i < ptn->edge_count(); ) {
330 uint t = ptn->edge_target(i);
331 #ifdef ASSERT
332 assert(!visited->test_set(t), "expecting no duplications");
333 #else
334 visited->set(t);
335 #endif
336 if (ptn->edge_type(i) == PointsToNode::DeferredEdge) {
337 ptn->remove_edge(t, PointsToNode::DeferredEdge);
338 deferred_edges->append(t);
339 } else {
340 i++;
341 }
342 }
343 for (int next = 0; next < deferred_edges->length(); ++next) {
344 uint t = deferred_edges->at(next);
345 PointsToNode *ptt = ptnode_adr(t);
346 uint e_cnt = ptt->edge_count();
347 assert(e_cnt != 0, "should have at least phantom_object");
348 for (uint e = 0; e < e_cnt; e++) {
349 uint etgt = ptt->edge_target(e);
350 if (visited->test_set(etgt))
351 continue;
352
353 PointsToNode::EdgeType et = ptt->edge_type(e);
354 if (et == PointsToNode::PointsToEdge) {
355 add_pointsto_edge(ni, etgt);
356 } else if (et == PointsToNode::DeferredEdge) {
357 deferred_edges->append(etgt);
358 } else {
359 assert(false,"invalid connection graph");
360 }
361 }
362 }
363 if (ptn->edge_count() == 0) {
364 // No pointsto edges found after deferred edges are removed.
365 // For example, in the next case where call is replaced
366 // with uncommon trap and as result array's load references
367 // itself through deferred edges:
368 //
369 // A a = b[i];
370 // if (c!=null) a = c.foo();
371 // b[i] = a;
372 //
373 // Assume the value was set outside this method and
374 // add edge to phantom object.
375 add_pointsto_edge(ni, _phantom_object);
376 }
377 }
378
379
380 // Add an edge to the node given by "to_i" from any field of adr_i whose offset
381 // matches "offset". A deferred edge is added if to_i is a LocalVar, and
382 // a pointsto edge is added if it is a JavaObject.
383
384 void ConnectionGraph::add_edge_from_fields(uint adr_i, uint to_i, int offs) {
385 // No fields for NULL pointer.
386 if (is_null_ptr(adr_i)) {
387 return;
388 }
389 PointsToNode* an = ptnode_adr(adr_i);
390 PointsToNode* to = ptnode_adr(to_i);
391 bool deferred = (to->node_type() == PointsToNode::LocalVar);
392 bool escaped = (to_i == _phantom_object) && (offs == Type::OffsetTop);
393 if (escaped) {
394 // Values in fields escaped during call.
395 assert(an->escape_state() >= PointsToNode::ArgEscape, "sanity");
396 offs = Type::OffsetBot;
397 }
398 for (uint fe = 0; fe < an->edge_count(); fe++) {
399 assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
400 int fi = an->edge_target(fe);
401 if (escaped) {
402 set_escape_state(fi, PointsToNode::GlobalEscape);
403 }
404 PointsToNode* pf = ptnode_adr(fi);
405 int po = pf->offset();
406 if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
407 if (deferred)
408 add_deferred_edge(fi, to_i);
409 else
410 add_pointsto_edge(fi, to_i);
411 }
412 }
413 }
414
415 // Add a deferred edge from node given by "from_i" to any field of adr_i
416 // whose offset matches "offset".
417 void ConnectionGraph::add_deferred_edge_to_fields(uint from_i, uint adr_i, int offs) {
418 // No fields for NULL pointer.
419 if (is_null_ptr(adr_i)) {
420 return;
421 }
422 if (adr_i == _phantom_object) {
423 // Add only one edge for unknown object.
424 add_pointsto_edge(from_i, _phantom_object);
425 return;
426 }
427 PointsToNode* an = ptnode_adr(adr_i);
428 bool is_alloc = an->_node->is_Allocate();
429 for (uint fe = 0; fe < an->edge_count(); fe++) {
430 assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
431 int fi = an->edge_target(fe);
432 PointsToNode* pf = ptnode_adr(fi);
433 int offset = pf->offset();
434 if (!is_alloc) {
435 // Assume the field was set outside this method if it is not an Allocation.
436 add_pointsto_edge(fi, _phantom_object);
437 }
438 if (offset == offs || offset == Type::OffsetBot || offs == Type::OffsetBot) {
439 add_deferred_edge(from_i, fi);
440 }
441 }
442 // Some field references (AddP) may still be missing
443 // until Connection Graph construction is complete.
444 // For example, loads from RAW pointers with offset 0
445 // which don't have AddP.
446 // A reference to phantom_object will be added if
447 // a field reference is still missing after completing
448 // Connection Graph (see remove_deferred()).
449 }
450
451 // Helper functions
452
453 static Node* get_addp_base(Node *addp) {
454 assert(addp->is_AddP(), "must be AddP"); 1952 assert(addp->is_AddP(), "must be AddP");
455 // 1953 //
456 // AddP cases for Base and Address inputs: 1954 // AddP cases for Base and Address inputs:
457 // case #1. Direct object's field reference: 1955 // case #1. Direct object's field reference:
458 // Allocate 1956 // Allocate
511 // | 2009 // |
512 // DecodeN 2010 // DecodeN
513 // | | 2011 // | |
514 // AddP ( base == address ) 2012 // AddP ( base == address )
515 // 2013 //
516 Node *base = addp->in(AddPNode::Base)->uncast(); 2014 Node *base = addp->in(AddPNode::Base);
517 if (base->is_top()) { // The AddP case #3 and #6. 2015 if (base->uncast()->is_top()) { // The AddP case #3 and #6.
518 base = addp->in(AddPNode::Address)->uncast(); 2016 base = addp->in(AddPNode::Address);
519 while (base->is_AddP()) { 2017 while (base->is_AddP()) {
520 // Case #6 (unsafe access) may have several chained AddP nodes. 2018 // Case #6 (unsafe access) may have several chained AddP nodes.
521 assert(base->in(AddPNode::Base)->is_top(), "expected unsafe access address only"); 2019 assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
522 base = base->in(AddPNode::Address)->uncast(); 2020 base = base->in(AddPNode::Address);
523 } 2021 }
524 assert(base->Opcode() == Op_ConP || base->Opcode() == Op_ThreadLocal || 2022 Node* uncast_base = base->uncast();
525 base->Opcode() == Op_CastX2P || base->is_DecodeN() || 2023 int opcode = uncast_base->Opcode();
526 (base->is_Mem() && base->bottom_type() == TypeRawPtr::NOTNULL) || 2024 assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
527 (base->is_Proj() && base->in(0)->is_Allocate()), "sanity"); 2025 opcode == Op_CastX2P || uncast_base->is_DecodeN() ||
2026 (uncast_base->is_Mem() && uncast_base->bottom_type() == TypeRawPtr::NOTNULL) ||
2027 (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
528 } 2028 }
529 return base; 2029 return base;
530 } 2030 }
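Case #6 is the only one that loops: unsafe accesses may chain several AddP nodes through the Address input, and the walk keeps stepping until it leaves the chain. A standalone sketch of the walk (toy node type, not the ideal-graph Node API):

    #include <cstddef>
    #include <cassert>

    struct N { N* address; bool is_addp; };

    // Follow the Address input through chained AddPs to the raw base.
    static N* addp_base(N* addp) {
      N* base = addp;
      while (base->is_addp) {
        base = base->address;
      }
      return base;
    }

    int main() {
      N raw = { NULL, false };  // e.g. the CastX2P of an unsafe access
      N a1  = { &raw, true };
      N a2  = { &a1,  true };   // chained AddP (case #6)
      assert(addp_base(&a2) == &raw);
      return 0;
    }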
531 2031
532 static Node* find_second_addp(Node* addp, Node* n) { 2032 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
533 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes"); 2033 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
534
535 Node* addp2 = addp->raw_out(0); 2034 Node* addp2 = addp->raw_out(0);
536 if (addp->outcnt() == 1 && addp2->is_AddP() && 2035 if (addp->outcnt() == 1 && addp2->is_AddP() &&
537 addp2->in(AddPNode::Base) == n && 2036 addp2->in(AddPNode::Base) == n &&
538 addp2->in(AddPNode::Address) == addp) { 2037 addp2->in(AddPNode::Address) == addp) {
539
540 assert(addp->in(AddPNode::Base) == n, "expecting the same base"); 2038 assert(addp->in(AddPNode::Base) == n, "expecting the same base");
541 // 2039 //
542 // Find array's offset to push it on worklist first and 2040 // Find array's offset to push it on worklist first and
543 // as result process an array's element offset first (pushed second) 2041 // as result process an array's element offset first (pushed second)
544 // to avoid CastPP for the array's offset. 2042 // to avoid CastPP for the array's offset.
573 2071
574 // 2072 //
575 // Adjust the type and inputs of an AddP which computes the 2073 // Adjust the type and inputs of an AddP which computes the
576 // address of a field of an instance 2074 // address of a field of an instance
577 // 2075 //
578 bool ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) { 2076 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2077 PhaseGVN* igvn = _igvn;
579 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr(); 2078 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
580 assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr"); 2079 assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
581 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr(); 2080 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
582 if (t == NULL) { 2081 if (t == NULL) {
583 // We are computing a raw address for a store captured by an Initialize 2082 // We are computing a raw address for a store captured by an Initialize
610 // 2109 //
611 if (!t->is_known_instance() && 2110 if (!t->is_known_instance() &&
612 !base_t->klass()->is_subtype_of(t->klass())) { 2111 !base_t->klass()->is_subtype_of(t->klass())) {
613 return false; // bail out 2112 return false; // bail out
614 } 2113 }
615
616 const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr(); 2114 const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
617 // Do NOT remove the next line: ensure a new alias index is allocated 2115 // Do NOT remove the next line: ensure a new alias index is allocated
618 // for the instance type. Note: C++ will not remove it since the call 2116 // for the instance type. Note: C++ will not remove it since the call
619 // has side effect. 2117 // has side effect.
620 int alias_idx = _compile->get_alias_index(tinst); 2118 int alias_idx = _compile->get_alias_index(tinst);
621 igvn->set_type(addp, tinst); 2119 igvn->set_type(addp, tinst);
622 // record the allocation in the node map 2120 // record the allocation in the node map
623 assert(ptnode_adr(addp->_idx)->_node != NULL, "should be registered"); 2121 set_map(addp, get_map(base->_idx));
624 set_map(addp->_idx, get_map(base->_idx));
625
626 // Set addp's Base and Address to 'base'. 2122 // Set addp's Base and Address to 'base'.
627 Node *abase = addp->in(AddPNode::Base); 2123 Node *abase = addp->in(AddPNode::Base);
628 Node *adr = addp->in(AddPNode::Address); 2124 Node *adr = addp->in(AddPNode::Address);
629 if (adr->is_Proj() && adr->in(0)->is_Allocate() && 2125 if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
630 adr->in(0)->_idx == (uint)inst_id) { 2126 adr->in(0)->_idx == (uint)inst_id) {
655 // 2151 //
656 // Create a new version of orig_phi if necessary. Returns either the newly 2152 // Create a new version of orig_phi if necessary. Returns either the newly
657 // created phi or an existing phi. Sets create_new to indicate whether a new 2153 // created phi or an existing phi. Sets create_new to indicate whether a new
658 // phi was created. Cache the last newly created phi in the node map. 2154 // phi was created. Cache the last newly created phi in the node map.
659 // 2155 //
660 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn, bool &new_created) { 2156 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
661 Compile *C = _compile; 2157 Compile *C = _compile;
2158 PhaseGVN* igvn = _igvn;
662 new_created = false; 2159 new_created = false;
663 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type()); 2160 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
664 // nothing to do if orig_phi is bottom memory or matches alias_idx 2161 // nothing to do if orig_phi is bottom memory or matches alias_idx
665 if (phi_alias_idx == alias_idx) { 2162 if (phi_alias_idx == alias_idx) {
666 return orig_phi; 2163 return orig_phi;
696 const TypePtr *atype = C->get_adr_type(alias_idx); 2193 const TypePtr *atype = C->get_adr_type(alias_idx);
697 result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype); 2194 result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
698 C->copy_node_notes_to(result, orig_phi); 2195 C->copy_node_notes_to(result, orig_phi);
699 igvn->set_type(result, result->bottom_type()); 2196 igvn->set_type(result, result->bottom_type());
700 record_for_optimizer(result); 2197 record_for_optimizer(result);
701 2198 set_map(orig_phi, result);
702 debug_only(Node* pn = ptnode_adr(orig_phi->_idx)->_node;)
703 assert(pn == NULL || pn == orig_phi, "wrong node");
704 set_map(orig_phi->_idx, result);
705 ptnode_adr(orig_phi->_idx)->_node = orig_phi;
706
707 new_created = true; 2199 new_created = true;
708 return result; 2200 return result;
709 } 2201 }
710 2202
711 // 2203 //
712 // Return a new version of Memory Phi "orig_phi" with the inputs having the 2204 // Return a new version of Memory Phi "orig_phi" with the inputs having the
713 // specified alias index. 2205 // specified alias index.
714 // 2206 //
715 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn) { 2207 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {
716
717 assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory"); 2208 assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
718 Compile *C = _compile; 2209 Compile *C = _compile;
2210 PhaseGVN* igvn = _igvn;
719 bool new_phi_created; 2211 bool new_phi_created;
720 PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, igvn, new_phi_created); 2212 PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
721 if (!new_phi_created) { 2213 if (!new_phi_created) {
722 return result; 2214 return result;
723 } 2215 }
724
725 GrowableArray<PhiNode *> phi_list; 2216 GrowableArray<PhiNode *> phi_list;
726 GrowableArray<uint> cur_input; 2217 GrowableArray<uint> cur_input;
727
728 PhiNode *phi = orig_phi; 2218 PhiNode *phi = orig_phi;
729 uint idx = 1; 2219 uint idx = 1;
730 bool finished = false; 2220 bool finished = false;
731 while(!finished) { 2221 while(!finished) {
732 while (idx < phi->req()) { 2222 while (idx < phi->req()) {
733 Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, igvn); 2223 Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
734 if (mem != NULL && mem->is_Phi()) { 2224 if (mem != NULL && mem->is_Phi()) {
735 PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, igvn, new_phi_created); 2225 PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
736 if (new_phi_created) { 2226 if (new_phi_created) {
737 // found a phi for which we created a new split, push current one on worklist and begin 2227 // found a phi for which we created a new split, push current one on worklist and begin
738 // processing new one 2228 // processing new one
739 phi_list.push(phi); 2229 phi_list.push(phi);
740 cur_input.push(idx); 2230 cur_input.push(idx);
773 } 2263 }
774 } 2264 }
775 return result; 2265 return result;
776 } 2266 }
777 2267
778
779 // 2268 //
780 // The next methods are derived from methods in MemNode. 2269 // The next methods are derived from methods in MemNode.
781 // 2270 //
782 static Node *step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) { 2271 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
783 Node *mem = mmem; 2272 Node *mem = mmem;
784 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally 2273 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
785 // means an array I have not precisely typed yet. Do not do any 2274 // means an array I have not precisely typed yet. Do not do any
786 // alias stuff with it any time soon. 2275 // alias stuff with it any time soon.
787 if( toop->base() != Type::AnyPtr && 2276 if (toop->base() != Type::AnyPtr &&
788 !(toop->klass() != NULL && 2277 !(toop->klass() != NULL &&
789 toop->klass()->is_java_lang_Object() && 2278 toop->klass()->is_java_lang_Object() &&
790 toop->offset() == Type::OffsetBot) ) { 2279 toop->offset() == Type::OffsetBot)) {
791 mem = mmem->memory_at(alias_idx); 2280 mem = mmem->memory_at(alias_idx);
792 // Update input if it is progress over what we have now 2281 // Update input if it is progress over what we have now
793 } 2282 }
794 return mem; 2283 return mem;
795 } 2284 }
796 2285
797 // 2286 //
798 // Move memory users to their memory slices. 2287 // Move memory users to their memory slices.
799 // 2288 //
800 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis, PhaseGVN *igvn) { 2289 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) {
801 Compile* C = _compile; 2290 Compile* C = _compile;
802 2291 PhaseGVN* igvn = _igvn;
803 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 2292 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
804 assert(tp != NULL, "ptr type"); 2293 assert(tp != NULL, "ptr type");
805 int alias_idx = C->get_alias_index(tp); 2294 int alias_idx = C->get_alias_index(tp);
806 int general_idx = C->get_general_index(alias_idx); 2295 int general_idx = C->get_general_index(alias_idx);
807 2296
814 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 2303 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
815 continue; // Nothing to do 2304 continue; // Nothing to do
816 } 2305 }
817 // Replace previous general reference to mem node. 2306 // Replace previous general reference to mem node.
818 uint orig_uniq = C->unique(); 2307 uint orig_uniq = C->unique();
819 Node* m = find_inst_mem(n, general_idx, orig_phis, igvn); 2308 Node* m = find_inst_mem(n, general_idx, orig_phis);
820 assert(orig_uniq == C->unique(), "no new nodes"); 2309 assert(orig_uniq == C->unique(), "no new nodes");
821 mmem->set_memory_at(general_idx, m); 2310 mmem->set_memory_at(general_idx, m);
822 --imax; 2311 --imax;
823 --i; 2312 --i;
824 } else if (use->is_MemBar()) { 2313 } else if (use->is_MemBar()) {
834 alias_idx == general_idx) { 2323 alias_idx == general_idx) {
835 continue; // Nothing to do 2324 continue; // Nothing to do
836 } 2325 }
837 // Move to general memory slice. 2326 // Move to general memory slice.
838 uint orig_uniq = C->unique(); 2327 uint orig_uniq = C->unique();
839 Node* m = find_inst_mem(n, general_idx, orig_phis, igvn); 2328 Node* m = find_inst_mem(n, general_idx, orig_phis);
840 assert(orig_uniq == C->unique(), "no new nodes"); 2329 assert(orig_uniq == C->unique(), "no new nodes");
841 igvn->hash_delete(use); 2330 igvn->hash_delete(use);
842 imax -= use->replace_edge(n, m); 2331 imax -= use->replace_edge(n, m);
843 igvn->hash_insert(use); 2332 igvn->hash_insert(use);
844 record_for_optimizer(use); 2333 record_for_optimizer(use);
871 2360
872 // 2361 //
873 // Search memory chain of "mem" to find a MemNode whose address 2362 // Search memory chain of "mem" to find a MemNode whose address
874 // is in the specified alias slice. 2363 // is in the specified alias slice.
875 // 2364 //
876 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis, PhaseGVN *phase) { 2365 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
877 if (orig_mem == NULL) 2366 if (orig_mem == NULL)
878 return orig_mem; 2367 return orig_mem;
879 Compile* C = phase->C; 2368 Compile* C = _compile;
2369 PhaseGVN* igvn = _igvn;
880 const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr(); 2370 const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
881 bool is_instance = (toop != NULL) && toop->is_known_instance(); 2371 bool is_instance = (toop != NULL) && toop->is_known_instance();
882 Node *start_mem = C->start()->proj_out(TypeFunc::Memory); 2372 Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
883 Node *prev = NULL; 2373 Node *prev = NULL;
884 Node *result = orig_mem; 2374 Node *result = orig_mem;
885 while (prev != result) { 2375 while (prev != result) {
886 prev = result; 2376 prev = result;
887 if (result == start_mem) 2377 if (result == start_mem)
888 break; // hit one of our sentinels 2378 break; // hit one of our sentinels
889 if (result->is_Mem()) { 2379 if (result->is_Mem()) {
890 const Type *at = phase->type(result->in(MemNode::Address)); 2380 const Type *at = igvn->type(result->in(MemNode::Address));
891 if (at == Type::TOP) 2381 if (at == Type::TOP)
892 break; // Dead 2382 break; // Dead
893 assert (at->isa_ptr() != NULL, "pointer type required."); 2383 assert (at->isa_ptr() != NULL, "pointer type required.");
894 int idx = C->get_alias_index(at->is_ptr()); 2384 int idx = C->get_alias_index(at->is_ptr());
895 if (idx == alias_idx) 2385 if (idx == alias_idx)
907 Node *proj_in = result->in(0); 2397 Node *proj_in = result->in(0);
908 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) { 2398 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
909 break; // hit one of our sentinels 2399 break; // hit one of our sentinels
910 } else if (proj_in->is_Call()) { 2400 } else if (proj_in->is_Call()) {
911 CallNode *call = proj_in->as_Call(); 2401 CallNode *call = proj_in->as_Call();
912 if (!call->may_modify(toop, phase)) { 2402 if (!call->may_modify(toop, igvn)) {
913 result = call->in(TypeFunc::Memory); 2403 result = call->in(TypeFunc::Memory);
914 } 2404 }
915 } else if (proj_in->is_Initialize()) { 2405 } else if (proj_in->is_Initialize()) {
916 AllocateNode* alloc = proj_in->as_Initialize()->allocation(); 2406 AllocateNode* alloc = proj_in->as_Initialize()->allocation();
917 // Stop if this is the initialization for the object instance which 2407 // Stop if this is the initialization for the object instance which
926 MergeMemNode *mmem = result->as_MergeMem(); 2416 MergeMemNode *mmem = result->as_MergeMem();
927 result = step_through_mergemem(mmem, alias_idx, toop); 2417 result = step_through_mergemem(mmem, alias_idx, toop);
928 if (result == mmem->base_memory()) { 2418 if (result == mmem->base_memory()) {
929 // Didn't find instance memory, search through general slice recursively. 2419 // Didn't find instance memory, search through general slice recursively.
930 result = mmem->memory_at(C->get_general_index(alias_idx)); 2420 result = mmem->memory_at(C->get_general_index(alias_idx));
931 result = find_inst_mem(result, alias_idx, orig_phis, phase); 2421 result = find_inst_mem(result, alias_idx, orig_phis);
932 if (C->failing()) { 2422 if (C->failing()) {
933 return NULL; 2423 return NULL;
934 } 2424 }
935 mmem->set_memory_at(alias_idx, result); 2425 mmem->set_memory_at(alias_idx, result);
936 } 2426 }
937 } else if (result->is_Phi() && 2427 } else if (result->is_Phi() &&
938 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { 2428 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
939 Node *un = result->as_Phi()->unique_input(phase); 2429 Node *un = result->as_Phi()->unique_input(igvn);
940 if (un != NULL) { 2430 if (un != NULL) {
941 orig_phis.append_if_missing(result->as_Phi()); 2431 orig_phis.append_if_missing(result->as_Phi());
942 result = un; 2432 result = un;
943 } else { 2433 } else {
944 break; 2434 break;
945 } 2435 }
946 } else if (result->is_ClearArray()) { 2436 } else if (result->is_ClearArray()) {
947 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), phase)) { 2437 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
948 // Cannot bypass initialization of the instance 2438 // Cannot bypass initialization of the instance
949 // we are looking for. 2439 // we are looking for.
950 break; 2440 break;
951 } 2441 }
952 // Otherwise skip it (the call updated 'result' value). 2442 // Otherwise skip it (the call updated 'result' value).
953 } else if (result->Opcode() == Op_SCMemProj) { 2443 } else if (result->Opcode() == Op_SCMemProj) {
954 assert(result->in(0)->is_LoadStore(), "sanity"); 2444 assert(result->in(0)->is_LoadStore(), "sanity");
955 const Type *at = phase->type(result->in(0)->in(MemNode::Address)); 2445 const Type *at = igvn->type(result->in(0)->in(MemNode::Address));
956 if (at != Type::TOP) { 2446 if (at != Type::TOP) {
957 assert (at->isa_ptr() != NULL, "pointer type required."); 2447 assert (at->isa_ptr() != NULL, "pointer type required.");
958 int idx = C->get_alias_index(at->is_ptr()); 2448 int idx = C->get_alias_index(at->is_ptr());
959 assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node accesses its field"); 2449 assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node accesses its field");
960 break; 2450 break;
970 // Push all non-instance Phis on the orig_phis worklist to update inputs 2460 // Push all non-instance Phis on the orig_phis worklist to update inputs
971 // during Phase 4 if needed. 2461 // during Phase 4 if needed.
972 orig_phis.append_if_missing(mphi); 2462 orig_phis.append_if_missing(mphi);
973 } else if (C->get_alias_index(t) != alias_idx) { 2463 } else if (C->get_alias_index(t) != alias_idx) {
974 // Create a new Phi with the specified alias index type. 2464 // Create a new Phi with the specified alias index type.
975 result = split_memory_phi(mphi, alias_idx, orig_phis, phase); 2465 result = split_memory_phi(mphi, alias_idx, orig_phis);
976 } 2466 }
977 } 2467 }
978 // the result is either a MemNode, PhiNode or InitializeNode. 2468 // the result is either a MemNode, PhiNode or InitializeNode.
979 return result; 2469 return result;
980 } 2470 }
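Stripped of the MergeMem, Phi and Initialize special cases, the function is a chase up the memory chain that stops at the first state in the requested alias slice or at a sentinel. A standalone model:

    #include <cstddef>
    #include <cassert>

    struct Mem { int alias_idx; Mem* in; };

    // Walk toward the definition, bypassing states in unrelated slices;
    // return a matching state or the chain's sentinel.
    static Mem* find_mem(Mem* m, int alias_idx) {
      while (m->in != NULL && m->alias_idx != alias_idx) {
        m = m->in;
      }
      return m;
    }

    int main() {
      Mem start = { 0, NULL };    // sentinel (start memory)
      Mem other = { 7, &start };  // store to an unrelated slice
      Mem mine  = { 4, &other };  // store to the instance's slice
      assert(find_mem(&mine, 4) == &mine);
      assert(find_mem(&other, 4) == &start);  // walked past the store
      return 0;
    }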
1069 // 100 LoadP _ 80 20 ... alias_index=4 2559 // 100 LoadP _ 80 20 ... alias_index=4
1070 // 2560 //
1071 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) { 2561 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) {
1072 GrowableArray<Node *> memnode_worklist; 2562 GrowableArray<Node *> memnode_worklist;
1073 GrowableArray<PhiNode *> orig_phis; 2563 GrowableArray<PhiNode *> orig_phis;
1074
1075 PhaseIterGVN *igvn = _igvn; 2564 PhaseIterGVN *igvn = _igvn;
1076 uint new_index_start = (uint) _compile->num_alias_types(); 2565 uint new_index_start = (uint) _compile->num_alias_types();
1077 Arena* arena = Thread::current()->resource_area(); 2566 Arena* arena = Thread::current()->resource_area();
1078 VectorSet visited(arena); 2567 VectorSet visited(arena);
1079 2568 ideal_nodes.clear(); // Reset for use with set_map/get_map.
2569 uint unique_old = _compile->unique();
1080 2570
1081 // Phase 1: Process possible allocations from alloc_worklist. 2571 // Phase 1: Process possible allocations from alloc_worklist.
1082 // Create instance types for the CheckCastPP for allocations where possible. 2572 // Create instance types for the CheckCastPP for allocations where possible.
1083 // 2573 //
1084 // (Note: don't forget to change the order of the second AddP node on 2574 // (Note: don't forget to change the order of the second AddP node on
1086 // see the comment in find_second_addp().) 2576 // see the comment in find_second_addp().)
1087 // 2577 //
1088 while (alloc_worklist.length() != 0) { 2578 while (alloc_worklist.length() != 0) {
1089 Node *n = alloc_worklist.pop(); 2579 Node *n = alloc_worklist.pop();
1090 uint ni = n->_idx; 2580 uint ni = n->_idx;
1091 const TypeOopPtr* tinst = NULL;
1092 if (n->is_Call()) { 2581 if (n->is_Call()) {
1093 CallNode *alloc = n->as_Call(); 2582 CallNode *alloc = n->as_Call();
1094 // copy escape information to call node 2583 // copy escape information to call node
1095 PointsToNode* ptn = ptnode_adr(alloc->_idx); 2584 PointsToNode* ptn = ptnode_adr(alloc->_idx);
1096 PointsToNode::EscapeState es = escape_state(alloc); 2585 PointsToNode::EscapeState es = ptn->escape_state();
1097 // We have an allocation or call which returns a Java object, 2586 // We have an allocation or call which returns a Java object,
1098 // see if it is unescaped. 2587 // see if it is unescaped.
1099 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) 2588 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable())
1100 continue; 2589 continue;
1101
1102 // Find CheckCastPP for the allocate or for the return value of a call 2590 // Find CheckCastPP for the allocate or for the return value of a call
1103 n = alloc->result_cast(); 2591 n = alloc->result_cast();
1104 if (n == NULL) { // No uses except Initialize node 2592 if (n == NULL) { // No uses except Initialize node
1105 if (alloc->is_Allocate()) { 2593 if (alloc->is_Allocate()) {
1106 // Set the scalar_replaceable flag for allocation 2594 // Set the scalar_replaceable flag for allocation
1143 if (alloc->is_Allocate()) { 2631 if (alloc->is_Allocate()) {
1144 // Set the scalar_replaceable flag for allocation 2632 // Set the scalar_replaceable flag for allocation
1145 // so it could be eliminated. 2633 // so it could be eliminated.
1146 alloc->as_Allocate()->_is_scalar_replaceable = true; 2634 alloc->as_Allocate()->_is_scalar_replaceable = true;
1147 } 2635 }
1148 set_escape_state(n->_idx, es); // CheckCastPP escape state 2636 set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state
1149 // in order for an object to be scalar-replaceable, it must be: 2637 // in order for an object to be scalar-replaceable, it must be:
1150 // - a direct allocation (not a call returning an object) 2638 // - a direct allocation (not a call returning an object)
1151 // - non-escaping 2639 // - non-escaping
1152 // - eligible to be a unique type 2640 // - eligible to be a unique type
1153 // - not determined to be ineligible by escape analysis 2641 // - not determined to be ineligible by escape analysis
1154 assert(ptnode_adr(alloc->_idx)->_node != NULL && 2642 set_map(alloc, n);
1155 ptnode_adr(n->_idx)->_node != NULL, "should be registered"); 2643 set_map(n, alloc);
1156 set_map(alloc->_idx, n);
1157 set_map(n->_idx, alloc);
1158 const TypeOopPtr *t = igvn->type(n)->isa_oopptr(); 2644 const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
1159 if (t == NULL) 2645 if (t == NULL)
1160 continue; // not a TypeOopPtr 2646 continue; // not a TypeOopPtr
1161 tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni); 2647 const TypeOopPtr* tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni);
1162 igvn->hash_delete(n); 2648 igvn->hash_delete(n);
1163 igvn->set_type(n, tinst); 2649 igvn->set_type(n, tinst);
1164 n->raise_bottom_type(tinst); 2650 n->raise_bottom_type(tinst);
1165 igvn->hash_insert(n); 2651 igvn->hash_insert(n);
1166 record_for_optimizer(n); 2652 record_for_optimizer(n);
1167 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) { 2653 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
1168 2654
1169 // First, put on the worklist all Field edges from Connection Graph 2655 // First, put on the worklist all Field edges from Connection Graph
1170 // which is more accurate than putting immediate users from Ideal Graph. 2656 // which is more accurate than putting immediate users from Ideal Graph.
1171 for (uint e = 0; e < ptn->edge_count(); e++) { 2657 for (EdgeIterator e(ptn); e.has_next(); e.next()) {
1172 Node *use = ptnode_adr(ptn->edge_target(e))->_node; 2658 PointsToNode* tgt = e.get();
1173 assert(ptn->edge_type(e) == PointsToNode::FieldEdge && use->is_AddP(), 2659 Node* use = tgt->ideal_node();
2660 assert(tgt->is_Field() && use->is_AddP(),
1174 "only AddP nodes are Field edges in CG"); 2661 "only AddP nodes are Field edges in CG");
1175 if (use->outcnt() > 0) { // Don't process dead nodes 2662 if (use->outcnt() > 0) { // Don't process dead nodes
1176 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base)); 2663 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
1177 if (addp2 != NULL) { 2664 if (addp2 != NULL) {
1178 assert(alloc->is_AllocateArray(),"array allocation was expected"); 2665 assert(alloc->is_AllocateArray(),"array allocation was expected");
1200 memnode_worklist.append_if_missing(use); 2687 memnode_worklist.append_if_missing(use);
1201 } 2688 }
1202 } 2689 }
1203 } 2690 }
1204 } else if (n->is_AddP()) { 2691 } else if (n->is_AddP()) {
1205 VectorSet* ptset = PointsTo(get_addp_base(n)); 2692 JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
1206 assert(ptset->Size() == 1, "AddP address is unique"); 2693 if (jobj == NULL || jobj == phantom_obj) {
1207 uint elem = ptset->getelem(); // Allocation node's index 2694 #ifdef ASSERT
1208 if (elem == _phantom_object) { 2695 ptnode_adr(get_addp_base(n)->_idx)->dump();
1209 assert(false, "escaped allocation"); 2696 ptnode_adr(n->_idx)->dump();
1210 continue; // Assume the value was set outside this method. 2697 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
1211 } 2698 #endif
1212 Node *base = get_map(elem); // CheckCastPP node 2699 _compile->record_failure(C2Compiler::retry_no_escape_analysis());
1213 if (!split_AddP(n, base, igvn)) continue; // wrong type from dead path 2700 return;
1214 tinst = igvn->type(base)->isa_oopptr(); 2701 }
2702 Node *base = get_map(jobj->idx()); // CheckCastPP node
2703 if (!split_AddP(n, base)) continue; // wrong type from dead path
1215 } else if (n->is_Phi() || 2704 } else if (n->is_Phi() ||
1216 n->is_CheckCastPP() || 2705 n->is_CheckCastPP() ||
1217 n->is_EncodeP() || 2706 n->is_EncodeP() ||
1218 n->is_DecodeN() || 2707 n->is_DecodeN() ||
1219 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) { 2708 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
1220 if (visited.test_set(n->_idx)) { 2709 if (visited.test_set(n->_idx)) {
1221 assert(n->is_Phi(), "loops only through Phi's"); 2710 assert(n->is_Phi(), "loops only through Phi's");
1222 continue; // already processed 2711 continue; // already processed
1223 } 2712 }
1224 VectorSet* ptset = PointsTo(n); 2713 JavaObjectNode* jobj = unique_java_object(n);
1225 if (ptset->Size() == 1) { 2714 if (jobj == NULL || jobj == phantom_obj) {
1226 uint elem = ptset->getelem(); // Allocation node's index 2715 #ifdef ASSERT
1227 if (elem == _phantom_object) { 2716 ptnode_adr(n->_idx)->dump();
1228 assert(false, "escaped allocation"); 2717 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
1229 continue; // Assume the value was set outside this method. 2718 #endif
1230 } 2719 _compile->record_failure(C2Compiler::retry_no_escape_analysis());
1231 Node *val = get_map(elem); // CheckCastPP node 2720 return;
2721 } else {
2722 Node *val = get_map(jobj->idx()); // CheckCastPP node
1232 TypeNode *tn = n->as_Type(); 2723 TypeNode *tn = n->as_Type();
1233 tinst = igvn->type(val)->isa_oopptr(); 2724 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
1234 assert(tinst != NULL && tinst->is_known_instance() && 2725 assert(tinst != NULL && tinst->is_known_instance() &&
1235 (uint)tinst->instance_id() == elem, "instance type expected."); 2726 tinst->instance_id() == jobj->idx(), "instance type expected.");
1236 2727
1237 const Type *tn_type = igvn->type(tn); 2728 const Type *tn_type = igvn->type(tn);
1238 const TypeOopPtr *tn_t; 2729 const TypeOopPtr *tn_t;
1239 if (tn_type->isa_narrowoop()) { 2730 if (tn_type->isa_narrowoop()) {
1240 tn_t = tn_type->make_ptr()->isa_oopptr(); 2731 tn_t = tn_type->make_ptr()->isa_oopptr();
1241 } else { 2732 } else {
1242 tn_t = tn_type->isa_oopptr(); 2733 tn_t = tn_type->isa_oopptr();
1243 } 2734 }
1244
1245 if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) { 2735 if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
1246 if (tn_type->isa_narrowoop()) { 2736 if (tn_type->isa_narrowoop()) {
1247 tn_type = tinst->make_narrowoop(); 2737 tn_type = tinst->make_narrowoop();
1248 } else { 2738 } else {
1249 tn_type = tinst; 2739 tn_type = tinst;
1312 } 2802 }
1313 2803
1314 } 2804 }
1315 // New alias types were created in split_AddP(). 2805 // New alias types were created in split_AddP().
1316 uint new_index_end = (uint) _compile->num_alias_types(); 2806 uint new_index_end = (uint) _compile->num_alias_types();
2807 assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");
1317 2808
1318 // Phase 2: Process MemNodes from memnode_worklist. Compute new address types and 2809 // Phase 2: Process MemNodes from memnode_worklist. Compute new address types and
1319 // new values for Memory inputs (the Memory inputs are not 2810 // new values for Memory inputs (the Memory inputs are not
1320 // actually updated until Phase 4). 2811 // actually updated until Phase 4).
1321 if (memnode_worklist.length() == 0) 2812 if (memnode_worklist.length() == 0)
1322 return; // nothing to do 2813 return; // nothing to do
1323
1324 while (memnode_worklist.length() != 0) { 2814 while (memnode_worklist.length() != 0) {
1325 Node *n = memnode_worklist.pop(); 2815 Node *n = memnode_worklist.pop();
1326 if (visited.test_set(n->_idx)) 2816 if (visited.test_set(n->_idx))
1327 continue; 2817 continue;
1328 if (n->is_Phi() || n->is_ClearArray()) { 2818 if (n->is_Phi() || n->is_ClearArray()) {
1339 if (addr_t == Type::TOP) 2829 if (addr_t == Type::TOP)
1340 continue; 2830 continue;
1341 assert (addr_t->isa_ptr() != NULL, "pointer type required."); 2831 assert (addr_t->isa_ptr() != NULL, "pointer type required.");
1342 int alias_idx = _compile->get_alias_index(addr_t->is_ptr()); 2832 int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
1343 assert ((uint)alias_idx < new_index_end, "wrong alias index"); 2833 assert ((uint)alias_idx < new_index_end, "wrong alias index");
1344 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis, igvn); 2834 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
1345 if (_compile->failing()) { 2835 if (_compile->failing()) {
1346 return; 2836 return;
1347 } 2837 }
1348 if (mem != n->in(MemNode::Memory)) { 2838 if (mem != n->in(MemNode::Memory)) {
1349 // We delay the memory edge update since we need the old one in 2839 // We delay the memory edge update since we need the old one in
1350 // the MergeMem code below when instance memory slices are separated. 2840 // the MergeMem code below when instance memory slices are separated.
1351 debug_only(Node* pn = ptnode_adr(n->_idx)->_node;) 2841 set_map(n, mem);
1352 assert(pn == NULL || pn == n, "wrong node");
1353 set_map(n->_idx, mem);
1354 ptnode_adr(n->_idx)->_node = n;
1355 } 2842 }
1356 if (n->is_Load()) { 2843 if (n->is_Load()) {
1357 continue; // don't push users 2844 continue; // don't push users
1358 } else if (n->is_LoadStore()) { 2845 } else if (n->is_LoadStore()) {
1359 // get the memory projection 2846 // get the memory projection
1440 // already a memory slice of the instance along the memory chain. 2927 // already a memory slice of the instance along the memory chain.
1441 for (uint ni = new_index_start; ni < new_index_end; ni++) { 2928 for (uint ni = new_index_start; ni < new_index_end; ni++) {
1442 if((uint)_compile->get_general_index(ni) == i) { 2929 if((uint)_compile->get_general_index(ni) == i) {
1443 Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni); 2930 Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
1444 if (nmm->is_empty_memory(m)) { 2931 if (nmm->is_empty_memory(m)) {
1445 Node* result = find_inst_mem(mem, ni, orig_phis, igvn); 2932 Node* result = find_inst_mem(mem, ni, orig_phis);
1446 if (_compile->failing()) { 2933 if (_compile->failing()) {
1447 return; 2934 return;
1448 } 2935 }
1449 nmm->set_memory_at(ni, result); 2936 nmm->set_memory_at(ni, result);
1450 } 2937 }
1456 const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr(); 2943 const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
1457 Node* result = step_through_mergemem(nmm, ni, tinst); 2944 Node* result = step_through_mergemem(nmm, ni, tinst);
1458 if (result == nmm->base_memory()) { 2945 if (result == nmm->base_memory()) {
1459 // Didn't find instance memory, search through general slice recursively. 2946 // Didn't find instance memory, search through general slice recursively.
1460 result = nmm->memory_at(_compile->get_general_index(ni)); 2947 result = nmm->memory_at(_compile->get_general_index(ni));
1461 result = find_inst_mem(result, ni, orig_phis, igvn); 2948 result = find_inst_mem(result, ni, orig_phis);
1462 if (_compile->failing()) { 2949 if (_compile->failing()) {
1463 return; 2950 return;
1464 } 2951 }
1465 nmm->set_memory_at(ni, result); 2952 nmm->set_memory_at(ni, result);
1466 } 2953 }
1480 PhiNode *phi = orig_phis.at(j); 2967 PhiNode *phi = orig_phis.at(j);
1481 int alias_idx = _compile->get_alias_index(phi->adr_type()); 2968 int alias_idx = _compile->get_alias_index(phi->adr_type());
1482 igvn->hash_delete(phi); 2969 igvn->hash_delete(phi);
1483 for (uint i = 1; i < phi->req(); i++) { 2970 for (uint i = 1; i < phi->req(); i++) {
1484 Node *mem = phi->in(i); 2971 Node *mem = phi->in(i);
1485 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis, igvn); 2972 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
1486 if (_compile->failing()) { 2973 if (_compile->failing()) {
1487 return; 2974 return;
1488 } 2975 }
1489 if (mem != new_mem) { 2976 if (mem != new_mem) {
1490 phi->set_req(i, new_mem); 2977 phi->set_req(i, new_mem);
1494 record_for_optimizer(phi); 2981 record_for_optimizer(phi);
1495 } 2982 }
1496 2983
1497 // Update the memory inputs of MemNodes with the value we computed 2984 // Update the memory inputs of MemNodes with the value we computed
1498 // in Phase 2 and move stores memory users to corresponding memory slices. 2985 // in Phase 2 and move stores memory users to corresponding memory slices.
1499
1500 // Disable memory split verification code until the fix for 6984348. 2986 // Disable memory split verification code until the fix for 6984348.
1501 // Currently it produces false negative results since it does not cover all cases. 2987 // Currently it produces false negative results since it does not cover all cases.
1502 #if 0 // ifdef ASSERT 2988 #if 0 // ifdef ASSERT
1503 visited.Reset(); 2989 visited.Reset();
1504 Node_Stack old_mems(arena, _compile->unique() >> 2); 2990 Node_Stack old_mems(arena, _compile->unique() >> 2);
1505 #endif 2991 #endif
1506 for (uint i = 0; i < nodes_size(); i++) { 2992 for (uint i = 0; i < ideal_nodes.size(); i++) {
1507 Node *nmem = get_map(i); 2993 Node* n = ideal_nodes.at(i);
1508 if (nmem != NULL) { 2994 Node* nmem = get_map(n->_idx);
1509 Node *n = ptnode_adr(i)->_node; 2995 assert(nmem != NULL, "sanity");
1510 assert(n != NULL, "sanity"); 2996 if (n->is_Mem()) {
1511 if (n->is_Mem()) {
1512 #if 0 // ifdef ASSERT 2997 #if 0 // ifdef ASSERT
1513 Node* old_mem = n->in(MemNode::Memory); 2998 Node* old_mem = n->in(MemNode::Memory);
1514 if (!visited.test_set(old_mem->_idx)) { 2999 if (!visited.test_set(old_mem->_idx)) {
1515 old_mems.push(old_mem, old_mem->outcnt()); 3000 old_mems.push(old_mem, old_mem->outcnt());
1516 } 3001 }
1517 #endif 3002 #endif
1518 assert(n->in(MemNode::Memory) != nmem, "sanity"); 3003 assert(n->in(MemNode::Memory) != nmem, "sanity");
1519 if (!n->is_Load()) { 3004 if (!n->is_Load()) {
1520 // Move memory users of a store first. 3005 // Move memory users of a store first.
1521 move_inst_mem(n, orig_phis, igvn); 3006 move_inst_mem(n, orig_phis);
1522 } 3007 }
1523 // Now update memory input 3008 // Now update memory input
1524 igvn->hash_delete(n); 3009 igvn->hash_delete(n);
1525 n->set_req(MemNode::Memory, nmem); 3010 n->set_req(MemNode::Memory, nmem);
1526 igvn->hash_insert(n); 3011 igvn->hash_insert(n);
1527 record_for_optimizer(n); 3012 record_for_optimizer(n);
1528 } else { 3013 } else {
1529 assert(n->is_Allocate() || n->is_CheckCastPP() || 3014 assert(n->is_Allocate() || n->is_CheckCastPP() ||
1530 n->is_AddP() || n->is_Phi(), "unknown node used for set_map()"); 3015 n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
1531 }
1532 } 3016 }
1533 } 3017 }
1534 #if 0 // ifdef ASSERT 3018 #if 0 // ifdef ASSERT
1535 // Verify that memory was split correctly 3019 // Verify that memory was split correctly
1536 while (old_mems.is_nonempty()) { 3020 while (old_mems.is_nonempty()) {
1540 assert(old_cnt == old_mem->outcnt(), "old mem could be lost"); 3024 assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
1541 } 3025 }
1542 #endif 3026 #endif
1543 } 3027 }
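// What split_unique_types() above achieves, in miniature: every
// scalar-replaceable allocation receives its own instance id, and each
// (instance id, field offset) pair is mapped to a fresh alias index so that
// memory slices of different instances never merge. A toy model of that
// keying, assuming simplified std::map storage (AliasTable and slice() are
// illustrative names, not the C2 encoding):
#include <map>
#include <utility>

typedef std::pair<int, int> InstanceField;   // (instance id, field offset)

struct AliasTable {
  std::map<InstanceField, int> index_of;     // slices created so far
  int next_index;                            // next free alias index
  explicit AliasTable(int general_count) : next_index(general_count) {}

  // Return the alias index for a field of a known instance, creating a new
  // slice the first time the pair is seen (like the new alias types counted
  // by new_index_end in Phase 1 above).
  int slice(int instance_id, int offset) {
    InstanceField key(instance_id, offset);
    std::map<InstanceField, int>::iterator it = index_of.find(key);
    if (it != index_of.end()) return it->second;
    index_of[key] = next_index;
    return next_index++;
  }
};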
1544 3028
1545 bool ConnectionGraph::has_candidates(Compile *C) {
1546 // EA brings benefits only when the code has allocations and/or locks which
1547 // are represented by ideal Macro nodes.
1548 int cnt = C->macro_count();
1549 for( int i=0; i < cnt; i++ ) {
1550 Node *n = C->macro_node(i);
1551 if ( n->is_Allocate() )
1552 return true;
1553 if( n->is_Lock() ) {
1554 Node* obj = n->as_Lock()->obj_node()->uncast();
1555 if( !(obj->is_Parm() || obj->is_Con()) )
1556 return true;
1557 }
1558 }
1559 return false;
1560 }
1561
1562 void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
1563 // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
1564 // to create space for them in ConnectionGraph::_nodes[].
1565 Node* oop_null = igvn->zerocon(T_OBJECT);
1566 Node* noop_null = igvn->zerocon(T_NARROWOOP);
1567
1568 ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
1569 // Perform escape analysis
1570 if (congraph->compute_escape()) {
1571 // There are non escaping objects.
1572 C->set_congraph(congraph);
1573 }
1574
1575 // Cleanup.
1576 if (oop_null->outcnt() == 0)
1577 igvn->hash_delete(oop_null);
1578 if (noop_null->outcnt() == 0)
1579 igvn->hash_delete(noop_null);
1580 }
1581
1582 bool ConnectionGraph::compute_escape() {
1583 Compile* C = _compile;
1584
1585 // 1. Populate Connection Graph (CG) with Ideal nodes.
1586
1587 Unique_Node_List worklist_init;
1588 worklist_init.map(C->unique(), NULL); // preallocate space
1589
1590 // Initialize worklist
1591 if (C->root() != NULL) {
1592 worklist_init.push(C->root());
1593 }
1594
1595 GrowableArray<Node*> alloc_worklist;
1596 GrowableArray<Node*> addp_worklist;
1597 GrowableArray<Node*> ptr_cmp_worklist;
1598 GrowableArray<Node*> storestore_worklist;
1599 PhaseGVN* igvn = _igvn;
1600
1601 // Push all useful nodes onto CG list and set their type.
1602 for( uint next = 0; next < worklist_init.size(); ++next ) {
1603 Node* n = worklist_init.at(next);
1604 record_for_escape_analysis(n, igvn);
1605 // Only the results of allocations and java static calls are checked
1606 // for escape status. See process_call_result() below.
1607 if (n->is_Allocate() || (n->is_CallStaticJava() &&
1608 ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject)) {
1609 alloc_worklist.append(n);
1610 } else if(n->is_AddP()) {
1611 // Collect address nodes. Use them during stage 3 below
1612 // to build initial connection graph field edges.
1613 addp_worklist.append(n);
1614 } else if (n->is_MergeMem()) {
1615 // Collect all MergeMem nodes to add memory slices for
1616 // scalar replaceable objects in split_unique_types().
1617 _mergemem_worklist.append(n->as_MergeMem());
1618 } else if (OptimizePtrCompare && n->is_Cmp() &&
1619 (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
1620 // Pointer compare nodes
1621 ptr_cmp_worklist.append(n);
1622 } else if (n->is_MemBarStoreStore()) {
1623 // Collect all MemBarStoreStore nodes so that depending on the
1624 // escape status of the associated Allocate node some of them
1625 // may be eliminated.
1626 storestore_worklist.append(n);
1627 }
1628 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1629 Node* m = n->fast_out(i); // Get user
1630 worklist_init.push(m);
1631 }
1632 }
1633
1634 if (alloc_worklist.length() == 0) {
1635 _collecting = false;
1636 return false; // Nothing to do.
1637 }
1638
1639 // 2. First pass to create simple CG edges (does not require walking the CG).
1640 uint delayed_size = _delayed_worklist.size();
1641 for( uint next = 0; next < delayed_size; ++next ) {
1642 Node* n = _delayed_worklist.at(next);
1643 build_connection_graph(n, igvn);
1644 }
1645
1646 // 3. Pass to create initial field edges (JavaObject -F-> AddP)
1647 // to reduce the number of iterations during stage 4 below.
1648 uint addp_length = addp_worklist.length();
1649 for( uint next = 0; next < addp_length; ++next ) {
1650 Node* n = addp_worklist.at(next);
1651 Node* base = get_addp_base(n);
1652 if (base->is_Proj() && base->in(0)->is_Call())
1653 base = base->in(0);
1654 PointsToNode::NodeType nt = ptnode_adr(base->_idx)->node_type();
1655 if (nt == PointsToNode::JavaObject) {
1656 build_connection_graph(n, igvn);
1657 }
1658 }
1659
1660 GrowableArray<int> cg_worklist;
1661 cg_worklist.append(_phantom_object);
1662 GrowableArray<uint> worklist;
1663
1664 // 4. Build Connection Graph edges which require
1665 // walking the connection graph.
1666 _progress = false;
1667 for (uint ni = 0; ni < nodes_size(); ni++) {
1668 PointsToNode* ptn = ptnode_adr(ni);
1669 Node *n = ptn->_node;
1670 if (n != NULL) { // Call, AddP, LoadP, StoreP
1671 build_connection_graph(n, igvn);
1672 if (ptn->node_type() != PointsToNode::UnknownType)
1673 cg_worklist.append(n->_idx); // Collect CG nodes
1674 if (!_processed.test(n->_idx))
1675 worklist.append(n->_idx); // Collect C/A/L/S nodes
1676 }
1677 }
1678
1679 // After IGVN user nodes may have a smaller _idx than
1680 // their inputs, so they are processed first in the
1681 // previous loop and not all graph edges are created.
1682 // Walk over the interesting nodes again until no new
1683 // edges are created.
1684 //
1685 // Normally only 1-3 passes are needed to build the
1686 // Connection Graph, depending on graph complexity.
1687 // Observed 8 passes in jvm2008 compiler.compiler.
1688 // Set the limit to 20 to catch situations where something
1689 // went wrong, and recompile the method without EA.
1690 // Also limit build time to 30 sec (60 in debug VM).
1691
1692 #define CG_BUILD_ITER_LIMIT 20
1693
1694 #ifdef ASSERT
1695 #define CG_BUILD_TIME_LIMIT 60.0
1696 #else
1697 #define CG_BUILD_TIME_LIMIT 30.0
1698 #endif
1699
1700 uint length = worklist.length();
1701 int iterations = 0;
1702 elapsedTimer time;
1703 while(_progress &&
1704 (iterations++ < CG_BUILD_ITER_LIMIT) &&
1705 (time.seconds() < CG_BUILD_TIME_LIMIT)) {
1706 time.start();
1707 _progress = false;
1708 for( uint next = 0; next < length; ++next ) {
1709 int ni = worklist.at(next);
1710 PointsToNode* ptn = ptnode_adr(ni);
1711 Node* n = ptn->_node;
1712 assert(n != NULL, "should be known node");
1713 build_connection_graph(n, igvn);
1714 }
1715 time.stop();
1716 }
1717 if ((iterations >= CG_BUILD_ITER_LIMIT) ||
1718 (time.seconds() >= CG_BUILD_TIME_LIMIT)) {
1719 assert(false, err_msg("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
1720 time.seconds(), iterations, nodes_size(), length));
1721 // Possible infinite build_connection_graph loop,
1722 // bailout (no changes to ideal graph were made).
1723 _collecting = false;
1724 return false;
1725 }
1726 #undef CG_BUILD_ITER_LIMIT
1727 #undef CG_BUILD_TIME_LIMIT
1728
1729 // 5. Propagate escaped states.
1730 worklist.clear();
1731
1732 // mark all nodes reachable from GlobalEscape nodes
1733 (void)propagate_escape_state(&cg_worklist, &worklist, PointsToNode::GlobalEscape);
1734
1735 // mark all nodes reachable from ArgEscape nodes
1736 bool has_non_escaping_obj = propagate_escape_state(&cg_worklist, &worklist, PointsToNode::ArgEscape);
1737
1738 Arena* arena = Thread::current()->resource_area();
1739 VectorSet visited(arena);
1740
1741 // 6. Find fields initializing values for not escaped allocations
1742 uint alloc_length = alloc_worklist.length();
1743 for (uint next = 0; next < alloc_length; ++next) {
1744 Node* n = alloc_worklist.at(next);
1745 PointsToNode::EscapeState es = ptnode_adr(n->_idx)->escape_state();
1746 if (es == PointsToNode::NoEscape) {
1747 has_non_escaping_obj = true;
1748 if (n->is_Allocate()) {
1749 find_init_values(n, &visited, igvn);
1750 // The object allocated by this Allocate node will never be
1751 // seen by another thread. Mark it so that when it is
1752 // expanded no MemBarStoreStore is added.
1753 n->as_Allocate()->initialization()->set_does_not_escape();
1754 }
1755 } else if ((es == PointsToNode::ArgEscape) && n->is_Allocate()) {
1756 // Same as above. Mark this Allocate node so that when it is
1757 // expanded no MemBarStoreStore is added.
1758 n->as_Allocate()->initialization()->set_does_not_escape();
1759 }
1760 }
1761
1762 uint cg_length = cg_worklist.length();
1763
1764 // Skip the rest of the code if all objects escaped.
1765 if (!has_non_escaping_obj) {
1766 cg_length = 0;
1767 addp_length = 0;
1768 }
1769
1770 for (uint next = 0; next < cg_length; ++next) {
1771 int ni = cg_worklist.at(next);
1772 PointsToNode* ptn = ptnode_adr(ni);
1773 PointsToNode::NodeType nt = ptn->node_type();
1774 if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
1775 if (ptn->edge_count() == 0) {
1776 // No values were found. Assume the value was set
1777 // outside this method - add edge to phantom object.
1778 add_pointsto_edge(ni, _phantom_object);
1779 }
1780 }
1781 }
1782
1783 // 7. Remove deferred edges from the graph.
1784 for (uint next = 0; next < cg_length; ++next) {
1785 int ni = cg_worklist.at(next);
1786 PointsToNode* ptn = ptnode_adr(ni);
1787 PointsToNode::NodeType nt = ptn->node_type();
1788 if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
1789 remove_deferred(ni, &worklist, &visited);
1790 }
1791 }
1792
1793 // 8. Adjust escape state of non-escaping objects.
1794 for (uint next = 0; next < addp_length; ++next) {
1795 Node* n = addp_worklist.at(next);
1796 adjust_escape_state(n);
1797 }
1798
1799 // push all NoEscape nodes on the worklist
1800 worklist.clear();
1801 for( uint next = 0; next < cg_length; ++next ) {
1802 int nk = cg_worklist.at(next);
1803 if (ptnode_adr(nk)->escape_state() == PointsToNode::NoEscape &&
1804 !is_null_ptr(nk))
1805 worklist.push(nk);
1806 }
1807
1808 alloc_worklist.clear();
1809 // Propagate scalar_replaceable value.
1810 while(worklist.length() > 0) {
1811 uint nk = worklist.pop();
1812 PointsToNode* ptn = ptnode_adr(nk);
1813 Node* n = ptn->_node;
1814 bool scalar_replaceable = ptn->scalar_replaceable();
1815 if (n->is_Allocate() && scalar_replaceable) {
1816 // Push scalar replaceable allocations on alloc_worklist
1817 // for processing in split_unique_types(). Note that the
1818 // following code may change the scalar_replaceable value.
1819 alloc_worklist.append(n);
1820 }
1821 uint e_cnt = ptn->edge_count();
1822 for (uint ei = 0; ei < e_cnt; ei++) {
1823 uint npi = ptn->edge_target(ei);
1824 if (is_null_ptr(npi))
1825 continue;
1826 PointsToNode *np = ptnode_adr(npi);
1827 if (np->escape_state() < PointsToNode::NoEscape) {
1828 set_escape_state(npi, PointsToNode::NoEscape);
1829 if (!scalar_replaceable) {
1830 np->set_scalar_replaceable(false);
1831 }
1832 worklist.push(npi);
1833 } else if (np->scalar_replaceable() && !scalar_replaceable) {
1834 np->set_scalar_replaceable(false);
1835 worklist.push(npi);
1836 }
1837 }
1838 }
1839
1840 _collecting = false;
1841 assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build");
1842
1843 assert(ptnode_adr(_oop_null)->escape_state() == PointsToNode::NoEscape &&
1844 ptnode_adr(_oop_null)->edge_count() == 0, "sanity");
1845 if (UseCompressedOops) {
1846 assert(ptnode_adr(_noop_null)->escape_state() == PointsToNode::NoEscape &&
1847 ptnode_adr(_noop_null)->edge_count() == 0, "sanity");
1848 }
1849
1850 if (EliminateLocks && has_non_escaping_obj) {
1851 // Mark locks before changing ideal graph.
1852 int cnt = C->macro_count();
1853 for( int i=0; i < cnt; i++ ) {
1854 Node *n = C->macro_node(i);
1855 if (n->is_AbstractLock()) { // Lock and Unlock nodes
1856 AbstractLockNode* alock = n->as_AbstractLock();
1857 if (!alock->is_non_esc_obj()) {
1858 PointsToNode::EscapeState es = escape_state(alock->obj_node());
1859 assert(es != PointsToNode::UnknownEscape, "should know");
1860 if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
1861 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
1862 // The lock could be marked eliminated by the lock coarsening
1863 // code during the first IGVN before EA. Replace the coarsened
1864 // flag so that all associated locks/unlocks are eliminated.
1865 alock->set_non_esc_obj();
1866 }
1867 }
1868 }
1869 }
1870 }
1871
1872 if (OptimizePtrCompare && has_non_escaping_obj) {
1873 // Add ConI(#CC_GT) and ConI(#CC_EQ).
1874 _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
1875 _pcmp_eq = igvn->makecon(TypeInt::CC_EQ);
1876 // Optimize object compares.
1877 while (ptr_cmp_worklist.length() != 0) {
1878 Node *n = ptr_cmp_worklist.pop();
1879 Node *res = optimize_ptr_compare(n);
1880 if (res != NULL) {
1881 #ifndef PRODUCT 3029 #ifndef PRODUCT
1882 if (PrintOptimizePtrCompare) { 3030 static const char *node_type_names[] = {
1883 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ")); 3031 "UnknownType",
1884 if (Verbose) { 3032 "JavaObject",
1885 n->dump(1); 3033 "LocalVar",
1886 } 3034 "Field",
1887 } 3035 "Arraycopy"
1888 #endif 3036 };
1889 _igvn->replace_node(n, res); 3037
1890 } 3038 static const char *esc_names[] = {
1891 } 3039 "UnknownEscape",
1892 // cleanup 3040 "NoEscape",
1893 if (_pcmp_neq->outcnt() == 0) 3041 "ArgEscape",
1894 igvn->hash_delete(_pcmp_neq); 3042 "GlobalEscape"
1895 if (_pcmp_eq->outcnt() == 0) 3043 };
1896 igvn->hash_delete(_pcmp_eq); 3044
1897 } 3045 void PointsToNode::dump(bool print_state) const {
1898 3046 NodeType nt = node_type();
1899 // For MemBarStoreStore nodes added in library_call.cpp, check 3047 tty->print("%s ", node_type_names[(int) nt]);
1900 // escape status of associated AllocateNode and optimize out 3048 if (print_state) {
1901 // MemBarStoreStore node if the allocated object never escapes. 3049 EscapeState es = escape_state();
1902 while (storestore_worklist.length() != 0) { 3050 EscapeState fields_es = fields_escape_state();
1903 Node *n = storestore_worklist.pop(); 3051 tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
1904 MemBarStoreStoreNode *storestore = n->as_MemBarStoreStore(); 3052 if (nt == PointsToNode::JavaObject && !this->scalar_replaceable())
1905 Node *alloc = storestore->in(MemBarNode::Precedent)->in(0); 3053 tty->print("NSR");
1906 assert (alloc->is_Allocate(), "storestore should point to AllocateNode"); 3054 }
1907 PointsToNode::EscapeState es = ptnode_adr(alloc->_idx)->escape_state(); 3055 if (is_Field()) {
1908 if (es == PointsToNode::NoEscape || es == PointsToNode::ArgEscape) { 3056 FieldNode* f = (FieldNode*)this;
1909 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot); 3057 tty->print("(");
1910 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory)); 3058 for (BaseIterator i(f); i.has_next(); i.next()) {
1911 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control)); 3059 PointsToNode* b = i.get();
1912 3060 tty->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : ""));
1913 _igvn->register_new_node_with_optimizer(mb); 3061 }
1914 _igvn->replace_node(storestore, mb); 3062 tty->print(" )");
1915 } 3063 }
1916 } 3064 tty->print("[");
1917 3065 for (EdgeIterator i(this); i.has_next(); i.next()) {
1918 #ifndef PRODUCT 3066 PointsToNode* e = i.get();
1919 if (PrintEscapeAnalysis) { 3067 tty->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
1920 dump(); // Dump ConnectionGraph 3068 }
1921 } 3069 tty->print(" [");
1922 #endif 3070 for (UseIterator i(this); i.has_next(); i.next()) {
1923 3071 PointsToNode* u = i.get();
1924 bool has_scalar_replaceable_candidates = false; 3072 bool is_base = false;
1925 alloc_length = alloc_worklist.length(); 3073 if (PointsToNode::is_base_use(u)) {
1926 for (uint next = 0; next < alloc_length; ++next) { 3074 is_base = true;
1927 Node* n = alloc_worklist.at(next); 3075 u = PointsToNode::get_use_node(u)->as_Field();
1928 PointsToNode* ptn = ptnode_adr(n->_idx); 3076 }
1929 assert(ptn->escape_state() == PointsToNode::NoEscape, "sanity"); 3077 tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
1930 if (ptn->scalar_replaceable()) { 3078 }
1931 has_scalar_replaceable_candidates = true; 3079 tty->print(" ]] ");
1932 break; 3080 if (_node == NULL)
1933 } 3081 tty->print_cr("<null>");
1934 } 3082 else
1935 3083 _node->dump();
1936 if ( has_scalar_replaceable_candidates && 3084 }
1937 C->AliasLevel() >= 3 && EliminateAllocations ) { 3085
1938 3086 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
1939 // Now use the escape information to create unique types for
1940 // scalar replaceable objects.
1941 split_unique_types(alloc_worklist);
1942
1943 if (C->failing()) return false;
1944
1945 C->print_method("After Escape Analysis", 2);
1946
1947 #ifdef ASSERT
1948 } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
1949 tty->print("=== No allocations eliminated for ");
1950 C->method()->print_short_name();
1951 if(!EliminateAllocations) {
1952 tty->print(" since EliminateAllocations is off ===");
1953 } else if(!has_scalar_replaceable_candidates) {
1954 tty->print(" since there are no scalar replaceable candidates ===");
1955 } else if(C->AliasLevel() < 3) {
1956 tty->print(" since AliasLevel < 3 ===");
1957 }
1958 tty->cr();
1959 #endif
1960 }
1961 return has_non_escaping_obj;
1962 }
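// The CG_BUILD_ITER_LIMIT / CG_BUILD_TIME_LIMIT loop in compute_escape()
// above is a bounded fixed-point iteration: repeat while new edges are still
// appearing, but give up once an iteration or wall-clock budget is spent.
// A self-contained sketch of that pattern, assuming a std::vector worklist
// and a <chrono> timer in place of elapsedTimer (iterate_to_fixpoint and
// Step are illustrative names):
#include <chrono>
#include <vector>

template <typename Node, typename Step>
static bool iterate_to_fixpoint(std::vector<Node*>& worklist, Step step,
                                int iter_limit, double time_limit_sec) {
  std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
  bool progress = true;
  int iterations = 0;
  while (progress && iterations++ < iter_limit) {
    progress = false;
    for (size_t i = 0; i < worklist.size(); i++) {
      progress |= step(worklist[i]);   // true when a new edge was created
    }
    std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - start;
    if (elapsed.count() >= time_limit_sec) break;  // time budget exhausted
  }
  return !progress;  // false means "bail out and compile without EA", as above
}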
1963
1964 // Find the fields' initializing values for allocations.
1965 void ConnectionGraph::find_init_values(Node* alloc, VectorSet* visited, PhaseTransform* phase) {
1966 assert(alloc->is_Allocate(), "Should be called for Allocate nodes only");
1967 PointsToNode* pta = ptnode_adr(alloc->_idx);
1968 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1969 InitializeNode* ini = alloc->as_Allocate()->initialization();
1970
1971 Compile* C = _compile;
1972 visited->Reset();
1973 // Check if an oop field's initializing value is recorded and add
1974 // a corresponding NULL value if it is not recorded.
1975 // The Connection Graph does not record a default initialization by NULL
1976 // captured by the Initialize node.
1977 //
1978 uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
1979 uint ae_cnt = pta->edge_count();
1980 bool visited_bottom_offset = false;
1981 for (uint ei = 0; ei < ae_cnt; ei++) {
1982 uint nidx = pta->edge_target(ei); // Field (AddP)
1983 PointsToNode* ptn = ptnode_adr(nidx);
1984 assert(ptn->_node->is_AddP(), "Should be AddP nodes only");
1985 int offset = ptn->offset();
1986 if (offset == Type::OffsetBot) {
1987 if (!visited_bottom_offset) {
1988 visited_bottom_offset = true;
1989 // Check only oop fields.
1990 const Type* adr_type = ptn->_node->as_AddP()->bottom_type();
1991 if (!adr_type->isa_aryptr() ||
1992 (adr_type->isa_aryptr()->klass() == NULL) ||
1993 adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
1994 // OffsetBot is used to reference an array's element,
1995 // so always add a reference to NULL since we don't
1996 // know which element is referenced.
1997 add_edge_from_fields(alloc->_idx, null_idx, offset);
1998 }
1999 }
2000 } else if (offset != oopDesc::klass_offset_in_bytes() &&
2001 !visited->test_set(offset)) {
2002
2003 // Check only oop fields.
2004 const Type* adr_type = ptn->_node->as_AddP()->bottom_type();
2005 BasicType basic_field_type = T_INT;
2006 if (adr_type->isa_instptr()) {
2007 ciField* field = C->alias_type(adr_type->isa_instptr())->field();
2008 if (field != NULL) {
2009 basic_field_type = field->layout_type();
2010 } else {
2011 // Ignore non-field loads (for example, klass loads)
2012 }
2013 } else if (adr_type->isa_aryptr()) {
2014 if (offset != arrayOopDesc::length_offset_in_bytes()) {
2015 const Type* elemtype = adr_type->isa_aryptr()->elem();
2016 basic_field_type = elemtype->array_element_basic_type();
2017 } else {
2018 // Ignore array length load
2019 }
2020 #ifdef ASSERT
2021 } else {
2022 // Raw pointers are used for initializing stores, so skip them
2023 // since they should be recorded already
2024 Node* base = get_addp_base(ptn->_node);
2025 assert(adr_type->isa_rawptr() && base->is_Proj() &&
2026 (base->in(0) == alloc),"unexpected pointer type");
2027 #endif
2028 }
2029 if (basic_field_type == T_OBJECT ||
2030 basic_field_type == T_NARROWOOP ||
2031 basic_field_type == T_ARRAY) {
2032 Node* value = NULL;
2033 if (ini != NULL) {
2034 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
2035 Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
2036 if (store != NULL && store->is_Store()) {
2037 value = store->in(MemNode::ValueIn);
2038 } else {
2039 // There could be initializing stores which follow the allocation.
2040 // For example, a volatile field store is not collected
2041 // by the Initialize node.
2042 //
2043 // We would need to check for dependent loads to separate such stores
2044 // from stores which follow loads. For now, add the initial value NULL
2045 // so that the pointer compare optimization works correctly.
2046 }
2047 }
2048 if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
2049 // A field's initializing value was not recorded. Add NULL.
2050 add_edge_from_fields(alloc->_idx, null_idx, offset);
2051 }
2052 }
2053 }
2054 }
2055 }
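// Why the NULL edges added above matter: an Initialize node only captures
// explicit stores, so a field that is never stored keeps its default null
// value and must still be modeled in the graph. A reduced sketch of the
// fallback rule, assuming a std::map of captured stores (CapturedStores and
// init_value_for are illustrative stand-ins for find_captured_store()):
#include <map>

// Captured initializing stores of one allocation, keyed by field offset;
// a missing key means the field stays default-initialized to null.
typedef std::map<int, int /* value node idx */> CapturedStores;

static int init_value_for(const CapturedStores& stores, int offset, int null_idx) {
  std::map<int, int>::const_iterator it = stores.find(offset);
  // Mirrors add_edge_from_fields(alloc, null_idx, offset) above: when no
  // initializing store was captured, the field's value is null.
  return (it != stores.end()) ? it->second : null_idx;
}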
2056
2057 // Adjust escape state after Connection Graph is built.
2058 void ConnectionGraph::adjust_escape_state(Node* n) {
2059 PointsToNode* ptn = ptnode_adr(n->_idx);
2060 assert(n->is_AddP(), "Should be called for AddP nodes only");
2061 // Search for objects which are not scalar replaceable
2062 // and mark them to propagate the state to referenced objects.
2063 //
2064
2065 int offset = ptn->offset();
2066 Node* base = get_addp_base(n);
2067 VectorSet* ptset = PointsTo(base);
2068 int ptset_size = ptset->Size();
2069
2070 // An object is not scalar replaceable if the field which may point
2071 // to it has unknown offset (unknown element of an array of objects).
2072 //
2073
2074 if (offset == Type::OffsetBot) {
2075 uint e_cnt = ptn->edge_count();
2076 for (uint ei = 0; ei < e_cnt; ei++) {
2077 uint npi = ptn->edge_target(ei);
2078 ptnode_adr(npi)->set_scalar_replaceable(false);
2079 }
2080 }
2081
2082 // Currently an object is not scalar replaceable if a LoadStore node
2083 // accesses its field since the field value is unknown after it.
2084 //
2085 bool has_LoadStore = false;
2086 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2087 Node *use = n->fast_out(i);
2088 if (use->is_LoadStore()) {
2089 has_LoadStore = true;
2090 break;
2091 }
2092 }
2093 // An object is not scalar replaceable if the address points
2094 // to an unknown field (an unknown element for arrays, offset is OffsetBot).
2095 //
2096 // Or the address may point to more than one object. This may produce
2097 // a false positive result (marked not scalar replaceable)
2098 // since the flow-insensitive escape analysis can't separate
2099 // the case when stores overwrite the field's value from the case
2100 // when stores happened on different control branches.
2101 //
2102 // Note: this will disable scalar replacement in some cases:
2103 //
2104 // Point p[] = new Point[1];
2105 // p[0] = new Point(); // Will not be scalar replaced
2106 //
2107 // but it will save us from incorrect optimizations in cases such as:
2108 //
2109 // Point p[] = new Point[1];
2110 // if ( x ) p[0] = new Point(); // Will not be scalar replaced
2111 //
2112 if (ptset_size > 1 || (ptset_size != 0 &&
2113 (has_LoadStore || offset == Type::OffsetBot))) {
2114 for( VectorSetI j(ptset); j.test(); ++j ) {
2115 ptnode_adr(j.elem)->set_scalar_replaceable(false);
2116 }
2117 }
2118 }
2119
2120 // Propagate escape states to referenced nodes.
2121 bool ConnectionGraph::propagate_escape_state(GrowableArray<int>* cg_worklist,
2122 GrowableArray<uint>* worklist,
2123 PointsToNode::EscapeState esc_state) {
2124 bool has_java_obj = false;
2125
2126 // push all nodes with the same escape state on the worklist
2127 uint cg_length = cg_worklist->length();
2128 for (uint next = 0; next < cg_length; ++next) {
2129 int nk = cg_worklist->at(next);
2130 if (ptnode_adr(nk)->escape_state() == esc_state)
2131 worklist->push(nk);
2132 }
2133 // mark all reachable nodes
2134 while (worklist->length() > 0) {
2135 int pt = worklist->pop();
2136 PointsToNode* ptn = ptnode_adr(pt);
2137 if (ptn->node_type() == PointsToNode::JavaObject &&
2138 !is_null_ptr(pt)) {
2139 has_java_obj = true;
2140 if (esc_state > PointsToNode::NoEscape) {
2141 // field values are unknown if the object escapes
2142 add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
2143 }
2144 }
2145 uint e_cnt = ptn->edge_count();
2146 for (uint ei = 0; ei < e_cnt; ei++) {
2147 uint npi = ptn->edge_target(ei);
2148 if (is_null_ptr(npi))
2149 continue;
2150 PointsToNode *np = ptnode_adr(npi);
2151 if (np->escape_state() < esc_state) {
2152 set_escape_state(npi, esc_state);
2153 worklist->push(npi);
2154 }
2155 }
2156 }
2157 // Returns true when there are non-escaping java objects.
2158 return has_java_obj && (esc_state < PointsToNode::GlobalEscape);
2159 }
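// propagate_escape_state() is a monotone walk over the escape lattice
// (NoEscape < ArgEscape < GlobalEscape): a node's state only ever rises,
// which is what guarantees termination. A self-contained sketch of that
// invariant (PtNode and the enum values are illustrative):
#include <queue>
#include <vector>

enum EscState { kNoEscape = 1, kArgEscape = 2, kGlobalEscape = 3 };

struct PtNode {
  EscState state;
  std::vector<int> targets;   // points-to / deferred edge targets
  PtNode() : state(kNoEscape) {}
};

// Raise every node reachable from the seeds to at least 'es'.
static void propagate(std::vector<PtNode>& g, std::queue<int> seeds, EscState es) {
  while (!seeds.empty()) {
    int i = seeds.front(); seeds.pop();
    for (size_t e = 0; e < g[i].targets.size(); e++) {
      int t = g[i].targets[e];
      if (g[t].state < es) {   // only move up the lattice, so each node is
        g[t].state = es;       // pushed at most once per state and the
        seeds.push(t);         // walk terminates
      }
    }
  }
}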
2160
2161 // Optimize object compares.
2162 Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
2163 assert(OptimizePtrCompare, "sanity");
2164 // Clone the returned set since PointsTo() returns a pointer
2165 // to the same structure, ConnectionGraph.pt_ptset.
2166 VectorSet ptset1 = *PointsTo(n->in(1));
2167 VectorSet ptset2 = *PointsTo(n->in(2));
2168
2169 // Check simple cases first.
2170 if (ptset1.Size() == 1) {
2171 uint pt1 = ptset1.getelem();
2172 PointsToNode* ptn1 = ptnode_adr(pt1);
2173 if (ptn1->escape_state() == PointsToNode::NoEscape) {
2174 if (ptset2.Size() == 1 && ptset2.getelem() == pt1) {
2175 // Comparing the same non-escaping object.
2176 return _pcmp_eq;
2177 }
2178 Node* obj = ptn1->_node;
2179 // Comparing a non-escaping allocation.
2180 if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
2181 !ptset2.test(pt1)) {
2182 return _pcmp_neq; // This includes nullness check.
2183 }
2184 }
2185 } else if (ptset2.Size() == 1) {
2186 uint pt2 = ptset2.getelem();
2187 PointsToNode* ptn2 = ptnode_adr(pt2);
2188 if (ptn2->escape_state() == PointsToNode::NoEscape) {
2189 Node* obj = ptn2->_node;
2190 // Comparing a non-escaping allocation.
2191 if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
2192 !ptset1.test(pt2)) {
2193 return _pcmp_neq; // This includes nullness check.
2194 }
2195 }
2196 }
2197
2198 if (!ptset1.disjoint(ptset2)) {
2199 return NULL; // Sets are not disjoint
2200 }
2201
2202 // Sets are disjoint.
2203 bool set1_has_unknown_ptr = ptset1.test(_phantom_object) != 0;
2204 bool set2_has_unknown_ptr = ptset2.test(_phantom_object) != 0;
2205 bool set1_has_null_ptr = (ptset1.test(_oop_null) | ptset1.test(_noop_null)) != 0;
2206 bool set2_has_null_ptr = (ptset2.test(_oop_null) | ptset2.test(_noop_null)) != 0;
2207
2208 if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
2209 (set2_has_unknown_ptr && set1_has_null_ptr)) {
2210 // Check nullness of unknown object.
2211 return NULL;
2212 }
2213
2214 // Disjointness by itself is not sufficient since
2215 // alias analysis is not complete for escaped objects.
2216 // Disjoint sets are definitely unrelated only when
2217 // at least one set has only non-escaping objects.
2218 if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
2219 bool has_only_non_escaping_alloc = true;
2220 for (VectorSetI i(&ptset1); i.test(); ++i) {
2221 uint pt = i.elem;
2222 PointsToNode* ptn = ptnode_adr(pt);
2223 Node* obj = ptn->_node;
2224 if (ptn->escape_state() != PointsToNode::NoEscape ||
2225 !(obj->is_Allocate() || obj->is_CallStaticJava())) {
2226 has_only_non_escaping_alloc = false;
2227 break;
2228 }
2229 }
2230 if (has_only_non_escaping_alloc) {
2231 return _pcmp_neq;
2232 }
2233 }
2234 if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
2235 bool has_only_non_escaping_alloc = true;
2236 for (VectorSetI i(&ptset2); i.test(); ++i) {
2237 uint pt = i.elem;
2238 PointsToNode* ptn = ptnode_adr(pt);
2239 Node* obj = ptn->_node;
2240 if (ptn->escape_state() != PointsToNode::NoEscape ||
2241 !(obj->is_Allocate() || obj->is_CallStaticJava())) {
2242 has_only_non_escaping_alloc = false;
2243 break;
2244 }
2245 }
2246 if (has_only_non_escaping_alloc) {
2247 return _pcmp_neq;
2248 }
2249 }
2250 return NULL;
2251 }
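// The disjointness rules above, restated as a self-contained predicate.
// PtSet and fold_ptr_compare are stand-ins for VectorSet and the code
// above, and the single-element EQ/NE fast paths are elided:
#include <set>

enum CmpResult { kEq, kNe, kUnknownCmp };

struct PtSet {
  std::set<int> ids;            // points-to elements
  bool has_unknown;             // contains the phantom object
  bool has_null;                // contains oop/narrow-oop null
  bool only_noescape_allocs;    // every element is a non-escaping allocation
  PtSet() : has_unknown(false), has_null(false), only_noescape_allocs(false) {}
};

static CmpResult fold_ptr_compare(const PtSet& s1, const PtSet& s2) {
  // Overlapping sets: both pointers may reference the same object.
  for (std::set<int>::const_iterator it = s1.ids.begin(); it != s1.ids.end(); ++it)
    if (s2.ids.count(*it)) return kUnknownCmp;
  // A possible null on one side against an unknown object on the other
  // could be a nullness check, so don't fold it.
  if ((s1.has_unknown && s2.has_null) || (s2.has_unknown && s1.has_null))
    return kUnknownCmp;
  // Disjoint sets are definitely unrelated only when at least one side
  // holds nothing but non-escaping allocations.
  if ((!s1.has_unknown && !s1.has_null && s1.only_noescape_allocs) ||
      (!s2.has_unknown && !s2.has_null && s2.only_noescape_allocs))
    return kNe;
  return kUnknownCmp;
}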
2252
2253 void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {
2254 bool is_arraycopy = false;
2255 switch (call->Opcode()) {
2256 #ifdef ASSERT
2257 case Op_Allocate:
2258 case Op_AllocateArray:
2259 case Op_Lock:
2260 case Op_Unlock:
2261 assert(false, "should be done already");
2262 break;
2263 #endif
2264 case Op_CallLeafNoFP:
2265 is_arraycopy = (call->as_CallLeaf()->_name != NULL &&
2266 strstr(call->as_CallLeaf()->_name, "arraycopy") != 0);
2267 // fall through
2268 case Op_CallLeaf:
2269 {
2270 // Stub calls: objects do not escape, but they are not scalar replaceable.
2271 // Adjust escape state for outgoing arguments.
2272 const TypeTuple * d = call->tf()->domain();
2273 bool src_has_oops = false;
2274 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2275 const Type* at = d->field_at(i);
2276 Node *arg = call->in(i)->uncast();
2277 const Type *aat = phase->type(arg);
2278 PointsToNode::EscapeState arg_esc = ptnode_adr(arg->_idx)->escape_state();
2279 if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr() &&
2280 (is_arraycopy || arg_esc < PointsToNode::ArgEscape)) {
2281 #ifdef ASSERT
2282 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2283 aat->isa_ptr() != NULL, "expecting a Ptr");
2284 if (!(is_arraycopy ||
2285 call->as_CallLeaf()->_name != NULL &&
2286 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 ||
2287 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ))
2288 ) {
2289 call->dump();
2290 assert(false, "EA: unexpected CallLeaf");
2291 }
2292 #endif
2293 if (arg_esc < PointsToNode::ArgEscape) {
2294 set_escape_state(arg->_idx, PointsToNode::ArgEscape);
2295 Node* arg_base = arg;
2296 if (arg->is_AddP()) {
2297 //
2298 // The inline_native_clone() case, when the arraycopy stub is called
2299 // after the allocation but before the Initialize and CheckCastPP nodes.
2300 // Or the normal arraycopy case for object arrays.
2301 //
2302 // Set AddP's base (Allocate) as not scalar replaceable since
2303 // a pointer to the base (with an offset) is passed as an argument.
2304 //
2305 arg_base = get_addp_base(arg);
2306 set_escape_state(arg_base->_idx, PointsToNode::ArgEscape);
2307 }
2308 }
2309
2310 bool arg_has_oops = aat->isa_oopptr() &&
2311 (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
2312 (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
2313 if (i == TypeFunc::Parms) {
2314 src_has_oops = arg_has_oops;
2315 }
2316 //
2317 // src or dst could be j.l.Object when the other is a basic type array:
2318 //
2319 // arraycopy(char[],0,Object*,0,size);
2320 // arraycopy(Object*,0,char[],0,size);
2321 //
2322 // Do nothing special in such cases.
2323 //
2324 if (is_arraycopy && (i > TypeFunc::Parms) &&
2325 src_has_oops && arg_has_oops) {
2326 // Destination object's fields reference an unknown object.
2327 Node* arg_base = arg;
2328 if (arg->is_AddP()) {
2329 arg_base = get_addp_base(arg);
2330 }
2331 for (VectorSetI s(PointsTo(arg_base)); s.test(); ++s) {
2332 uint ps = s.elem;
2333 set_escape_state(ps, PointsToNode::ArgEscape);
2334 add_edge_from_fields(ps, _phantom_object, Type::OffsetBot);
2335 }
2336 // Conservatively, all values in source object fields globally escape
2337 // since we don't know if values in destination object fields
2338 // escape (this could be traced but it is too expensive).
2339 Node* src = call->in(TypeFunc::Parms)->uncast();
2340 Node* src_base = src;
2341 if (src->is_AddP()) {
2342 src_base = get_addp_base(src);
2343 }
2344 for (VectorSetI s(PointsTo(src_base)); s.test(); ++s) {
2345 uint ps = s.elem;
2346 set_escape_state(ps, PointsToNode::ArgEscape);
2347 // Use OffsetTop to indicate that the fields globally escape.
2348 add_edge_from_fields(ps, _phantom_object, Type::OffsetTop);
2349 }
2350 }
2351 }
2352 }
2353 break;
2354 }
2355
2356 case Op_CallStaticJava:
2357 // For a static call, we know exactly what method is being called.
2358 // Use the bytecode estimator to record the call's escape effects.
2359 {
2360 ciMethod *meth = call->as_CallJava()->method();
2361 BCEscapeAnalyzer *call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
2362 // fall-through if not a Java method or no analyzer information
2363 if (call_analyzer != NULL) {
2364 const TypeTuple * d = call->tf()->domain();
2365 bool copy_dependencies = false;
2366 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2367 const Type* at = d->field_at(i);
2368 int k = i - TypeFunc::Parms;
2369 Node *arg = call->in(i)->uncast();
2370
2371 if (at->isa_oopptr() != NULL &&
2372 ptnode_adr(arg->_idx)->escape_state() < PointsToNode::GlobalEscape) {
2373
2374 bool global_escapes = false;
2375 bool fields_escapes = false;
2376 if (!call_analyzer->is_arg_stack(k)) {
2377 // The argument globally escapes; mark everything it could point to
2378 set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
2379 global_escapes = true;
2380 } else {
2381 if (!call_analyzer->is_arg_local(k)) {
2382 // The argument itself doesn't escape, but any fields might
2383 fields_escapes = true;
2384 }
2385 set_escape_state(arg->_idx, PointsToNode::ArgEscape);
2386 copy_dependencies = true;
2387 }
2388
2389 for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
2390 uint pt = j.elem;
2391 if (global_escapes) {
2392 // The argument globally escapes; mark everything it could point to
2393 set_escape_state(pt, PointsToNode::GlobalEscape);
2394 add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
2395 } else {
2396 set_escape_state(pt, PointsToNode::ArgEscape);
2397 if (fields_escapes) {
2398 // The argument itself doesn't escape, but any fields might.
2399 // Use OffsetTop to indicate such case.
2400 add_edge_from_fields(pt, _phantom_object, Type::OffsetTop);
2401 }
2402 }
2403 }
2404 }
2405 }
2406 if (copy_dependencies)
2407 call_analyzer->copy_dependencies(_compile->dependencies());
2408 break;
2409 }
2410 }
2411
2412 default:
2413 // Fall through to here if this is not a Java method, there is no
2414 // analyzer information, or it is some other type of call. Assume
2415 // the worst case: all arguments globally escape.
2416 {
2417 // adjust escape state for outgoing arguments
2418 const TypeTuple * d = call->tf()->domain();
2419 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2420 const Type* at = d->field_at(i);
2421 if (at->isa_oopptr() != NULL) {
2422 Node *arg = call->in(i)->uncast();
2423 set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
2424 for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
2425 uint pt = j.elem;
2426 set_escape_state(pt, PointsToNode::GlobalEscape);
2427 add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
2428 }
2429 }
2430 }
2431 }
2432 }
2433 }
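// The Java-call branch above performs a three-way classification per
// argument: the bytecode analyzer answers is_arg_stack(k) and
// is_arg_local(k), and everything the argument points to is raised
// accordingly. Condensed sketch (ArgAnalyzer and ArgEscapeInfo are
// illustrative interfaces, not BCEscapeAnalyzer itself):
struct ArgAnalyzer {
  virtual bool is_arg_stack(int k) const = 0;  // never stored past the call
  virtual bool is_arg_local(int k) const = 0;  // fields are not exposed either
  virtual ~ArgAnalyzer() {}
};

enum EscKind { kArgEsc, kGlobalEsc };

struct ArgEscapeInfo {
  EscKind state;
  bool fields_escape;   // when set, field contents may leak (OffsetTop above)
};

static ArgEscapeInfo classify_argument(const ArgAnalyzer& a, int k) {
  ArgEscapeInfo r;
  if (!a.is_arg_stack(k)) {
    r.state = kGlobalEsc;                  // the argument itself escapes,
    r.fields_escape = true;                // so everything it references does too
  } else {
    r.state = kArgEsc;                     // pinned only by the call itself
    r.fields_escape = !a.is_arg_local(k);  // the callee may publish its fields
  }
  return r;
}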
2434 void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *phase) {
2435 CallNode *call = resproj->in(0)->as_Call();
2436 uint call_idx = call->_idx;
2437 uint resproj_idx = resproj->_idx;
2438
2439 switch (call->Opcode()) {
2440 case Op_Allocate:
2441 {
2442 Node *k = call->in(AllocateNode::KlassNode);
2443 const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
2444 assert(kt != NULL, "TypeKlassPtr required.");
2445 ciKlass* cik = kt->klass();
2446
2447 PointsToNode::EscapeState es;
2448 uint edge_to;
2449 if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
2450 !cik->is_instance_klass() || // StressReflectiveCode
2451 cik->as_instance_klass()->has_finalizer()) {
2452 es = PointsToNode::GlobalEscape;
2453 edge_to = _phantom_object; // Could not be worse
2454 } else {
2455 es = PointsToNode::NoEscape;
2456 edge_to = call_idx;
2457 assert(ptnode_adr(call_idx)->scalar_replaceable(), "sanity");
2458 }
2459 set_escape_state(call_idx, es);
2460 add_pointsto_edge(resproj_idx, edge_to);
2461 _processed.set(resproj_idx);
2462 break;
2463 }
2464
2465 case Op_AllocateArray:
2466 {
2467
2468 Node *k = call->in(AllocateNode::KlassNode);
2469 const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
2470 assert(kt != NULL, "TypeKlassPtr required.");
2471 ciKlass* cik = kt->klass();
2472
2473 PointsToNode::EscapeState es;
2474 uint edge_to;
2475 if (!cik->is_array_klass()) { // StressReflectiveCode
2476 es = PointsToNode::GlobalEscape;
2477 edge_to = _phantom_object;
2478 } else {
2479 es = PointsToNode::NoEscape;
2480 edge_to = call_idx;
2481 assert(ptnode_adr(call_idx)->scalar_replaceable(), "sanity");
2482 int length = call->in(AllocateNode::ALength)->find_int_con(-1);
2483 if (length < 0 || length > EliminateAllocationArraySizeLimit) {
2484 // Not scalar replaceable if the length is not constant or too big.
2485 ptnode_adr(call_idx)->set_scalar_replaceable(false);
2486 }
2487 }
2488 set_escape_state(call_idx, es);
2489 add_pointsto_edge(resproj_idx, edge_to);
2490 _processed.set(resproj_idx);
2491 break;
2492 }
2493
2494 case Op_CallStaticJava:
2495 // For a static call, we know exactly what method is being called.
2496 // Use the bytecode estimator to record whether the call's return value escapes
2497 {
2498 bool done = true;
2499 const TypeTuple *r = call->tf()->range();
2500 const Type* ret_type = NULL;
2501
2502 if (r->cnt() > TypeFunc::Parms)
2503 ret_type = r->field_at(TypeFunc::Parms);
2504
2505 // Note: we use isa_ptr() instead of isa_oopptr() here because the
2506 // _multianewarray functions return a TypeRawPtr.
2507 if (ret_type == NULL || ret_type->isa_ptr() == NULL) {
2508 _processed.set(resproj_idx);
2509 break; // doesn't return a pointer type
2510 }
2511 ciMethod *meth = call->as_CallJava()->method();
2512 const TypeTuple * d = call->tf()->domain();
2513 if (meth == NULL) {
2514 // not a Java method, assume global escape
2515 set_escape_state(call_idx, PointsToNode::GlobalEscape);
2516 add_pointsto_edge(resproj_idx, _phantom_object);
2517 } else {
2518 BCEscapeAnalyzer *call_analyzer = meth->get_bcea();
2519 bool copy_dependencies = false;
2520
2521 if (call_analyzer->is_return_allocated()) {
2522 // Returns a newly allocated unescaped object; simply
2523 // update dependency information.
2524 // Mark it as NoEscape so that objects referenced by
2525 // its fields will be marked as NoEscape at least.
2526 set_escape_state(call_idx, PointsToNode::NoEscape);
2527 ptnode_adr(call_idx)->set_scalar_replaceable(false);
2528 // Field values are unknown
2529 add_edge_from_fields(call_idx, _phantom_object, Type::OffsetBot);
2530 add_pointsto_edge(resproj_idx, call_idx);
2531 copy_dependencies = true;
2532 } else {
2533 // determine whether any arguments are returned
2534 set_escape_state(call_idx, PointsToNode::ArgEscape);
2535 bool ret_arg = false;
2536 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2537 const Type* at = d->field_at(i);
2538 if (at->isa_oopptr() != NULL) {
2539 Node *arg = call->in(i)->uncast();
2540
2541 if (call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2542 ret_arg = true;
2543 PointsToNode *arg_esp = ptnode_adr(arg->_idx);
2544 if (arg_esp->node_type() == PointsToNode::UnknownType)
2545 done = false;
2546 else if (arg_esp->node_type() == PointsToNode::JavaObject)
2547 add_pointsto_edge(resproj_idx, arg->_idx);
2548 else
2549 add_deferred_edge(resproj_idx, arg->_idx);
2550 }
2551 }
2552 }
2553 if (done) {
2554 copy_dependencies = true;
2555 // is_return_local() is true when only arguments are returned.
2556 if (!ret_arg || !call_analyzer->is_return_local()) {
2557 // Returns unknown object.
2558 add_pointsto_edge(resproj_idx, _phantom_object);
2559 }
2560 }
2561 }
2562 if (copy_dependencies)
2563 call_analyzer->copy_dependencies(_compile->dependencies());
2564 }
2565 if (done)
2566 _processed.set(resproj_idx);
2567 break;
2568 }
2569
2570 default:
2571 // Some other type of call; assume the worst case: the
2572 // returned value, if any, globally escapes.
2573 {
2574 const TypeTuple *r = call->tf()->range();
2575 if (r->cnt() > TypeFunc::Parms) {
2576 const Type* ret_type = r->field_at(TypeFunc::Parms);
2577
2578 // Note: we use isa_ptr() instead of isa_oopptr() here because the
2579 // _multianewarray functions return a TypeRawPtr.
2580 if (ret_type->isa_ptr() != NULL) {
2581 set_escape_state(call_idx, PointsToNode::GlobalEscape);
2582 add_pointsto_edge(resproj_idx, _phantom_object);
2583 }
2584 }
2585 _processed.set(resproj_idx);
2586 }
2587 }
2588 }
2589
2590 // Populate Connection Graph with Ideal nodes and create simple
2591 // connection graph edges (these do not require checking the node_type of
2592 // inputs or calling PointsTo() to walk the connection graph).
2593 void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase) {
2594 if (_processed.test(n->_idx))
2595 return; // No need to redefine node's state.
2596
2597 if (n->is_Call()) {
2598 // Arguments to allocation and locking don't escape.
2599 if (n->is_Allocate()) {
2600 add_node(n, PointsToNode::JavaObject, PointsToNode::UnknownEscape, true);
2601 record_for_optimizer(n);
2602 } else if (n->is_Lock() || n->is_Unlock()) {
2603 // Put Lock and Unlock nodes on IGVN worklist to process them during
2604 // the first IGVN optimization when escape information is still available.
2605 record_for_optimizer(n);
2606 _processed.set(n->_idx);
2607 } else {
2608 // Don't mark as processed since call's arguments have to be processed.
2609 PointsToNode::NodeType nt = PointsToNode::UnknownType;
2610 PointsToNode::EscapeState es = PointsToNode::UnknownEscape;
2611
2612 // Check if a call returns an object.
2613 const TypeTuple *r = n->as_Call()->tf()->range();
2614 if (r->cnt() > TypeFunc::Parms &&
2615 r->field_at(TypeFunc::Parms)->isa_ptr() &&
2616 n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
2617 nt = PointsToNode::JavaObject;
2618 if (!n->is_CallStaticJava()) {
2619 // Since the called method is statically unknown, assume
2620 // the worst case: the returned value globally escapes.
2621 es = PointsToNode::GlobalEscape;
2622 }
2623 }
2624 add_node(n, nt, es, false);
2625 }
2626 return;
2627 }
2628
2629 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
2630 // ThreadLocal has RawPtr type.
2631 switch (n->Opcode()) {
2632 case Op_AddP:
2633 {
2634 add_node(n, PointsToNode::Field, PointsToNode::UnknownEscape, false);
2635 break;
2636 }
2637 case Op_CastX2P:
2638 { // "Unsafe" memory access.
2639 add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
2640 break;
2641 }
2642 case Op_CastPP:
2643 case Op_CheckCastPP:
2644 case Op_EncodeP:
2645 case Op_DecodeN:
2646 {
2647 add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
2648 int ti = n->in(1)->_idx;
2649 PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
2650 if (nt == PointsToNode::UnknownType) {
2651 _delayed_worklist.push(n); // Process it later.
2652 break;
2653 } else if (nt == PointsToNode::JavaObject) {
2654 add_pointsto_edge(n->_idx, ti);
2655 } else {
2656 add_deferred_edge(n->_idx, ti);
2657 }
2658 _processed.set(n->_idx);
2659 break;
2660 }
2661 case Op_ConP:
2662 {
2663 // assume all pointer constants globally escape except for null
2664 PointsToNode::EscapeState es;
2665 if (phase->type(n) == TypePtr::NULL_PTR)
2666 es = PointsToNode::NoEscape;
2667 else
2668 es = PointsToNode::GlobalEscape;
2669
2670 add_node(n, PointsToNode::JavaObject, es, true);
2671 break;
2672 }
2673 case Op_ConN:
2674 {
2675 // assume all narrow oop constants globally escape except for null
2676 PointsToNode::EscapeState es;
2677 if (phase->type(n) == TypeNarrowOop::NULL_PTR)
2678 es = PointsToNode::NoEscape;
2679 else
2680 es = PointsToNode::GlobalEscape;
2681
2682 add_node(n, PointsToNode::JavaObject, es, true);
2683 break;
2684 }
2685 case Op_CreateEx:
2686 {
2687 // assume that all exception objects globally escape
2688 add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
2689 break;
2690 }
2691 case Op_LoadKlass:
2692 case Op_LoadNKlass:
2693 {
2694 add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
2695 break;
2696 }
2697 case Op_LoadP:
2698 case Op_LoadN:
2699 {
2700 const Type *t = phase->type(n);
2701 if (t->make_ptr() == NULL) {
2702 _processed.set(n->_idx);
2703 return;
2704 }
2705 add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
2706 break;
2707 }
2708 case Op_Parm:
2709 {
2710 _processed.set(n->_idx); // No need to redefine its state.
2711 uint con = n->as_Proj()->_con;
2712 if (con < TypeFunc::Parms)
2713 return;
2714 const Type *t = n->in(0)->as_Start()->_domain->field_at(con);
2715 if (t->isa_ptr() == NULL)
2716 return;
2717 // We have to assume all input parameters globally escape
2718 // (Note: passing 'false' since _processed is already set).
2719 add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, false);
2720 break;
2721 }
2722 case Op_PartialSubtypeCheck:
2723 { // Produces Null or notNull and is used in CmpP.
2724 add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
2725 break;
2726 }
2727 case Op_Phi:
2728 {
2729 const Type *t = n->as_Phi()->type();
2730 if (t->make_ptr() == NULL) {
2731 // nothing to do if not an oop or narrow oop
2732 _processed.set(n->_idx);
2733 return;
2734 }
2735 add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
2736 uint i;
2737 for (i = 1; i < n->req() ; i++) {
2738 Node* in = n->in(i);
2739 if (in == NULL)
2740 continue; // ignore NULL
2741 in = in->uncast();
2742 if (in->is_top() || in == n)
2743 continue; // ignore top or inputs which go back to this node
2744 int ti = in->_idx;
2745 PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
2746 if (nt == PointsToNode::UnknownType) {
2747 break;
2748 } else if (nt == PointsToNode::JavaObject) {
2749 add_pointsto_edge(n->_idx, ti);
2750 } else {
2751 add_deferred_edge(n->_idx, ti);
2752 }
2753 }
2754 if (i >= n->req())
2755 _processed.set(n->_idx);
2756 else
2757 _delayed_worklist.push(n);
2758 break;
2759 }
2760 case Op_Proj:
2761 {
2762 // we are only interested in the oop result projection from a call
2763 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
2764 const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
2765 assert(r->cnt() > TypeFunc::Parms, "sanity");
2766 if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
2767 add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
2768 int ti = n->in(0)->_idx;
2769 // The call may not be registered yet (since not all its inputs are registered)
2770 // if this is the projection from the backbranch edge of a Phi.
2771 if (ptnode_adr(ti)->node_type() != PointsToNode::UnknownType) {
2772 process_call_result(n->as_Proj(), phase);
2773 }
2774 if (!_processed.test(n->_idx)) {
2775 // The call's result may need to be processed later if the call
2776 // returns its argument and the argument is not processed yet.
2777 _delayed_worklist.push(n);
2778 }
2779 break;
2780 }
2781 }
2782 _processed.set(n->_idx);
2783 break;
2784 }
2785 case Op_Return:
2786 {
2787 if( n->req() > TypeFunc::Parms &&
2788 phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
2789 // Treat Return value as LocalVar with GlobalEscape escape state.
2790 add_node(n, PointsToNode::LocalVar, PointsToNode::GlobalEscape, false);
2791 int ti = n->in(TypeFunc::Parms)->_idx;
2792 PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
2793 if (nt == PointsToNode::UnknownType) {
2794 _delayed_worklist.push(n); // Process it later.
2795 break;
2796 } else if (nt == PointsToNode::JavaObject) {
2797 add_pointsto_edge(n->_idx, ti);
2798 } else {
2799 add_deferred_edge(n->_idx, ti);
2800 }
2801 }
2802 _processed.set(n->_idx);
2803 break;
2804 }
2805 case Op_StoreP:
2806 case Op_StoreN:
2807 {
2808 const Type *adr_type = phase->type(n->in(MemNode::Address));
2809 adr_type = adr_type->make_ptr();
2810 if (adr_type->isa_oopptr()) {
2811 add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
2812 } else {
2813 Node* adr = n->in(MemNode::Address);
2814 if (adr->is_AddP() && phase->type(adr) == TypeRawPtr::NOTNULL &&
2815 adr->in(AddPNode::Address)->is_Proj() &&
2816 adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
2817 add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
2818 // We are computing a raw address for a store captured
2819 // by an Initialize; compute an appropriate address type.
2820 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2821 assert(offs != Type::OffsetBot, "offset must be a constant");
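// This raw store initializes the newly allocated object (it is captured
// by the allocation's Initialize node); the node is left unprocessed so
// the second pass can add its field edges.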
2822 } else {
2823 _processed.set(n->_idx);
2824 return;
2825 }
2826 }
2827 break;
2828 }
2829 case Op_StorePConditional:
2830 case Op_CompareAndSwapP:
2831 case Op_CompareAndSwapN:
2832 {
2833 const Type *adr_type = phase->type(n->in(MemNode::Address));
2834 adr_type = adr_type->make_ptr();
2835 if (adr_type->isa_oopptr()) {
2836 add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
2837 } else {
2838 _processed.set(n->_idx);
2839 return;
2840 }
2841 break;
2842 }
2843 case Op_AryEq:
2844 case Op_StrComp:
2845 case Op_StrEquals:
2846 case Op_StrIndexOf:
2847 {
2848 // char[] arrays passed to string intrinsics are not scalar replaceable.
2849 add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
2850 break;
2851 }
2852 case Op_ThreadLocal:
2853 {
2854 add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
2855 break;
2856 }
2857 default:
2858 ;
2859 // nothing to do
2860 }
2861 return;
2862 }
2863
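// Second pass over the ideal graph. Every node that reaches this method
// is expected to have been registered by the first pass, so its
// points-to, deferred and field edges can now be created.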
2864 void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
2865 uint n_idx = n->_idx;
2866 assert(ptnode_adr(n_idx)->_node != NULL, "node should be registered");
2867
2868 // Don't set the processed bit for AddP, LoadP and StoreP nodes since
2869 // they may need more than one pass to process.
2870 // Also don't mark Call nodes as processed since their
2871 // arguments may need more than one pass to process.
2872 if (_processed.test(n_idx))
2873 return; // No need to redefine node's state.
2874
2875 if (n->is_Call()) {
2876 CallNode *call = n->as_Call();
2877 process_call_arguments(call, phase);
2878 return;
2879 }
2880
2881 switch (n->Opcode()) {
2882 case Op_AddP:
2883 {
2884 Node *base = get_addp_base(n);
2885 int offset = address_offset(n, phase);
2886 // Create a field edge to this node from everything base could point to.
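// For example, an oop store obj.f = v is expressed as
// StoreP(AddP(obj, #offset_of_f), v); this AddP then stands for
// field f of every object that "base" may point to.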
2887 for( VectorSetI i(PointsTo(base)); i.test(); ++i ) {
2888 uint pt = i.elem;
2889 add_field_edge(pt, n_idx, offset);
2890 }
2891 break;
2892 }
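// Several of the cases below only assert(false): such nodes are
// completely processed during the first pass, which also sets their
// _processed bit, so they should never reach this second pass.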
2893 case Op_CastX2P:
2894 {
2895 assert(false, "Op_CastX2P");
2896 break;
2897 }
2898 case Op_CastPP:
2899 case Op_CheckCastPP:
2900 case Op_EncodeP:
2901 case Op_DecodeN:
2902 {
2903 int ti = n->in(1)->_idx;
2904 assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "all nodes should be registered");
2905 if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
2906 add_pointsto_edge(n_idx, ti);
2907 } else {
2908 add_deferred_edge(n_idx, ti);
2909 }
2910 _processed.set(n_idx);
2911 break;
2912 }
2913 case Op_ConP:
2914 {
2915 assert(false, "Op_ConP");
2916 break;
2917 }
2918 case Op_ConN:
2919 {
2920 assert(false, "Op_ConN");
2921 break;
2922 }
2923 case Op_CreateEx:
2924 {
2925 assert(false, "Op_CreateEx");
2926 break;
2927 }
2928 case Op_LoadKlass:
2929 case Op_LoadNKlass:
2930 {
2931 assert(false, "Op_LoadKlass");
2932 break;
2933 }
2934 case Op_LoadP:
2935 case Op_LoadN:
2936 {
2937 const Type *t = phase->type(n);
2938 #ifdef ASSERT
2939 if (t->make_ptr() == NULL)
2940 assert(false, "Op_LoadP");
2941 #endif
2942
2943 Node* adr = n->in(MemNode::Address)->uncast();
2944 Node* adr_base;
2945 if (adr->is_AddP()) {
2946 adr_base = get_addp_base(adr);
2947 } else {
2948 adr_base = adr;
2949 }
2950
2951 // For everything "adr_base" could point to, create a deferred edge from
2952 // this node to each field with the same offset.
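// For example, for a load v = p.f this node gets a deferred edge to
// the field at offset_of_f of every object "adr_base" may point to.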
2953 int offset = address_offset(adr, phase);
2954 for( VectorSetI i(PointsTo(adr_base)); i.test(); ++i ) {
2955 uint pt = i.elem;
2956 if (adr->is_AddP()) {
2957 // Add field edge if it is missing.
2958 add_field_edge(pt, adr->_idx, offset);
2959 }
2960 add_deferred_edge_to_fields(n_idx, pt, offset);
2961 }
2962 break;
2963 }
2964 case Op_Parm:
2965 {
2966 assert(false, "Op_Parm");
2967 break;
2968 }
2969 case Op_PartialSubtypeCheck:
2970 {
2971 assert(false, "Op_PartialSubtypeCheck");
2972 break;
2973 }
2974 case Op_Phi:
2975 {
2976 #ifdef ASSERT
2977 const Type *t = n->as_Phi()->type();
2978 if (t->make_ptr() == NULL)
2979 assert(false, "Op_Phi");
2980 #endif
2981 for (uint i = 1; i < n->req() ; i++) {
2982 Node* in = n->in(i);
2983 if (in == NULL)
2984 continue; // ignore NULL
2985 in = in->uncast();
2986 if (in->is_top() || in == n)
2987 continue; // ignore top or inputs which go back to this node
2988 int ti = in->_idx;
2989 PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
2990 assert(nt != PointsToNode::UnknownType, "all nodes should be known");
2991 if (nt == PointsToNode::JavaObject) {
2992 add_pointsto_edge(n_idx, ti);
2993 } else {
2994 add_deferred_edge(n_idx, ti);
2995 }
2996 }
2997 _processed.set(n_idx);
2998 break;
2999 }
3000 case Op_Proj:
3001 {
3002 // we are only interested in the oop result projection from a call
3003 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
3004 assert(ptnode_adr(n->in(0)->_idx)->node_type() != PointsToNode::UnknownType,
3005 "all nodes should be registered");
3006 const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
3007 assert(r->cnt() > TypeFunc::Parms, "sanity");
3008 if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
3009 process_call_result(n->as_Proj(), phase);
3010 assert(_processed.test(n_idx), "all call results should be processed");
3011 break;
3012 }
3013 }
3014 assert(false, "Op_Proj");
3015 break;
3016 }
3017 case Op_Return:
3018 {
3019 #ifdef ASSERT
3020 if( n->req() <= TypeFunc::Parms ||
3021 !phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
3022 assert(false, "Op_Return");
3023 }
3024 #endif
3025 int ti = n->in(TypeFunc::Parms)->_idx;
3026 assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "node should be registered");
3027 if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
3028 add_pointsto_edge(n_idx, ti);
3029 } else {
3030 add_deferred_edge(n_idx, ti);
3031 }
3032 _processed.set(n_idx);
3033 break;
3034 }
3035 case Op_StoreP:
3036 case Op_StoreN:
3037 case Op_StorePConditional:
3038 case Op_CompareAndSwapP:
3039 case Op_CompareAndSwapN:
3040 {
3041 Node *adr = n->in(MemNode::Address);
3042 const Type *adr_type = phase->type(adr)->make_ptr();
3043 #ifdef ASSERT
3044 if (!adr_type->isa_oopptr())
3045 assert(phase->type(adr) == TypeRawPtr::NOTNULL, "Op_StoreP");
3046 #endif
3047
3048 assert(adr->is_AddP(), "expecting an AddP");
3049 Node *adr_base = get_addp_base(adr);
3050 Node *val = n->in(MemNode::ValueIn)->uncast();
3051 int offset = address_offset(adr, phase);
3052 // For everything "adr_base" could point to, create a deferred edge
3053 // to "val" from each field with the same offset.
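// For example, for a store p.f = q the field at offset_of_f of every
// object "adr_base" may point to gets an edge to "val".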
3054 for( VectorSetI i(PointsTo(adr_base)); i.test(); ++i ) {
3055 uint pt = i.elem;
3056 // Add field edge if it is missing.
3057 add_field_edge(pt, adr->_idx, offset);
3058 add_edge_from_fields(pt, val->_idx, offset);
3059 }
3060 break;
3061 }
3062 case Op_AryEq:
3063 case Op_StrComp:
3064 case Op_StrEquals:
3065 case Op_StrIndexOf:
3066 {
3067 // char[] arrays passed to string intrinsics do not escape but
3068 // they are not scalar replaceable. Adjust the escape state for them.
3069 // Start from the in(2) edge since in(1) is the memory edge.
3070 for (uint i = 2; i < n->req(); i++) {
3071 Node* adr = n->in(i)->uncast();
3072 const Type *at = phase->type(adr);
3073 if (!adr->is_top() && at->isa_ptr()) {
3074 assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
3075 at->isa_ptr() != NULL, "expecting a Ptr");
3076 if (adr->is_AddP()) {
3077 adr = get_addp_base(adr);
3078 }
3079 // Mark as ArgEscape everything "adr" could point to.
3080 set_escape_state(adr->_idx, PointsToNode::ArgEscape);
3081 }
3082 }
3083 _processed.set(n_idx);
3084 break;
3085 }
3086 case Op_ThreadLocal:
3087 {
3088 assert(false, "Op_ThreadLocal");
3089 break;
3090 }
3091 default:
3092 // This method should be called only for EA-specific nodes.
3093 ShouldNotReachHere();
3094 }
3095 }
3096
3097 #ifndef PRODUCT
3098 void ConnectionGraph::dump() {
3099   bool first = true;
3100
3101   uint size = nodes_size();
3102   for (uint ni = 0; ni < size; ni++) {
3103     PointsToNode *ptn = ptnode_adr(ni);
3104     PointsToNode::NodeType ptn_type = ptn->node_type();
3105
3106     if (ptn_type != PointsToNode::JavaObject || ptn->_node == NULL)
3107       continue;
3108     PointsToNode::EscapeState es = escape_state(ptn->_node);
3109     if (ptn->_node->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
3110       if (first) {
3111         tty->cr();
3112         tty->print("======== Connection graph for ");
3113         _compile->method()->print_short_name();
3114         tty->cr();
3115         first = false;
3116       }
3117       tty->print("%6d ", ni);
3118       ptn->dump();
3119       // Print all locals which reference this allocation
3120       for (uint li = ni; li < size; li++) {
3121         PointsToNode *ptn_loc = ptnode_adr(li);
3122         PointsToNode::NodeType ptn_loc_type = ptn_loc->node_type();
3123         if (ptn_loc_type == PointsToNode::LocalVar && ptn_loc->_node != NULL &&
3124             ptn_loc->edge_count() == 1 && ptn_loc->edge_target(0) == ni) {
3125           ptnode_adr(li)->dump(false);
3126         }
3127       }
3128       if (Verbose) {
3129         // Print all fields which reference this allocation
3130         for (uint i = 0; i < ptn->edge_count(); i++) {
3131           uint ei = ptn->edge_target(i);
3132           ptnode_adr(ei)->dump(false);
3133         }
3134       }
3135       tty->cr();
3136     }
3137   }

3087   bool first = true;
3088   int ptnodes_length = ptnodes_worklist.length();
3089   for (int i = 0; i < ptnodes_length; i++) {
3090     PointsToNode *ptn = ptnodes_worklist.at(i);
3091     if (ptn == NULL || !ptn->is_JavaObject())
3092       continue;
3093     PointsToNode::EscapeState es = ptn->escape_state();
3094     if (ptn->ideal_node()->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
3095       if (first) {
3096         tty->cr();
3097         tty->print("======== Connection graph for ");
3098         _compile->method()->print_short_name();
3099         tty->cr();
3100         first = false;
3101       }
3102       ptn->dump();
3103       // Print all locals and fields which reference this allocation
3104       for (UseIterator j(ptn); j.has_next(); j.next()) {
3105         PointsToNode* use = j.get();
3106         if (use->is_LocalVar()) {
3107           use->dump(Verbose);
3108         } else if (Verbose) {
3109           use->dump();
3110         }
3111       }
3112       tty->cr();
3113     }
3114   }
