Tue, 02 Sep 2014 12:48:45 -0700
8055494: Add C2 x86 intrinsic for BigInteger::multiplyToLen() method
Summary: Add new C2 intrinsic for BigInteger::multiplyToLen() on x86 in 64-bit VM.
Reviewed-by: roland
1 /*
2 * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "ci/bcEscapeAnalyzer.hpp"
27 #include "compiler/compileLog.hpp"
28 #include "libadt/vectset.hpp"
29 #include "memory/allocation.hpp"
30 #include "opto/c2compiler.hpp"
31 #include "opto/callnode.hpp"
32 #include "opto/cfgnode.hpp"
33 #include "opto/compile.hpp"
34 #include "opto/escape.hpp"
35 #include "opto/phaseX.hpp"
36 #include "opto/rootnode.hpp"
38 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
39 _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
40 _collecting(true),
41 _verify(false),
42 _compile(C),
43 _igvn(igvn),
44 _node_map(C->comp_arena()) {
45 // Add unknown java object.
46 add_java_object(C->top(), PointsToNode::GlobalEscape);
47 phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
48 // Add ConP(#NULL) and ConN(#NULL) nodes.
49 Node* oop_null = igvn->zerocon(T_OBJECT);
50 assert(oop_null->_idx < nodes_size(), "should be created already");
51 add_java_object(oop_null, PointsToNode::NoEscape);
52 null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
53 if (UseCompressedOops) {
54 Node* noop_null = igvn->zerocon(T_NARROWOOP);
55 assert(noop_null->_idx < nodes_size(), "should be created already");
56 map_ideal_node(noop_null, null_obj);
57 }
58 _pcmp_neq = NULL; // Should be initialized
59 _pcmp_eq = NULL;
60 }
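// Note: phantom_obj is backed by the ideal top() node, so every query about
// an unknown object maps to a single GlobalEscape JavaObject; null_obj plays
// the same role for ConP(#NULL), and with compressed oops ConN(#NULL) is
// mapped onto the same node.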
62 bool ConnectionGraph::has_candidates(Compile *C) {
63 // EA brings benefits only when the code has allocations and/or locks which
64 // are represented by ideal Macro nodes.
65 int cnt = C->macro_count();
66 for (int i = 0; i < cnt; i++) {
67 Node *n = C->macro_node(i);
68 if (n->is_Allocate())
69 return true;
70 if (n->is_Lock()) {
71 Node* obj = n->as_Lock()->obj_node()->uncast();
72 if (!(obj->is_Parm() || obj->is_Con()))
73 return true;
74 }
75 if (n->is_CallStaticJava() &&
76 n->as_CallStaticJava()->is_boxing_method()) {
77 return true;
78 }
79 }
80 return false;
81 }
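// Illustration (a hedged sketch, in Java terms, of what the scan above
// accepts): locks on parameters or constants are not candidates because such
// objects are known to come from outside the compiled method, so EA cannot
// prove them thread-local:
//
//   synchronized (this) { ... }   // obj is a Parm            -> not a candidate
//   Object o = new Object();      // Allocate                 -> candidate
//   synchronized (o) { ... }      // lock on local allocation -> candidate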
83 void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
84 Compile::TracePhase t2("escapeAnalysis", &Phase::_t_escapeAnalysis, true);
85 ResourceMark rm;
87 // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
88 // to create space for them in ConnectionGraph::_nodes[].
89 Node* oop_null = igvn->zerocon(T_OBJECT);
90 Node* noop_null = igvn->zerocon(T_NARROWOOP);
91 ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
92 // Perform escape analysis
93 if (congraph->compute_escape()) {
94 // There are non-escaping objects.
95 C->set_congraph(congraph);
96 }
97 // Cleanup.
98 if (oop_null->outcnt() == 0)
99 igvn->hash_delete(oop_null);
100 if (noop_null->outcnt() == 0)
101 igvn->hash_delete(noop_null);
102 }
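// A minimal sketch of the expected call site (an assumption mirroring the
// usual driver in Compile::Optimize, not part of this change): run the phase
// only when candidates exist.
//
//   if (DoEscapeAnalysis && ConnectionGraph::has_candidates(C)) {
//     ConnectionGraph::do_analysis(C, igvn); // sets C->congraph() on success
//   }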
104 bool ConnectionGraph::compute_escape() {
105 Compile* C = _compile;
106 PhaseGVN* igvn = _igvn;
108 // Worklists used by EA.
109 Unique_Node_List delayed_worklist;
110 GrowableArray<Node*> alloc_worklist;
111 GrowableArray<Node*> ptr_cmp_worklist;
112 GrowableArray<Node*> storestore_worklist;
113 GrowableArray<PointsToNode*> ptnodes_worklist;
114 GrowableArray<JavaObjectNode*> java_objects_worklist;
115 GrowableArray<JavaObjectNode*> non_escaped_worklist;
116 GrowableArray<FieldNode*> oop_fields_worklist;
117 DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
119 { Compile::TracePhase t3("connectionGraph", &Phase::_t_connectionGraph, true);
121 // 1. Populate Connection Graph (CG) with PointsTo nodes.
122 ideal_nodes.map(C->live_nodes(), NULL); // preallocate space
123 // Initialize worklist
124 if (C->root() != NULL) {
125 ideal_nodes.push(C->root());
126 }
127 for( uint next = 0; next < ideal_nodes.size(); ++next ) {
128 Node* n = ideal_nodes.at(next);
129 // Create PointsTo nodes and add them to Connection Graph. Called
130 // only once per ideal node since ideal_nodes is a Unique_Node_List.
131 add_node_to_connection_graph(n, &delayed_worklist);
132 PointsToNode* ptn = ptnode_adr(n->_idx);
133 if (ptn != NULL) {
134 ptnodes_worklist.append(ptn);
135 if (ptn->is_JavaObject()) {
136 java_objects_worklist.append(ptn->as_JavaObject());
137 if ((n->is_Allocate() || n->is_CallStaticJava()) &&
138 (ptn->escape_state() < PointsToNode::GlobalEscape)) {
139 // Only the results of allocations and java static calls are interesting.
140 non_escaped_worklist.append(ptn->as_JavaObject());
141 }
142 } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
143 oop_fields_worklist.append(ptn->as_Field());
144 }
145 }
146 if (n->is_MergeMem()) {
147 // Collect all MergeMem nodes to add memory slices for
148 // scalar replaceable objects in split_unique_types().
149 _mergemem_worklist.append(n->as_MergeMem());
150 } else if (OptimizePtrCompare && n->is_Cmp() &&
151 (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
152 // Collect pointer compare nodes.
153 ptr_cmp_worklist.append(n);
154 } else if (n->is_MemBarStoreStore()) {
155 // Collect all MemBarStoreStore nodes so that depending on the
156 // escape status of the associated Allocate node some of them
157 // may be eliminated.
158 storestore_worklist.append(n);
159 } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
160 (n->req() > MemBarNode::Precedent)) {
161 record_for_optimizer(n);
162 #ifdef ASSERT
163 } else if (n->is_AddP()) {
164 // Collect address nodes for graph verification.
165 addp_worklist.append(n);
166 #endif
167 }
168 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
169 Node* m = n->fast_out(i); // Get user
170 ideal_nodes.push(m);
171 }
172 }
173 if (non_escaped_worklist.length() == 0) {
174 _collecting = false;
175 return false; // Nothing to do.
176 }
177 // Add final simple edges to graph.
178 while(delayed_worklist.size() > 0) {
179 Node* n = delayed_worklist.pop();
180 add_final_edges(n);
181 }
182 int ptnodes_length = ptnodes_worklist.length();
184 #ifdef ASSERT
185 if (VerifyConnectionGraph) {
186 // Verify that no new simple edges could be created and all
187 // local vars have edges.
188 _verify = true;
189 for (int next = 0; next < ptnodes_length; ++next) {
190 PointsToNode* ptn = ptnodes_worklist.at(next);
191 add_final_edges(ptn->ideal_node());
192 if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
193 ptn->dump();
194 assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
195 }
196 }
197 _verify = false;
198 }
199 #endif
201 // 2. Finish Graph construction by propagating references to all
202 // java objects through graph.
203 if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,
204 java_objects_worklist, oop_fields_worklist)) {
205 // All objects escaped or hit time or iterations limits.
206 _collecting = false;
207 return false;
208 }
210 // 3. Adjust scalar_replaceable state of non-escaping objects and push
211 // scalar replaceable allocations on alloc_worklist for processing
212 // in split_unique_types().
213 int non_escaped_length = non_escaped_worklist.length();
214 for (int next = 0; next < non_escaped_length; next++) {
215 JavaObjectNode* ptn = non_escaped_worklist.at(next);
216 bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
217 Node* n = ptn->ideal_node();
218 if (n->is_Allocate()) {
219 n->as_Allocate()->_is_non_escaping = noescape;
220 }
221 if (n->is_CallStaticJava()) {
222 n->as_CallStaticJava()->_is_non_escaping = noescape;
223 }
224 if (noescape && ptn->scalar_replaceable()) {
225 adjust_scalar_replaceable_state(ptn);
226 if (ptn->scalar_replaceable()) {
227 alloc_worklist.append(ptn->ideal_node());
228 }
229 }
230 }
232 #ifdef ASSERT
233 if (VerifyConnectionGraph) {
234 // Verify that graph is complete - no new edges could be added or needed.
235 verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
236 java_objects_worklist, addp_worklist);
237 }
238 assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
239 assert(null_obj->escape_state() == PointsToNode::NoEscape &&
240 null_obj->edge_count() == 0 &&
241 !null_obj->arraycopy_src() &&
242 !null_obj->arraycopy_dst(), "sanity");
243 #endif
245 _collecting = false;
247 } // TracePhase t3("connectionGraph")
249 // 4. Optimize ideal graph based on EA information.
250 bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
251 if (has_non_escaping_obj) {
252 optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
253 }
255 #ifndef PRODUCT
256 if (PrintEscapeAnalysis) {
257 dump(ptnodes_worklist); // Dump ConnectionGraph
258 }
259 #endif
261 bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
262 #ifdef ASSERT
263 if (VerifyConnectionGraph) {
264 int alloc_length = alloc_worklist.length();
265 for (int next = 0; next < alloc_length; ++next) {
266 Node* n = alloc_worklist.at(next);
267 PointsToNode* ptn = ptnode_adr(n->_idx);
268 assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
269 }
270 }
271 #endif
273 // 5. Separate memory graph for scalar replaceable allocations.
274 if (has_scalar_replaceable_candidates &&
275 C->AliasLevel() >= 3 && EliminateAllocations) {
276 // Now use the escape information to create unique types for
277 // scalar replaceable objects.
278 split_unique_types(alloc_worklist);
279 if (C->failing()) return false;
280 C->print_method(PHASE_AFTER_EA, 2);
282 #ifdef ASSERT
283 } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
284 tty->print("=== No allocations eliminated for ");
285 C->method()->print_short_name();
286 if(!EliminateAllocations) {
287 tty->print(" since EliminateAllocations is off ===");
288 } else if(!has_scalar_replaceable_candidates) {
289 tty->print(" since there are no scalar replaceable candidates ===");
290 } else if(C->AliasLevel() < 3) {
291 tty->print(" since AliasLevel < 3 ===");
292 }
293 tty->cr();
294 #endif
295 }
296 return has_non_escaping_obj;
297 }
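// Stand-alone sketch (invented, simplified types; assumes <vector> and <set>)
// of the step-1 traversal above: a unique worklist seeded with the root
// visits every reachable ideal node exactly once by pushing all users of each
// processed node.
//
//   struct N { std::vector<N*> outs; };
//
//   static size_t visit_all(N* root) {
//     std::vector<N*> worklist(1, root);
//     std::set<N*>    seen;             // plays the role of Unique_Node_List
//     seen.insert(root);
//     size_t visited = 0;
//     for (size_t next = 0; next < worklist.size(); ++next) {
//       N* n = worklist[next];
//       ++visited;                      // add_node_to_connection_graph(n, ...)
//       for (size_t i = 0; i < n->outs.size(); ++i) {
//         if (seen.insert(n->outs[i]).second) {
//           worklist.push_back(n->outs[i]);
//         }
//       }
//     }
//     return visited;
//   }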
299 // Utility function for nodes that load an object
300 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
301 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
302 // ThreadLocal has RawPtr type.
303 const Type* t = _igvn->type(n);
304 if (t->make_ptr() != NULL) {
305 Node* adr = n->in(MemNode::Address);
306 #ifdef ASSERT
307 if (!adr->is_AddP()) {
308 assert(_igvn->type(adr)->isa_rawptr(), "sanity");
309 } else {
310 assert((ptnode_adr(adr->_idx) == NULL ||
311 ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
312 }
313 #endif
314 add_local_var_and_edge(n, PointsToNode::NoEscape,
315 adr, delayed_worklist);
316 }
317 }
319 // Populate Connection Graph with PointsTo nodes and create simple
320 // connection graph edges.
321 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
322 assert(!_verify, "this method should not be called for verification");
323 PhaseGVN* igvn = _igvn;
324 uint n_idx = n->_idx;
325 PointsToNode* n_ptn = ptnode_adr(n_idx);
326 if (n_ptn != NULL)
327 return; // No need to redefine PointsTo node during first iteration.
329 if (n->is_Call()) {
330 // Arguments to allocation and locking don't escape.
331 if (n->is_AbstractLock()) {
332 // Put Lock and Unlock nodes on IGVN worklist to process them during
333 // first IGVN optimization when escape information is still available.
334 record_for_optimizer(n);
335 } else if (n->is_Allocate()) {
336 add_call_node(n->as_Call());
337 record_for_optimizer(n);
338 } else {
339 if (n->is_CallStaticJava()) {
340 const char* name = n->as_CallStaticJava()->_name;
341 if (name != NULL && strcmp(name, "uncommon_trap") == 0)
342 return; // Skip uncommon traps
343 }
344 // Don't mark as processed since call's arguments have to be processed.
345 delayed_worklist->push(n);
346 // Check if a call returns an object.
347 if ((n->as_Call()->returns_pointer() &&
348 n->as_Call()->proj_out(TypeFunc::Parms) != NULL) ||
349 (n->is_CallStaticJava() &&
350 n->as_CallStaticJava()->is_boxing_method())) {
351 add_call_node(n->as_Call());
352 }
353 }
354 return;
355 }
356 // Put this check here to process call arguments since some call nodes
357 // point to phantom_obj.
358 if (n_ptn == phantom_obj || n_ptn == null_obj)
359 return; // Skip predefined nodes.
361 int opcode = n->Opcode();
362 switch (opcode) {
363 case Op_AddP: {
364 Node* base = get_addp_base(n);
365 PointsToNode* ptn_base = ptnode_adr(base->_idx);
366 // Field nodes are created for all field types. They are used in
367 // adjust_scalar_replaceable_state() and split_unique_types().
368 // Note, non-oop fields will have only base edges in Connection
369 // Graph because such fields are not used for oop loads and stores.
370 int offset = address_offset(n, igvn);
371 add_field(n, PointsToNode::NoEscape, offset);
372 if (ptn_base == NULL) {
373 delayed_worklist->push(n); // Process it later.
374 } else {
375 n_ptn = ptnode_adr(n_idx);
376 add_base(n_ptn->as_Field(), ptn_base);
377 }
378 break;
379 }
380 case Op_CastX2P: {
381 map_ideal_node(n, phantom_obj);
382 break;
383 }
384 case Op_CastPP:
385 case Op_CheckCastPP:
386 case Op_EncodeP:
387 case Op_DecodeN:
388 case Op_EncodePKlass:
389 case Op_DecodeNKlass: {
390 add_local_var_and_edge(n, PointsToNode::NoEscape,
391 n->in(1), delayed_worklist);
392 break;
393 }
394 case Op_CMoveP: {
395 add_local_var(n, PointsToNode::NoEscape);
396 // Do not add edges during the first iteration because some nodes
397 // may not be defined yet.
398 delayed_worklist->push(n);
399 break;
400 }
401 case Op_ConP:
402 case Op_ConN:
403 case Op_ConNKlass: {
404 // assume all oop constants globally escape except for null
405 PointsToNode::EscapeState es;
406 const Type* t = igvn->type(n);
407 if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
408 es = PointsToNode::NoEscape;
409 } else {
410 es = PointsToNode::GlobalEscape;
411 }
412 add_java_object(n, es);
413 break;
414 }
415 case Op_CreateEx: {
416 // assume that all exception objects globally escape
417 add_java_object(n, PointsToNode::GlobalEscape);
418 break;
419 }
420 case Op_LoadKlass:
421 case Op_LoadNKlass: {
422 // Unknown class is loaded
423 map_ideal_node(n, phantom_obj);
424 break;
425 }
426 case Op_LoadP:
427 case Op_LoadN:
428 case Op_LoadPLocked: {
429 add_objload_to_connection_graph(n, delayed_worklist);
430 break;
431 }
432 case Op_Parm: {
433 map_ideal_node(n, phantom_obj);
434 break;
435 }
436 case Op_PartialSubtypeCheck: {
437 // Produces Null or notNull and is used only in CmpP, so
438 // phantom_obj could be used.
439 map_ideal_node(n, phantom_obj); // Result is unknown
440 break;
441 }
442 case Op_Phi: {
443 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
444 // ThreadLocal has RawPtr type.
445 const Type* t = n->as_Phi()->type();
446 if (t->make_ptr() != NULL) {
447 add_local_var(n, PointsToNode::NoEscape);
448 // Do not add edges during the first iteration because some nodes
449 // may not be defined yet.
450 delayed_worklist->push(n);
451 }
452 break;
453 }
454 case Op_Proj: {
455 // we are only interested in the oop result projection from a call
456 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
457 n->in(0)->as_Call()->returns_pointer()) {
458 add_local_var_and_edge(n, PointsToNode::NoEscape,
459 n->in(0), delayed_worklist);
460 }
461 break;
462 }
463 case Op_Rethrow: // Exception object escapes
464 case Op_Return: {
465 if (n->req() > TypeFunc::Parms &&
466 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
467 // Treat Return value as LocalVar with GlobalEscape escape state.
468 add_local_var_and_edge(n, PointsToNode::GlobalEscape,
469 n->in(TypeFunc::Parms), delayed_worklist);
470 }
471 break;
472 }
473 case Op_GetAndSetP:
474 case Op_GetAndSetN: {
475 add_objload_to_connection_graph(n, delayed_worklist);
476 // fallthrough
477 }
478 case Op_StoreP:
479 case Op_StoreN:
480 case Op_StoreNKlass:
481 case Op_StorePConditional:
482 case Op_CompareAndSwapP:
483 case Op_CompareAndSwapN: {
484 Node* adr = n->in(MemNode::Address);
485 const Type *adr_type = igvn->type(adr);
486 adr_type = adr_type->make_ptr();
487 if (adr_type == NULL) {
488 break; // skip dead nodes
489 }
490 if (adr_type->isa_oopptr() ||
491 ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) &&
492 (adr_type == TypeRawPtr::NOTNULL &&
493 adr->in(AddPNode::Address)->is_Proj() &&
494 adr->in(AddPNode::Address)->in(0)->is_Allocate()))) {
495 delayed_worklist->push(n); // Process it later.
496 #ifdef ASSERT
497 assert(adr->is_AddP(), "expecting an AddP");
498 if (adr_type == TypeRawPtr::NOTNULL) {
499 // Verify a raw address for a store captured by Initialize node.
500 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
501 assert(offs != Type::OffsetBot, "offset must be a constant");
502 }
503 #endif
504 } else {
505 // Ignore copying the displaced header to the BoxNode (OSR compilation).
506 if (adr->is_BoxLock())
507 break;
508 // Stored value escapes in unsafe access.
509 if ((opcode == Op_StoreP) && (adr_type == TypeRawPtr::BOTTOM)) {
510 // Pointer stores in G1 barriers look like unsafe accesses.
511 // Ignore such stores to be able to scalar replace non-escaping
512 // allocations.
513 if (UseG1GC && adr->is_AddP()) {
514 Node* base = get_addp_base(adr);
515 if (base->Opcode() == Op_LoadP &&
516 base->in(MemNode::Address)->is_AddP()) {
517 adr = base->in(MemNode::Address);
518 Node* tls = get_addp_base(adr);
519 if (tls->Opcode() == Op_ThreadLocal) {
520 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
521 if (offs == in_bytes(JavaThread::satb_mark_queue_offset() +
522 PtrQueue::byte_offset_of_buf())) {
523 break; // G1 pre barrier previous oop value store.
524 }
525 if (offs == in_bytes(JavaThread::dirty_card_queue_offset() +
526 PtrQueue::byte_offset_of_buf())) {
527 break; // G1 post barrier card address store.
528 }
529 }
530 }
531 }
532 delayed_worklist->push(n); // Process unsafe access later.
533 break;
534 }
535 #ifdef ASSERT
536 n->dump(1);
537 assert(false, "not unsafe or G1 barrier raw StoreP");
538 #endif
539 }
540 break;
541 }
542 case Op_AryEq:
543 case Op_StrComp:
544 case Op_StrEquals:
545 case Op_StrIndexOf:
546 case Op_EncodeISOArray: {
547 add_local_var(n, PointsToNode::ArgEscape);
548 delayed_worklist->push(n); // Process it later.
549 break;
550 }
551 case Op_ThreadLocal: {
552 add_java_object(n, PointsToNode::ArgEscape);
553 break;
554 }
555 default:
556 ; // Do nothing for nodes not related to EA.
557 }
558 return;
559 }
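// Note: this is the first of two passes over the ideal graph. Nodes whose
// input edges may reference not-yet-registered nodes (CMoveP, Phi, stores,
// non-allocation calls, ...) are parked on delayed_worklist here and receive
// their edges later in add_final_edges(), once every ideal node has its
// PointsTo node.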
561 #ifdef ASSERT
562 #define ELSE_FAIL(name) \
563 /* Should not be called for non-pointer types. */ \
564 n->dump(1); \
565 assert(false, name); \
566 break;
567 #else
568 #define ELSE_FAIL(name) \
569 break;
570 #endif
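// For reference, in a debug (ASSERT) build a use such as
//
//   ELSE_FAIL("Op_LoadP");
//
// expands to
//
//   n->dump(1);
//   assert(false, "Op_LoadP");
//   break;
//
// while in a product build it is a plain "break;".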
572 // Add final simple edges to graph.
573 void ConnectionGraph::add_final_edges(Node *n) {
574 PointsToNode* n_ptn = ptnode_adr(n->_idx);
575 #ifdef ASSERT
576 if (_verify && n_ptn->is_JavaObject())
577 return; // This method does not change graph for JavaObject.
578 #endif
580 if (n->is_Call()) {
581 process_call_arguments(n->as_Call());
582 return;
583 }
584 assert(n->is_Store() || n->is_LoadStore() ||
585 ((n_ptn != NULL) && (n_ptn->ideal_node() != NULL)),
586 "node should be registered already");
587 int opcode = n->Opcode();
588 switch (opcode) {
589 case Op_AddP: {
590 Node* base = get_addp_base(n);
591 PointsToNode* ptn_base = ptnode_adr(base->_idx);
592 assert(ptn_base != NULL, "field's base should be registered");
593 add_base(n_ptn->as_Field(), ptn_base);
594 break;
595 }
596 case Op_CastPP:
597 case Op_CheckCastPP:
598 case Op_EncodeP:
599 case Op_DecodeN:
600 case Op_EncodePKlass:
601 case Op_DecodeNKlass: {
602 add_local_var_and_edge(n, PointsToNode::NoEscape,
603 n->in(1), NULL);
604 break;
605 }
606 case Op_CMoveP: {
607 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
608 Node* in = n->in(i);
609 if (in == NULL)
610 continue; // ignore NULL
611 Node* uncast_in = in->uncast();
612 if (uncast_in->is_top() || uncast_in == n)
613 continue; // ignore top or inputs which go back to this node
614 PointsToNode* ptn = ptnode_adr(in->_idx);
615 assert(ptn != NULL, "node should be registered");
616 add_edge(n_ptn, ptn);
617 }
618 break;
619 }
620 case Op_LoadP:
621 case Op_LoadN:
622 case Op_LoadPLocked: {
623 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
624 // ThreadLocal has RawPtr type.
625 const Type* t = _igvn->type(n);
626 if (t->make_ptr() != NULL) {
627 Node* adr = n->in(MemNode::Address);
628 add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
629 break;
630 }
631 ELSE_FAIL("Op_LoadP");
632 }
633 case Op_Phi: {
634 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
635 // ThreadLocal has RawPtr type.
636 const Type* t = n->as_Phi()->type();
637 if (t->make_ptr() != NULL) {
638 for (uint i = 1; i < n->req(); i++) {
639 Node* in = n->in(i);
640 if (in == NULL)
641 continue; // ignore NULL
642 Node* uncast_in = in->uncast();
643 if (uncast_in->is_top() || uncast_in == n)
644 continue; // ignore top or inputs which go back to this node
645 PointsToNode* ptn = ptnode_adr(in->_idx);
646 assert(ptn != NULL, "node should be registered");
647 add_edge(n_ptn, ptn);
648 }
649 break;
650 }
651 ELSE_FAIL("Op_Phi");
652 }
653 case Op_Proj: {
654 // we are only interested in the oop result projection from a call
655 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
656 n->in(0)->as_Call()->returns_pointer()) {
657 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
658 break;
659 }
660 ELSE_FAIL("Op_Proj");
661 }
662 case Op_Rethrow: // Exception object escapes
663 case Op_Return: {
664 if (n->req() > TypeFunc::Parms &&
665 _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
666 // Treat Return value as LocalVar with GlobalEscape escape state.
667 add_local_var_and_edge(n, PointsToNode::GlobalEscape,
668 n->in(TypeFunc::Parms), NULL);
669 break;
670 }
671 ELSE_FAIL("Op_Return");
672 }
673 case Op_StoreP:
674 case Op_StoreN:
675 case Op_StoreNKlass:
676 case Op_StorePConditional:
677 case Op_CompareAndSwapP:
678 case Op_CompareAndSwapN:
679 case Op_GetAndSetP:
680 case Op_GetAndSetN: {
681 Node* adr = n->in(MemNode::Address);
682 const Type *adr_type = _igvn->type(adr);
683 adr_type = adr_type->make_ptr();
684 #ifdef ASSERT
685 if (adr_type == NULL) {
686 n->dump(1);
687 assert(adr_type != NULL, "dead node should not be on list");
688 break;
689 }
690 #endif
691 if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN) {
692 add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
693 }
694 if (adr_type->isa_oopptr() ||
695 ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) &&
696 (adr_type == TypeRawPtr::NOTNULL &&
697 adr->in(AddPNode::Address)->is_Proj() &&
698 adr->in(AddPNode::Address)->in(0)->is_Allocate()))) {
699 // Point Address to Value
700 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
701 assert(adr_ptn != NULL &&
702 adr_ptn->as_Field()->is_oop(), "node should be registered");
703 Node *val = n->in(MemNode::ValueIn);
704 PointsToNode* ptn = ptnode_adr(val->_idx);
705 assert(ptn != NULL, "node should be registered");
706 add_edge(adr_ptn, ptn);
707 break;
708 } else if ((opcode == Op_StoreP) && (adr_type == TypeRawPtr::BOTTOM)) {
709 // Stored value escapes in unsafe access.
710 Node *val = n->in(MemNode::ValueIn);
711 PointsToNode* ptn = ptnode_adr(val->_idx);
712 assert(ptn != NULL, "node should be registered");
713 set_escape_state(ptn, PointsToNode::GlobalEscape);
714 // Add edge to object for unsafe access with offset.
715 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
716 assert(adr_ptn != NULL, "node should be registered");
717 if (adr_ptn->is_Field()) {
718 assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
719 add_edge(adr_ptn, ptn);
720 }
721 break;
722 }
723 ELSE_FAIL("Op_StoreP");
724 }
725 case Op_AryEq:
726 case Op_StrComp:
727 case Op_StrEquals:
728 case Op_StrIndexOf:
729 case Op_EncodeISOArray: {
730 // char[] arrays passed to string intrinsics do not escape, but
731 // they are not scalar replaceable. Adjust escape state for them.
732 // Start from in(2) edge since in(1) is memory edge.
733 for (uint i = 2; i < n->req(); i++) {
734 Node* adr = n->in(i);
735 const Type* at = _igvn->type(adr);
736 if (!adr->is_top() && at->isa_ptr()) {
737 assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
738 at->isa_ptr() != NULL, "expecting a pointer");
739 if (adr->is_AddP()) {
740 adr = get_addp_base(adr);
741 }
742 PointsToNode* ptn = ptnode_adr(adr->_idx);
743 assert(ptn != NULL, "node should be registered");
744 add_edge(n_ptn, ptn);
745 }
746 }
747 break;
748 }
749 default: {
750 // This method should be called only for EA specific nodes which may
751 // miss some edges when they were created.
752 #ifdef ASSERT
753 n->dump(1);
754 #endif
755 guarantee(false, "unknown node");
756 }
757 }
758 return;
759 }
761 void ConnectionGraph::add_call_node(CallNode* call) {
762 assert(call->returns_pointer(), "only for call which returns pointer");
763 uint call_idx = call->_idx;
764 if (call->is_Allocate()) {
765 Node* k = call->in(AllocateNode::KlassNode);
766 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
767 assert(kt != NULL, "TypeKlassPtr required.");
768 ciKlass* cik = kt->klass();
769 PointsToNode::EscapeState es = PointsToNode::NoEscape;
770 bool scalar_replaceable = true;
771 if (call->is_AllocateArray()) {
772 if (!cik->is_array_klass()) { // StressReflectiveCode
773 es = PointsToNode::GlobalEscape;
774 } else {
775 int length = call->in(AllocateNode::ALength)->find_int_con(-1);
776 if (length < 0 || length > EliminateAllocationArraySizeLimit) {
777 // Not scalar replaceable if the length is not constant or too big.
778 scalar_replaceable = false;
779 }
780 }
781 } else { // Allocate instance
782 if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
783 cik->is_subclass_of(_compile->env()->Reference_klass()) ||
784 !cik->is_instance_klass() || // StressReflectiveCode
785 cik->as_instance_klass()->has_finalizer()) {
786 es = PointsToNode::GlobalEscape;
787 }
788 }
789 add_java_object(call, es);
790 PointsToNode* ptn = ptnode_adr(call_idx);
791 if (!scalar_replaceable && ptn->scalar_replaceable()) {
792 ptn->set_scalar_replaceable(false);
793 }
794 } else if (call->is_CallStaticJava()) {
795 // Call nodes could be different types:
796 //
797 // 1. CallDynamicJavaNode (what happened during call is unknown):
798 //
799 // - mapped to GlobalEscape JavaObject node if oop is returned;
800 //
801 // - all oop arguments are escaping globally;
802 //
803 // 2. CallStaticJavaNode (execute bytecode analysis if possible):
804 //
805 // - the same as CallDynamicJavaNode if we can't do bytecode analysis;
806 //
807 // - mapped to GlobalEscape JavaObject node if unknown oop is returned;
808 // - mapped to NoEscape JavaObject node if non-escaping object allocated
809 // during call is returned;
810 // - mapped to ArgEscape LocalVar node pointing to object arguments
811 // which are returned and do not escape during the call;
812 //
813 // - oop arguments escaping status is defined by bytecode analysis;
814 //
815 // For a static call, we know exactly what method is being called.
816 // Use bytecode estimator to record whether the call's return value escapes.
817 ciMethod* meth = call->as_CallJava()->method();
818 if (meth == NULL) {
819 const char* name = call->as_CallStaticJava()->_name;
820 assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
821 // Returns a newly allocated unescaped object.
822 add_java_object(call, PointsToNode::NoEscape);
823 ptnode_adr(call_idx)->set_scalar_replaceable(false);
824 } else if (meth->is_boxing_method()) {
825 // Returns boxing object
826 PointsToNode::EscapeState es;
827 vmIntrinsics::ID intr = meth->intrinsic_id();
828 if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
829 // It does not escape since a new object is always allocated.
830 es = PointsToNode::NoEscape;
831 } else {
832 // It escapes globally since the object could be loaded from a cache.
833 es = PointsToNode::GlobalEscape;
834 }
835 add_java_object(call, es);
836 } else {
837 BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
838 call_analyzer->copy_dependencies(_compile->dependencies());
839 if (call_analyzer->is_return_allocated()) {
840 // Returns a newly allocated unescaped object, simply
841 // update dependency information.
842 // Mark it as NoEscape so that objects referenced by
843 // its fields will be marked as NoEscape at least.
844 add_java_object(call, PointsToNode::NoEscape);
845 ptnode_adr(call_idx)->set_scalar_replaceable(false);
846 } else {
847 // Determine whether any arguments are returned.
848 const TypeTuple* d = call->tf()->domain();
849 bool ret_arg = false;
850 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
851 if (d->field_at(i)->isa_ptr() != NULL &&
852 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
853 ret_arg = true;
854 break;
855 }
856 }
857 if (ret_arg) {
858 add_local_var(call, PointsToNode::ArgEscape);
859 } else {
860 // Returns unknown object.
861 map_ideal_node(call, phantom_obj);
862 }
863 }
864 }
865 } else {
866 // Another type of call; assume the worst case:
867 // returned value is unknown and globally escapes.
868 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
869 map_ideal_node(call, phantom_obj);
870 }
871 }
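// Illustration of the boxing classification above: Integer.valueOf(42) may
// return an object from the shared Integer cache, which is globally
// reachable, hence GlobalEscape; Float.valueOf() and Double.valueOf() have no
// cache and always allocate a fresh object, hence NoEscape.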
873 void ConnectionGraph::process_call_arguments(CallNode *call) {
874 bool is_arraycopy = false;
875 switch (call->Opcode()) {
876 #ifdef ASSERT
877 case Op_Allocate:
878 case Op_AllocateArray:
879 case Op_Lock:
880 case Op_Unlock:
881 assert(false, "should be done already");
882 break;
883 #endif
884 case Op_CallLeafNoFP:
885 is_arraycopy = (call->as_CallLeaf()->_name != NULL &&
886 strstr(call->as_CallLeaf()->_name, "arraycopy") != 0);
887 // fall through
888 case Op_CallLeaf: {
889 // Stub calls: objects do not escape but they are not scalar replaceable.
890 // Adjust escape state for outgoing arguments.
891 const TypeTuple * d = call->tf()->domain();
892 bool src_has_oops = false;
893 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
894 const Type* at = d->field_at(i);
895 Node *arg = call->in(i);
896 const Type *aat = _igvn->type(arg);
897 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
898 continue;
899 if (arg->is_AddP()) {
900 //
901 // The inline_native_clone() case when the arraycopy stub is called
902 // after the allocation before Initialize and CheckCastPP nodes.
903 // Or normal arraycopy for object arrays case.
904 //
905 // Set AddP's base (Allocate) as not scalar replaceable since
906 // pointer to the base (with offset) is passed as argument.
907 //
908 arg = get_addp_base(arg);
909 }
910 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
911 assert(arg_ptn != NULL, "should be registered");
912 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
913 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
914 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
915 aat->isa_ptr() != NULL, "expecting a Ptr");
916 bool arg_has_oops = aat->isa_oopptr() &&
917 (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
918 (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
919 if (i == TypeFunc::Parms) {
920 src_has_oops = arg_has_oops;
921 }
922 //
923 // src or dst could be j.l.Object when other is basic type array:
924 //
925 // arraycopy(char[],0,Object*,0,size);
926 // arraycopy(Object*,0,char[],0,size);
927 //
928 // Don't add edges in such cases.
929 //
930 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
931 arg_has_oops && (i > TypeFunc::Parms);
932 #ifdef ASSERT
933 if (!(is_arraycopy ||
934 (call->as_CallLeaf()->_name != NULL &&
935 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 ||
936 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ||
937 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
938 strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
939 strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
940 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
941 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
942 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
943 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
944 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
945 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
946 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
947 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
948 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0)
949 ))) {
950 call->dump();
951 fatal(err_msg_res("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name));
952 }
953 #endif
954 // Always process arraycopy's destination object since
955 // we need to add all possible edges to references in
956 // source object.
957 if (arg_esc >= PointsToNode::ArgEscape &&
958 !arg_is_arraycopy_dest) {
959 continue;
960 }
961 set_escape_state(arg_ptn, PointsToNode::ArgEscape);
962 if (arg_is_arraycopy_dest) {
963 Node* src = call->in(TypeFunc::Parms);
964 if (src->is_AddP()) {
965 src = get_addp_base(src);
966 }
967 PointsToNode* src_ptn = ptnode_adr(src->_idx);
968 assert(src_ptn != NULL, "should be registered");
969 if (arg_ptn != src_ptn) {
970 // Special arraycopy edge:
971 // A destination object's field can't have the source object
972 // as base since objects escape states are not related.
973 // Only escape state of destination object's fields affects
974 // escape state of fields in source object.
975 add_arraycopy(call, PointsToNode::ArgEscape, src_ptn, arg_ptn);
976 }
977 }
978 }
979 }
980 break;
981 }
982 case Op_CallStaticJava: {
983 // For a static call, we know exactly what method is being called.
984 // Use bytecode estimator to record the call's escape effects
985 #ifdef ASSERT
986 const char* name = call->as_CallStaticJava()->_name;
987 assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
988 #endif
989 ciMethod* meth = call->as_CallJava()->method();
990 if ((meth != NULL) && meth->is_boxing_method()) {
991 break; // Boxing methods do not modify any oops.
992 }
993 BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
994 // fall-through if not a Java method or no analyzer information
995 if (call_analyzer != NULL) {
996 PointsToNode* call_ptn = ptnode_adr(call->_idx);
997 const TypeTuple* d = call->tf()->domain();
998 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
999 const Type* at = d->field_at(i);
1000 int k = i - TypeFunc::Parms;
1001 Node* arg = call->in(i);
1002 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1003 if (at->isa_ptr() != NULL &&
1004 call_analyzer->is_arg_returned(k)) {
1005 // The call returns arguments.
1006 if (call_ptn != NULL) { // Is call's result used?
1007 assert(call_ptn->is_LocalVar(), "node should be registered");
1008 assert(arg_ptn != NULL, "node should be registered");
1009 add_edge(call_ptn, arg_ptn);
1010 }
1011 }
1012 if (at->isa_oopptr() != NULL &&
1013 arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1014 if (!call_analyzer->is_arg_stack(k)) {
1015 // The argument global escapes
1016 set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
1017 } else {
1018 set_escape_state(arg_ptn, PointsToNode::ArgEscape);
1019 if (!call_analyzer->is_arg_local(k)) {
1020 // The argument itself doesn't escape, but any fields might
1021 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
1022 }
1023 }
1024 }
1025 }
1026 if (call_ptn != NULL && call_ptn->is_LocalVar()) {
1027 // The call returns arguments.
1028 assert(call_ptn->edge_count() > 0, "sanity");
1029 if (!call_analyzer->is_return_local()) {
1030 // Also returns an unknown object.
1031 add_edge(call_ptn, phantom_obj);
1032 }
1033 }
1034 break;
1035 }
1036 }
1037 default: {
1038 // Fall-through here if not a Java method or no analyzer information
1039 // or some other type of call, assume the worst case: all arguments
1040 // globally escape.
1041 const TypeTuple* d = call->tf()->domain();
1042 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1043 const Type* at = d->field_at(i);
1044 if (at->isa_oopptr() != NULL) {
1045 Node* arg = call->in(i);
1046 if (arg->is_AddP()) {
1047 arg = get_addp_base(arg);
1048 }
1049 assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
1050 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
1051 }
1052 }
1053 }
1054 }
1055 }
1058 // Finish Graph construction.
1059 bool ConnectionGraph::complete_connection_graph(
1060 GrowableArray<PointsToNode*>& ptnodes_worklist,
1061 GrowableArray<JavaObjectNode*>& non_escaped_worklist,
1062 GrowableArray<JavaObjectNode*>& java_objects_worklist,
1063 GrowableArray<FieldNode*>& oop_fields_worklist) {
1064 // Normally only 1-3 passes needed to build Connection Graph depending
1065 // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
1066 // Set the limit to 20 to catch the situation when something went wrong and
1067 // bail out of Escape Analysis.
1068 // Also limit build time to 30 sec (60 in debug VM).
1069 #define CG_BUILD_ITER_LIMIT 20
1070 #ifdef ASSERT
1071 #define CG_BUILD_TIME_LIMIT 60.0
1072 #else
1073 #define CG_BUILD_TIME_LIMIT 30.0
1074 #endif
1076 // Propagate GlobalEscape and ArgEscape escape states and check that
1077 // we still have non-escaping objects. The method pushes onto _worklist
1078 // Field nodes which reference phantom_object.
1079 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
1080 return false; // Nothing to do.
1081 }
1082 // Now propagate references to all JavaObject nodes.
1083 int java_objects_length = java_objects_worklist.length();
1084 elapsedTimer time;
1085 int new_edges = 1;
1086 int iterations = 0;
1087 do {
1088 while ((new_edges > 0) &&
1089 (iterations++ < CG_BUILD_ITER_LIMIT) &&
1090 (time.seconds() < CG_BUILD_TIME_LIMIT)) {
1091 time.start();
1092 new_edges = 0;
1093 // Propagate references to phantom_object for nodes pushed on _worklist
1094 // by find_non_escaped_objects() and find_field_value().
1095 new_edges += add_java_object_edges(phantom_obj, false);
1096 for (int next = 0; next < java_objects_length; ++next) {
1097 JavaObjectNode* ptn = java_objects_worklist.at(next);
1098 new_edges += add_java_object_edges(ptn, true);
1099 }
1100 if (new_edges > 0) {
1101 // Update escape states on each iteration if graph was updated.
1102 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
1103 return false; // Nothing to do.
1104 }
1105 }
1106 time.stop();
1107 }
1108 if ((iterations < CG_BUILD_ITER_LIMIT) &&
1109 (time.seconds() < CG_BUILD_TIME_LIMIT)) {
1110 time.start();
1111 // Find fields which have unknown value.
1112 int fields_length = oop_fields_worklist.length();
1113 for (int next = 0; next < fields_length; next++) {
1114 FieldNode* field = oop_fields_worklist.at(next);
1115 if (field->edge_count() == 0) {
1116 new_edges += find_field_value(field);
1117 // This code may have added new edges to phantom_object.
1118 // Another cycle is needed to propagate references to phantom_object.
1119 }
1120 }
1121 time.stop();
1122 } else {
1123 new_edges = 0; // Bailout
1124 }
1125 } while (new_edges > 0);
1127 // Bailout if passed limits.
1128 if ((iterations >= CG_BUILD_ITER_LIMIT) ||
1129 (time.seconds() >= CG_BUILD_TIME_LIMIT)) {
1130 Compile* C = _compile;
1131 if (C->log() != NULL) {
1132 C->log()->begin_elem("connectionGraph_bailout reason='reached ");
1133 C->log()->text("%s", (iterations >= CG_BUILD_ITER_LIMIT) ? "iterations" : "time");
1134 C->log()->end_elem(" limit'");
1135 }
1136 assert(ExitEscapeAnalysisOnTimeout, err_msg_res("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
1137 time.seconds(), iterations, nodes_size(), ptnodes_worklist.length()));
1138 // Possible infinite build_connection_graph loop,
1139 // bailout (no changes to ideal graph were made).
1140 return false;
1141 }
1142 #ifdef ASSERT
1143 if (Verbose && PrintEscapeAnalysis) {
1144 tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d",
1145 iterations, nodes_size(), ptnodes_worklist.length());
1146 }
1147 #endif
1149 #undef CG_BUILD_ITER_LIMIT
1150 #undef CG_BUILD_TIME_LIMIT
1152 // Find fields initialized by NULL for non-escaping Allocations.
1153 int non_escaped_length = non_escaped_worklist.length();
1154 for (int next = 0; next < non_escaped_length; next++) {
1155 JavaObjectNode* ptn = non_escaped_worklist.at(next);
1156 PointsToNode::EscapeState es = ptn->escape_state();
1157 assert(es <= PointsToNode::ArgEscape, "sanity");
1158 if (es == PointsToNode::NoEscape) {
1159 if (find_init_values(ptn, null_obj, _igvn) > 0) {
1160 // Adding references to NULL object does not change escape states
1161 // since it does not escape. Also no fields are added to NULL object.
1162 add_java_object_edges(null_obj, false);
1163 }
1164 }
1165 Node* n = ptn->ideal_node();
1166 if (n->is_Allocate()) {
1167 // The object allocated by this Allocate node will never be
1168 // seen by another thread. Mark it so that when it is
1169 // expanded no MemBarStoreStore is added.
1170 InitializeNode* ini = n->as_Allocate()->initialization();
1171 if (ini != NULL)
1172 ini->set_does_not_escape();
1173 }
1174 }
1175 return true; // Finished graph construction.
1176 }
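// Stand-alone sketch (names invented) of the bounded fixed-point loop above:
// keep running passes while they add edges, but give up once the iteration or
// wall-clock budget is exhausted, as CG_BUILD_ITER_LIMIT and
// CG_BUILD_TIME_LIMIT do.
//
//   template <typename Step>
//   static bool run_to_fixed_point(Step step, int iter_limit, double limit_s) {
//     elapsedTimer time;
//     int iterations = 0;
//     int new_edges  = 1;
//     while (new_edges > 0 &&
//            iterations++ < iter_limit &&
//            time.seconds() < limit_s) {
//       time.start();
//       new_edges = step();        // returns number of edges added this pass
//       time.stop();
//     }
//     return (new_edges == 0);     // true iff the graph converged in budget
//   }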
1178 // Propagate GlobalEscape and ArgEscape escape states to all nodes
1179 // and check that we still have non-escaping java objects.
1180 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
1181 GrowableArray<JavaObjectNode*>& non_escaped_worklist) {
1182 GrowableArray<PointsToNode*> escape_worklist;
1183 // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
1184 int ptnodes_length = ptnodes_worklist.length();
1185 for (int next = 0; next < ptnodes_length; ++next) {
1186 PointsToNode* ptn = ptnodes_worklist.at(next);
1187 if (ptn->escape_state() >= PointsToNode::ArgEscape ||
1188 ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
1189 escape_worklist.push(ptn);
1190 }
1191 }
1192 // Set escape states to referenced nodes (edges list).
1193 while (escape_worklist.length() > 0) {
1194 PointsToNode* ptn = escape_worklist.pop();
1195 PointsToNode::EscapeState es = ptn->escape_state();
1196 PointsToNode::EscapeState field_es = ptn->fields_escape_state();
1197 if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
1198 es >= PointsToNode::ArgEscape) {
1199 // GlobalEscape or ArgEscape state of field means it has unknown value.
1200 if (add_edge(ptn, phantom_obj)) {
1201 // New edge was added
1202 add_field_uses_to_worklist(ptn->as_Field());
1203 }
1204 }
1205 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1206 PointsToNode* e = i.get();
1207 if (e->is_Arraycopy()) {
1208 assert(ptn->arraycopy_dst(), "sanity");
1209 // Propagate only fields escape state through arraycopy edge.
1210 if (e->fields_escape_state() < field_es) {
1211 set_fields_escape_state(e, field_es);
1212 escape_worklist.push(e);
1213 }
1214 } else if (es >= field_es) {
1215 // fields_escape_state is also set to 'es' if it is less than 'es'.
1216 if (e->escape_state() < es) {
1217 set_escape_state(e, es);
1218 escape_worklist.push(e);
1219 }
1220 } else {
1221 // Propagate field escape state.
1222 bool es_changed = false;
1223 if (e->fields_escape_state() < field_es) {
1224 set_fields_escape_state(e, field_es);
1225 es_changed = true;
1226 }
1227 if ((e->escape_state() < field_es) &&
1228 e->is_Field() && ptn->is_JavaObject() &&
1229 e->as_Field()->is_oop()) {
1230 // Change escape state of referenced fields.
1231 set_escape_state(e, field_es);
1232 es_changed = true;
1233 } else if (e->escape_state() < es) {
1234 set_escape_state(e, es);
1235 es_changed = true;
1236 }
1237 if (es_changed) {
1238 escape_worklist.push(e);
1239 }
1240 }
1241 }
1242 }
1243 // Remove escaped objects from non_escaped list.
1244 for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) {
1245 JavaObjectNode* ptn = non_escaped_worklist.at(next);
1246 if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
1247 non_escaped_worklist.delete_at(next);
1248 }
1249 if (ptn->escape_state() == PointsToNode::NoEscape) {
1250 // Find fields in non-escaped allocations which have unknown value.
1251 find_init_values(ptn, phantom_obj, NULL);
1252 }
1253 }
1254 return (non_escaped_worklist.length() > 0);
1255 }
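// Why the propagation above terminates, as a stand-alone sketch (invented,
// simplified types): escape states form a small lattice,
// NoEscape < ArgEscape < GlobalEscape, and an edge only ever raises its
// target's state (a monotone max), so each node is re-pushed at most twice.
//
//   enum SimpleEscape { NoEsc = 0, ArgEsc = 1, GlobalEsc = 2 };
//
//   struct SimplePtn { SimpleEscape es; GrowableArray<SimplePtn*> edges; };
//
//   static void propagate(GrowableArray<SimplePtn*>& worklist) {
//     while (worklist.length() > 0) {
//       SimplePtn* n = worklist.pop();
//       for (int i = 0; i < n->edges.length(); i++) {
//         SimplePtn* e = n->edges.at(i);
//         if (e->es < n->es) {
//           e->es = n->es;          // raise state; never lowered again
//           worklist.push(e);
//         }
//       }
//     }
//   }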
1257 // Add all references to JavaObject node by walking over all uses.
1258 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
1259 int new_edges = 0;
1260 if (populate_worklist) {
1261 // Populate _worklist by uses of jobj's uses.
1262 for (UseIterator i(jobj); i.has_next(); i.next()) {
1263 PointsToNode* use = i.get();
1264 if (use->is_Arraycopy())
1265 continue;
1266 add_uses_to_worklist(use);
1267 if (use->is_Field() && use->as_Field()->is_oop()) {
1268 // Put on worklist all field's uses (loads) and
1269 // related field nodes (same base and offset).
1270 add_field_uses_to_worklist(use->as_Field());
1271 }
1272 }
1273 }
1274 while(_worklist.length() > 0) {
1275 PointsToNode* use = _worklist.pop();
1276 if (PointsToNode::is_base_use(use)) {
1277 // Add reference from jobj to field and from field to jobj (field's base).
1278 use = PointsToNode::get_use_node(use)->as_Field();
1279 if (add_base(use->as_Field(), jobj)) {
1280 new_edges++;
1281 }
1282 continue;
1283 }
1284 assert(!use->is_JavaObject(), "sanity");
1285 if (use->is_Arraycopy()) {
1286 if (jobj == null_obj) // NULL object does not have field edges
1287 continue;
1288 // Add edge from Arraycopy node to arraycopy's source java object
1289 if (add_edge(use, jobj)) {
1290 jobj->set_arraycopy_src();
1291 new_edges++;
1292 }
1293 // and stop here.
1294 continue;
1295 }
1296 if (!add_edge(use, jobj))
1297 continue; // No new edge added, there was such edge already.
1298 new_edges++;
1299 if (use->is_LocalVar()) {
1300 add_uses_to_worklist(use);
1301 if (use->arraycopy_dst()) {
1302 for (EdgeIterator i(use); i.has_next(); i.next()) {
1303 PointsToNode* e = i.get();
1304 if (e->is_Arraycopy()) {
1305 if (jobj == null_obj) // NULL object does not have field edges
1306 continue;
1307 // Add edge from arraycopy's destination java object to Arraycopy node.
1308 if (add_edge(jobj, e)) {
1309 new_edges++;
1310 jobj->set_arraycopy_dst();
1311 }
1312 }
1313 }
1314 }
1315 } else {
1316 // Added a new edge to the value stored in the field.
1317 // Put on worklist all field's uses (loads) and
1318 // related field nodes (same base and offset).
1319 add_field_uses_to_worklist(use->as_Field());
1320 }
1321 }
1322 return new_edges;
1323 }
1325 // Put on worklist all related field nodes.
1326 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
1327 assert(field->is_oop(), "sanity");
1328 int offset = field->offset();
1329 add_uses_to_worklist(field);
1330 // Loop over all bases of this field and push on worklist Field nodes
1331 // with the same offset and base (since they may reference the same field).
1332 for (BaseIterator i(field); i.has_next(); i.next()) {
1333 PointsToNode* base = i.get();
1334 add_fields_to_worklist(field, base);
1335 // Check if the base was the source object of an arraycopy and go over the
1336 // arraycopy's destination objects, since values stored to a field of the
1337 // source object are accessible by uses (loads) of fields of destination objects.
1338 if (base->arraycopy_src()) {
1339 for (UseIterator j(base); j.has_next(); j.next()) {
1340 PointsToNode* arycp = j.get();
1341 if (arycp->is_Arraycopy()) {
1342 for (UseIterator k(arycp); k.has_next(); k.next()) {
1343 PointsToNode* abase = k.get();
1344 if (abase->arraycopy_dst() && abase != base) {
1345 // Look for the same arraycopy reference.
1346 add_fields_to_worklist(field, abase);
1347 }
1348 }
1349 }
1350 }
1351 }
1352 }
1353 }
1355 // Put on worklist all related field nodes.
1356 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
1357 int offset = field->offset();
1358 if (base->is_LocalVar()) {
1359 for (UseIterator j(base); j.has_next(); j.next()) {
1360 PointsToNode* f = j.get();
1361 if (PointsToNode::is_base_use(f)) { // Field
1362 f = PointsToNode::get_use_node(f);
1363 if (f == field || !f->as_Field()->is_oop())
1364 continue;
1365 int offs = f->as_Field()->offset();
1366 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
1367 add_to_worklist(f);
1368 }
1369 }
1370 }
1371 } else {
1372 assert(base->is_JavaObject(), "sanity");
1373 if (// Skip phantom_object since it is only used to indicate that
1374 // this field's content globally escapes.
1375 (base != phantom_obj) &&
1376 // NULL object node does not have fields.
1377 (base != null_obj)) {
1378 for (EdgeIterator i(base); i.has_next(); i.next()) {
1379 PointsToNode* f = i.get();
1380 // Skip arraycopy edge since store to destination object field
1381 // does not update value in source object field.
1382 if (f->is_Arraycopy()) {
1383 assert(base->arraycopy_dst(), "sanity");
1384 continue;
1385 }
1386 if (f == field || !f->as_Field()->is_oop())
1387 continue;
1388 int offs = f->as_Field()->offset();
1389 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
1390 add_to_worklist(f);
1391 }
1392 }
1393 }
1394 }
1395 }
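// The aliasing test used by both branches above, in isolation (helper name
// invented): two field nodes may refer to the same memory if their offsets
// match or either offset is unknown (Type::OffsetBot, e.g. an array element
// addressed by a variable index).
//
//   static bool fields_may_alias(int offs1, int offs2) {
//     return offs1 == offs2 ||
//            offs1 == Type::OffsetBot || offs2 == Type::OffsetBot;
//   }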
1397 // Find fields which have unknown value.
1398 int ConnectionGraph::find_field_value(FieldNode* field) {
1399 // Escaped fields should already have an init value.
1400 assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
1401 int new_edges = 0;
1402 for (BaseIterator i(field); i.has_next(); i.next()) {
1403 PointsToNode* base = i.get();
1404 if (base->is_JavaObject()) {
1405 // Skip Allocate's fields which will be processed later.
1406 if (base->ideal_node()->is_Allocate())
1407 return 0;
1408 assert(base == null_obj, "only NULL ptr base expected here");
1409 }
1410 }
1411 if (add_edge(field, phantom_obj)) {
1412 // New edge was added
1413 new_edges++;
1414 add_field_uses_to_worklist(field);
1415 }
1416 return new_edges;
1417 }
1419 // Find fields initializing values for allocations.
1420 int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) {
1421 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1422 int new_edges = 0;
1423 Node* alloc = pta->ideal_node();
1424 if (init_val == phantom_obj) {
1425 // Do nothing for Allocate nodes since their field values are "known".
1426 if (alloc->is_Allocate())
1427 return 0;
1428 assert(alloc->as_CallStaticJava(), "sanity");
1429 #ifdef ASSERT
1430 if (alloc->as_CallStaticJava()->method() == NULL) {
1431 const char* name = alloc->as_CallStaticJava()->_name;
1432 assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
1433 }
1434 #endif
1435 // Non-escaped allocations returned from Java or runtime calls have
1436 // unknown values in fields.
1437 for (EdgeIterator i(pta); i.has_next(); i.next()) {
1438 PointsToNode* field = i.get();
1439 if (field->is_Field() && field->as_Field()->is_oop()) {
1440 if (add_edge(field, phantom_obj)) {
1441 // New edge was added
1442 new_edges++;
1443 add_field_uses_to_worklist(field->as_Field());
1444 }
1445 }
1446 }
1447 return new_edges;
1448 }
1449 assert(init_val == null_obj, "sanity");
1450 // Do nothing for Call nodes since their field values are unknown.
1451 if (!alloc->is_Allocate())
1452 return 0;
1454 InitializeNode* ini = alloc->as_Allocate()->initialization();
1455 Compile* C = _compile;
1456 bool visited_bottom_offset = false;
1457 GrowableArray<int> offsets_worklist;
1459 // Check if an oop field's initializing value is recorded and add
1460 // a corresponding NULL value if it is not recorded.
1461 // Connection Graph does not record a default initialization by NULL
1462 // captured by Initialize node.
1463 //
1464 for (EdgeIterator i(pta); i.has_next(); i.next()) {
1465 PointsToNode* field = i.get(); // Field (AddP)
1466 if (!field->is_Field() || !field->as_Field()->is_oop())
1467 continue; // Not oop field
1468 int offset = field->as_Field()->offset();
1469 if (offset == Type::OffsetBot) {
1470 if (!visited_bottom_offset) {
1471 // OffsetBot is used to reference an array's element;
1472 // always add a reference to NULL to all Field nodes since we don't
1473 // know which element is referenced.
1474 if (add_edge(field, null_obj)) {
1475 // New edge was added
1476 new_edges++;
1477 add_field_uses_to_worklist(field->as_Field());
1478 visited_bottom_offset = true;
1479 }
1480 }
1481 } else {
1482 // Check only oop fields.
1483 const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
1484 if (adr_type->isa_rawptr()) {
1485 #ifdef ASSERT
1486 // Raw pointers are used for initializing stores, so skip them
1487 // since they should be recorded already.
1488 Node* base = get_addp_base(field->ideal_node());
1489 assert(adr_type->isa_rawptr() && base->is_Proj() &&
1490 (base->in(0) == alloc),"unexpected pointer type");
1491 #endif
1492 continue;
1493 }
1494 if (!offsets_worklist.contains(offset)) {
1495 offsets_worklist.append(offset);
1496 Node* value = NULL;
1497 if (ini != NULL) {
1498 // StoreP::memory_type() == T_ADDRESS
1499 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
1500 Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
1501 // Make sure initializing store has the same type as this AddP.
1502 // This AddP may reference a non-existing field because it is on a
1503 // dead branch of a bimorphic call which is not eliminated yet.
1504 if (store != NULL && store->is_Store() &&
1505 store->as_Store()->memory_type() == ft) {
1506 value = store->in(MemNode::ValueIn);
1507 #ifdef ASSERT
1508 if (VerifyConnectionGraph) {
1509 // Verify that AddP already points to all objects the value points to.
1510 PointsToNode* val = ptnode_adr(value->_idx);
1511 assert((val != NULL), "should be processed already");
1512 PointsToNode* missed_obj = NULL;
1513 if (val->is_JavaObject()) {
1514 if (!field->points_to(val->as_JavaObject())) {
1515 missed_obj = val;
1516 }
1517 } else {
1518 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
1519 tty->print_cr("----------init store has invalid value -----");
1520 store->dump();
1521 val->dump();
1522 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
1523 }
1524 for (EdgeIterator j(val); j.has_next(); j.next()) {
1525 PointsToNode* obj = j.get();
1526 if (obj->is_JavaObject()) {
1527 if (!field->points_to(obj->as_JavaObject())) {
1528 missed_obj = obj;
1529 break;
1530 }
1531 }
1532 }
1533 }
1534 if (missed_obj != NULL) {
1535 tty->print_cr("----------field---------------------------------");
1536 field->dump();
1537 tty->print_cr("----------missed referernce to object-----------");
1538 missed_obj->dump();
1539 tty->print_cr("----------object referernced by init store -----");
1540 store->dump();
1541 val->dump();
1542 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
1543 }
1544 }
1545 #endif
1546 } else {
1547 // There could be initializing stores which follow allocation.
1548 // For example, a volatile field store is not collected
1549 // by Initialize node.
1550 //
1551 // Need to check for dependent loads to separate such stores from
1552 // stores which follow loads. For now, add initial value NULL so
1553 // that compare pointers optimization works correctly.
1554 }
1555 }
1556 if (value == NULL) {
1557 // A field's initializing value was not recorded. Add NULL.
1558 if (add_edge(field, null_obj)) {
1559 // New edge was added
1560 new_edges++;
1561 add_field_uses_to_worklist(field->as_Field());
1562 }
1563 }
1564 }
1565 }
1566 }
1567 return new_edges;
1568 }
1570 // Adjust scalar_replaceable state after Connection Graph is built.
1571 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
1572 // Search for non-escaping objects which are not scalar replaceable
1573 // and mark them to propagate the state to referenced objects.
1575 // 1. An object is not scalar replaceable if the field into which it is
1576 // stored has unknown offset (stored into an unknown element of an array).
1577 //
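// For example (illustrative only):
//
//   Object[] a = ...;
//   a[i] = new Point(); // index 'i' unknown => field offset is OffsetBot
//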
1578 for (UseIterator i(jobj); i.has_next(); i.next()) {
1579 PointsToNode* use = i.get();
1580 assert(!use->is_Arraycopy(), "sanity");
1581 if (use->is_Field()) {
1582 FieldNode* field = use->as_Field();
1583 assert(field->is_oop() && field->scalar_replaceable() &&
1584 field->fields_escape_state() == PointsToNode::NoEscape, "sanity");
1585 if (field->offset() == Type::OffsetBot) {
1586 jobj->set_scalar_replaceable(false);
1587 return;
1588 }
1589 // 2. An object is not scalar replaceable if the field into which it is
1590 // stored has multiple bases, one of which is null.
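//
// For example (illustrative only):
//
//   Point[] p = cond ? new Point[1] : null;
//   p[0] = new Point(); // one base of the stored-into field is null_obj
//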
1591 if (field->base_count() > 1) {
1592 for (BaseIterator i(field); i.has_next(); i.next()) {
1593 PointsToNode* base = i.get();
1594 if (base == null_obj) {
1595 jobj->set_scalar_replaceable(false);
1596 return;
1597 }
1598 }
1599 }
1600 }
1601 assert(use->is_Field() || use->is_LocalVar(), "sanity");
1602 // 3. An object is not scalar replaceable if it is merged with other objects.
1603 for (EdgeIterator j(use); j.has_next(); j.next()) {
1604 PointsToNode* ptn = j.get();
1605 if (ptn->is_JavaObject() && ptn != jobj) {
1606 // Mark all objects.
1607 jobj->set_scalar_replaceable(false);
1608 ptn->set_scalar_replaceable(false);
1609 }
1610 }
1611 if (!jobj->scalar_replaceable()) {
1612 return;
1613 }
1614 }
1616 for (EdgeIterator j(jobj); j.has_next(); j.next()) {
1617 // Non-escaping object node should point only to field nodes.
1618 FieldNode* field = j.get()->as_Field();
1619 int offset = field->as_Field()->offset();
1621 // 4. An object is not scalar replaceable if it has a field with unknown
1622 // offset (an array's element is accessed in a loop).
1623 if (offset == Type::OffsetBot) {
1624 jobj->set_scalar_replaceable(false);
1625 return;
1626 }
1627 // 5. Currently an object is not scalar replaceable if a LoadStore node
1628 // accesses its field since the field value is unknown after it.
1629 //
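// For example (illustrative only), assuming the CAS below is
// intrinsified to a LoadStore ideal node (e.g. CompareAndSwapP):
//
//   AtomicReference<Point> r = new AtomicReference<Point>();
//   r.compareAndSet(null, new Point());
//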
1630 Node* n = field->ideal_node();
1631 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1632 if (n->fast_out(i)->is_LoadStore()) {
1633 jobj->set_scalar_replaceable(false);
1634 return;
1635 }
1636 }
1638 // 6. Or the address may point to more than one object. This may produce
1639 // a false positive result (set not scalar replaceable)
1640 // since the flow-insensitive escape analysis can't separate
1641 // the case when stores overwrite the field's value from the case
1642 // when stores happened on different control branches.
1643 //
1644 // Note: it will disable scalar replacement in some cases:
1645 //
1646 // Point p[] = new Point[1];
1647 // p[0] = new Point(); // Will not be scalar replaced
1648 //
1649 // but it will save us from incorrect optimizations in the following cases:
1650 //
1651 // Point p[] = new Point[1];
1652 // if ( x ) p[0] = new Point(); // Will not be scalar replaced
1653 //
1654 if (field->base_count() > 1) {
1655 for (BaseIterator i(field); i.has_next(); i.next()) {
1656 PointsToNode* base = i.get();
1657 // Don't take into account LocalVar nodes which
1658 // may point to only one object, which should also
1659 // be this field's base by now.
1660 if (base->is_JavaObject() && base != jobj) {
1661 // Mark all bases.
1662 jobj->set_scalar_replaceable(false);
1663 base->set_scalar_replaceable(false);
1664 }
1665 }
1666 }
1667 }
1668 }
1670 #ifdef ASSERT
1671 void ConnectionGraph::verify_connection_graph(
1672 GrowableArray<PointsToNode*>& ptnodes_worklist,
1673 GrowableArray<JavaObjectNode*>& non_escaped_worklist,
1674 GrowableArray<JavaObjectNode*>& java_objects_worklist,
1675 GrowableArray<Node*>& addp_worklist) {
1676 // Verify that graph is complete - no new edges could be added.
1677 int java_objects_length = java_objects_worklist.length();
1678 int non_escaped_length = non_escaped_worklist.length();
1679 int new_edges = 0;
1680 for (int next = 0; next < java_objects_length; ++next) {
1681 JavaObjectNode* ptn = java_objects_worklist.at(next);
1682 new_edges += add_java_object_edges(ptn, true);
1683 }
1684 assert(new_edges == 0, "graph was not complete");
1685 // Verify that escape state is final.
1686 int length = non_escaped_worklist.length();
1687 find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist);
1688 assert((non_escaped_length == non_escaped_worklist.length()) &&
1689 (non_escaped_length == length) &&
1690 (_worklist.length() == 0), "escape state was not final");
1692 // Verify fields information.
1693 int addp_length = addp_worklist.length();
1694 for (int next = 0; next < addp_length; ++next ) {
1695 Node* n = addp_worklist.at(next);
1696 FieldNode* field = ptnode_adr(n->_idx)->as_Field();
1697 if (field->is_oop()) {
1698 // Verify that field has all bases
1699 Node* base = get_addp_base(n);
1700 PointsToNode* ptn = ptnode_adr(base->_idx);
1701 if (ptn->is_JavaObject()) {
1702 assert(field->has_base(ptn->as_JavaObject()), "sanity");
1703 } else {
1704 assert(ptn->is_LocalVar(), "sanity");
1705 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1706 PointsToNode* e = i.get();
1707 if (e->is_JavaObject()) {
1708 assert(field->has_base(e->as_JavaObject()), "sanity");
1709 }
1710 }
1711 }
1712 // Verify that all fields have initializing values.
1713 if (field->edge_count() == 0) {
1714 tty->print_cr("----------field does not have references----------");
1715 field->dump();
1716 for (BaseIterator i(field); i.has_next(); i.next()) {
1717 PointsToNode* base = i.get();
1718 tty->print_cr("----------field has next base---------------------");
1719 base->dump();
1720 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {
1721 tty->print_cr("----------base has fields-------------------------");
1722 for (EdgeIterator j(base); j.has_next(); j.next()) {
1723 j.get()->dump();
1724 }
1725 tty->print_cr("----------base has references---------------------");
1726 for (UseIterator j(base); j.has_next(); j.next()) {
1727 j.get()->dump();
1728 }
1729 }
1730 }
1731 for (UseIterator i(field); i.has_next(); i.next()) {
1732 i.get()->dump();
1733 }
1734 assert(field->edge_count() > 0, "sanity");
1735 }
1736 }
1737 }
1738 }
1739 #endif
1741 // Optimize ideal graph.
1742 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
1743 GrowableArray<Node*>& storestore_worklist) {
1744 Compile* C = _compile;
1745 PhaseIterGVN* igvn = _igvn;
1746 if (EliminateLocks) {
1747 // Mark locks before changing ideal graph.
1748 int cnt = C->macro_count();
1749 for (int i = 0; i < cnt; i++) {
1750 Node *n = C->macro_node(i);
1751 if (n->is_AbstractLock()) { // Lock and Unlock nodes
1752 AbstractLockNode* alock = n->as_AbstractLock();
1753 if (!alock->is_non_esc_obj()) {
1754 if (not_global_escape(alock->obj_node())) {
1755 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
1756 // The lock could be marked eliminated by the lock coarsening
1757 // code during the first IGVN before EA. Replace the coarsened flag
1758 // to eliminate all associated locks/unlocks.
1759 alock->set_non_esc_obj();
1760 }
1761 }
1762 }
1763 }
1764 }
1766 if (OptimizePtrCompare) {
1767 // Add ConI(#CC_GT) and ConI(#CC_EQ).
1768 _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
1769 _pcmp_eq = igvn->makecon(TypeInt::CC_EQ);
1770 // Optimize objects compare.
1771 while (ptr_cmp_worklist.length() != 0) {
1772 Node *n = ptr_cmp_worklist.pop();
1773 Node *res = optimize_ptr_compare(n);
1774 if (res != NULL) {
1775 #ifndef PRODUCT
1776 if (PrintOptimizePtrCompare) {
1777 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
1778 if (Verbose) {
1779 n->dump(1);
1780 }
1781 }
1782 #endif
1783 igvn->replace_node(n, res);
1784 }
1785 }
1786 // cleanup
1787 if (_pcmp_neq->outcnt() == 0)
1788 igvn->hash_delete(_pcmp_neq);
1789 if (_pcmp_eq->outcnt() == 0)
1790 igvn->hash_delete(_pcmp_eq);
1791 }
1793 // For MemBarStoreStore nodes added in library_call.cpp, check the
1794 // escape status of the associated AllocateNode and optimize out the
1795 // MemBarStoreStore node if the allocated object never escapes.
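//
// For example (illustrative only): the MemBarStoreStore emitted for
//
//   byte[] copy = (byte[]) src.clone();
//
// can be replaced by a MemBarCPUOrder when 'copy' never escapes,
// since no other thread can observe the partially initialized copy.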
1796 while (storestore_worklist.length() != 0) {
1797 Node *n = storestore_worklist.pop();
1798 MemBarStoreStoreNode *storestore = n->as_MemBarStoreStore();
1799 Node *alloc = storestore->in(MemBarNode::Precedent)->in(0);
1800 assert (alloc->is_Allocate(), "storestore should point to AllocateNode");
1801 if (not_global_escape(alloc)) {
1802 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
1803 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
1804 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
1805 igvn->register_new_node_with_optimizer(mb);
1806 igvn->replace_node(storestore, mb);
1807 }
1808 }
1809 }
1811 // Optimize objects compare.
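//
// For example (illustrative only):
//
//   Object a = new Object(); // provably non-escaping
//   if (a == param) { ... }
//
// Here the CmpP can be folded to "not equal" (_pcmp_neq) because
// 'param' cannot point to the freshly created non-escaping allocation.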
1812 Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
1813 assert(OptimizePtrCompare, "sanity");
1814 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
1815 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
1816 JavaObjectNode* jobj1 = unique_java_object(n->in(1));
1817 JavaObjectNode* jobj2 = unique_java_object(n->in(2));
1818 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
1819 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
1821 // Check simple cases first.
1822 if (jobj1 != NULL) {
1823 if (jobj1->escape_state() == PointsToNode::NoEscape) {
1824 if (jobj1 == jobj2) {
1825 // Comparing the same non-escaping object.
1826 return _pcmp_eq;
1827 }
1828 Node* obj = jobj1->ideal_node();
1829 // Comparing a non-escaping allocation.
1830 if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
1831 !ptn2->points_to(jobj1)) {
1832 return _pcmp_neq; // This includes nullness check.
1833 }
1834 }
1835 }
1836 if (jobj2 != NULL) {
1837 if (jobj2->escape_state() == PointsToNode::NoEscape) {
1838 Node* obj = jobj2->ideal_node();
1839 // Comparing a non-escaping allocation.
1840 if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
1841 !ptn1->points_to(jobj2)) {
1842 return _pcmp_neq; // This includes nullness check.
1843 }
1844 }
1845 }
1846 if (jobj1 != NULL && jobj1 != phantom_obj &&
1847 jobj2 != NULL && jobj2 != phantom_obj &&
1848 jobj1->ideal_node()->is_Con() &&
1849 jobj2->ideal_node()->is_Con()) {
1850 // Klass or String constants compare. Need to be careful with
1851 // compressed pointers - compare types of ConN and ConP instead of nodes.
1852 const Type* t1 = jobj1->ideal_node()->get_ptr_type();
1853 const Type* t2 = jobj2->ideal_node()->get_ptr_type();
1854 if (t1->make_ptr() == t2->make_ptr()) {
1855 return _pcmp_eq;
1856 } else {
1857 return _pcmp_neq;
1858 }
1859 }
1860 if (ptn1->meet(ptn2)) {
1861 return NULL; // Sets are not disjoint
1862 }
1864 // Sets are disjoint.
1865 bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
1866 bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
1867 bool set1_has_null_ptr = ptn1->points_to(null_obj);
1868 bool set2_has_null_ptr = ptn2->points_to(null_obj);
1869 if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
1870 (set2_has_unknown_ptr && set1_has_null_ptr)) {
1871 // Check nullness of unknown object.
1872 return NULL;
1873 }
1875 // Disjointness by itself is not sufficient since
1876 // alias analysis is not complete for escaped objects.
1877 // Disjoint sets are definitely unrelated only when
1878 // at least one set has only non-escaping allocations.
1879 if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
1880 if (ptn1->non_escaping_allocation()) {
1881 return _pcmp_neq;
1882 }
1883 }
1884 if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
1885 if (ptn2->non_escaping_allocation()) {
1886 return _pcmp_neq;
1887 }
1888 }
1889 return NULL;
1890 }
1892 // Connection Graph construction functions.
1894 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
1895 PointsToNode* ptadr = _nodes.at(n->_idx);
1896 if (ptadr != NULL) {
1897 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
1898 return;
1899 }
1900 Compile* C = _compile;
1901 ptadr = new (C->comp_arena()) LocalVarNode(C, n, es);
1902 _nodes.at_put(n->_idx, ptadr);
1903 }
1905 void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
1906 PointsToNode* ptadr = _nodes.at(n->_idx);
1907 if (ptadr != NULL) {
1908 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
1909 return;
1910 }
1911 Compile* C = _compile;
1912 ptadr = new (C->comp_arena()) JavaObjectNode(C, n, es);
1913 _nodes.at_put(n->_idx, ptadr);
1914 }
1916 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
1917 PointsToNode* ptadr = _nodes.at(n->_idx);
1918 if (ptadr != NULL) {
1919 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
1920 return;
1921 }
1922 bool unsafe = false;
1923 bool is_oop = is_oop_field(n, offset, &unsafe);
1924 if (unsafe) {
1925 es = PointsToNode::GlobalEscape;
1926 }
1927 Compile* C = _compile;
1928 FieldNode* field = new (C->comp_arena()) FieldNode(C, n, es, offset, is_oop);
1929 _nodes.at_put(n->_idx, field);
1930 }
1932 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
1933 PointsToNode* src, PointsToNode* dst) {
1934 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
1935 assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
1936 PointsToNode* ptadr = _nodes.at(n->_idx);
1937 if (ptadr != NULL) {
1938 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
1939 return;
1940 }
1941 Compile* C = _compile;
1942 ptadr = new (C->comp_arena()) ArraycopyNode(C, n, es);
1943 _nodes.at_put(n->_idx, ptadr);
1944 // Add edge from arraycopy node to source object.
1945 (void)add_edge(ptadr, src);
1946 src->set_arraycopy_src();
1947 // Add edge from destination object to arraycopy node.
1948 (void)add_edge(dst, ptadr);
1949 dst->set_arraycopy_dst();
1950 }
1952 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
1953 const Type* adr_type = n->as_AddP()->bottom_type();
1954 BasicType bt = T_INT;
1955 if (offset == Type::OffsetBot) {
1956 // Check only oop fields.
1957 if (!adr_type->isa_aryptr() ||
1958 (adr_type->isa_aryptr()->klass() == NULL) ||
1959 adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
1960 // OffsetBot is used to reference array's element. Ignore first AddP.
1961 if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
1962 bt = T_OBJECT;
1963 }
1964 }
1965 } else if (offset != oopDesc::klass_offset_in_bytes()) {
1966 if (adr_type->isa_instptr()) {
1967 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
1968 if (field != NULL) {
1969 bt = field->layout_type();
1970 } else {
1971 // Check for unsafe oop field access
1972 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1973 int opcode = n->fast_out(i)->Opcode();
1974 if (opcode == Op_StoreP || opcode == Op_LoadP ||
1975 opcode == Op_StoreN || opcode == Op_LoadN) {
1976 bt = T_OBJECT;
1977 (*unsafe) = true;
1978 break;
1979 }
1980 }
1981 }
1982 } else if (adr_type->isa_aryptr()) {
1983 if (offset == arrayOopDesc::length_offset_in_bytes()) {
1984 // Ignore array length load.
1985 } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
1986 // Ignore first AddP.
1987 } else {
1988 const Type* elemtype = adr_type->isa_aryptr()->elem();
1989 bt = elemtype->array_element_basic_type();
1990 }
1991 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
1992 // Allocation initialization, ThreadLocal field access, unsafe access
1993 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1994 int opcode = n->fast_out(i)->Opcode();
1995 if (opcode == Op_StoreP || opcode == Op_LoadP ||
1996 opcode == Op_StoreN || opcode == Op_LoadN) {
1997 bt = T_OBJECT;
1998 break;
1999 }
2000 }
2001 }
2002 }
2003 return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
2004 }
2006 // Returns the unique java object pointed to, or NULL.
2007 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2008 assert(!_collecting, "should not call when contructed graph");
2009 // If the node was created after the escape computation we can't answer.
2010 uint idx = n->_idx;
2011 if (idx >= nodes_size()) {
2012 return NULL;
2013 }
2014 PointsToNode* ptn = ptnode_adr(idx);
2015 if (ptn->is_JavaObject()) {
2016 return ptn->as_JavaObject();
2017 }
2018 assert(ptn->is_LocalVar(), "sanity");
2019 // Check all java objects it points to.
2020 JavaObjectNode* jobj = NULL;
2021 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2022 PointsToNode* e = i.get();
2023 if (e->is_JavaObject()) {
2024 if (jobj == NULL) {
2025 jobj = e->as_JavaObject();
2026 } else if (jobj != e) {
2027 return NULL;
2028 }
2029 }
2030 }
2031 return jobj;
2032 }
2034 // Return true if this node points only to non-escaping allocations.
2035 bool PointsToNode::non_escaping_allocation() {
2036 if (is_JavaObject()) {
2037 Node* n = ideal_node();
2038 if (n->is_Allocate() || n->is_CallStaticJava()) {
2039 return (escape_state() == PointsToNode::NoEscape);
2040 } else {
2041 return false;
2042 }
2043 }
2044 assert(is_LocalVar(), "sanity");
2045 // Check all java objects it points to.
2046 for (EdgeIterator i(this); i.has_next(); i.next()) {
2047 PointsToNode* e = i.get();
2048 if (e->is_JavaObject()) {
2049 Node* n = e->ideal_node();
2050 if ((e->escape_state() != PointsToNode::NoEscape) ||
2051 !(n->is_Allocate() || n->is_CallStaticJava())) {
2052 return false;
2053 }
2054 }
2055 }
2056 return true;
2057 }
2059 // Return true if we know the node does not escape globally.
2060 bool ConnectionGraph::not_global_escape(Node *n) {
2061 assert(!_collecting, "should not call during graph construction");
2062 // If the node was created after the escape computation we can't answer.
2063 uint idx = n->_idx;
2064 if (idx >= nodes_size()) {
2065 return false;
2066 }
2067 PointsToNode* ptn = ptnode_adr(idx);
2068 PointsToNode::EscapeState es = ptn->escape_state();
2069 // If we have already computed a value, return it.
2070 if (es >= PointsToNode::GlobalEscape)
2071 return false;
2072 if (ptn->is_JavaObject()) {
2073 return true; // (es < PointsToNode::GlobalEscape);
2074 }
2075 assert(ptn->is_LocalVar(), "sanity");
2076 // Check all java objects it points to.
2077 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2078 if (i.get()->escape_state() >= PointsToNode::GlobalEscape)
2079 return false;
2080 }
2081 return true;
2082 }
2085 // Helper functions
2087 // Return true if this node is the specified java object or points to it.
2088 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
2089 if (is_JavaObject()) {
2090 return (this == ptn);
2091 }
2092 assert(is_LocalVar() || is_Field(), "sanity");
2093 for (EdgeIterator i(this); i.has_next(); i.next()) {
2094 if (i.get() == ptn)
2095 return true;
2096 }
2097 return false;
2098 }
2100 // Return true if the points-to sets of the two nodes intersect.
2101 bool PointsToNode::meet(PointsToNode* ptn) {
2102 if (this == ptn) {
2103 return true;
2104 } else if (ptn->is_JavaObject()) {
2105 return this->points_to(ptn->as_JavaObject());
2106 } else if (this->is_JavaObject()) {
2107 return ptn->points_to(this->as_JavaObject());
2108 }
2109 assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
2110 int ptn_count = ptn->edge_count();
2111 for (EdgeIterator i(this); i.has_next(); i.next()) {
2112 PointsToNode* this_e = i.get();
2113 for (int j = 0; j < ptn_count; j++) {
2114 if (this_e == ptn->edge(j))
2115 return true;
2116 }
2117 }
2118 return false;
2119 }
2121 #ifdef ASSERT
2122 // Return true if bases point to this java object.
2123 bool FieldNode::has_base(JavaObjectNode* jobj) const {
2124 for (BaseIterator i(this); i.has_next(); i.next()) {
2125 if (i.get() == jobj)
2126 return true;
2127 }
2128 return false;
2129 }
2130 #endif
2132 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
2133 const Type *adr_type = phase->type(adr);
2134 if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
2135 adr->in(AddPNode::Address)->is_Proj() &&
2136 adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
2137 // We are computing a raw address for a store captured by an Initialize;
2138 // compute an appropriate address type. AddP cases #3 and #5 (see below).
2139 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2140 assert(offs != Type::OffsetBot ||
2141 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
2142 "offset must be a constant or it is initialization of array");
2143 return offs;
2144 }
2145 const TypePtr *t_ptr = adr_type->isa_ptr();
2146 assert(t_ptr != NULL, "must be a pointer type");
2147 return t_ptr->offset();
2148 }
2150 Node* ConnectionGraph::get_addp_base(Node *addp) {
2151 assert(addp->is_AddP(), "must be AddP");
2152 //
2153 // AddP cases for Base and Address inputs:
2154 // case #1. Direct object's field reference:
2155 // Allocate
2156 // |
2157 // Proj #5 ( oop result )
2158 // |
2159 // CheckCastPP (cast to instance type)
2160 // | |
2161 // AddP ( base == address )
2162 //
2163 // case #2. Indirect object's field reference:
2164 // Phi
2165 // |
2166 // CastPP (cast to instance type)
2167 // | |
2168 // AddP ( base == address )
2169 //
2170 // case #3. Raw object's field reference for Initialize node:
2171 // Allocate
2172 // |
2173 // Proj #5 ( oop result )
2174 // top |
2175 // \ |
2176 // AddP ( base == top )
2177 //
2178 // case #4. Array's element reference:
2179 // {CheckCastPP | CastPP}
2180 // | | |
2181 // | AddP ( array's element offset )
2182 // | |
2183 // AddP ( array's offset )
2184 //
2185 // case #5. Raw object's field reference for arraycopy stub call:
2186 // The inline_native_clone() case when the arraycopy stub is called
2187 // after the allocation before Initialize and CheckCastPP nodes.
2188 // Allocate
2189 // |
2190 // Proj #5 ( oop result )
2191 // | |
2192 // AddP ( base == address )
2193 //
2194 // case #6. Constant Pool, ThreadLocal, CastX2P or
2195 // Raw object's field reference:
2196 // {ConP, ThreadLocal, CastX2P, raw Load}
2197 // top |
2198 // \ |
2199 // AddP ( base == top )
2200 //
2201 // case #7. Klass's field reference.
2202 // LoadKlass
2203 // | |
2204 // AddP ( base == address )
2205 //
2206 // case #8. narrow Klass's field reference.
2207 // LoadNKlass
2208 // |
2209 // DecodeN
2210 // | |
2211 // AddP ( base == address )
2212 //
2213 Node *base = addp->in(AddPNode::Base);
2214 if (base->uncast()->is_top()) { // The AddP case #3 and #6.
2215 base = addp->in(AddPNode::Address);
2216 while (base->is_AddP()) {
2217 // Case #6 (unsafe access) may have several chained AddP nodes.
2218 assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2219 base = base->in(AddPNode::Address);
2220 }
2221 Node* uncast_base = base->uncast();
2222 int opcode = uncast_base->Opcode();
2223 assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
2224 opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
2225 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
2226 (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
2227 }
2228 return base;
2229 }
2231 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2232 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2233 Node* addp2 = addp->raw_out(0);
2234 if (addp->outcnt() == 1 && addp2->is_AddP() &&
2235 addp2->in(AddPNode::Base) == n &&
2236 addp2->in(AddPNode::Address) == addp) {
2237 assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2238 //
2239 // Find the array's offset to push it on the worklist first so that
2240 // the array's element offset is processed first (pushed second),
2241 // which avoids inserting a CastPP for the array's offset.
2242 // Otherwise the inserted CastPP (LocalVar) will point to what
2243 // the AddP (Field) points to, which would be wrong since
2244 // the algorithm expects the CastPP to have the same points-to set
2245 // as AddP's base CheckCastPP (LocalVar).
2246 //
2247 // ArrayAllocation
2248 // |
2249 // CheckCastPP
2250 // |
2251 // memProj (from ArrayAllocation CheckCastPP)
2252 // | ||
2253 // | || Int (element index)
2254 // | || | ConI (log(element size))
2255 // | || | /
2256 // | || LShift
2257 // | || /
2258 // | AddP (array's element offset)
2259 // | |
2260 // | | ConI (array's offset: #12(32-bits) or #24(64-bits))
2261 // | / /
2262 // AddP (array's offset)
2263 // |
2264 // Load/Store (memory operation on array's element)
2265 //
2266 return addp2;
2267 }
2268 return NULL;
2269 }
2271 //
2272 // Adjust the type and inputs of an AddP which computes the
2273 // address of a field of an instance
2274 //
2275 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2276 PhaseGVN* igvn = _igvn;
2277 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2278 assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
2279 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2280 if (t == NULL) {
2281 // We are computing a raw address for a store captured by an Initialize;
2282 // compute an appropriate address type (cases #3 and #5).
2283 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2284 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2285 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2286 assert(offs != Type::OffsetBot, "offset must be a constant");
2287 t = base_t->add_offset(offs)->is_oopptr();
2288 }
2289 int inst_id = base_t->instance_id();
2290 assert(!t->is_known_instance() || t->instance_id() == inst_id,
2291 "old type must be non-instance or match new type");
2293 // The type 't' could be a subclass of 'base_t'.
2294 // As a result t->offset() could be larger than base_t's size, which will
2295 // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
2296 // constructor verifies correctness of the offset.
2297 //
2298 // It could happen on a subclass's branch (from the type profiling
2299 // inlining) which was not eliminated during parsing since the exactness
2300 // of the allocation type was not propagated to the subclass type check.
2301 //
2302 // Or the type 't' could be not related to 'base_t' at all.
2303 // It could happen when the CHA type differs from the MDO type on a dead path
2304 // (for example, from an instanceof check) which is not collapsed during parsing.
2305 //
2306 // Do nothing for such an AddP node and don't process its users since
2307 // this code branch will go away.
2308 //
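// An illustrative (hypothetical) shape of such a dead branch:
//
//   A a = new A();          // exact type is A
//   if (a instanceof B) {   // B extends A; the branch is dead
//     ((B) a).b_field = 0;  // AddP offset may lie beyond A's size
//   }
//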
2309 if (!t->is_known_instance() &&
2310 !base_t->klass()->is_subtype_of(t->klass())) {
2311 return false; // bail out
2312 }
2313 const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
2314 // Do NOT remove the next line: ensure a new alias index is allocated
2315 // for the instance type. Note: C++ will not remove it since the call
2316 // has a side effect.
2317 int alias_idx = _compile->get_alias_index(tinst);
2318 igvn->set_type(addp, tinst);
2319 // record the allocation in the node map
2320 set_map(addp, get_map(base->_idx));
2321 // Set addp's Base and Address to 'base'.
2322 Node *abase = addp->in(AddPNode::Base);
2323 Node *adr = addp->in(AddPNode::Address);
2324 if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2325 adr->in(0)->_idx == (uint)inst_id) {
2326 // Skip AddP cases #3 and #5.
2327 } else {
2328 assert(!abase->is_top(), "sanity"); // AddP case #3
2329 if (abase != base) {
2330 igvn->hash_delete(addp);
2331 addp->set_req(AddPNode::Base, base);
2332 if (abase == adr) {
2333 addp->set_req(AddPNode::Address, base);
2334 } else {
2335 // AddP case #4 (adr is array's element offset AddP node)
2336 #ifdef ASSERT
2337 const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
2338 assert(adr->is_AddP() && atype != NULL &&
2339 atype->instance_id() == inst_id, "array's element offset should be processed first");
2340 #endif
2341 }
2342 igvn->hash_insert(addp);
2343 }
2344 }
2345 // Put on IGVN worklist since at least addp's type was changed above.
2346 record_for_optimizer(addp);
2347 return true;
2348 }
2350 //
2351 // Create a new version of orig_phi if necessary. Returns either the newly
2352 // created phi or an existing phi. Sets create_new to indicate whether a new
2353 // phi was created. Cache the last newly created phi in the node map.
2354 //
2355 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
2356 Compile *C = _compile;
2357 PhaseGVN* igvn = _igvn;
2358 new_created = false;
2359 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
2360 // nothing to do if orig_phi is bottom memory or matches alias_idx
2361 if (phi_alias_idx == alias_idx) {
2362 return orig_phi;
2363 }
2364 // Have we recently created a Phi for this alias index?
2365 PhiNode *result = get_map_phi(orig_phi->_idx);
2366 if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
2367 return result;
2368 }
2369 // Previous check may fail when the same wide memory Phi was split into Phis
2370 // for different memory slices. Search all Phis for this region.
2371 if (result != NULL) {
2372 Node* region = orig_phi->in(0);
2373 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2374 Node* phi = region->fast_out(i);
2375 if (phi->is_Phi() &&
2376 C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
2377 assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
2378 return phi->as_Phi();
2379 }
2380 }
2381 }
2382 if ((int) (C->live_nodes() + 2*NodeLimitFudgeFactor) > MaxNodeLimit) {
2383 if (C->do_escape_analysis() == true && !C->failing()) {
2384 // Retry compilation without escape analysis.
2385 // If this is the first failure, the sentinel string will "stick"
2386 // to the Compile object, and the C2Compiler will see it and retry.
2387 C->record_failure(C2Compiler::retry_no_escape_analysis());
2388 }
2389 return NULL;
2390 }
2391 orig_phi_worklist.append_if_missing(orig_phi);
2392 const TypePtr *atype = C->get_adr_type(alias_idx);
2393 result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
2394 C->copy_node_notes_to(result, orig_phi);
2395 igvn->set_type(result, result->bottom_type());
2396 record_for_optimizer(result);
2397 set_map(orig_phi, result);
2398 new_created = true;
2399 return result;
2400 }
2402 //
2403 // Return a new version of Memory Phi "orig_phi" with the inputs having the
2404 // specified alias index.
2405 //
2406 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {
2407 assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
2408 Compile *C = _compile;
2409 PhaseGVN* igvn = _igvn;
2410 bool new_phi_created;
2411 PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
2412 if (!new_phi_created) {
2413 return result;
2414 }
2415 GrowableArray<PhiNode *> phi_list;
2416 GrowableArray<uint> cur_input;
2417 PhiNode *phi = orig_phi;
2418 uint idx = 1;
2419 bool finished = false;
2420 while (!finished) {
2421 while (idx < phi->req()) {
2422 Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
2423 if (mem != NULL && mem->is_Phi()) {
2424 PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
2425 if (new_phi_created) {
2426 // Found a phi for which we created a new split; push the current one
2427 // on the worklist and begin processing the new one.
2428 phi_list.push(phi);
2429 cur_input.push(idx);
2430 phi = mem->as_Phi();
2431 result = newphi;
2432 idx = 1;
2433 continue;
2434 } else {
2435 mem = newphi;
2436 }
2437 }
2438 if (C->failing()) {
2439 return NULL;
2440 }
2441 result->set_req(idx++, mem);
2442 }
2443 #ifdef ASSERT
2444 // verify that the new Phi has an input for each input of the original
2445 assert( phi->req() == result->req(), "must have same number of inputs.");
2446 assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
2447 #endif
2448 // Check if all new phi's inputs have specified alias index.
2449 // Otherwise use old phi.
2450 for (uint i = 1; i < phi->req(); i++) {
2451 Node* in = result->in(i);
2452 assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");
2453 }
2454 // we have finished processing a Phi, see if there are any more to do
2455 finished = (phi_list.length() == 0);
2456 if (!finished) {
2457 phi = phi_list.pop();
2458 idx = cur_input.pop();
2459 PhiNode *prev_result = get_map_phi(phi->_idx);
2460 prev_result->set_req(idx++, result);
2461 result = prev_result;
2462 }
2463 }
2464 return result;
2465 }
2467 //
2468 // The next methods are derived from methods in MemNode.
2469 //
2470 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
2471 Node *mem = mmem;
2472 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
2473 // means an array I have not precisely typed yet. Do not do any
2474 // alias stuff with it any time soon.
2475 if (toop->base() != Type::AnyPtr &&
2476 !(toop->klass() != NULL &&
2477 toop->klass()->is_java_lang_Object() &&
2478 toop->offset() == Type::OffsetBot)) {
2479 mem = mmem->memory_at(alias_idx);
2480 // Update input if it is progress over what we have now
2481 }
2482 return mem;
2483 }
2485 //
2486 // Move memory users to their memory slices.
2487 //
2488 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) {
2489 Compile* C = _compile;
2490 PhaseGVN* igvn = _igvn;
2491 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
2492 assert(tp != NULL, "ptr type");
2493 int alias_idx = C->get_alias_index(tp);
2494 int general_idx = C->get_general_index(alias_idx);
2496 // Move users first
2497 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2498 Node* use = n->fast_out(i);
2499 if (use->is_MergeMem()) {
2500 MergeMemNode* mmem = use->as_MergeMem();
2501 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
2502 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
2503 continue; // Nothing to do
2504 }
2505 // Replace previous general reference to mem node.
2506 uint orig_uniq = C->unique();
2507 Node* m = find_inst_mem(n, general_idx, orig_phis);
2508 assert(orig_uniq == C->unique(), "no new nodes");
2509 mmem->set_memory_at(general_idx, m);
2510 --imax;
2511 --i;
2512 } else if (use->is_MemBar()) {
2513 assert(!use->is_Initialize(), "initializing stores should not be moved");
2514 if (use->req() > MemBarNode::Precedent &&
2515 use->in(MemBarNode::Precedent) == n) {
2516 // Don't move related membars.
2517 record_for_optimizer(use);
2518 continue;
2519 }
2520 tp = use->as_MemBar()->adr_type()->isa_ptr();
2521 if ((tp != NULL && C->get_alias_index(tp) == alias_idx) ||
2522 alias_idx == general_idx) {
2523 continue; // Nothing to do
2524 }
2525 // Move to general memory slice.
2526 uint orig_uniq = C->unique();
2527 Node* m = find_inst_mem(n, general_idx, orig_phis);
2528 assert(orig_uniq == C->unique(), "no new nodes");
2529 igvn->hash_delete(use);
2530 imax -= use->replace_edge(n, m);
2531 igvn->hash_insert(use);
2532 record_for_optimizer(use);
2533 --i;
2534 #ifdef ASSERT
2535 } else if (use->is_Mem()) {
2536 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
2537 // Don't move related cardmark.
2538 continue;
2539 }
2540 // Memory nodes should have new memory input.
2541 tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
2542 assert(tp != NULL, "ptr type");
2543 int idx = C->get_alias_index(tp);
2544 assert(get_map(use->_idx) != NULL || idx == alias_idx,
2545 "Following memory nodes should have new memory input or be on the same memory slice");
2546 } else if (use->is_Phi()) {
2547 // Phi nodes should be split and moved already.
2548 tp = use->as_Phi()->adr_type()->isa_ptr();
2549 assert(tp != NULL, "ptr type");
2550 int idx = C->get_alias_index(tp);
2551 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
2552 } else {
2553 use->dump();
2554 assert(false, "should not be here");
2555 #endif
2556 }
2557 }
2558 }
2560 //
2561 // Search memory chain of "mem" to find a MemNode whose address
2562 // is the specified alias index.
2563 //
2564 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
2565 if (orig_mem == NULL)
2566 return orig_mem;
2567 Compile* C = _compile;
2568 PhaseGVN* igvn = _igvn;
2569 const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
2570 bool is_instance = (toop != NULL) && toop->is_known_instance();
2571 Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
2572 Node *prev = NULL;
2573 Node *result = orig_mem;
2574 while (prev != result) {
2575 prev = result;
2576 if (result == start_mem)
2577 break; // hit one of our sentinels
2578 if (result->is_Mem()) {
2579 const Type *at = igvn->type(result->in(MemNode::Address));
2580 if (at == Type::TOP)
2581 break; // Dead
2582 assert (at->isa_ptr() != NULL, "pointer type required.");
2583 int idx = C->get_alias_index(at->is_ptr());
2584 if (idx == alias_idx)
2585 break; // Found
2586 if (!is_instance && (at->isa_oopptr() == NULL ||
2587 !at->is_oopptr()->is_known_instance())) {
2588 break; // Do not skip store to general memory slice.
2589 }
2590 result = result->in(MemNode::Memory);
2591 }
2592 if (!is_instance)
2593 continue; // don't search further for non-instance types
2594 // skip over a call which does not affect this memory slice
2595 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
2596 Node *proj_in = result->in(0);
2597 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
2598 break; // hit one of our sentinels
2599 } else if (proj_in->is_Call()) {
2600 CallNode *call = proj_in->as_Call();
2601 if (!call->may_modify(toop, igvn)) {
2602 result = call->in(TypeFunc::Memory);
2603 }
2604 } else if (proj_in->is_Initialize()) {
2605 AllocateNode* alloc = proj_in->as_Initialize()->allocation();
2606 // Stop if this is the initialization for the object instance
2607 // which contains this memory slice, otherwise skip over it.
2608 if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) {
2609 result = proj_in->in(TypeFunc::Memory);
2610 }
2611 } else if (proj_in->is_MemBar()) {
2612 result = proj_in->in(TypeFunc::Memory);
2613 }
2614 } else if (result->is_MergeMem()) {
2615 MergeMemNode *mmem = result->as_MergeMem();
2616 result = step_through_mergemem(mmem, alias_idx, toop);
2617 if (result == mmem->base_memory()) {
2618 // Didn't find instance memory, search through general slice recursively.
2619 result = mmem->memory_at(C->get_general_index(alias_idx));
2620 result = find_inst_mem(result, alias_idx, orig_phis);
2621 if (C->failing()) {
2622 return NULL;
2623 }
2624 mmem->set_memory_at(alias_idx, result);
2625 }
2626 } else if (result->is_Phi() &&
2627 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
2628 Node *un = result->as_Phi()->unique_input(igvn);
2629 if (un != NULL) {
2630 orig_phis.append_if_missing(result->as_Phi());
2631 result = un;
2632 } else {
2633 break;
2634 }
2635 } else if (result->is_ClearArray()) {
2636 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
2637 // Cannot bypass the initialization of the instance
2638 // we are looking for.
2639 break;
2640 }
2641 // Otherwise skip it (the call updated 'result' value).
2642 } else if (result->Opcode() == Op_SCMemProj) {
2643 Node* mem = result->in(0);
2644 Node* adr = NULL;
2645 if (mem->is_LoadStore()) {
2646 adr = mem->in(MemNode::Address);
2647 } else {
2648 assert(mem->Opcode() == Op_EncodeISOArray, "sanity");
2649 adr = mem->in(3); // Memory edge corresponds to destination array
2650 }
2651 const Type *at = igvn->type(adr);
2652 if (at != Type::TOP) {
2653 assert (at->isa_ptr() != NULL, "pointer type required.");
2654 int idx = C->get_alias_index(at->is_ptr());
2655 assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node access its field");
2656 break;
2657 }
2658 result = mem->in(MemNode::Memory);
2659 }
2660 }
2661 if (result->is_Phi()) {
2662 PhiNode *mphi = result->as_Phi();
2663 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
2664 const TypePtr *t = mphi->adr_type();
2665 if (!is_instance) {
2666 // Push all non-instance Phis on the orig_phis worklist to update inputs
2667 // during Phase 4 if needed.
2668 orig_phis.append_if_missing(mphi);
2669 } else if (C->get_alias_index(t) != alias_idx) {
2670 // Create a new Phi with the specified alias index type.
2671 result = split_memory_phi(mphi, alias_idx, orig_phis);
2672 }
2673 }
2674 // The result is either a MemNode, a PhiNode or an InitializeNode.
2675 return result;
2676 }
2678 //
2679 // Convert the types of unescaped object to instance types where possible,
2680 // propagate the new type information through the graph, and update memory
2681 // edges and MergeMem inputs to reflect the new type.
2682 //
2683 // We start with allocations (and calls which may be allocations) on alloc_worklist.
2684 // The processing is done in 4 phases:
2685 //
2686 // Phase 1: Process possible allocations from alloc_worklist. Create instance
2687 // types for the CheckCastPP for allocations where possible.
2688 // Propagate the new types through users as follows:
2689 // casts and Phi: push users on alloc_worklist
2690 // AddP: cast Base and Address inputs to the instance type
2691 // push any AddP users on alloc_worklist and push any memnode
2692 // users onto memnode_worklist.
2693 // Phase 2: Process MemNode's from memnode_worklist. Compute a new address type and
2694 // search the Memory chain for a store with the appropriate
2695 // address type. If a Phi is found, create a new version with
2696 // the appropriate memory slices from each of the Phi inputs.
2697 // For stores, process the users as follows:
2698 // MemNode: push on memnode_worklist
2699 // MergeMem: push on mergemem_worklist
2700 // Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice
2701 // moving the first node encountered of each instance type to
2702 // the input corresponding to its alias index (the instance's
2703 // memory slice).
2704 // Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes.
2705 //
2706 // In the following example, the CheckCastPP nodes are the cast of allocation
2707 // results and the allocation of node 29 is unescaped and eligible to be an
2708 // instance type.
2709 //
2710 // We start with:
2711 //
2712 // 7 Parm #memory
2713 // 10 ConI "12"
2714 // 19 CheckCastPP "Foo"
2715 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
2716 // 29 CheckCastPP "Foo"
2717 // 30 AddP _ 29 29 10 Foo+12 alias_index=4
2718 //
2719 // 40 StoreP 25 7 20 ... alias_index=4
2720 // 50 StoreP 35 40 30 ... alias_index=4
2721 // 60 StoreP 45 50 20 ... alias_index=4
2722 // 70 LoadP _ 60 30 ... alias_index=4
2723 // 80 Phi 75 50 60 Memory alias_index=4
2724 // 90 LoadP _ 80 30 ... alias_index=4
2725 // 100 LoadP _ 80 20 ... alias_index=4
2726 //
2727 //
2728 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
2729 // and creating a new alias index for node 30. This gives:
2730 //
2731 // 7 Parm #memory
2732 // 10 ConI "12"
2733 // 19 CheckCastPP "Foo"
2734 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
2735 // 29 CheckCastPP "Foo" iid=24
2736 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
2737 //
2738 // 40 StoreP 25 7 20 ... alias_index=4
2739 // 50 StoreP 35 40 30 ... alias_index=6
2740 // 60 StoreP 45 50 20 ... alias_index=4
2741 // 70 LoadP _ 60 30 ... alias_index=6
2742 // 80 Phi 75 50 60 Memory alias_index=4
2743 // 90 LoadP _ 80 30 ... alias_index=6
2744 // 100 LoadP _ 80 20 ... alias_index=4
2745 //
2746 // In phase 2, new memory inputs are computed for the loads and stores,
2747 // and a new version of the phi is created. In phase 4, the inputs to
2748 // node 80 are updated and then the memory nodes are updated with the
2749 // values computed in phase 2. This results in:
2750 //
2751 // 7 Parm #memory
2752 // 10 ConI "12"
2753 // 19 CheckCastPP "Foo"
2754 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
2755 // 29 CheckCastPP "Foo" iid=24
2756 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
2757 //
2758 // 40 StoreP 25 7 20 ... alias_index=4
2759 // 50 StoreP 35 7 30 ... alias_index=6
2760 // 60 StoreP 45 40 20 ... alias_index=4
2761 // 70 LoadP _ 50 30 ... alias_index=6
2762 // 80 Phi 75 40 60 Memory alias_index=4
2763 // 120 Phi 75 50 50 Memory alias_index=6
2764 // 90 LoadP _ 120 30 ... alias_index=6
2765 // 100 LoadP _ 80 20 ... alias_index=4
2766 //
2767 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) {
2768 GrowableArray<Node *> memnode_worklist;
2769 GrowableArray<PhiNode *> orig_phis;
2770 PhaseIterGVN *igvn = _igvn;
2771 uint new_index_start = (uint) _compile->num_alias_types();
2772 Arena* arena = Thread::current()->resource_area();
2773 VectorSet visited(arena);
2774 ideal_nodes.clear(); // Reset for use with set_map/get_map.
2775 uint unique_old = _compile->unique();
2777 // Phase 1: Process possible allocations from alloc_worklist.
2778 // Create instance types for the CheckCastPP for allocations where possible.
2779 //
2780 // (Note: don't forget to change the order of the second AddP node on
2781 // the alloc_worklist if the order of the worklist processing is changed,
2782 // see the comment in find_second_addp().)
2783 //
2784 while (alloc_worklist.length() != 0) {
2785 Node *n = alloc_worklist.pop();
2786 uint ni = n->_idx;
2787 if (n->is_Call()) {
2788 CallNode *alloc = n->as_Call();
2789 // copy escape information to call node
2790 PointsToNode* ptn = ptnode_adr(alloc->_idx);
2791 PointsToNode::EscapeState es = ptn->escape_state();
2792 // We have an allocation or call which returns a Java object,
2793 // see if it is unescaped.
2794 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable())
2795 continue;
2796 // Find CheckCastPP for the allocate or for the return value of a call
2797 n = alloc->result_cast();
2798 if (n == NULL) { // No uses except Initialize node
2799 if (alloc->is_Allocate()) {
2800 // Set the scalar_replaceable flag for allocation
2801 // so it could be eliminated if it has no uses.
2802 alloc->as_Allocate()->_is_scalar_replaceable = true;
2803 }
2804 if (alloc->is_CallStaticJava()) {
2805 // Set the scalar_replaceable flag for boxing method
2806 // so it could be eliminated if it has no uses.
2807 alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
2808 }
2809 continue;
2810 }
2811 if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
2812 assert(!alloc->is_Allocate(), "allocation should have unique type");
2813 continue;
2814 }
2816 // The inline code for Object.clone() casts the allocation result to
2817 // java.lang.Object and then to the actual type of the allocated
2818 // object. Detect this case and use the second cast.
2819 // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when
2820 // the allocation result is cast to java.lang.Object and then
2821 // to the actual Array type.
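//
// Illustrative (hypothetical) Java shapes of both patterns:
//
//   Foo f = (Foo) obj.clone();                         // Object -> Foo
//   int[] a = (int[]) Array.newInstance(int.class, n); // Object -> int[]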
2822 if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
2823 && (alloc->is_AllocateArray() ||
2824 igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {
2825 Node *cast2 = NULL;
2826 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2827 Node *use = n->fast_out(i);
2828 if (use->is_CheckCastPP()) {
2829 cast2 = use;
2830 break;
2831 }
2832 }
2833 if (cast2 != NULL) {
2834 n = cast2;
2835 } else {
2836 // Not scalar replaceable if the allocation type is unknown statically
2837 // (reflection allocation); the object can't be restored during
2838 // deoptimization without a precise type.
2839 continue;
2840 }
2841 }
2842 if (alloc->is_Allocate()) {
2843 // Set the scalar_replaceable flag for allocation
2844 // so it could be eliminated.
2845 alloc->as_Allocate()->_is_scalar_replaceable = true;
2846 }
2847 if (alloc->is_CallStaticJava()) {
2848 // Set the scalar_replaceable flag for boxing method
2849 // so it could be eliminated.
2850 alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
2851 }
2852 set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state
2853 // in order for an object to be scalar-replaceable, it must be:
2854 // - a direct allocation (not a call returning an object)
2855 // - non-escaping
2856 // - eligible to be a unique type
2857 // - not determined to be ineligible by escape analysis
2858 set_map(alloc, n);
2859 set_map(n, alloc);
2860 const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
2861 if (t == NULL)
2862 continue; // not a TypeOopPtr
2863 const TypeOopPtr* tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni);
2864 igvn->hash_delete(n);
2865 igvn->set_type(n, tinst);
2866 n->raise_bottom_type(tinst);
2867 igvn->hash_insert(n);
2868 record_for_optimizer(n);
2869 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
2871 // First, put on the worklist all Field edges from the Connection Graph
2872 // which is more accurate than putting immediate users from the Ideal Graph.
2873 for (EdgeIterator e(ptn); e.has_next(); e.next()) {
2874 PointsToNode* tgt = e.get();
2875 Node* use = tgt->ideal_node();
2876 assert(tgt->is_Field() && use->is_AddP(),
2877 "only AddP nodes are Field edges in CG");
2878 if (use->outcnt() > 0) { // Don't process dead nodes
2879 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
2880 if (addp2 != NULL) {
2881 assert(alloc->is_AllocateArray(),"array allocation was expected");
2882 alloc_worklist.append_if_missing(addp2);
2883 }
2884 alloc_worklist.append_if_missing(use);
2885 }
2886 }
2888 // An allocation may have an Initialize which has raw stores. Scan
2889 // the users of the raw allocation result and push AddP users
2890 // on alloc_worklist.
2891 Node *raw_result = alloc->proj_out(TypeFunc::Parms);
2892 assert (raw_result != NULL, "must have an allocation result");
2893 for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
2894 Node *use = raw_result->fast_out(i);
2895 if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
2896 Node* addp2 = find_second_addp(use, raw_result);
2897 if (addp2 != NULL) {
2898 assert(alloc->is_AllocateArray(),"array allocation was expected");
2899 alloc_worklist.append_if_missing(addp2);
2900 }
2901 alloc_worklist.append_if_missing(use);
2902 } else if (use->is_MemBar()) {
2903 memnode_worklist.append_if_missing(use);
2904 }
2905 }
2906 }
2907 } else if (n->is_AddP()) {
2908 JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
2909 if (jobj == NULL || jobj == phantom_obj) {
2910 #ifdef ASSERT
2911 ptnode_adr(get_addp_base(n)->_idx)->dump();
2912 ptnode_adr(n->_idx)->dump();
2913 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
2914 #endif
2915 _compile->record_failure(C2Compiler::retry_no_escape_analysis());
2916 return;
2917 }
2918 Node *base = get_map(jobj->idx()); // CheckCastPP node
2919 if (!split_AddP(n, base)) continue; // wrong type from dead path
2920 } else if (n->is_Phi() ||
2921 n->is_CheckCastPP() ||
2922 n->is_EncodeP() ||
2923 n->is_DecodeN() ||
2924 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
2925 if (visited.test_set(n->_idx)) {
2926 assert(n->is_Phi(), "loops only through Phi's");
2927 continue; // already processed
2928 }
2929 JavaObjectNode* jobj = unique_java_object(n);
2930 if (jobj == NULL || jobj == phantom_obj) {
2931 #ifdef ASSERT
2932 ptnode_adr(n->_idx)->dump();
2933 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
2934 #endif
2935 _compile->record_failure(C2Compiler::retry_no_escape_analysis());
2936 return;
2937 } else {
2938 Node *val = get_map(jobj->idx()); // CheckCastPP node
2939 TypeNode *tn = n->as_Type();
2940 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
2941 assert(tinst != NULL && tinst->is_known_instance() &&
2942 tinst->instance_id() == jobj->idx() , "instance type expected.");
2944 const Type *tn_type = igvn->type(tn);
2945 const TypeOopPtr *tn_t;
2946 if (tn_type->isa_narrowoop()) {
2947 tn_t = tn_type->make_ptr()->isa_oopptr();
2948 } else {
2949 tn_t = tn_type->isa_oopptr();
2950 }
2951 if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
2952 if (tn_type->isa_narrowoop()) {
2953 tn_type = tinst->make_narrowoop();
2954 } else {
2955 tn_type = tinst;
2956 }
2957 igvn->hash_delete(tn);
2958 igvn->set_type(tn, tn_type);
2959 tn->set_type(tn_type);
2960 igvn->hash_insert(tn);
2961 record_for_optimizer(n);
2962 } else {
2963 assert(tn_type == TypePtr::NULL_PTR ||
2964 tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()),
2965 "unexpected type");
2966 continue; // Skip dead path with different type
2967 }
2968 }
2969 } else {
2970 debug_only(n->dump();)
2971 assert(false, "EA: unexpected node");
2972 continue;
2973 }
2974 // push allocation's users on appropriate worklist
2975 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2976 Node *use = n->fast_out(i);
2977 if (use->is_Mem() && use->in(MemNode::Address) == n) {
2978 // Load/store to instance's field
2979 memnode_worklist.append_if_missing(use);
2980 } else if (use->is_MemBar()) {
2981 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
2982 memnode_worklist.append_if_missing(use);
2983 }
2984 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
2985 Node* addp2 = find_second_addp(use, n);
2986 if (addp2 != NULL) {
2987 alloc_worklist.append_if_missing(addp2);
2988 }
2989 alloc_worklist.append_if_missing(use);
2990 } else if (use->is_Phi() ||
2991 use->is_CheckCastPP() ||
2992 use->is_EncodeNarrowPtr() ||
2993 use->is_DecodeNarrowPtr() ||
2994 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
2995 alloc_worklist.append_if_missing(use);
2996 #ifdef ASSERT
2997 } else if (use->is_Mem()) {
2998 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
2999 } else if (use->is_MergeMem()) {
3000 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3001 } else if (use->is_SafePoint()) {
3002 // Look for MergeMem nodes for calls which reference unique allocation
3003 // (through CheckCastPP nodes) even for debug info.
3004 Node* m = use->in(TypeFunc::Memory);
3005 if (m->is_MergeMem()) {
3006 assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3007 }
3008 } else if (use->Opcode() == Op_EncodeISOArray) {
3009 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3010 // EncodeISOArray overwrites destination array
3011 memnode_worklist.append_if_missing(use);
3012 }
3013 } else {
3014 uint op = use->Opcode();
3015 if (!(op == Op_CmpP || op == Op_Conv2B ||
3016 op == Op_CastP2X || op == Op_StoreCM ||
3017 op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
3018 op == Op_StrEquals || op == Op_StrIndexOf)) {
3019 n->dump();
3020 use->dump();
3021 assert(false, "EA: missing allocation reference path");
3022 }
3023 #endif
3024 }
3025 }
3027 }
3028 // New alias types were created in split_AddP().
3029 uint new_index_end = (uint) _compile->num_alias_types();
3030 assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");
3032 // Phase 2: Process MemNode's from memnode_worklist. Compute a new address type and
3033 // compute new values for Memory inputs (the Memory inputs are not
3034 // actually updated until phase 4.)
3035 if (memnode_worklist.length() == 0)
3036 return; // nothing to do
3037 while (memnode_worklist.length() != 0) {
3038 Node *n = memnode_worklist.pop();
3039 if (visited.test_set(n->_idx))
3040 continue;
3041 if (n->is_Phi() || n->is_ClearArray()) {
3042 // we don't need to do anything, but the users must be pushed
3043 } else if (n->is_MemBar()) { // Initialize, MemBar nodes
3044 // we don't need to do anything, but the users must be pushed
3045 n = n->as_MemBar()->proj_out(TypeFunc::Memory);
3046 if (n == NULL)
3047 continue;
3048 } else if (n->Opcode() == Op_EncodeISOArray) {
3049 // get the memory projection
3050 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3051 Node *use = n->fast_out(i);
3052 if (use->Opcode() == Op_SCMemProj) {
3053 n = use;
3054 break;
3055 }
3056 }
3057 assert(n->Opcode() == Op_SCMemProj, "memory projection required");
3058 } else {
3059 assert(n->is_Mem(), "memory node required.");
3060 Node *addr = n->in(MemNode::Address);
3061 const Type *addr_t = igvn->type(addr);
3062 if (addr_t == Type::TOP)
3063 continue;
3064 assert(addr_t->isa_ptr() != NULL, "pointer type required.");
3065 int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
3066 assert((uint)alias_idx < new_index_end, "wrong alias index");
3067 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
3068 if (_compile->failing()) {
3069 return;
3070 }
3071 if (mem != n->in(MemNode::Memory)) {
3072 // We delay the memory edge update since we need old one in
3073 // MergeMem code below when instances memory slices are separated.
3074 set_map(n, mem);
3075 }
3076 if (n->is_Load()) {
3077 continue; // don't push users
3078 } else if (n->is_LoadStore()) {
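// A LoadStore node (e.g. a compare-and-swap) publishes its memory state
// through an SCMemProj projection, so continue from that projection.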
3079 // get the memory projection
3080 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3081 Node *use = n->fast_out(i);
3082 if (use->Opcode() == Op_SCMemProj) {
3083 n = use;
3084 break;
3085 }
3086 }
3087 assert(n->Opcode() == Op_SCMemProj, "memory projection required");
3088 }
3089 }
3090 // Push the node's users on the appropriate worklist.
3091 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3092 Node *use = n->fast_out(i);
3093 if (use->is_Phi() || use->is_ClearArray()) {
3094 memnode_worklist.append_if_missing(use);
3095 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
3096 if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
3097 continue;
3098 memnode_worklist.append_if_missing(use);
3099 } else if (use->is_MemBar()) {
3100 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3101 memnode_worklist.append_if_missing(use);
3102 }
3103 #ifdef ASSERT
3104 } else if (use->is_Mem()) {
3105 assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
3106 } else if (use->is_MergeMem()) {
3107 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3108 } else if (use->Opcode() == Op_EncodeISOArray) {
3109 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3110 // EncodeISOArray overwrites destination array
3111 memnode_worklist.append_if_missing(use);
3112 }
3113 } else {
3114 uint op = use->Opcode();
3115 if (!(op == Op_StoreCM ||
3116 (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
3117 strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
3118 op == Op_AryEq || op == Op_StrComp ||
3119 op == Op_StrEquals || op == Op_StrIndexOf)) {
3120 n->dump();
3121 use->dump();
3122 assert(false, "EA: missing memory path");
3123 }
3124 #endif
3125 }
3126 }
3127 }
3129 // Phase 3: Process MergeMem nodes from mergemem_worklist.
3130 // Walk each memory slice, moving the first node encountered of each
3131 // instance type to the input corresponding to its alias index.
3132 uint length = _mergemem_worklist.length();
3133 for (uint next = 0; next < length; ++next) {
3134 MergeMemNode* nmm = _mergemem_worklist.at(next);
3135 assert(!visited.test_set(nmm->_idx), "should not be visited before");
3136 // Note: we don't want to use MergeMemStream here because we only want to
3137 // scan inputs which exist at the start, not ones we add during processing.
3138 // Note 2: MergeMem may already contain instance memory slices added
3139 // during the find_inst_mem() call when memory nodes were processed above.
3140 igvn->hash_delete(nmm);
3141 uint nslices = nmm->req();
3142 for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
3143 Node* mem = nmm->in(i);
3144 Node* cur = NULL;
3145 if (mem == NULL || mem->is_top())
3146 continue;
3147 // First, update mergemem by moving memory nodes to corresponding slices
3148 // if their type became more precise since this mergemem was created.
3149 while (mem->is_Mem()) {
3150 const Type *at = igvn->type(mem->in(MemNode::Address));
3151 if (at != Type::TOP) {
3152 assert(at->isa_ptr() != NULL, "pointer type required.");
3153 uint idx = (uint)_compile->get_alias_index(at->is_ptr());
3154 if (idx == i) {
3155 if (cur == NULL)
3156 cur = mem;
3157 } else {
3158 if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
3159 nmm->set_memory_at(idx, mem);
3160 }
3161 }
3162 }
3163 mem = mem->in(MemNode::Memory);
3164 }
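// Use the first node of this instance type found on the chain, if any;
// otherwise keep the node at which the scan of the chain stopped.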
3165 nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
3166 // Find any instance of the current type if we haven't already
3167 // encountered a memory slice of that instance along the memory chain.
3168 for (uint ni = new_index_start; ni < new_index_end; ni++) {
3169 if ((uint)_compile->get_general_index(ni) == i) {
3170 Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
3171 if (nmm->is_empty_memory(m)) {
3172 Node* result = find_inst_mem(mem, ni, orig_phis);
3173 if (_compile->failing()) {
3174 return;
3175 }
3176 nmm->set_memory_at(ni, result);
3177 }
3178 }
3179 }
3180 }
3181 // Find memory values for the rest of the instance types.
3182 for (uint ni = new_index_start; ni < new_index_end; ni++) {
3183 const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
3184 Node* result = step_through_mergemem(nmm, ni, tinst);
3185 if (result == nmm->base_memory()) {
3186 // Didn't find instance memory; search through the general slice recursively.
3187 result = nmm->memory_at(_compile->get_general_index(ni));
3188 result = find_inst_mem(result, ni, orig_phis);
3189 if (_compile->failing()) {
3190 return;
3191 }
3192 nmm->set_memory_at(ni, result);
3193 }
3194 }
3195 igvn->hash_insert(nmm);
3196 record_for_optimizer(nmm);
3197 }
3199 // Phase 4: Update the inputs of non-instance memory Phis and
3200 // the Memory input of MemNodes.
3201 // First update the inputs of any non-instance Phi from
3202 // which we split out an instance Phi. Note we don't have
3203 // to recursively process Phis encountered on the input memory
3204 // chains as is done in split_memory_phi() since they will
3205 // also be processed here.
3206 for (int j = 0; j < orig_phis.length(); j++) {
3207 PhiNode *phi = orig_phis.at(j);
3208 int alias_idx = _compile->get_alias_index(phi->adr_type());
3209 igvn->hash_delete(phi);
3210 for (uint i = 1; i < phi->req(); i++) {
3211 Node *mem = phi->in(i);
3212 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
3213 if (_compile->failing()) {
3214 return;
3215 }
3216 if (mem != new_mem) {
3217 phi->set_req(i, new_mem);
3218 }
3219 }
3220 igvn->hash_insert(phi);
3221 record_for_optimizer(phi);
3222 }
3224 // Update the memory inputs of MemNodes with the value we computed
3225 // in Phase 2 and move stores' memory users to the corresponding memory slices.
3226 // Disable memory split verification code until the fix for 6984348.
3227 // Currently it produces false negative results since it does not cover all cases.
3228 #if 0 // ifdef ASSERT
3229 visited.Reset();
3230 Node_Stack old_mems(arena, _compile->unique() >> 2);
3231 #endif
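// Each node visited below had a new mapping recorded by set_map() during
// the earlier phases (see the assert in the else branch for the node kinds).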
3232 for (uint i = 0; i < ideal_nodes.size(); i++) {
3233 Node* n = ideal_nodes.at(i);
3234 Node* nmem = get_map(n->_idx);
3235 assert(nmem != NULL, "sanity");
3236 if (n->is_Mem()) {
3237 #if 0 // ifdef ASSERT
3238 Node* old_mem = n->in(MemNode::Memory);
3239 if (!visited.test_set(old_mem->_idx)) {
3240 old_mems.push(old_mem, old_mem->outcnt());
3241 }
3242 #endif
3243 assert(n->in(MemNode::Memory) != nmem, "sanity");
3244 if (!n->is_Load()) {
3245 // Move memory users of a store first.
3246 move_inst_mem(n, orig_phis);
3247 }
3248 // Now update memory input
3249 igvn->hash_delete(n);
3250 n->set_req(MemNode::Memory, nmem);
3251 igvn->hash_insert(n);
3252 record_for_optimizer(n);
3253 } else {
3254 assert(n->is_Allocate() || n->is_CheckCastPP() ||
3255 n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
3256 }
3257 }
3258 #if 0 // ifdef ASSERT
3259 // Verify that memory was split correctly
3260 while (old_mems.is_nonempty()) {
3261 Node* old_mem = old_mems.node();
3262 uint old_cnt = old_mems.index();
3263 old_mems.pop();
3264 assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
3265 }
3266 #endif
3267 }
3269 #ifndef PRODUCT
3270 static const char *node_type_names[] = {
3271 "UnknownType",
3272 "JavaObject",
3273 "LocalVar",
3274 "Field",
3275 "Arraycopy"
3276 };
3278 static const char *esc_names[] = {
3279 "UnknownEscape",
3280 "NoEscape",
3281 "ArgEscape",
3282 "GlobalEscape"
3283 };
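// Note: both tables are indexed by the PointsToNode enum values in dump()
// below, so their order must match the corresponding enum declarations.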
3285 void PointsToNode::dump(bool print_state) const {
3286 NodeType nt = node_type();
3287 tty->print("%s ", node_type_names[(int) nt]);
3288 if (print_state) {
3289 EscapeState es = escape_state();
3290 EscapeState fields_es = fields_escape_state();
3291 tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
3292 if (nt == PointsToNode::JavaObject && !this->scalar_replaceable())
3293 tty->print("NSR ");
3294 }
3295 if (is_Field()) {
3296 FieldNode* f = (FieldNode*)this;
3297 if (f->is_oop())
3298 tty->print("oop ");
3299 if (f->offset() > 0)
3300 tty->print("+%d ", f->offset());
3301 tty->print("(");
3302 for (BaseIterator i(f); i.has_next(); i.next()) {
3303 PointsToNode* b = i.get();
3304 tty->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : ""));
3305 }
3306 tty->print(" )");
3307 }
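// Print outgoing edges: "P" marks a JavaObject, "F" a Field, "cp" an Arraycopy.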
3308 tty->print("[");
3309 for (EdgeIterator i(this); i.has_next(); i.next()) {
3310 PointsToNode* e = i.get();
3311 tty->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
3312 }
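// Print uses: "b" marks a use as a field's base, "cp" an Arraycopy.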
3313 tty->print(" [");
3314 for (UseIterator i(this); i.has_next(); i.next()) {
3315 PointsToNode* u = i.get();
3316 bool is_base = false;
3317 if (PointsToNode::is_base_use(u)) {
3318 is_base = true;
3319 u = PointsToNode::get_use_node(u)->as_Field();
3320 }
3321 tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
3322 }
3323 tty->print(" ]] ");
3324 if (_node == NULL)
3325 tty->print_cr("<null>");
3326 else
3327 _node->dump();
3328 }
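// The resulting line has the form (illustrative):
//   <NodeType> <EscapeState>(<fields EscapeState>) [ <edges> [ <uses> ]] <ideal node dump>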
3330 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
3331 bool first = true;
3332 int ptnodes_length = ptnodes_worklist.length();
3333 for (int i = 0; i < ptnodes_length; i++) {
3334 PointsToNode *ptn = ptnodes_worklist.at(i);
3335 if (ptn == NULL || !ptn->is_JavaObject())
3336 continue;
3337 PointsToNode::EscapeState es = ptn->escape_state();
3338 if ((es != PointsToNode::NoEscape) && !Verbose) {
3339 continue;
3340 }
3341 Node* n = ptn->ideal_node();
3342 if (n->is_Allocate() || (n->is_CallStaticJava() &&
3343 n->as_CallStaticJava()->is_boxing_method())) {
3344 if (first) {
3345 tty->cr();
3346 tty->print("======== Connection graph for ");
3347 _compile->method()->print_short_name();
3348 tty->cr();
3349 first = false;
3350 }
3351 ptn->dump();
3352 // Print all locals and fields which reference this allocation
3353 for (UseIterator j(ptn); j.has_next(); j.next()) {
3354 PointsToNode* use = j.get();
3355 if (use->is_LocalVar()) {
3356 use->dump(Verbose);
3357 } else if (Verbose) {
3358 use->dump();
3359 }
3360 }
3361 tty->cr();
3362 }
3363 }
3364 }
3365 #endif