Wed, 17 Feb 2016 13:40:12 +0300
8081778: Use Intel x64 CPU instructions for RSA acceleration
Summary: Add intrinsics for BigInteger squareToLen and mulAdd methods.
Reviewed-by: kvn, jrose
1 /*
2 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "ci/bcEscapeAnalyzer.hpp"
27 #include "compiler/compileLog.hpp"
28 #include "libadt/vectset.hpp"
29 #include "memory/allocation.hpp"
30 #include "opto/c2compiler.hpp"
31 #include "opto/callnode.hpp"
32 #include "opto/cfgnode.hpp"
33 #include "opto/compile.hpp"
34 #include "opto/escape.hpp"
35 #include "opto/phaseX.hpp"
36 #include "opto/rootnode.hpp"
38 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
39 _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
40 _in_worklist(C->comp_arena()),
41 _next_pidx(0),
42 _collecting(true),
43 _verify(false),
44 _compile(C),
45 _igvn(igvn),
46 _node_map(C->comp_arena()) {
47 // Add unknown java object.
48 add_java_object(C->top(), PointsToNode::GlobalEscape);
49 phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
50 // Add ConP(#NULL) and ConN(#NULL) nodes.
51 Node* oop_null = igvn->zerocon(T_OBJECT);
52 assert(oop_null->_idx < nodes_size(), "should be created already");
53 add_java_object(oop_null, PointsToNode::NoEscape);
54 null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
55 if (UseCompressedOops) {
56 Node* noop_null = igvn->zerocon(T_NARROWOOP);
57 assert(noop_null->_idx < nodes_size(), "should be created already");
58 map_ideal_node(noop_null, null_obj);
59 }
60 _pcmp_neq = NULL; // Should be initialized
61 _pcmp_eq = NULL;
62 }
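// Illustrative sketch (not HotSpot code): the escape states used throughout
// this file form a small lattice, NoEscape < ArgEscape < GlobalEscape, and
// the propagation below only ever moves a node's state upward. A toy model
// of that invariant, assuming this simplified three-state ordering:
//
//   enum ToyEscapeState { ToyNoEscape = 1, ToyArgEscape = 2, ToyGlobalEscape = 3 };
//   ToyEscapeState toy_join(ToyEscapeState a, ToyEscapeState b) {
//     return (a > b) ? a : b;  // a state never moves back toward ToyNoEscape
//   }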
64 bool ConnectionGraph::has_candidates(Compile *C) {
65 // EA brings benefits only when the code has allocations and/or locks which
66 // are represented by ideal Macro nodes.
67 int cnt = C->macro_count();
68 for (int i = 0; i < cnt; i++) {
69 Node *n = C->macro_node(i);
70 if (n->is_Allocate())
71 return true;
72 if (n->is_Lock()) {
73 Node* obj = n->as_Lock()->obj_node()->uncast();
74 if (!(obj->is_Parm() || obj->is_Con()))
75 return true;
76 }
77 if (n->is_CallStaticJava() &&
78 n->as_CallStaticJava()->is_boxing_method()) {
79 return true;
80 }
81 }
82 return false;
83 }
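// Usage sketch: a caller is expected to gate the whole analysis on
// has_candidates(), for example
//
//   if (ConnectionGraph::has_candidates(C)) {
//     ConnectionGraph::do_analysis(C, igvn);  // defined below
//   }
//
// since EA cannot help a method without allocations, locks or boxing calls.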
85 void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
86 Compile::TracePhase t2("escapeAnalysis", &Phase::_t_escapeAnalysis, true);
87 ResourceMark rm;
89 // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
90 // to create space for them in ConnectionGraph::_nodes[].
91 Node* oop_null = igvn->zerocon(T_OBJECT);
92 Node* noop_null = igvn->zerocon(T_NARROWOOP);
93 ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
94 // Perform escape analysis
95 if (congraph->compute_escape()) {
96     // There are non-escaping objects.
97 C->set_congraph(congraph);
98 }
99 // Cleanup.
100 if (oop_null->outcnt() == 0)
101 igvn->hash_delete(oop_null);
102 if (noop_null->outcnt() == 0)
103 igvn->hash_delete(noop_null);
104 }
106 bool ConnectionGraph::compute_escape() {
107 Compile* C = _compile;
108 PhaseGVN* igvn = _igvn;
110 // Worklists used by EA.
111 Unique_Node_List delayed_worklist;
112 GrowableArray<Node*> alloc_worklist;
113 GrowableArray<Node*> ptr_cmp_worklist;
114 GrowableArray<Node*> storestore_worklist;
115 GrowableArray<PointsToNode*> ptnodes_worklist;
116 GrowableArray<JavaObjectNode*> java_objects_worklist;
117 GrowableArray<JavaObjectNode*> non_escaped_worklist;
118 GrowableArray<FieldNode*> oop_fields_worklist;
119 DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
121 { Compile::TracePhase t3("connectionGraph", &Phase::_t_connectionGraph, true);
123 // 1. Populate Connection Graph (CG) with PointsTo nodes.
124 ideal_nodes.map(C->live_nodes(), NULL); // preallocate space
125 // Initialize worklist
126 if (C->root() != NULL) {
127 ideal_nodes.push(C->root());
128 }
129 // Processed ideal nodes are unique on ideal_nodes list
130 // but several ideal nodes are mapped to the phantom_obj.
131 // To avoid duplicated entries on the following worklists
132 // add the phantom_obj only once to them.
133 ptnodes_worklist.append(phantom_obj);
134 java_objects_worklist.append(phantom_obj);
135 for( uint next = 0; next < ideal_nodes.size(); ++next ) {
136 Node* n = ideal_nodes.at(next);
137 // Create PointsTo nodes and add them to Connection Graph. Called
138   // only once per ideal node since ideal_nodes is a Unique_Node_List.
139 add_node_to_connection_graph(n, &delayed_worklist);
140 PointsToNode* ptn = ptnode_adr(n->_idx);
141 if (ptn != NULL && ptn != phantom_obj) {
142 ptnodes_worklist.append(ptn);
143 if (ptn->is_JavaObject()) {
144 java_objects_worklist.append(ptn->as_JavaObject());
145 if ((n->is_Allocate() || n->is_CallStaticJava()) &&
146 (ptn->escape_state() < PointsToNode::GlobalEscape)) {
147             // Only the results of allocations and Java static calls are interesting.
148 non_escaped_worklist.append(ptn->as_JavaObject());
149 }
150 } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
151 oop_fields_worklist.append(ptn->as_Field());
152 }
153 }
154 if (n->is_MergeMem()) {
155 // Collect all MergeMem nodes to add memory slices for
156 // scalar replaceable objects in split_unique_types().
157 _mergemem_worklist.append(n->as_MergeMem());
158 } else if (OptimizePtrCompare && n->is_Cmp() &&
159 (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
160       // Collect pointer-compare nodes.
161 ptr_cmp_worklist.append(n);
162 } else if (n->is_MemBarStoreStore()) {
163 // Collect all MemBarStoreStore nodes so that depending on the
164 // escape status of the associated Allocate node some of them
165 // may be eliminated.
166 storestore_worklist.append(n);
167 } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
168 (n->req() > MemBarNode::Precedent)) {
169 record_for_optimizer(n);
170 #ifdef ASSERT
171 } else if (n->is_AddP()) {
172 // Collect address nodes for graph verification.
173 addp_worklist.append(n);
174 #endif
175 }
176 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
177 Node* m = n->fast_out(i); // Get user
178 ideal_nodes.push(m);
179 }
180 }
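    // The loop above is the usual grow-while-scanning worklist traversal:
    // ideal_nodes is extended during iteration and Unique_Node_List ensures
    // each node is appended at most once. A minimal sketch of the pattern
    // (illustrative only, using STL types rather than HotSpot's):
    //
    //   std::vector<Node*> list = { root };
    //   std::unordered_set<Node*> seen = { root };
    //   for (size_t next = 0; next < list.size(); ++next)  // list grows here
    //     for (Node* m : outputs_of(list[next]))           // hypothetical helper
    //       if (seen.insert(m).second)
    //         list.push_back(m);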
181 if (non_escaped_worklist.length() == 0) {
182 _collecting = false;
183 return false; // Nothing to do.
184 }
185 // Add final simple edges to graph.
186 while(delayed_worklist.size() > 0) {
187 Node* n = delayed_worklist.pop();
188 add_final_edges(n);
189 }
190 int ptnodes_length = ptnodes_worklist.length();
192 #ifdef ASSERT
193 if (VerifyConnectionGraph) {
194 // Verify that no new simple edges could be created and all
195     // local vars have edges.
196 _verify = true;
197 for (int next = 0; next < ptnodes_length; ++next) {
198 PointsToNode* ptn = ptnodes_worklist.at(next);
199 add_final_edges(ptn->ideal_node());
200 if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
201 ptn->dump();
202 assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
203 }
204 }
205 _verify = false;
206 }
207 #endif
208   // The bytecode analyzer BCEscapeAnalyzer, used for Call node
209   // processing, calls into CI to resolve symbols (types, fields, methods)
210   // referenced in bytecode. During symbol resolution the VM may throw
211   // an exception which CI cleans up and converts to a compilation failure.
212 if (C->failing()) return false;
214 // 2. Finish Graph construction by propagating references to all
215 // java objects through graph.
216 if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,
217 java_objects_worklist, oop_fields_worklist)) {
218 // All objects escaped or hit time or iterations limits.
219 _collecting = false;
220 return false;
221 }
223   // 3. Adjust scalar_replaceable state of non-escaping objects and push
224 // scalar replaceable allocations on alloc_worklist for processing
225 // in split_unique_types().
226 int non_escaped_length = non_escaped_worklist.length();
227 for (int next = 0; next < non_escaped_length; next++) {
228 JavaObjectNode* ptn = non_escaped_worklist.at(next);
229 bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
230 Node* n = ptn->ideal_node();
231 if (n->is_Allocate()) {
232 n->as_Allocate()->_is_non_escaping = noescape;
233 }
234 if (n->is_CallStaticJava()) {
235 n->as_CallStaticJava()->_is_non_escaping = noescape;
236 }
237 if (noescape && ptn->scalar_replaceable()) {
238 adjust_scalar_replaceable_state(ptn);
239 if (ptn->scalar_replaceable()) {
240 alloc_worklist.append(ptn->ideal_node());
241 }
242 }
243 }
245 #ifdef ASSERT
246 if (VerifyConnectionGraph) {
247 // Verify that graph is complete - no new edges could be added or needed.
248 verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
249 java_objects_worklist, addp_worklist);
250 }
251 assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
252 assert(null_obj->escape_state() == PointsToNode::NoEscape &&
253 null_obj->edge_count() == 0 &&
254 !null_obj->arraycopy_src() &&
255 !null_obj->arraycopy_dst(), "sanity");
256 #endif
258 _collecting = false;
260 } // TracePhase t3("connectionGraph")
262 // 4. Optimize ideal graph based on EA information.
263 bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
264 if (has_non_escaping_obj) {
265 optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
266 }
268 #ifndef PRODUCT
269 if (PrintEscapeAnalysis) {
270 dump(ptnodes_worklist); // Dump ConnectionGraph
271 }
272 #endif
274 bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
275 #ifdef ASSERT
276 if (VerifyConnectionGraph) {
277 int alloc_length = alloc_worklist.length();
278 for (int next = 0; next < alloc_length; ++next) {
279 Node* n = alloc_worklist.at(next);
280 PointsToNode* ptn = ptnode_adr(n->_idx);
281 assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
282 }
283 }
284 #endif
286   // 5. Separate memory graph for scalar replaceable allocations.
287 if (has_scalar_replaceable_candidates &&
288 C->AliasLevel() >= 3 && EliminateAllocations) {
289 // Now use the escape information to create unique types for
290 // scalar replaceable objects.
291 split_unique_types(alloc_worklist);
292 if (C->failing()) return false;
293 C->print_method(PHASE_AFTER_EA, 2);
295 #ifdef ASSERT
296 } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
297 tty->print("=== No allocations eliminated for ");
298 C->method()->print_short_name();
299 if(!EliminateAllocations) {
300 tty->print(" since EliminateAllocations is off ===");
301 } else if(!has_scalar_replaceable_candidates) {
302 tty->print(" since there are no scalar replaceable candidates ===");
303 } else if(C->AliasLevel() < 3) {
304 tty->print(" since AliasLevel < 3 ===");
305 }
306 tty->cr();
307 #endif
308 }
309 return has_non_escaping_obj;
310 }
312 // Utility function for nodes that load an object
313 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
314 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
315 // ThreadLocal has RawPtr type.
316 const Type* t = _igvn->type(n);
317 if (t->make_ptr() != NULL) {
318 Node* adr = n->in(MemNode::Address);
319 #ifdef ASSERT
320 if (!adr->is_AddP()) {
321 assert(_igvn->type(adr)->isa_rawptr(), "sanity");
322 } else {
323 assert((ptnode_adr(adr->_idx) == NULL ||
324 ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
325 }
326 #endif
327 add_local_var_and_edge(n, PointsToNode::NoEscape,
328 adr, delayed_worklist);
329 }
330 }
332 // Populate Connection Graph with PointsTo nodes and create simple
333 // connection graph edges.
334 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
335   assert(!_verify, "this method should not be called for verification");
336 PhaseGVN* igvn = _igvn;
337 uint n_idx = n->_idx;
338 PointsToNode* n_ptn = ptnode_adr(n_idx);
339 if (n_ptn != NULL)
340 return; // No need to redefine PointsTo node during first iteration.
342 if (n->is_Call()) {
343 // Arguments to allocation and locking don't escape.
344 if (n->is_AbstractLock()) {
345 // Put Lock and Unlock nodes on IGVN worklist to process them during
346 // first IGVN optimization when escape information is still available.
347 record_for_optimizer(n);
348 } else if (n->is_Allocate()) {
349 add_call_node(n->as_Call());
350 record_for_optimizer(n);
351 } else {
352 if (n->is_CallStaticJava()) {
353 const char* name = n->as_CallStaticJava()->_name;
354 if (name != NULL && strcmp(name, "uncommon_trap") == 0)
355 return; // Skip uncommon traps
356 }
357 // Don't mark as processed since call's arguments have to be processed.
358 delayed_worklist->push(n);
359 // Check if a call returns an object.
360 if ((n->as_Call()->returns_pointer() &&
361 n->as_Call()->proj_out(TypeFunc::Parms) != NULL) ||
362 (n->is_CallStaticJava() &&
363 n->as_CallStaticJava()->is_boxing_method())) {
364 add_call_node(n->as_Call());
365 }
366 }
367 return;
368 }
369 // Put this check here to process call arguments since some call nodes
370 // point to phantom_obj.
371 if (n_ptn == phantom_obj || n_ptn == null_obj)
372 return; // Skip predefined nodes.
374 int opcode = n->Opcode();
375 switch (opcode) {
376 case Op_AddP: {
377 Node* base = get_addp_base(n);
378 PointsToNode* ptn_base = ptnode_adr(base->_idx);
379 // Field nodes are created for all field types. They are used in
380 // adjust_scalar_replaceable_state() and split_unique_types().
381 // Note, non-oop fields will have only base edges in Connection
382 // Graph because such fields are not used for oop loads and stores.
383 int offset = address_offset(n, igvn);
384 add_field(n, PointsToNode::NoEscape, offset);
385 if (ptn_base == NULL) {
386 delayed_worklist->push(n); // Process it later.
387 } else {
388 n_ptn = ptnode_adr(n_idx);
389 add_base(n_ptn->as_Field(), ptn_base);
390 }
391 break;
392 }
393 case Op_CastX2P: {
394 map_ideal_node(n, phantom_obj);
395 break;
396 }
397 case Op_CastPP:
398 case Op_CheckCastPP:
399 case Op_EncodeP:
400 case Op_DecodeN:
401 case Op_EncodePKlass:
402 case Op_DecodeNKlass: {
403 add_local_var_and_edge(n, PointsToNode::NoEscape,
404 n->in(1), delayed_worklist);
405 break;
406 }
407 case Op_CMoveP: {
408 add_local_var(n, PointsToNode::NoEscape);
409       // Do not add edges during the first iteration because some nodes
410       // may not be defined yet.
411 delayed_worklist->push(n);
412 break;
413 }
414 case Op_ConP:
415 case Op_ConN:
416 case Op_ConNKlass: {
417 // assume all oop constants globally escape except for null
418 PointsToNode::EscapeState es;
419 const Type* t = igvn->type(n);
420 if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
421 es = PointsToNode::NoEscape;
422 } else {
423 es = PointsToNode::GlobalEscape;
424 }
425 add_java_object(n, es);
426 break;
427 }
428 case Op_CreateEx: {
429 // assume that all exception objects globally escape
430 map_ideal_node(n, phantom_obj);
431 break;
432 }
433 case Op_LoadKlass:
434 case Op_LoadNKlass: {
435 // Unknown class is loaded
436 map_ideal_node(n, phantom_obj);
437 break;
438 }
439 case Op_LoadP:
440 case Op_LoadN:
441 case Op_LoadPLocked: {
442 add_objload_to_connection_graph(n, delayed_worklist);
443 break;
444 }
445 case Op_Parm: {
446 map_ideal_node(n, phantom_obj);
447 break;
448 }
449 case Op_PartialSubtypeCheck: {
450       // Produces Null or notNull and is used only in CmpP, so
451 // phantom_obj could be used.
452 map_ideal_node(n, phantom_obj); // Result is unknown
453 break;
454 }
455 case Op_Phi: {
456 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
457 // ThreadLocal has RawPtr type.
458 const Type* t = n->as_Phi()->type();
459 if (t->make_ptr() != NULL) {
460 add_local_var(n, PointsToNode::NoEscape);
461         // Do not add edges during the first iteration because some nodes
462         // may not be defined yet.
463 delayed_worklist->push(n);
464 }
465 break;
466 }
467 case Op_Proj: {
468 // we are only interested in the oop result projection from a call
469 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
470 n->in(0)->as_Call()->returns_pointer()) {
471 add_local_var_and_edge(n, PointsToNode::NoEscape,
472 n->in(0), delayed_worklist);
473 }
474 break;
475 }
476 case Op_Rethrow: // Exception object escapes
477 case Op_Return: {
478 if (n->req() > TypeFunc::Parms &&
479 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
480 // Treat Return value as LocalVar with GlobalEscape escape state.
481 add_local_var_and_edge(n, PointsToNode::GlobalEscape,
482 n->in(TypeFunc::Parms), delayed_worklist);
483 }
484 break;
485 }
486 case Op_GetAndSetP:
487 case Op_GetAndSetN: {
488 add_objload_to_connection_graph(n, delayed_worklist);
489 // fallthrough
490 }
491 case Op_StoreP:
492 case Op_StoreN:
493 case Op_StoreNKlass:
494 case Op_StorePConditional:
495 case Op_CompareAndSwapP:
496 case Op_CompareAndSwapN: {
497 Node* adr = n->in(MemNode::Address);
498 const Type *adr_type = igvn->type(adr);
499 adr_type = adr_type->make_ptr();
500 if (adr_type == NULL) {
501 break; // skip dead nodes
502 }
503 if (adr_type->isa_oopptr() ||
504 (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) &&
505 (adr_type == TypeRawPtr::NOTNULL &&
506 adr->in(AddPNode::Address)->is_Proj() &&
507 adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
508 delayed_worklist->push(n); // Process it later.
509 #ifdef ASSERT
510 assert(adr->is_AddP(), "expecting an AddP");
511 if (adr_type == TypeRawPtr::NOTNULL) {
512 // Verify a raw address for a store captured by Initialize node.
513 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
514 assert(offs != Type::OffsetBot, "offset must be a constant");
515 }
516 #endif
517 } else {
518         // Ignore the copy of the displaced header to the BoxNode (OSR compilation).
519 if (adr->is_BoxLock())
520 break;
521 // Stored value escapes in unsafe access.
522 if ((opcode == Op_StoreP) && (adr_type == TypeRawPtr::BOTTOM)) {
523           // Pointer stores in G1 barriers look like unsafe accesses.
524           // Ignore such stores so that non-escaping allocations can
525           // still be scalar replaced.
526 if (UseG1GC && adr->is_AddP()) {
527 Node* base = get_addp_base(adr);
528 if (base->Opcode() == Op_LoadP &&
529 base->in(MemNode::Address)->is_AddP()) {
530 adr = base->in(MemNode::Address);
531 Node* tls = get_addp_base(adr);
532 if (tls->Opcode() == Op_ThreadLocal) {
533 int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
534 if (offs == in_bytes(JavaThread::satb_mark_queue_offset() +
535 PtrQueue::byte_offset_of_buf())) {
536                   break; // G1 pre barrier previous oop value store.
537 }
538 if (offs == in_bytes(JavaThread::dirty_card_queue_offset() +
539 PtrQueue::byte_offset_of_buf())) {
540                   break; // G1 post barrier card address store.
541 }
542 }
543 }
544 }
545 delayed_worklist->push(n); // Process unsafe access later.
546 break;
547 }
548 #ifdef ASSERT
549 n->dump(1);
550 assert(false, "not unsafe or G1 barrier raw StoreP");
551 #endif
552 }
553 break;
554 }
555 case Op_AryEq:
556 case Op_StrComp:
557 case Op_StrEquals:
558 case Op_StrIndexOf:
559 case Op_EncodeISOArray: {
560 add_local_var(n, PointsToNode::ArgEscape);
561 delayed_worklist->push(n); // Process it later.
562 break;
563 }
564 case Op_ThreadLocal: {
565 add_java_object(n, PointsToNode::ArgEscape);
566 break;
567 }
568 default:
569 ; // Do nothing for nodes not related to EA.
570 }
571 return;
572 }
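// Design note: graph construction is deliberately two-pass. The method above
// only registers PointsTo nodes and the edges whose endpoints certainly exist
// already; anything that may reference a not-yet-registered node (calls,
// CMoveP, Phi, stores, ...) is pushed on delayed_worklist and finished by
// add_final_edges() below, once every ideal node has its PointsTo node.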
574 #ifdef ASSERT
575 #define ELSE_FAIL(name) \
576   /* Should not be called for non-pointer types. */  \
577 n->dump(1); \
578 assert(false, name); \
579 break;
580 #else
581 #define ELSE_FAIL(name) \
582 break;
583 #endif
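// For example, in a debug build ELSE_FAIL("Op_LoadP") expands to
//   n->dump(1); assert(false, "Op_LoadP"); break;
// while in a product build it is simply "break;".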
585 // Add final simple edges to graph.
586 void ConnectionGraph::add_final_edges(Node *n) {
587 PointsToNode* n_ptn = ptnode_adr(n->_idx);
588 #ifdef ASSERT
589 if (_verify && n_ptn->is_JavaObject())
590 return; // This method does not change graph for JavaObject.
591 #endif
593 if (n->is_Call()) {
594 process_call_arguments(n->as_Call());
595 return;
596 }
597 assert(n->is_Store() || n->is_LoadStore() ||
598 (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
599 "node should be registered already");
600 int opcode = n->Opcode();
601 switch (opcode) {
602 case Op_AddP: {
603 Node* base = get_addp_base(n);
604 PointsToNode* ptn_base = ptnode_adr(base->_idx);
605 assert(ptn_base != NULL, "field's base should be registered");
606 add_base(n_ptn->as_Field(), ptn_base);
607 break;
608 }
609 case Op_CastPP:
610 case Op_CheckCastPP:
611 case Op_EncodeP:
612 case Op_DecodeN:
613 case Op_EncodePKlass:
614 case Op_DecodeNKlass: {
615 add_local_var_and_edge(n, PointsToNode::NoEscape,
616 n->in(1), NULL);
617 break;
618 }
619 case Op_CMoveP: {
620 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
621 Node* in = n->in(i);
622 if (in == NULL)
623 continue; // ignore NULL
624 Node* uncast_in = in->uncast();
625 if (uncast_in->is_top() || uncast_in == n)
626         continue;  // ignore top or inputs which go back to this node
627 PointsToNode* ptn = ptnode_adr(in->_idx);
628 assert(ptn != NULL, "node should be registered");
629 add_edge(n_ptn, ptn);
630 }
631 break;
632 }
633 case Op_LoadP:
634 case Op_LoadN:
635 case Op_LoadPLocked: {
636 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
637 // ThreadLocal has RawPtr type.
638 const Type* t = _igvn->type(n);
639 if (t->make_ptr() != NULL) {
640 Node* adr = n->in(MemNode::Address);
641 add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
642 break;
643 }
644 ELSE_FAIL("Op_LoadP");
645 }
646 case Op_Phi: {
647 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
648 // ThreadLocal has RawPtr type.
649 const Type* t = n->as_Phi()->type();
650 if (t->make_ptr() != NULL) {
651 for (uint i = 1; i < n->req(); i++) {
652 Node* in = n->in(i);
653 if (in == NULL)
654 continue; // ignore NULL
655 Node* uncast_in = in->uncast();
656 if (uncast_in->is_top() || uncast_in == n)
657 continue; // ignore top or inputs which go back this node
658 PointsToNode* ptn = ptnode_adr(in->_idx);
659 assert(ptn != NULL, "node should be registered");
660 add_edge(n_ptn, ptn);
661 }
662 break;
663 }
664 ELSE_FAIL("Op_Phi");
665 }
666 case Op_Proj: {
667 // we are only interested in the oop result projection from a call
668 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
669 n->in(0)->as_Call()->returns_pointer()) {
670 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
671 break;
672 }
673 ELSE_FAIL("Op_Proj");
674 }
675 case Op_Rethrow: // Exception object escapes
676 case Op_Return: {
677 if (n->req() > TypeFunc::Parms &&
678 _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
679 // Treat Return value as LocalVar with GlobalEscape escape state.
680 add_local_var_and_edge(n, PointsToNode::GlobalEscape,
681 n->in(TypeFunc::Parms), NULL);
682 break;
683 }
684 ELSE_FAIL("Op_Return");
685 }
686 case Op_StoreP:
687 case Op_StoreN:
688 case Op_StoreNKlass:
689 case Op_StorePConditional:
690 case Op_CompareAndSwapP:
691 case Op_CompareAndSwapN:
692 case Op_GetAndSetP:
693 case Op_GetAndSetN: {
694 Node* adr = n->in(MemNode::Address);
695 const Type *adr_type = _igvn->type(adr);
696 adr_type = adr_type->make_ptr();
697 #ifdef ASSERT
698 if (adr_type == NULL) {
699 n->dump(1);
700 assert(adr_type != NULL, "dead node should not be on list");
701 break;
702 }
703 #endif
704 if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN) {
705 add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
706 }
707 if (adr_type->isa_oopptr() ||
708 (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) &&
709 (adr_type == TypeRawPtr::NOTNULL &&
710 adr->in(AddPNode::Address)->is_Proj() &&
711 adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
712 // Point Address to Value
713 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
714 assert(adr_ptn != NULL &&
715 adr_ptn->as_Field()->is_oop(), "node should be registered");
716 Node *val = n->in(MemNode::ValueIn);
717 PointsToNode* ptn = ptnode_adr(val->_idx);
718 assert(ptn != NULL, "node should be registered");
719 add_edge(adr_ptn, ptn);
720 break;
721 } else if ((opcode == Op_StoreP) && (adr_type == TypeRawPtr::BOTTOM)) {
722 // Stored value escapes in unsafe access.
723 Node *val = n->in(MemNode::ValueIn);
724 PointsToNode* ptn = ptnode_adr(val->_idx);
725 assert(ptn != NULL, "node should be registered");
726 set_escape_state(ptn, PointsToNode::GlobalEscape);
727 // Add edge to object for unsafe access with offset.
728 PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
729 assert(adr_ptn != NULL, "node should be registered");
730 if (adr_ptn->is_Field()) {
731 assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
732 add_edge(adr_ptn, ptn);
733 }
734 break;
735 }
736 ELSE_FAIL("Op_StoreP");
737 }
738 case Op_AryEq:
739 case Op_StrComp:
740 case Op_StrEquals:
741 case Op_StrIndexOf:
742 case Op_EncodeISOArray: {
743       // char[] arrays passed to string intrinsics do not escape, but
744       // they are not scalar replaceable. Adjust the escape state for them.
745       // Start from the in(2) edge since in(1) is the memory edge.
746 for (uint i = 2; i < n->req(); i++) {
747 Node* adr = n->in(i);
748 const Type* at = _igvn->type(adr);
749 if (!adr->is_top() && at->isa_ptr()) {
750 assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
751 at->isa_ptr() != NULL, "expecting a pointer");
752 if (adr->is_AddP()) {
753 adr = get_addp_base(adr);
754 }
755 PointsToNode* ptn = ptnode_adr(adr->_idx);
756 assert(ptn != NULL, "node should be registered");
757 add_edge(n_ptn, ptn);
758 }
759 }
760 break;
761 }
762 default: {
763 // This method should be called only for EA specific nodes which may
764 // miss some edges when they were created.
765 #ifdef ASSERT
766 n->dump(1);
767 #endif
768 guarantee(false, "unknown node");
769 }
770 }
771 return;
772 }
774 void ConnectionGraph::add_call_node(CallNode* call) {
775 assert(call->returns_pointer(), "only for call which returns pointer");
776 uint call_idx = call->_idx;
777 if (call->is_Allocate()) {
778 Node* k = call->in(AllocateNode::KlassNode);
779 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
780 assert(kt != NULL, "TypeKlassPtr required.");
781 ciKlass* cik = kt->klass();
782 PointsToNode::EscapeState es = PointsToNode::NoEscape;
783 bool scalar_replaceable = true;
784 if (call->is_AllocateArray()) {
785 if (!cik->is_array_klass()) { // StressReflectiveCode
786 es = PointsToNode::GlobalEscape;
787 } else {
788 int length = call->in(AllocateNode::ALength)->find_int_con(-1);
789 if (length < 0 || length > EliminateAllocationArraySizeLimit) {
790 // Not scalar replaceable if the length is not constant or too big.
791 scalar_replaceable = false;
792 }
793 }
794 } else { // Allocate instance
795 if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
796 cik->is_subclass_of(_compile->env()->Reference_klass()) ||
797 !cik->is_instance_klass() || // StressReflectiveCode
798 cik->as_instance_klass()->has_finalizer()) {
799 es = PointsToNode::GlobalEscape;
800 }
801 }
802 add_java_object(call, es);
803 PointsToNode* ptn = ptnode_adr(call_idx);
804 if (!scalar_replaceable && ptn->scalar_replaceable()) {
805 ptn->set_scalar_replaceable(false);
806 }
807 } else if (call->is_CallStaticJava()) {
808 // Call nodes could be different types:
809 //
810 // 1. CallDynamicJavaNode (what happened during call is unknown):
811 //
812 // - mapped to GlobalEscape JavaObject node if oop is returned;
813 //
814 // - all oop arguments are escaping globally;
815 //
816 // 2. CallStaticJavaNode (execute bytecode analysis if possible):
817 //
818 // - the same as CallDynamicJavaNode if can't do bytecode analysis;
819 //
820 // - mapped to GlobalEscape JavaObject node if unknown oop is returned;
821 // - mapped to NoEscape JavaObject node if non-escaping object allocated
822 // during call is returned;
823     //   - mapped to ArgEscape LocalVar node pointing to the object arguments
824     //     which are returned and do not escape during the call;
825 //
826 // - oop arguments escaping status is defined by bytecode analysis;
827 //
828 // For a static call, we know exactly what method is being called.
829 // Use bytecode estimator to record whether the call's return value escapes.
830 ciMethod* meth = call->as_CallJava()->method();
831 if (meth == NULL) {
832 const char* name = call->as_CallStaticJava()->_name;
833 assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
834 // Returns a newly allocated unescaped object.
835 add_java_object(call, PointsToNode::NoEscape);
836 ptnode_adr(call_idx)->set_scalar_replaceable(false);
837 } else if (meth->is_boxing_method()) {
838 // Returns boxing object
839 PointsToNode::EscapeState es;
840 vmIntrinsics::ID intr = meth->intrinsic_id();
841 if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
842 // It does not escape if object is always allocated.
843 es = PointsToNode::NoEscape;
844 } else {
845 // It escapes globally if object could be loaded from cache.
846 es = PointsToNode::GlobalEscape;
847 }
848 add_java_object(call, es);
849 } else {
850 BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
851 call_analyzer->copy_dependencies(_compile->dependencies());
852 if (call_analyzer->is_return_allocated()) {
853 // Returns a newly allocated unescaped object, simply
854 // update dependency information.
855 // Mark it as NoEscape so that objects referenced by
856       // its fields will be marked as NoEscape at least.
857 add_java_object(call, PointsToNode::NoEscape);
858 ptnode_adr(call_idx)->set_scalar_replaceable(false);
859 } else {
860 // Determine whether any arguments are returned.
861 const TypeTuple* d = call->tf()->domain();
862 bool ret_arg = false;
863 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
864 if (d->field_at(i)->isa_ptr() != NULL &&
865 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
866 ret_arg = true;
867 break;
868 }
869 }
870 if (ret_arg) {
871 add_local_var(call, PointsToNode::ArgEscape);
872 } else {
873 // Returns unknown object.
874 map_ideal_node(call, phantom_obj);
875 }
876 }
877 }
878 } else {
879     // Another type of call; assume the worst case:
880     // the returned value is unknown and globally escapes.
881 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
882 map_ideal_node(call, phantom_obj);
883 }
884 }
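// Background for the boxing case above: Integer.valueOf() and the other
// integral valueOf() intrinsics may return a shared instance from an internal
// cache (e.g. for small values), so the returned box can be reached by other
// code and must be treated as GlobalEscape. Float.valueOf() and
// Double.valueOf() always allocate a fresh object, which is why only those
// two intrinsics get NoEscape.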
886 void ConnectionGraph::process_call_arguments(CallNode *call) {
887 bool is_arraycopy = false;
888 switch (call->Opcode()) {
889 #ifdef ASSERT
890 case Op_Allocate:
891 case Op_AllocateArray:
892 case Op_Lock:
893 case Op_Unlock:
894 assert(false, "should be done already");
895 break;
896 #endif
897 case Op_CallLeafNoFP:
898 is_arraycopy = (call->as_CallLeaf()->_name != NULL &&
899 strstr(call->as_CallLeaf()->_name, "arraycopy") != 0);
900 // fall through
901 case Op_CallLeaf: {
902       // Stub calls: objects do not escape, but they are not scalar replaceable.
903       // Adjust the escape state for outgoing arguments.
904 const TypeTuple * d = call->tf()->domain();
905 bool src_has_oops = false;
906 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
907 const Type* at = d->field_at(i);
908 Node *arg = call->in(i);
909 const Type *aat = _igvn->type(arg);
910 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
911 continue;
912 if (arg->is_AddP()) {
913 //
914 // The inline_native_clone() case when the arraycopy stub is called
915 // after the allocation before Initialize and CheckCastPP nodes.
916 // Or normal arraycopy for object arrays case.
917 //
918 // Set AddP's base (Allocate) as not scalar replaceable since
919 // pointer to the base (with offset) is passed as argument.
920 //
921 arg = get_addp_base(arg);
922 }
923 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
924 assert(arg_ptn != NULL, "should be registered");
925 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
926 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
927 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
928                  aat->isa_ptr() != NULL, "expecting a pointer");
929 bool arg_has_oops = aat->isa_oopptr() &&
930 (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
931 (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
932 if (i == TypeFunc::Parms) {
933 src_has_oops = arg_has_oops;
934 }
935 //
936           // src or dst could be j.l.Object when the other is a basic type array:
937 //
938 // arraycopy(char[],0,Object*,0,size);
939 // arraycopy(Object*,0,char[],0,size);
940 //
941 // Don't add edges in such cases.
942 //
943 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
944 arg_has_oops && (i > TypeFunc::Parms);
945 #ifdef ASSERT
946 if (!(is_arraycopy ||
947 (call->as_CallLeaf()->_name != NULL &&
948 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 ||
949 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ||
950 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
951 strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
952 strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
953 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
954 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
955 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
956 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
957 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
958 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
959 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
960 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
961 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
962 strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
963 strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0)
964 ))) {
965 call->dump();
966 fatal(err_msg_res("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name));
967 }
968 #endif
969 // Always process arraycopy's destination object since
970 // we need to add all possible edges to references in
971           // the source object.
972 if (arg_esc >= PointsToNode::ArgEscape &&
973 !arg_is_arraycopy_dest) {
974 continue;
975 }
976 set_escape_state(arg_ptn, PointsToNode::ArgEscape);
977 if (arg_is_arraycopy_dest) {
978 Node* src = call->in(TypeFunc::Parms);
979 if (src->is_AddP()) {
980 src = get_addp_base(src);
981 }
982 PointsToNode* src_ptn = ptnode_adr(src->_idx);
983 assert(src_ptn != NULL, "should be registered");
984 if (arg_ptn != src_ptn) {
985 // Special arraycopy edge:
986 // A destination object's field can't have the source object
987               // as base since the objects' escape states are not related.
988               // Only the escape state of the destination object's fields affects
989               // the escape state of the fields in the source object.
990 add_arraycopy(call, PointsToNode::ArgEscape, src_ptn, arg_ptn);
991 }
992 }
993 }
994 }
995 break;
996 }
997 case Op_CallStaticJava: {
998 // For a static call, we know exactly what method is being called.
999       // Use bytecode estimator to record the call's escape effects
1000 #ifdef ASSERT
1001 const char* name = call->as_CallStaticJava()->_name;
1002 assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1003 #endif
1004 ciMethod* meth = call->as_CallJava()->method();
1005 if ((meth != NULL) && meth->is_boxing_method()) {
1006 break; // Boxing methods do not modify any oops.
1007 }
1008       BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
1009 // fall-through if not a Java method or no analyzer information
1010 if (call_analyzer != NULL) {
1011 PointsToNode* call_ptn = ptnode_adr(call->_idx);
1012 const TypeTuple* d = call->tf()->domain();
1013 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1014 const Type* at = d->field_at(i);
1015 int k = i - TypeFunc::Parms;
1016 Node* arg = call->in(i);
1017 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1018 if (at->isa_ptr() != NULL &&
1019 call_analyzer->is_arg_returned(k)) {
1020 // The call returns arguments.
1021 if (call_ptn != NULL) { // Is call's result used?
1022 assert(call_ptn->is_LocalVar(), "node should be registered");
1023 assert(arg_ptn != NULL, "node should be registered");
1024 add_edge(call_ptn, arg_ptn);
1025 }
1026 }
1027 if (at->isa_oopptr() != NULL &&
1028 arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1029 if (!call_analyzer->is_arg_stack(k)) {
1030             // The argument globally escapes
1031 set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
1032 } else {
1033 set_escape_state(arg_ptn, PointsToNode::ArgEscape);
1034 if (!call_analyzer->is_arg_local(k)) {
1035 // The argument itself doesn't escape, but any fields might
1036 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
1037 }
1038 }
1039 }
1040 }
1041 if (call_ptn != NULL && call_ptn->is_LocalVar()) {
1042 // The call returns arguments.
1043 assert(call_ptn->edge_count() > 0, "sanity");
1044 if (!call_analyzer->is_return_local()) {
1045           // It also returns an unknown object.
1046 add_edge(call_ptn, phantom_obj);
1047 }
1048 }
1049 break;
1050 }
1051 }
1052 default: {
1053 // Fall-through here if not a Java method or no analyzer information
1054       // or some other type of call; assume the worst case: all arguments
1055 // globally escape.
1056 const TypeTuple* d = call->tf()->domain();
1057 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1058 const Type* at = d->field_at(i);
1059 if (at->isa_oopptr() != NULL) {
1060 Node* arg = call->in(i);
1061 if (arg->is_AddP()) {
1062 arg = get_addp_base(arg);
1063 }
1064 assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
1065 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
1066 }
1067 }
1068 }
1069 }
1070 }
1073 // Finish Graph construction.
1074 bool ConnectionGraph::complete_connection_graph(
1075 GrowableArray<PointsToNode*>& ptnodes_worklist,
1076 GrowableArray<JavaObjectNode*>& non_escaped_worklist,
1077 GrowableArray<JavaObjectNode*>& java_objects_worklist,
1078 GrowableArray<FieldNode*>& oop_fields_worklist) {
1079   // Normally only 1-3 passes are needed to build the Connection Graph, depending
1080   // on graph complexity. 8 passes were observed in jvm2008 compiler.compiler.
1081   // Set the limit to 20 to catch the situation when something went wrong and
1082   // bail out of Escape Analysis.
1083   // Also limit build time to 20 sec (60 in debug VM) via the EscapeAnalysisTimeout flag.
1084 #define CG_BUILD_ITER_LIMIT 20
1086 // Propagate GlobalEscape and ArgEscape escape states and check that
1087   // we still have non-escaping objects. The method pushes onto _worklist
1088   // the Field nodes which reference phantom_object.
1089 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
1090 return false; // Nothing to do.
1091 }
1092 // Now propagate references to all JavaObject nodes.
1093 int java_objects_length = java_objects_worklist.length();
1094 elapsedTimer time;
1095 bool timeout = false;
1096 int new_edges = 1;
1097 int iterations = 0;
1098 do {
1099 while ((new_edges > 0) &&
1100 (iterations++ < CG_BUILD_ITER_LIMIT)) {
1101 double start_time = time.seconds();
1102 time.start();
1103 new_edges = 0;
1104 // Propagate references to phantom_object for nodes pushed on _worklist
1105 // by find_non_escaped_objects() and find_field_value().
1106 new_edges += add_java_object_edges(phantom_obj, false);
1107 for (int next = 0; next < java_objects_length; ++next) {
1108 JavaObjectNode* ptn = java_objects_worklist.at(next);
1109 new_edges += add_java_object_edges(ptn, true);
1111 #define SAMPLE_SIZE 4
1112 if ((next % SAMPLE_SIZE) == 0) {
1113 // Each 4 iterations calculate how much time it will take
1115           // to complete graph construction.
1115 time.stop();
1116 // Poll for requests from shutdown mechanism to quiesce compiler
1117           // because Connection graph construction may take a long time.
1118 CompileBroker::maybe_block();
1119 double stop_time = time.seconds();
1120 double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
1121 double time_until_end = time_per_iter * (double)(java_objects_length - next);
1122 if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
1123 timeout = true;
1124 break; // Timeout
1125 }
1126 start_time = stop_time;
1127 time.start();
1128 }
1129 #undef SAMPLE_SIZE
1131 }
1132 if (timeout) break;
1133 if (new_edges > 0) {
1134 // Update escape states on each iteration if graph was updated.
1135 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
1136 return false; // Nothing to do.
1137 }
1138 }
1139 time.stop();
1140 if (time.seconds() >= EscapeAnalysisTimeout) {
1141 timeout = true;
1142 break;
1143 }
1144 }
1145 if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) {
1146 time.start();
1147 // Find fields which have unknown value.
1148 int fields_length = oop_fields_worklist.length();
1149 for (int next = 0; next < fields_length; next++) {
1150 FieldNode* field = oop_fields_worklist.at(next);
1151 if (field->edge_count() == 0) {
1152 new_edges += find_field_value(field);
1153           // This code may have added new edges to phantom_object.
1154           // Another cycle is needed to propagate references to phantom_object.
1155 }
1156 }
1157 time.stop();
1158 if (time.seconds() >= EscapeAnalysisTimeout) {
1159 timeout = true;
1160 break;
1161 }
1162 } else {
1163 new_edges = 0; // Bailout
1164 }
1165 } while (new_edges > 0);
1167 // Bailout if passed limits.
1168 if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {
1169 Compile* C = _compile;
1170 if (C->log() != NULL) {
1171 C->log()->begin_elem("connectionGraph_bailout reason='reached ");
1172 C->log()->text("%s", timeout ? "time" : "iterations");
1173 C->log()->end_elem(" limit'");
1174 }
1175 assert(ExitEscapeAnalysisOnTimeout, err_msg_res("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
1176 time.seconds(), iterations, nodes_size(), ptnodes_worklist.length()));
1177 // Possible infinite build_connection_graph loop,
1178     // bail out (no changes to the ideal graph were made).
1179 return false;
1180 }
1181 #ifdef ASSERT
1182 if (Verbose && PrintEscapeAnalysis) {
1183 tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d",
1184 iterations, nodes_size(), ptnodes_worklist.length());
1185 }
1186 #endif
1188 #undef CG_BUILD_ITER_LIMIT
1190 // Find fields initialized by NULL for non-escaping Allocations.
1191 int non_escaped_length = non_escaped_worklist.length();
1192 for (int next = 0; next < non_escaped_length; next++) {
1193 JavaObjectNode* ptn = non_escaped_worklist.at(next);
1194 PointsToNode::EscapeState es = ptn->escape_state();
1195 assert(es <= PointsToNode::ArgEscape, "sanity");
1196 if (es == PointsToNode::NoEscape) {
1197 if (find_init_values(ptn, null_obj, _igvn) > 0) {
1198 // Adding references to NULL object does not change escape states
1199 // since it does not escape. Also no fields are added to NULL object.
1200 add_java_object_edges(null_obj, false);
1201 }
1202 }
1203 Node* n = ptn->ideal_node();
1204 if (n->is_Allocate()) {
1205 // The object allocated by this Allocate node will never be
1206       // seen by another thread. Mark it so that when it is
1207 // expanded no MemBarStoreStore is added.
1208 InitializeNode* ini = n->as_Allocate()->initialization();
1209 if (ini != NULL)
1210 ini->set_does_not_escape();
1211 }
1212 }
1213 return true; // Finished graph construction.
1214 }
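// Illustrative sketch (not HotSpot code) of the bailout structure above:
// iterate to a fixed point, but give up on an iteration cap or a wall-clock
// budget, whichever is hit first. do_one_pass() is a hypothetical helper
// standing in for one propagation pass.
//
//   int iterations = 0, new_edges = 1;
//   elapsedTimer t;
//   while (new_edges > 0 && iterations++ < CG_BUILD_ITER_LIMIT) {
//     t.start();
//     new_edges = do_one_pass();
//     t.stop();
//     if (t.seconds() >= EscapeAnalysisTimeout) return false;  // timed out
//   }
//   return iterations < CG_BUILD_ITER_LIMIT;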
1216 // Propagate GlobalEscape and ArgEscape escape states to all nodes
1217 // and check that we still have non-escaping java objects.
1218 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
1219 GrowableArray<JavaObjectNode*>& non_escaped_worklist) {
1220 GrowableArray<PointsToNode*> escape_worklist;
1221 // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
1222 int ptnodes_length = ptnodes_worklist.length();
1223 for (int next = 0; next < ptnodes_length; ++next) {
1224 PointsToNode* ptn = ptnodes_worklist.at(next);
1225 if (ptn->escape_state() >= PointsToNode::ArgEscape ||
1226 ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
1227 escape_worklist.push(ptn);
1228 }
1229 }
1230 // Set escape states to referenced nodes (edges list).
1231 while (escape_worklist.length() > 0) {
1232 PointsToNode* ptn = escape_worklist.pop();
1233 PointsToNode::EscapeState es = ptn->escape_state();
1234 PointsToNode::EscapeState field_es = ptn->fields_escape_state();
1235 if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
1236 es >= PointsToNode::ArgEscape) {
1237 // GlobalEscape or ArgEscape state of field means it has unknown value.
1238 if (add_edge(ptn, phantom_obj)) {
1239 // New edge was added
1240 add_field_uses_to_worklist(ptn->as_Field());
1241 }
1242 }
1243 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1244 PointsToNode* e = i.get();
1245 if (e->is_Arraycopy()) {
1246 assert(ptn->arraycopy_dst(), "sanity");
1247 // Propagate only fields escape state through arraycopy edge.
1248 if (e->fields_escape_state() < field_es) {
1249 set_fields_escape_state(e, field_es);
1250 escape_worklist.push(e);
1251 }
1252 } else if (es >= field_es) {
1253 // fields_escape_state is also set to 'es' if it is less than 'es'.
1254 if (e->escape_state() < es) {
1255 set_escape_state(e, es);
1256 escape_worklist.push(e);
1257 }
1258 } else {
1259 // Propagate field escape state.
1260 bool es_changed = false;
1261 if (e->fields_escape_state() < field_es) {
1262 set_fields_escape_state(e, field_es);
1263 es_changed = true;
1264 }
1265 if ((e->escape_state() < field_es) &&
1266 e->is_Field() && ptn->is_JavaObject() &&
1267 e->as_Field()->is_oop()) {
1268         // Change escape state of referenced fields.
1269 set_escape_state(e, field_es);
1270         es_changed = true;
1271 } else if (e->escape_state() < es) {
1272 set_escape_state(e, es);
1273         es_changed = true;
1274 }
1275 if (es_changed) {
1276 escape_worklist.push(e);
1277 }
1278 }
1279 }
1280 }
1281 // Remove escaped objects from non_escaped list.
1282 for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) {
1283 JavaObjectNode* ptn = non_escaped_worklist.at(next);
1284 if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
1285 non_escaped_worklist.delete_at(next);
1286 }
1287 if (ptn->escape_state() == PointsToNode::NoEscape) {
1288 // Find fields in non-escaped allocations which have unknown value.
1289 find_init_values(ptn, phantom_obj, NULL);
1290 }
1291 }
1292 return (non_escaped_worklist.length() > 0);
1293 }
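// Illustrative sketch (not HotSpot code) of the propagation loop above:
// monotone worklist propagation of a lattice state along edges. A node is
// re-pushed only when its state strictly grows, so each node is pushed at
// most (number of lattice levels) times, which guarantees termination.
//
//   struct ToyNode { int es; std::vector<ToyNode*> edges; };
//   void toy_propagate(std::vector<ToyNode*>& worklist) {
//     while (!worklist.empty()) {
//       ToyNode* n = worklist.back(); worklist.pop_back();
//       for (ToyNode* e : n->edges) {
//         if (e->es < n->es) {  // strictly monotone update
//           e->es = n->es;
//           worklist.push_back(e);
//         }
//       }
//     }
//   }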
1295 // Add all references to JavaObject node by walking over all uses.
1296 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
1297 int new_edges = 0;
1298 if (populate_worklist) {
1299 // Populate _worklist by uses of jobj's uses.
1300 for (UseIterator i(jobj); i.has_next(); i.next()) {
1301 PointsToNode* use = i.get();
1302 if (use->is_Arraycopy())
1303 continue;
1304 add_uses_to_worklist(use);
1305 if (use->is_Field() && use->as_Field()->is_oop()) {
1306 // Put on worklist all field's uses (loads) and
1307 // related field nodes (same base and offset).
1308 add_field_uses_to_worklist(use->as_Field());
1309 }
1310 }
1311 }
1312 for (int l = 0; l < _worklist.length(); l++) {
1313 PointsToNode* use = _worklist.at(l);
1314 if (PointsToNode::is_base_use(use)) {
1315 // Add reference from jobj to field and from field to jobj (field's base).
1316 use = PointsToNode::get_use_node(use)->as_Field();
1317 if (add_base(use->as_Field(), jobj)) {
1318 new_edges++;
1319 }
1320 continue;
1321 }
1322 assert(!use->is_JavaObject(), "sanity");
1323 if (use->is_Arraycopy()) {
1324 if (jobj == null_obj) // NULL object does not have field edges
1325 continue;
1326       // Add edge from Arraycopy node to the arraycopy's source java object
1327 if (add_edge(use, jobj)) {
1328 jobj->set_arraycopy_src();
1329 new_edges++;
1330 }
1331 // and stop here.
1332 continue;
1333 }
1334 if (!add_edge(use, jobj))
1335 continue; // No new edge added, there was such edge already.
1336 new_edges++;
1337 if (use->is_LocalVar()) {
1338 add_uses_to_worklist(use);
1339 if (use->arraycopy_dst()) {
1340 for (EdgeIterator i(use); i.has_next(); i.next()) {
1341 PointsToNode* e = i.get();
1342 if (e->is_Arraycopy()) {
1343 if (jobj == null_obj) // NULL object does not have field edges
1344 continue;
1345 // Add edge from arraycopy's destination java object to Arraycopy node.
1346 if (add_edge(jobj, e)) {
1347 new_edges++;
1348 jobj->set_arraycopy_dst();
1349 }
1350 }
1351 }
1352 }
1353 } else {
1354       // A new edge was added from a field to the value stored in it.
1355 // Put on worklist all field's uses (loads) and
1356 // related field nodes (same base and offset).
1357 add_field_uses_to_worklist(use->as_Field());
1358 }
1359 }
1360 _worklist.clear();
1361 _in_worklist.Reset();
1362 return new_edges;
1363 }
1365 // Put on worklist all related field nodes.
1366 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
1367 assert(field->is_oop(), "sanity");
1368 int offset = field->offset();
1369 add_uses_to_worklist(field);
1370 // Loop over all bases of this field and push on worklist Field nodes
1371 // with the same offset and base (since they may reference the same field).
1372 for (BaseIterator i(field); i.has_next(); i.next()) {
1373 PointsToNode* base = i.get();
1374 add_fields_to_worklist(field, base);
1375     // Check if the base was the source object of an arraycopy and go over the
1376     // arraycopy's destination objects, since values stored to a field of the source
1377     // object are accessible through uses (loads) of fields of the destination objects.
1378 if (base->arraycopy_src()) {
1379 for (UseIterator j(base); j.has_next(); j.next()) {
1380 PointsToNode* arycp = j.get();
1381 if (arycp->is_Arraycopy()) {
1382 for (UseIterator k(arycp); k.has_next(); k.next()) {
1383 PointsToNode* abase = k.get();
1384 if (abase->arraycopy_dst() && abase != base) {
1385               // Look for the same arraycopy reference.
1386 add_fields_to_worklist(field, abase);
1387 }
1388 }
1389 }
1390 }
1391 }
1392 }
1393 }
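// Example of why arraycopy destinations are walked above: after
// System.arraycopy(src, 0, dst, 0, n) a value stored into an element (Field)
// of src is observable through loads from the matching element of dst, so the
// destination's Field nodes must be revisited whenever the source's change.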
1395 // Put on worklist all related field nodes.
1396 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
1397 int offset = field->offset();
1398 if (base->is_LocalVar()) {
1399 for (UseIterator j(base); j.has_next(); j.next()) {
1400 PointsToNode* f = j.get();
1401 if (PointsToNode::is_base_use(f)) { // Field
1402 f = PointsToNode::get_use_node(f);
1403 if (f == field || !f->as_Field()->is_oop())
1404 continue;
1405 int offs = f->as_Field()->offset();
1406 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
1407 add_to_worklist(f);
1408 }
1409 }
1410 }
1411 } else {
1412 assert(base->is_JavaObject(), "sanity");
1413 if (// Skip phantom_object since it is only used to indicate that
1414 // this field's content globally escapes.
1415 (base != phantom_obj) &&
1416 // NULL object node does not have fields.
1417 (base != null_obj)) {
1418 for (EdgeIterator i(base); i.has_next(); i.next()) {
1419 PointsToNode* f = i.get();
1420 // Skip arraycopy edge since store to destination object field
1421 // does not update value in source object field.
1422 if (f->is_Arraycopy()) {
1423 assert(base->arraycopy_dst(), "sanity");
1424 continue;
1425 }
1426 if (f == field || !f->as_Field()->is_oop())
1427 continue;
1428 int offs = f->as_Field()->offset();
1429 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
1430 add_to_worklist(f);
1431 }
1432 }
1433 }
1434 }
1435 }
1437 // Find fields which have unknown value.
1438 int ConnectionGraph::find_field_value(FieldNode* field) {
1439 // Escaped fields should have init value already.
1440 assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
1441 int new_edges = 0;
1442 for (BaseIterator i(field); i.has_next(); i.next()) {
1443 PointsToNode* base = i.get();
1444 if (base->is_JavaObject()) {
1445 // Skip Allocate's fields which will be processed later.
1446 if (base->ideal_node()->is_Allocate())
1447 return 0;
1448 assert(base == null_obj, "only NULL ptr base expected here");
1449 }
1450 }
1451 if (add_edge(field, phantom_obj)) {
1452 // New edge was added
1453 new_edges++;
1454 add_field_uses_to_worklist(field);
1455 }
1456 return new_edges;
1457 }
1459 // Find fields initializing values for allocations.
1460 int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) {
1461   assert(pta->escape_state() == PointsToNode::NoEscape, "Non-escaped Allocate nodes only");
1462 int new_edges = 0;
1463 Node* alloc = pta->ideal_node();
1464 if (init_val == phantom_obj) {
1465     // Do nothing for Allocate nodes since their field values are "known".
1466 if (alloc->is_Allocate())
1467 return 0;
1468 assert(alloc->as_CallStaticJava(), "sanity");
1469 #ifdef ASSERT
1470 if (alloc->as_CallStaticJava()->method() == NULL) {
1471 const char* name = alloc->as_CallStaticJava()->_name;
1472 assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
1473 }
1474 #endif
1475     // Non-escaped allocations returned from Java or runtime calls have
1476 // unknown values in fields.
1477 for (EdgeIterator i(pta); i.has_next(); i.next()) {
1478 PointsToNode* field = i.get();
1479 if (field->is_Field() && field->as_Field()->is_oop()) {
1480 if (add_edge(field, phantom_obj)) {
1481 // New edge was added
1482 new_edges++;
1483 add_field_uses_to_worklist(field->as_Field());
1484 }
1485 }
1486 }
1487 return new_edges;
1488 }
1489 assert(init_val == null_obj, "sanity");
1490   // Do nothing for Call nodes since their field values are unknown.
1491 if (!alloc->is_Allocate())
1492 return 0;
1494 InitializeNode* ini = alloc->as_Allocate()->initialization();
1495 Compile* C = _compile;
1496 bool visited_bottom_offset = false;
1497 GrowableArray<int> offsets_worklist;
1499 // Check if an oop field's initializing value is recorded and add
1500   // a corresponding NULL reference if the field's value is not recorded.
1501   // The Connection Graph does not record a default initialization by NULL
1502   // captured by the Initialize node.
1503 //
1504 for (EdgeIterator i(pta); i.has_next(); i.next()) {
1505 PointsToNode* field = i.get(); // Field (AddP)
1506 if (!field->is_Field() || !field->as_Field()->is_oop())
1507 continue; // Not oop field
1508 int offset = field->as_Field()->offset();
1509 if (offset == Type::OffsetBot) {
1510 if (!visited_bottom_offset) {
1511         // OffsetBot is used to reference an array's element;
1512         // always add a reference to NULL to all Field nodes since we don't
1513         // know which element is referenced.
1514 if (add_edge(field, null_obj)) {
1515 // New edge was added
1516 new_edges++;
1517 add_field_uses_to_worklist(field->as_Field());
1518 visited_bottom_offset = true;
1519 }
1520 }
1521 } else {
1522 // Check only oop fields.
1523 const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
1524 if (adr_type->isa_rawptr()) {
1525 #ifdef ASSERT
1526         // Raw pointers are used for initializing stores, so skip this field
1527         // since it should be recorded already.
1528 Node* base = get_addp_base(field->ideal_node());
1529 assert(adr_type->isa_rawptr() && base->is_Proj() &&
1530 (base->in(0) == alloc),"unexpected pointer type");
1531 #endif
1532 continue;
1533 }
1534 if (!offsets_worklist.contains(offset)) {
1535 offsets_worklist.append(offset);
1536 Node* value = NULL;
1537 if (ini != NULL) {
1538 // StoreP::memory_type() == T_ADDRESS
1539 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
1540 Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
1541 // Make sure initializing store has the same type as this AddP.
1542           // This AddP may reference a non-existing field because it is on a
1543           // dead branch of a bimorphic call which has not been eliminated yet.
1544 if (store != NULL && store->is_Store() &&
1545 store->as_Store()->memory_type() == ft) {
1546 value = store->in(MemNode::ValueIn);
1547 #ifdef ASSERT
1548 if (VerifyConnectionGraph) {
1549 // Verify that AddP already points to all objects the value points to.
1550 PointsToNode* val = ptnode_adr(value->_idx);
1551 assert((val != NULL), "should be processed already");
1552 PointsToNode* missed_obj = NULL;
1553 if (val->is_JavaObject()) {
1554 if (!field->points_to(val->as_JavaObject())) {
1555 missed_obj = val;
1556 }
1557 } else {
1558 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
1559 tty->print_cr("----------init store has invalid value -----");
1560 store->dump();
1561 val->dump();
1562 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
1563 }
1564 for (EdgeIterator j(val); j.has_next(); j.next()) {
1565 PointsToNode* obj = j.get();
1566 if (obj->is_JavaObject()) {
1567 if (!field->points_to(obj->as_JavaObject())) {
1568 missed_obj = obj;
1569 break;
1570 }
1571 }
1572 }
1573 }
1574 if (missed_obj != NULL) {
1575 tty->print_cr("----------field---------------------------------");
1576 field->dump();
1577 tty->print_cr("----------missed referernce to object-----------");
1578 missed_obj->dump();
1579 tty->print_cr("----------object referernced by init store -----");
1580 store->dump();
1581 val->dump();
1582 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
1583 }
1584 }
1585 #endif
1586 } else {
1587 // There could be initializing stores which follow the allocation.
1588 // For example, a volatile field store is not collected
1589 // by the Initialize node.
1590 //
1591 // Need to check for dependent loads to separate such stores from
1592 // stores which follow loads. For now, add the initial value NULL so
1593 // that the pointer compare optimization works correctly.
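// For example (illustrative Java source):
//   class A { volatile Object f; }
//   A a = new A();
//   a.f = x;   // the volatile store is emitted after the Initialize node,
//              // so find_captured_store() does not see it.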
1594 }
1595 }
1596 if (value == NULL) {
1597 // A field's initializing value was not recorded. Add NULL.
1598 if (add_edge(field, null_obj)) {
1599 // New edge was added
1600 new_edges++;
1601 add_field_uses_to_worklist(field->as_Field());
1602 }
1603 }
1604 }
1605 }
1606 }
1607 return new_edges;
1608 }
1610 // Adjust scalar_replaceable state after Connection Graph is built.
1611 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
1612 // Search for non-escaping objects which are not scalar replaceable
1613 // and mark them to propagate the state to referenced objects.
1615 // 1. An object is not scalar replaceable if the field into which it is
1616 //    stored has an unknown offset (stored into an unknown element of an array).
1617 //
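//    For example (illustrative Java source):
//      Point p = new Point();
//      pts[i] = p;   // 'i' is not a compile-time constant => unknown offset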
1618 for (UseIterator i(jobj); i.has_next(); i.next()) {
1619 PointsToNode* use = i.get();
1620 assert(!use->is_Arraycopy(), "sanity");
1621 if (use->is_Field()) {
1622 FieldNode* field = use->as_Field();
1623 assert(field->is_oop() && field->scalar_replaceable() &&
1624 field->fields_escape_state() == PointsToNode::NoEscape, "sanity");
1625 if (field->offset() == Type::OffsetBot) {
1626 jobj->set_scalar_replaceable(false);
1627 return;
1628 }
1629 // 2. An object is not scalar replaceable if the field into which it is
1630 //    stored has multiple bases, one of which is null.
1631 if (field->base_count() > 1) {
1632 for (BaseIterator i(field); i.has_next(); i.next()) {
1633 PointsToNode* base = i.get();
1634 if (base == null_obj) {
1635 jobj->set_scalar_replaceable(false);
1636 return;
1637 }
1638 }
1639 }
1640 }
1641 assert(use->is_Field() || use->is_LocalVar(), "sanity");
1642 // 3. An object is not scalar replaceable if it is merged with other objects.
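//    For example (illustrative Java source):
//      Point p = flag ? new Point() : q;   // two objects merged at a Phi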
1643 for (EdgeIterator j(use); j.has_next(); j.next()) {
1644 PointsToNode* ptn = j.get();
1645 if (ptn->is_JavaObject() && ptn != jobj) {
1646 // Mark all objects.
1647 jobj->set_scalar_replaceable(false);
1648 ptn->set_scalar_replaceable(false);
1649 }
1650 }
1651 if (!jobj->scalar_replaceable()) {
1652 return;
1653 }
1654 }
1656 for (EdgeIterator j(jobj); j.has_next(); j.next()) {
1657 // A non-escaping object node should point only to field nodes.
1658 FieldNode* field = j.get()->as_Field();
1659 int offset = field->offset();
1661 // 4. An object is not scalar replaceable if it has a field with unknown
1662 //    offset (an array's element is accessed in a loop).
1663 if (offset == Type::OffsetBot) {
1664 jobj->set_scalar_replaceable(false);
1665 return;
1666 }
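//    For example (illustrative Java source):
//      Point[] a = new Point[10];
//      for (int i = 0; i < 10; i++) a[i] = p;   // varying element offset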
1667 // 5. Currently an object is not scalar replaceable if a LoadStore node
1668 //    accesses its field since the field value is unknown after it.
1669 //
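//    For example, an atomic update of the object's field
//    (illustrative Java source, sun.misc.Unsafe assumed):
//      U.compareAndSwapObject(p, F_OFFSET, expected, x);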
1670 Node* n = field->ideal_node();
1671 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1672 if (n->fast_out(i)->is_LoadStore()) {
1673 jobj->set_scalar_replaceable(false);
1674 return;
1675 }
1676 }
1678 // 6. Or the address may point to more than one object. This may produce
1679 //    a false positive result (set not scalar replaceable)
1680 //    since the flow-insensitive escape analysis can't separate
1681 //    the case when stores overwrite the field's value from the case
1682 //    when stores happen on different control branches.
1683 //
1684 // Note: it will disable scalar replacement in some cases:
1685 //
1686 // Point p[] = new Point[1];
1687 // p[0] = new Point(); // Will not be scalar replaced
1688 //
1689 // but it will save us from incorrect optimizations in the following cases:
1690 //
1691 // Point p[] = new Point[1];
1692 // if ( x ) p[0] = new Point(); // Will not be scalar replaced
1693 //
1694 if (field->base_count() > 1) {
1695 for (BaseIterator i(field); i.has_next(); i.next()) {
1696 PointsToNode* base = i.get();
1697 // Don't take into account LocalVar nodes which
1698 // may point to only one object which should also be
1699 // this field's base by now.
1700 if (base->is_JavaObject() && base != jobj) {
1701 // Mark all bases.
1702 jobj->set_scalar_replaceable(false);
1703 base->set_scalar_replaceable(false);
1704 }
1705 }
1706 }
1707 }
1708 }
1710 #ifdef ASSERT
1711 void ConnectionGraph::verify_connection_graph(
1712 GrowableArray<PointsToNode*>& ptnodes_worklist,
1713 GrowableArray<JavaObjectNode*>& non_escaped_worklist,
1714 GrowableArray<JavaObjectNode*>& java_objects_worklist,
1715 GrowableArray<Node*>& addp_worklist) {
1716 // Verify that graph is complete - no new edges could be added.
1717 int java_objects_length = java_objects_worklist.length();
1718 int non_escaped_length = non_escaped_worklist.length();
1719 int new_edges = 0;
1720 for (int next = 0; next < java_objects_length; ++next) {
1721 JavaObjectNode* ptn = java_objects_worklist.at(next);
1722 new_edges += add_java_object_edges(ptn, true);
1723 }
1724 assert(new_edges == 0, "graph was not complete");
1725 // Verify that escape state is final.
1726 int length = non_escaped_worklist.length();
1727 find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist);
1728 assert((non_escaped_length == non_escaped_worklist.length()) &&
1729 (non_escaped_length == length) &&
1730 (_worklist.length() == 0), "escape state was not final");
1732 // Verify fields information.
1733 int addp_length = addp_worklist.length();
1734 for (int next = 0; next < addp_length; ++next ) {
1735 Node* n = addp_worklist.at(next);
1736 FieldNode* field = ptnode_adr(n->_idx)->as_Field();
1737 if (field->is_oop()) {
1738 // Verify that field has all bases
1739 Node* base = get_addp_base(n);
1740 PointsToNode* ptn = ptnode_adr(base->_idx);
1741 if (ptn->is_JavaObject()) {
1742 assert(field->has_base(ptn->as_JavaObject()), "sanity");
1743 } else {
1744 assert(ptn->is_LocalVar(), "sanity");
1745 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
1746 PointsToNode* e = i.get();
1747 if (e->is_JavaObject()) {
1748 assert(field->has_base(e->as_JavaObject()), "sanity");
1749 }
1750 }
1751 }
1752 // Verify that all fields have initializing values.
1753 if (field->edge_count() == 0) {
1754 tty->print_cr("----------field does not have references----------");
1755 field->dump();
1756 for (BaseIterator i(field); i.has_next(); i.next()) {
1757 PointsToNode* base = i.get();
1758 tty->print_cr("----------field has next base---------------------");
1759 base->dump();
1760 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {
1761 tty->print_cr("----------base has fields-------------------------");
1762 for (EdgeIterator j(base); j.has_next(); j.next()) {
1763 j.get()->dump();
1764 }
1765 tty->print_cr("----------base has references---------------------");
1766 for (UseIterator j(base); j.has_next(); j.next()) {
1767 j.get()->dump();
1768 }
1769 }
1770 }
1771 for (UseIterator i(field); i.has_next(); i.next()) {
1772 i.get()->dump();
1773 }
1774 assert(field->edge_count() > 0, "sanity");
1775 }
1776 }
1777 }
1778 }
1779 #endif
1781 // Optimize ideal graph.
1782 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
1783 GrowableArray<Node*>& storestore_worklist) {
1784 Compile* C = _compile;
1785 PhaseIterGVN* igvn = _igvn;
1786 if (EliminateLocks) {
1787 // Mark locks before changing ideal graph.
1788 int cnt = C->macro_count();
1789 for (int i = 0; i < cnt; i++) {
1790 Node *n = C->macro_node(i);
1791 if (n->is_AbstractLock()) { // Lock and Unlock nodes
1792 AbstractLockNode* alock = n->as_AbstractLock();
1793 if (!alock->is_non_esc_obj()) {
1794 if (not_global_escape(alock->obj_node())) {
1795 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
1796 // The lock could be marked eliminated by the lock coarsening
1797 // code during the first IGVN before EA. Replace the coarsened flag
1798 // so that all associated locks/unlocks are eliminated.
1799 #ifdef ASSERT
1800 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
1801 #endif
1802 alock->set_non_esc_obj();
1803 }
1804 }
1805 }
1806 }
1807 }
1809 if (OptimizePtrCompare) {
1810 // Add ConI(#CC_GT) and ConI(#CC_EQ).
1811 _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
1812 _pcmp_eq = igvn->makecon(TypeInt::CC_EQ);
1813 // Optimize objects compare.
1814 while (ptr_cmp_worklist.length() != 0) {
1815 Node *n = ptr_cmp_worklist.pop();
1816 Node *res = optimize_ptr_compare(n);
1817 if (res != NULL) {
1818 #ifndef PRODUCT
1819 if (PrintOptimizePtrCompare) {
1820 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
1821 if (Verbose) {
1822 n->dump(1);
1823 }
1824 }
1825 #endif
1826 igvn->replace_node(n, res);
1827 }
1828 }
1829 // cleanup
1830 if (_pcmp_neq->outcnt() == 0)
1831 igvn->hash_delete(_pcmp_neq);
1832 if (_pcmp_eq->outcnt() == 0)
1833 igvn->hash_delete(_pcmp_eq);
1834 }
1836 // For MemBarStoreStore nodes added in library_call.cpp, check
1837 // escape status of associated AllocateNode and optimize out
1838 // MemBarStoreStore node if the allocated object never escapes.
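// (Illustrative: such barriers are emitted, for example, for the result of
// an Object.clone() intrinsic; see library_call.cpp.)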
1839 while (storestore_worklist.length() != 0) {
1840 Node *n = storestore_worklist.pop();
1841 MemBarStoreStoreNode *storestore = n->as_MemBarStoreStore();
1842 Node *alloc = storestore->in(MemBarNode::Precedent)->in(0);
1843 assert (alloc->is_Allocate(), "storestore should point to AllocateNode");
1844 if (not_global_escape(alloc)) {
1845 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
1846 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
1847 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
1848 igvn->register_new_node_with_optimizer(mb);
1849 igvn->replace_node(storestore, mb);
1850 }
1851 }
1852 }
1854 // Optimize objects compare.
1855 Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
1856 assert(OptimizePtrCompare, "sanity");
1857 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
1858 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
1859 JavaObjectNode* jobj1 = unique_java_object(n->in(1));
1860 JavaObjectNode* jobj2 = unique_java_object(n->in(2));
1861 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
1862 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
1864 // Check simple cases first.
1865 if (jobj1 != NULL) {
1866 if (jobj1->escape_state() == PointsToNode::NoEscape) {
1867 if (jobj1 == jobj2) {
1868 // Comparing the same non-escaping object.
1869 return _pcmp_eq;
1870 }
1871 Node* obj = jobj1->ideal_node();
1872 // Comparing a non-escaping allocation.
1873 if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
1874 !ptn2->points_to(jobj1)) {
1875 return _pcmp_neq; // This includes nullness check.
1876 }
1877 }
1878 }
1879 if (jobj2 != NULL) {
1880 if (jobj2->escape_state() == PointsToNode::NoEscape) {
1881 Node* obj = jobj2->ideal_node();
1882 // Comparing a non-escaping allocation.
1883 if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
1884 !ptn1->points_to(jobj2)) {
1885 return _pcmp_neq; // This includes nullness check.
1886 }
1887 }
1888 }
1889 if (jobj1 != NULL && jobj1 != phantom_obj &&
1890 jobj2 != NULL && jobj2 != phantom_obj &&
1891 jobj1->ideal_node()->is_Con() &&
1892 jobj2->ideal_node()->is_Con()) {
1893 // Klass or String constants compare. Need to be careful with
1894 // compressed pointers - compare types of ConN and ConP instead of nodes.
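// For example (illustrative Java source; reference comparison of two
// string constants is not folded by javac, so both inputs are ConP/ConN):
//   static final String A = "abc", B = "abd";
//   ... if (A == B) ...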
1895 const Type* t1 = jobj1->ideal_node()->get_ptr_type();
1896 const Type* t2 = jobj2->ideal_node()->get_ptr_type();
1897 if (t1->make_ptr() == t2->make_ptr()) {
1898 return _pcmp_eq;
1899 } else {
1900 return _pcmp_neq;
1901 }
1902 }
1903 if (ptn1->meet(ptn2)) {
1904 return NULL; // Sets are not disjoint
1905 }
1907 // Sets are disjoint.
1908 bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
1909 bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
1910 bool set1_has_null_ptr = ptn1->points_to(null_obj);
1911 bool set2_has_null_ptr = ptn2->points_to(null_obj);
1912 if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
1913     (set2_has_unknown_ptr && set1_has_null_ptr)) {
1914 // Check nullness of unknown object.
1915 return NULL;
1916 }
1918 // Disjointness by itself is not sufficient since
1919 // alias analysis is not complete for escaped objects.
1920 // Disjoint sets are definitely unrelated only when
1921 // at least one set contains only non-escaping allocations.
1922 if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
1923 if (ptn1->non_escaping_allocation()) {
1924 return _pcmp_neq;
1925 }
1926 }
1927 if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
1928 if (ptn2->non_escaping_allocation()) {
1929 return _pcmp_neq;
1930 }
1931 }
1932 return NULL;
1933 }
1935 // Connection Graph construction functions.
1937 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
1938 PointsToNode* ptadr = _nodes.at(n->_idx);
1939 if (ptadr != NULL) {
1940 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
1941 return;
1942 }
1943 Compile* C = _compile;
1944 ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
1945 _nodes.at_put(n->_idx, ptadr);
1946 }
1948 void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
1949 PointsToNode* ptadr = _nodes.at(n->_idx);
1950 if (ptadr != NULL) {
1951 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
1952 return;
1953 }
1954 Compile* C = _compile;
1955 ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
1956 _nodes.at_put(n->_idx, ptadr);
1957 }
1959 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
1960 PointsToNode* ptadr = _nodes.at(n->_idx);
1961 if (ptadr != NULL) {
1962 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
1963 return;
1964 }
1965 bool unsafe = false;
1966 bool is_oop = is_oop_field(n, offset, &unsafe);
1967 if (unsafe) {
1968 es = PointsToNode::GlobalEscape;
1969 }
1970 Compile* C = _compile;
1971 FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
1972 _nodes.at_put(n->_idx, field);
1973 }
1975 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
1976 PointsToNode* src, PointsToNode* dst) {
1977 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
1978 assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
1979 PointsToNode* ptadr = _nodes.at(n->_idx);
1980 if (ptadr != NULL) {
1981 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
1982 return;
1983 }
1984 Compile* C = _compile;
1985 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
1986 _nodes.at_put(n->_idx, ptadr);
1987 // Add edge from arraycopy node to source object.
1988 (void)add_edge(ptadr, src);
1989 src->set_arraycopy_src();
1990 // Add edge from destination object to arraycopy node.
1991 (void)add_edge(dst, ptadr);
1992 dst->set_arraycopy_dst();
1993 }
1995 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
1996 const Type* adr_type = n->as_AddP()->bottom_type();
1997 BasicType bt = T_INT;
1998 if (offset == Type::OffsetBot) {
1999 // Check only oop fields.
2000 if (!adr_type->isa_aryptr() ||
2001 (adr_type->isa_aryptr()->klass() == NULL) ||
2002 adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
2003 // OffsetBot is used to reference array's element. Ignore first AddP.
2004 if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
2005 bt = T_OBJECT;
2006 }
2007 }
2008 } else if (offset != oopDesc::klass_offset_in_bytes()) {
2009 if (adr_type->isa_instptr()) {
2010 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
2011 if (field != NULL) {
2012 bt = field->layout_type();
2013 } else {
2014 // Check for unsafe oop field access
2015 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2016 int opcode = n->fast_out(i)->Opcode();
2017 if (opcode == Op_StoreP || opcode == Op_LoadP ||
2018 opcode == Op_StoreN || opcode == Op_LoadN) {
2019 bt = T_OBJECT;
2020 (*unsafe) = true;
2021 break;
2022 }
2023 }
2024 }
2025 } else if (adr_type->isa_aryptr()) {
2026 if (offset == arrayOopDesc::length_offset_in_bytes()) {
2027 // Ignore array length load.
2028 } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
2029 // Ignore first AddP.
2030 } else {
2031 const Type* elemtype = adr_type->isa_aryptr()->elem();
2032 bt = elemtype->array_element_basic_type();
2033 }
2034 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
2035 // Allocation initialization, ThreadLocal field access, unsafe access
2036 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2037 int opcode = n->fast_out(i)->Opcode();
2038 if (opcode == Op_StoreP || opcode == Op_LoadP ||
2039 opcode == Op_StoreN || opcode == Op_LoadN) {
2040 bt = T_OBJECT;
2041 break;
2042 }
2043 }
2044 }
2045 }
2046 return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
2047 }
2049 // Returns the unique java object pointed to, or NULL.
2050 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
2051 assert(!_collecting, "should not be called on a constructed graph");
2052 // If the node was created after the escape computation we can't answer.
2053 uint idx = n->_idx;
2054 if (idx >= nodes_size()) {
2055 return NULL;
2056 }
2057 PointsToNode* ptn = ptnode_adr(idx);
2058 if (ptn->is_JavaObject()) {
2059 return ptn->as_JavaObject();
2060 }
2061 assert(ptn->is_LocalVar(), "sanity");
2062 // Check all java objects it points to.
2063 JavaObjectNode* jobj = NULL;
2064 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2065 PointsToNode* e = i.get();
2066 if (e->is_JavaObject()) {
2067 if (jobj == NULL) {
2068 jobj = e->as_JavaObject();
2069 } else if (jobj != e) {
2070 return NULL;
2071 }
2072 }
2073 }
2074 return jobj;
2075 }
2077 // Return true if this node points only to non-escaping allocations.
2078 bool PointsToNode::non_escaping_allocation() {
2079 if (is_JavaObject()) {
2080 Node* n = ideal_node();
2081 if (n->is_Allocate() || n->is_CallStaticJava()) {
2082 return (escape_state() == PointsToNode::NoEscape);
2083 } else {
2084 return false;
2085 }
2086 }
2087 assert(is_LocalVar(), "sanity");
2088 // Check all java objects it points to.
2089 for (EdgeIterator i(this); i.has_next(); i.next()) {
2090 PointsToNode* e = i.get();
2091 if (e->is_JavaObject()) {
2092 Node* n = e->ideal_node();
2093 if ((e->escape_state() != PointsToNode::NoEscape) ||
2094 !(n->is_Allocate() || n->is_CallStaticJava())) {
2095 return false;
2096 }
2097 }
2098 }
2099 return true;
2100 }
2102 // Return true if we know the node does not escape globally.
2103 bool ConnectionGraph::not_global_escape(Node *n) {
2104 assert(!_collecting, "should not call during graph construction");
2105 // If the node was created after the escape computation we can't answer.
2106 uint idx = n->_idx;
2107 if (idx >= nodes_size()) {
2108 return false;
2109 }
2110 PointsToNode* ptn = ptnode_adr(idx);
2111 PointsToNode::EscapeState es = ptn->escape_state();
2112 // If we have already computed a value, return it.
2113 if (es >= PointsToNode::GlobalEscape)
2114 return false;
2115 if (ptn->is_JavaObject()) {
2116 return true; // (es < PointsToNode::GlobalEscape);
2117 }
2118 assert(ptn->is_LocalVar(), "sanity");
2119 // Check all java objects it points to.
2120 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2121 if (i.get()->escape_state() >= PointsToNode::GlobalEscape)
2122 return false;
2123 }
2124 return true;
2125 }
2128 // Helper functions
2130 // Return true if this node is the specified java object or points to it.
2131 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
2132 if (is_JavaObject()) {
2133 return (this == ptn);
2134 }
2135 assert(is_LocalVar() || is_Field(), "sanity");
2136 for (EdgeIterator i(this); i.has_next(); i.next()) {
2137 if (i.get() == ptn)
2138 return true;
2139 }
2140 return false;
2141 }
2143 // Return true if one node points to another.
2144 bool PointsToNode::meet(PointsToNode* ptn) {
2145 if (this == ptn) {
2146 return true;
2147 } else if (ptn->is_JavaObject()) {
2148 return this->points_to(ptn->as_JavaObject());
2149 } else if (this->is_JavaObject()) {
2150 return ptn->points_to(this->as_JavaObject());
2151 }
2152 assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
2153 int ptn_count = ptn->edge_count();
2154 for (EdgeIterator i(this); i.has_next(); i.next()) {
2155 PointsToNode* this_e = i.get();
2156 for (int j = 0; j < ptn_count; j++) {
2157 if (this_e == ptn->edge(j))
2158 return true;
2159 }
2160 }
2161 return false;
2162 }
2164 #ifdef ASSERT
2165 // Return true if bases point to this java object.
2166 bool FieldNode::has_base(JavaObjectNode* jobj) const {
2167 for (BaseIterator i(this); i.has_next(); i.next()) {
2168 if (i.get() == jobj)
2169 return true;
2170 }
2171 return false;
2172 }
2173 #endif
2175 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
2176 const Type *adr_type = phase->type(adr);
2177 if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
2178 adr->in(AddPNode::Address)->is_Proj() &&
2179 adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
2180 // We are computing a raw address for a store captured by an Initialize
2181 // node; compute an appropriate address type. AddP cases #3 and #5 (see below).
2182 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2183 assert(offs != Type::OffsetBot ||
2184 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
2185 "offset must be a constant or it is initialization of array");
2186 return offs;
2187 }
2188 const TypePtr *t_ptr = adr_type->isa_ptr();
2189 assert(t_ptr != NULL, "must be a pointer type");
2190 return t_ptr->offset();
2191 }
2193 Node* ConnectionGraph::get_addp_base(Node *addp) {
2194 assert(addp->is_AddP(), "must be AddP");
2195 //
2196 // AddP cases for Base and Address inputs:
2197 // case #1. Direct object's field reference:
2198 // Allocate
2199 // |
2200 // Proj #5 ( oop result )
2201 // |
2202 // CheckCastPP (cast to instance type)
2203 // | |
2204 // AddP ( base == address )
2205 //
2206 // case #2. Indirect object's field reference:
2207 // Phi
2208 // |
2209 // CastPP (cast to instance type)
2210 // | |
2211 // AddP ( base == address )
2212 //
2213 // case #3. Raw object's field reference for Initialize node:
2214 // Allocate
2215 // |
2216 // Proj #5 ( oop result )
2217 // top |
2218 // \ |
2219 // AddP ( base == top )
2220 //
2221 // case #4. Array's element reference:
2222 // {CheckCastPP | CastPP}
2223 // | | |
2224 // | AddP ( array's element offset )
2225 // | |
2226 // AddP ( array's offset )
2227 //
2228 // case #5. Raw object's field reference for arraycopy stub call:
2229 // The inline_native_clone() case when the arraycopy stub is called
2230 // after the allocation before Initialize and CheckCastPP nodes.
2231 // Allocate
2232 // |
2233 // Proj #5 ( oop result )
2234 // | |
2235 // AddP ( base == address )
2236 //
2237 // case #6. Constant Pool, ThreadLocal, CastX2P or
2238 // Raw object's field reference:
2239 // {ConP, ThreadLocal, CastX2P, raw Load}
2240 // top |
2241 // \ |
2242 // AddP ( base == top )
2243 //
2244 // case #7. Klass's field reference.
2245 // LoadKlass
2246 // | |
2247 // AddP ( base == address )
2248 //
2249 // case #8. narrow Klass's field reference.
2250 // LoadNKlass
2251 // |
2252 // DecodeN
2253 // | |
2254 // AddP ( base == address )
2255 //
2256 Node *base = addp->in(AddPNode::Base);
2257 if (base->uncast()->is_top()) { // The AddP case #3 and #6.
2258 base = addp->in(AddPNode::Address);
2259 while (base->is_AddP()) {
2260 // Case #6 (unsafe access) may have several chained AddP nodes.
2261 assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2262 base = base->in(AddPNode::Address);
2263 }
2264 Node* uncast_base = base->uncast();
2265 int opcode = uncast_base->Opcode();
2266 assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
2267 opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
2268 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
2269 (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
2270 }
2271 return base;
2272 }
2274 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2275 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2276 Node* addp2 = addp->raw_out(0);
2277 if (addp->outcnt() == 1 && addp2->is_AddP() &&
2278 addp2->in(AddPNode::Base) == n &&
2279 addp2->in(AddPNode::Address) == addp) {
2280 assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2281 //
2282 // Find the array's offset to push it on the worklist first and
2283 // as a result process the array's element offset first (pushed second)
2284 // to avoid a CastPP for the array's offset.
2285 // Otherwise the inserted CastPP (LocalVar) will point to what
2286 // the AddP (Field) points to, which would be wrong since
2287 // the algorithm expects the CastPP to point to the same node
2288 // as AddP's base CheckCastPP (LocalVar).
2289 //
2290 // ArrayAllocation
2291 // |
2292 // CheckCastPP
2293 // |
2294 // memProj (from ArrayAllocation CheckCastPP)
2295 // | ||
2296 // | || Int (element index)
2297 // | || | ConI (log(element size))
2298 // | || | /
2299 // | || LShift
2300 // | || /
2301 // | AddP (array's element offset)
2302 // | |
2303 // | | ConI (array's offset: #12(32-bits) or #24(64-bits))
2304 // | / /
2305 // AddP (array's offset)
2306 // |
2307 // Load/Store (memory operation on array's element)
2308 //
2309 return addp2;
2310 }
2311 return NULL;
2312 }
2314 //
2315 // Adjust the type and inputs of an AddP which computes the
2316 // address of a field of an instance
2317 //
2318 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2319 PhaseGVN* igvn = _igvn;
2320 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2321 assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
2322 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2323 if (t == NULL) {
2324 // We are computing a raw address for a store captured by an Initialize
2325 // node; compute an appropriate address type (cases #3 and #5).
2326 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2327 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2328 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2329 assert(offs != Type::OffsetBot, "offset must be a constant");
2330 t = base_t->add_offset(offs)->is_oopptr();
2331 }
2332 int inst_id = base_t->instance_id();
2333 assert(!t->is_known_instance() || t->instance_id() == inst_id,
2334 "old type must be non-instance or match new type");
2336 // The type 't' could be a subclass of 'base_t'.
2337 // As a result t->offset() could be larger than base_t's size and it will
2338 // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
2339 // constructor verifies correctness of the offset.
2340 //
2341 // It could happen on a subclass's branch (from type profiling
2342 // inlining) which was not eliminated during parsing since the exactness
2343 // of the allocation type was not propagated to the subclass type check.
2344 //
2345 // Or the type 't' could be unrelated to 'base_t' at all.
2346 // It could happen when the CHA type is different from the MDO type on a dead
2347 // path (for example, from an instanceof check) which is not collapsed during parsing.
2348 //
2349 // Do nothing for such AddP node and don't process its users since
2350 // this code branch will go away.
2351 //
2352 if (!t->is_known_instance() &&
2353 !base_t->klass()->is_subtype_of(t->klass())) {
2354 return false; // bail out
2355 }
2356 const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
2357 // Do NOT remove the next line: ensure a new alias index is allocated
2358 // for the instance type. Note: C++ will not remove it since the call
2359 // has side effect.
2360 int alias_idx = _compile->get_alias_index(tinst);
2361 igvn->set_type(addp, tinst);
2362 // record the allocation in the node map
2363 set_map(addp, get_map(base->_idx));
2364 // Set addp's Base and Address to 'base'.
2365 Node *abase = addp->in(AddPNode::Base);
2366 Node *adr = addp->in(AddPNode::Address);
2367 if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2368 adr->in(0)->_idx == (uint)inst_id) {
2369 // Skip AddP cases #3 and #5.
2370 } else {
2371 assert(!abase->is_top(), "sanity"); // AddP case #3
2372 if (abase != base) {
2373 igvn->hash_delete(addp);
2374 addp->set_req(AddPNode::Base, base);
2375 if (abase == adr) {
2376 addp->set_req(AddPNode::Address, base);
2377 } else {
2378 // AddP case #4 (adr is array's element offset AddP node)
2379 #ifdef ASSERT
2380 const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
2381 assert(adr->is_AddP() && atype != NULL &&
2382 atype->instance_id() == inst_id, "array's element offset should be processed first");
2383 #endif
2384 }
2385 igvn->hash_insert(addp);
2386 }
2387 }
2388 // Put on IGVN worklist since at least addp's type was changed above.
2389 record_for_optimizer(addp);
2390 return true;
2391 }
2393 //
2394 // Create a new version of orig_phi if necessary. Returns either the newly
2395 // created phi or an existing phi. Sets new_created to indicate whether a new
2396 // phi was created. Cache the last newly created phi in the node map.
2397 //
2398 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
2399 Compile *C = _compile;
2400 PhaseGVN* igvn = _igvn;
2401 new_created = false;
2402 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
2403 // nothing to do if orig_phi is bottom memory or matches alias_idx
2404 if (phi_alias_idx == alias_idx) {
2405 return orig_phi;
2406 }
2407 // Have we recently created a Phi for this alias index?
2408 PhiNode *result = get_map_phi(orig_phi->_idx);
2409 if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
2410 return result;
2411 }
2412 // Previous check may fail when the same wide memory Phi was split into Phis
2413 // for different memory slices. Search all Phis for this region.
2414 if (result != NULL) {
2415 Node* region = orig_phi->in(0);
2416 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2417 Node* phi = region->fast_out(i);
2418 if (phi->is_Phi() &&
2419 C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
2420 assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
2421 return phi->as_Phi();
2422 }
2423 }
2424 }
2425 if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
2426 if (C->do_escape_analysis() == true && !C->failing()) {
2427 // Retry compilation without escape analysis.
2428 // If this is the first failure, the sentinel string will "stick"
2429 // to the Compile object, and the C2Compiler will see it and retry.
2430 C->record_failure(C2Compiler::retry_no_escape_analysis());
2431 }
2432 return NULL;
2433 }
2434 orig_phi_worklist.append_if_missing(orig_phi);
2435 const TypePtr *atype = C->get_adr_type(alias_idx);
2436 result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
2437 C->copy_node_notes_to(result, orig_phi);
2438 igvn->set_type(result, result->bottom_type());
2439 record_for_optimizer(result);
2440 set_map(orig_phi, result);
2441 new_created = true;
2442 return result;
2443 }
2445 //
2446 // Return a new version of Memory Phi "orig_phi" with the inputs having the
2447 // specified alias index.
2448 //
2449 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {
2450 assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
2451 Compile *C = _compile;
2452 PhaseGVN* igvn = _igvn;
2453 bool new_phi_created;
2454 PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
2455 if (!new_phi_created) {
2456 return result;
2457 }
2458 GrowableArray<PhiNode *> phi_list;
2459 GrowableArray<uint> cur_input;
2460 PhiNode *phi = orig_phi;
2461 uint idx = 1;
2462 bool finished = false;
2463 while (!finished) {
2464 while (idx < phi->req()) {
2465 Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
2466 if (mem != NULL && mem->is_Phi()) {
2467 PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
2468 if (new_phi_created) {
2469 // found a phi for which we created a new split; push the current one on the
2470 // worklist and begin processing the new one
2471 phi_list.push(phi);
2472 cur_input.push(idx);
2473 phi = mem->as_Phi();
2474 result = newphi;
2475 idx = 1;
2476 continue;
2477 } else {
2478 mem = newphi;
2479 }
2480 }
2481 if (C->failing()) {
2482 return NULL;
2483 }
2484 result->set_req(idx++, mem);
2485 }
2486 #ifdef ASSERT
2487 // verify that the new Phi has an input for each input of the original
2488 assert( phi->req() == result->req(), "must have same number of inputs.");
2489 assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
2490 #endif
2491 // Check if all new phi's inputs have specified alias index.
2492 // Otherwise use old phi.
2493 for (uint i = 1; i < phi->req(); i++) {
2494 Node* in = result->in(i);
2495 assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");
2496 }
2497 // we have finished processing a Phi, see if there are any more to do
2498 finished = (phi_list.length() == 0 );
2499 if (!finished) {
2500 phi = phi_list.pop();
2501 idx = cur_input.pop();
2502 PhiNode *prev_result = get_map_phi(phi->_idx);
2503 prev_result->set_req(idx++, result);
2504 result = prev_result;
2505 }
2506 }
2507 return result;
2508 }
2510 //
2511 // The next methods are derived from methods in MemNode.
2512 //
2513 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
2514 Node *mem = mmem;
2515 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
2516 // means an array I have not precisely typed yet. Do not do any
2517 // alias stuff with it any time soon.
2518 if (toop->base() != Type::AnyPtr &&
2519 !(toop->klass() != NULL &&
2520 toop->klass()->is_java_lang_Object() &&
2521 toop->offset() == Type::OffsetBot)) {
2522 mem = mmem->memory_at(alias_idx);
2523 // Update the input if it is an improvement over what we have now.
2524 }
2525 return mem;
2526 }
2528 //
2529 // Move memory users to their memory slices.
2530 //
2531 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) {
2532 Compile* C = _compile;
2533 PhaseGVN* igvn = _igvn;
2534 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
2535 assert(tp != NULL, "ptr type");
2536 int alias_idx = C->get_alias_index(tp);
2537 int general_idx = C->get_general_index(alias_idx);
2539 // Move users first
2540 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2541 Node* use = n->fast_out(i);
2542 if (use->is_MergeMem()) {
2543 MergeMemNode* mmem = use->as_MergeMem();
2544 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
2545 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
2546 continue; // Nothing to do
2547 }
2548 // Replace previous general reference to mem node.
2549 uint orig_uniq = C->unique();
2550 Node* m = find_inst_mem(n, general_idx, orig_phis);
2551 assert(orig_uniq == C->unique(), "no new nodes");
2552 mmem->set_memory_at(general_idx, m);
2553 --imax;
2554 --i;
2555 } else if (use->is_MemBar()) {
2556 assert(!use->is_Initialize(), "initializing stores should not be moved");
2557 if (use->req() > MemBarNode::Precedent &&
2558 use->in(MemBarNode::Precedent) == n) {
2559 // Don't move related membars.
2560 record_for_optimizer(use);
2561 continue;
2562 }
2563 tp = use->as_MemBar()->adr_type()->isa_ptr();
2564 if ((tp != NULL && C->get_alias_index(tp) == alias_idx) ||
2565     (alias_idx == general_idx)) {
2566 continue; // Nothing to do
2567 }
2568 // Move to general memory slice.
2569 uint orig_uniq = C->unique();
2570 Node* m = find_inst_mem(n, general_idx, orig_phis);
2571 assert(orig_uniq == C->unique(), "no new nodes");
2572 igvn->hash_delete(use);
2573 imax -= use->replace_edge(n, m);
2574 igvn->hash_insert(use);
2575 record_for_optimizer(use);
2576 --i;
2577 #ifdef ASSERT
2578 } else if (use->is_Mem()) {
2579 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
2580 // Don't move related cardmark.
2581 continue;
2582 }
2583 // Memory nodes should have new memory input.
2584 tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
2585 assert(tp != NULL, "ptr type");
2586 int idx = C->get_alias_index(tp);
2587 assert(get_map(use->_idx) != NULL || idx == alias_idx,
2588 "Following memory nodes should have new memory input or be on the same memory slice");
2589 } else if (use->is_Phi()) {
2590 // Phi nodes should be split and moved already.
2591 tp = use->as_Phi()->adr_type()->isa_ptr();
2592 assert(tp != NULL, "ptr type");
2593 int idx = C->get_alias_index(tp);
2594 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
2595 } else {
2596 use->dump();
2597 assert(false, "should not be here");
2598 #endif
2599 }
2600 }
2601 }
2603 //
2604 // Search the memory chain of "orig_mem" to find a MemNode whose address
2605 // has the specified alias index.
2606 //
2607 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
2608 if (orig_mem == NULL)
2609 return orig_mem;
2610 Compile* C = _compile;
2611 PhaseGVN* igvn = _igvn;
2612 const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
2613 bool is_instance = (toop != NULL) && toop->is_known_instance();
2614 Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
2615 Node *prev = NULL;
2616 Node *result = orig_mem;
2617 while (prev != result) {
2618 prev = result;
2619 if (result == start_mem)
2620 break; // hit one of our sentinels
2621 if (result->is_Mem()) {
2622 const Type *at = igvn->type(result->in(MemNode::Address));
2623 if (at == Type::TOP)
2624 break; // Dead
2625 assert (at->isa_ptr() != NULL, "pointer type required.");
2626 int idx = C->get_alias_index(at->is_ptr());
2627 if (idx == alias_idx)
2628 break; // Found
2629 if (!is_instance && (at->isa_oopptr() == NULL ||
2630 !at->is_oopptr()->is_known_instance())) {
2631 break; // Do not skip store to general memory slice.
2632 }
2633 result = result->in(MemNode::Memory);
2634 }
2635 if (!is_instance)
2636 continue; // don't search further for non-instance types
2637 // skip over a call which does not affect this memory slice
2638 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
2639 Node *proj_in = result->in(0);
2640 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
2641 break; // hit one of our sentinels
2642 } else if (proj_in->is_Call()) {
2643 CallNode *call = proj_in->as_Call();
2644 if (!call->may_modify(toop, igvn)) {
2645 result = call->in(TypeFunc::Memory);
2646 }
2647 } else if (proj_in->is_Initialize()) {
2648 AllocateNode* alloc = proj_in->as_Initialize()->allocation();
2649 // Stop if this is the initialization for the object instance which
2650 // contains this memory slice, otherwise skip over it.
2651 if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) {
2652 result = proj_in->in(TypeFunc::Memory);
2653 }
2654 } else if (proj_in->is_MemBar()) {
2655 result = proj_in->in(TypeFunc::Memory);
2656 }
2657 } else if (result->is_MergeMem()) {
2658 MergeMemNode *mmem = result->as_MergeMem();
2659 result = step_through_mergemem(mmem, alias_idx, toop);
2660 if (result == mmem->base_memory()) {
2661 // Didn't find instance memory, search through general slice recursively.
2662 result = mmem->memory_at(C->get_general_index(alias_idx));
2663 result = find_inst_mem(result, alias_idx, orig_phis);
2664 if (C->failing()) {
2665 return NULL;
2666 }
2667 mmem->set_memory_at(alias_idx, result);
2668 }
2669 } else if (result->is_Phi() &&
2670 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
2671 Node *un = result->as_Phi()->unique_input(igvn);
2672 if (un != NULL) {
2673 orig_phis.append_if_missing(result->as_Phi());
2674 result = un;
2675 } else {
2676 break;
2677 }
2678 } else if (result->is_ClearArray()) {
2679 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
2680 // Cannot bypass the initialization of the instance
2681 // we are looking for.
2682 break;
2683 }
2684 // Otherwise skip it (the call updated 'result' value).
2685 } else if (result->Opcode() == Op_SCMemProj) {
2686 Node* mem = result->in(0);
2687 Node* adr = NULL;
2688 if (mem->is_LoadStore()) {
2689 adr = mem->in(MemNode::Address);
2690 } else {
2691 assert(mem->Opcode() == Op_EncodeISOArray, "sanity");
2692 adr = mem->in(3); // Memory edge corresponds to destination array
2693 }
2694 const Type *at = igvn->type(adr);
2695 if (at != Type::TOP) {
2696 assert (at->isa_ptr() != NULL, "pointer type required.");
2697 int idx = C->get_alias_index(at->is_ptr());
2698 assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node access its field");
2699 break;
2700 }
2701 result = mem->in(MemNode::Memory);
2702 }
2703 }
2704 if (result->is_Phi()) {
2705 PhiNode *mphi = result->as_Phi();
2706 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
2707 const TypePtr *t = mphi->adr_type();
2708 if (!is_instance) {
2709 // Push all non-instance Phis on the orig_phis worklist to update inputs
2710 // during Phase 4 if needed.
2711 orig_phis.append_if_missing(mphi);
2712 } else if (C->get_alias_index(t) != alias_idx) {
2713 // Create a new Phi with the specified alias index type.
2714 result = split_memory_phi(mphi, alias_idx, orig_phis);
2715 }
2716 }
2717 // The result is a MemNode, PhiNode, or InitializeNode.
2718 return result;
2719 }
2721 //
2722 // Convert the types of unescaped object to instance types where possible,
2723 // propagate the new type information through the graph, and update memory
2724 // edges and MergeMem inputs to reflect the new type.
2725 //
2726 // We start with allocations (and calls which may be allocations) on alloc_worklist.
2727 // The processing is done in 4 phases:
2728 //
2729 // Phase 1: Process possible allocations from alloc_worklist. Create instance
2730 // types for the CheckCastPP for allocations where possible.
2731 // Propagate the new types through users as follows:
2732 // casts and Phi: push users on alloc_worklist
2733 // AddP: cast Base and Address inputs to the instance type
2734 // push any AddP users on alloc_worklist and push any memnode
2735 // users onto memnode_worklist.
2736 // Phase 2: Process MemNodes from memnode_worklist. Compute a new address type and
2737 // search the Memory chain for a store with the appropriate
2738 // address type. If a Phi is found, create a new version with
2739 // the appropriate memory slices from each of the Phi inputs.
2740 // For stores, process the users as follows:
2741 // MemNode: push on memnode_worklist
2742 // MergeMem: push on mergemem_worklist
2743 // Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice,
2744 // moving the first node encountered of each instance type to the
2745 // input corresponding to its alias index (the appropriate memory slice).
2747 // Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes.
2748 //
2749 // In the following example, the CheckCastPP nodes are the casts of the
2750 // allocation results, and the allocation of node 29 is unescaped and
2751 // eligible to be an instance type.
2752 //
2753 // We start with:
2754 //
2755 // 7 Parm #memory
2756 // 10 ConI "12"
2757 // 19 CheckCastPP "Foo"
2758 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
2759 // 29 CheckCastPP "Foo"
2760 // 30 AddP _ 29 29 10 Foo+12 alias_index=4
2761 //
2762 // 40 StoreP 25 7 20 ... alias_index=4
2763 // 50 StoreP 35 40 30 ... alias_index=4
2764 // 60 StoreP 45 50 20 ... alias_index=4
2765 // 70 LoadP _ 60 30 ... alias_index=4
2766 // 80 Phi 75 50 60 Memory alias_index=4
2767 // 90 LoadP _ 80 30 ... alias_index=4
2768 // 100 LoadP _ 80 20 ... alias_index=4
2769 //
2770 //
2771 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
2772 // and creating a new alias index for node 30. This gives:
2773 //
2774 // 7 Parm #memory
2775 // 10 ConI "12"
2776 // 19 CheckCastPP "Foo"
2777 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
2778 // 29 CheckCastPP "Foo" iid=24
2779 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
2780 //
2781 // 40 StoreP 25 7 20 ... alias_index=4
2782 // 50 StoreP 35 40 30 ... alias_index=6
2783 // 60 StoreP 45 50 20 ... alias_index=4
2784 // 70 LoadP _ 60 30 ... alias_index=6
2785 // 80 Phi 75 50 60 Memory alias_index=4
2786 // 90 LoadP _ 80 30 ... alias_index=6
2787 // 100 LoadP _ 80 20 ... alias_index=4
2788 //
2789 // In phase 2, new memory inputs are computed for the loads and stores,
2790 // and a new version of the phi is created. In phase 4, the inputs to
2791 // node 80 are updated and then the memory nodes are updated with the
2792 // values computed in phase 2. This results in:
2793 //
2794 // 7 Parm #memory
2795 // 10 ConI "12"
2796 // 19 CheckCastPP "Foo"
2797 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
2798 // 29 CheckCastPP "Foo" iid=24
2799 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
2800 //
2801 // 40 StoreP 25 7 20 ... alias_index=4
2802 // 50 StoreP 35 7 30 ... alias_index=6
2803 // 60 StoreP 45 40 20 ... alias_index=4
2804 // 70 LoadP _ 50 30 ... alias_index=6
2805 // 80 Phi 75 40 60 Memory alias_index=4
2806 // 120 Phi 75 50 50 Memory alias_index=6
2807 // 90 LoadP _ 120 30 ... alias_index=6
2808 // 100 LoadP _ 80 20 ... alias_index=4
2809 //
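// (Illustratively, a graph of this shape can arise from Java source like
//    Foo x = ...; Foo y = new Foo();          // nodes 19 and 29
//    x.f = a; y.f = b; x.f = c; ... y.f ...   // stores/loads at Foo+12
//  where Foo.f is a hypothetical oop field at offset 12.)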
2810 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) {
2811 GrowableArray<Node *> memnode_worklist;
2812 GrowableArray<PhiNode *> orig_phis;
2813 PhaseIterGVN *igvn = _igvn;
2814 uint new_index_start = (uint) _compile->num_alias_types();
2815 Arena* arena = Thread::current()->resource_area();
2816 VectorSet visited(arena);
2817 ideal_nodes.clear(); // Reset for use with set_map/get_map.
2818 uint unique_old = _compile->unique();
2820 // Phase 1: Process possible allocations from alloc_worklist.
2821 // Create instance types for the CheckCastPP for allocations where possible.
2822 //
2823 // (Note: don't forget to change the order of the second AddP node on
2824 // the alloc_worklist if the order of the worklist processing is changed,
2825 // see the comment in find_second_addp().)
2826 //
2827 while (alloc_worklist.length() != 0) {
2828 Node *n = alloc_worklist.pop();
2829 uint ni = n->_idx;
2830 if (n->is_Call()) {
2831 CallNode *alloc = n->as_Call();
2832 // copy escape information to call node
2833 PointsToNode* ptn = ptnode_adr(alloc->_idx);
2834 PointsToNode::EscapeState es = ptn->escape_state();
2835 // We have an allocation or call which returns a Java object,
2836 // see if it is unescaped.
2837 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable())
2838 continue;
2839 // Find CheckCastPP for the allocate or for the return value of a call
2840 n = alloc->result_cast();
2841 if (n == NULL) { // No uses except Initialize node
2842 if (alloc->is_Allocate()) {
2843 // Set the scalar_replaceable flag for allocation
2844 // so it could be eliminated if it has no uses.
2845 alloc->as_Allocate()->_is_scalar_replaceable = true;
2846 }
2847 if (alloc->is_CallStaticJava()) {
2848 // Set the scalar_replaceable flag for boxing method
2849 // so it could be eliminated if it has no uses.
2850 alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
2851 }
2852 continue;
2853 }
2854 if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
2855 assert(!alloc->is_Allocate(), "allocation should have unique type");
2856 continue;
2857 }
2859 // The inline code for Object.clone() casts the allocation result to
2860 // java.lang.Object and then to the actual type of the allocated
2861 // object. Detect this case and use the second cast.
2862 // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when
2863 // the allocation result is cast to java.lang.Object and then
2864 // to the actual Array type.
2865 if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
2866 && (alloc->is_AllocateArray() ||
2867 igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {
2868 Node *cast2 = NULL;
2869 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2870 Node *use = n->fast_out(i);
2871 if (use->is_CheckCastPP()) {
2872 cast2 = use;
2873 break;
2874 }
2875 }
2876 if (cast2 != NULL) {
2877 n = cast2;
2878 } else {
2879 // Non-scalar replaceable if the allocation type is unknown statically
2880 // (reflection allocation); the object can't be restored during
2881 // deoptimization without a precise type.
2882 continue;
2883 }
2884 }
2886 const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
2887 if (t == NULL)
2888 continue; // not a TypeOopPtr
2889 if (!t->klass_is_exact())
2890 continue; // not a unique type
2892 if (alloc->is_Allocate()) {
2893 // Set the scalar_replaceable flag for allocation
2894 // so it could be eliminated.
2895 alloc->as_Allocate()->_is_scalar_replaceable = true;
2896 }
2897 if (alloc->is_CallStaticJava()) {
2898 // Set the scalar_replaceable flag for boxing method
2899 // so it could be eliminated.
2900 alloc->as_CallStaticJava()->_is_scalar_replaceable = true;
2901 }
2902 set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state
2903 // in order for an object to be scalar-replaceable, it must be:
2904 // - a direct allocation (not a call returning an object)
2905 // - non-escaping
2906 // - eligible to be a unique type
2907 // - not determined to be ineligible by escape analysis
2908 set_map(alloc, n);
2909 set_map(n, alloc);
2910 const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
2911 igvn->hash_delete(n);
2912 igvn->set_type(n, tinst);
2913 n->raise_bottom_type(tinst);
2914 igvn->hash_insert(n);
2915 record_for_optimizer(n);
2916 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
2918 // First, put on the worklist all Field edges from Connection Graph
2919 // which is more accurate than putting immediate users from the Ideal Graph.
2920 for (EdgeIterator e(ptn); e.has_next(); e.next()) {
2921 PointsToNode* tgt = e.get();
2922 Node* use = tgt->ideal_node();
2923 assert(tgt->is_Field() && use->is_AddP(),
2924 "only AddP nodes are Field edges in CG");
2925 if (use->outcnt() > 0) { // Don't process dead nodes
2926 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
2927 if (addp2 != NULL) {
2928 assert(alloc->is_AllocateArray(),"array allocation was expected");
2929 alloc_worklist.append_if_missing(addp2);
2930 }
2931 alloc_worklist.append_if_missing(use);
2932 }
2933 }
2935 // An allocation may have an Initialize which has raw stores. Scan
2936 // the users of the raw allocation result and push AddP users
2937 // on alloc_worklist.
2938 Node *raw_result = alloc->proj_out(TypeFunc::Parms);
2939 assert (raw_result != NULL, "must have an allocation result");
2940 for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
2941 Node *use = raw_result->fast_out(i);
2942 if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
2943 Node* addp2 = find_second_addp(use, raw_result);
2944 if (addp2 != NULL) {
2945 assert(alloc->is_AllocateArray(),"array allocation was expected");
2946 alloc_worklist.append_if_missing(addp2);
2947 }
2948 alloc_worklist.append_if_missing(use);
2949 } else if (use->is_MemBar()) {
2950 memnode_worklist.append_if_missing(use);
2951 }
2952 }
2953 }
2954 } else if (n->is_AddP()) {
2955 JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
2956 if (jobj == NULL || jobj == phantom_obj) {
2957 #ifdef ASSERT
2958 ptnode_adr(get_addp_base(n)->_idx)->dump();
2959 ptnode_adr(n->_idx)->dump();
2960 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
2961 #endif
2962 _compile->record_failure(C2Compiler::retry_no_escape_analysis());
2963 return;
2964 }
2965 Node *base = get_map(jobj->idx()); // CheckCastPP node
2966 if (!split_AddP(n, base)) continue; // wrong type from dead path
2967 } else if (n->is_Phi() ||
2968 n->is_CheckCastPP() ||
2969 n->is_EncodeP() ||
2970 n->is_DecodeN() ||
2971 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
2972 if (visited.test_set(n->_idx)) {
2973 assert(n->is_Phi(), "loops only through Phi's");
2974 continue; // already processed
2975 }
2976 JavaObjectNode* jobj = unique_java_object(n);
2977 if (jobj == NULL || jobj == phantom_obj) {
2978 #ifdef ASSERT
2979 ptnode_adr(n->_idx)->dump();
2980 assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
2981 #endif
2982 _compile->record_failure(C2Compiler::retry_no_escape_analysis());
2983 return;
2984 } else {
2985 Node *val = get_map(jobj->idx()); // CheckCastPP node
2986 TypeNode *tn = n->as_Type();
2987 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
2988 assert(tinst != NULL && tinst->is_known_instance() &&
2989 tinst->instance_id() == jobj->idx() , "instance type expected.");
2991 const Type *tn_type = igvn->type(tn);
2992 const TypeOopPtr *tn_t;
2993 if (tn_type->isa_narrowoop()) {
2994 tn_t = tn_type->make_ptr()->isa_oopptr();
2995 } else {
2996 tn_t = tn_type->isa_oopptr();
2997 }
2998 if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
2999 if (tn_type->isa_narrowoop()) {
3000 tn_type = tinst->make_narrowoop();
3001 } else {
3002 tn_type = tinst;
3003 }
3004 igvn->hash_delete(tn);
3005 igvn->set_type(tn, tn_type);
3006 tn->set_type(tn_type);
3007 igvn->hash_insert(tn);
3008 record_for_optimizer(n);
3009 } else {
3010 assert(tn_type == TypePtr::NULL_PTR ||
3011 tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()),
3012 "unexpected type");
3013 continue; // Skip dead path with different type
3014 }
3015 }
3016 } else {
3017 debug_only(n->dump();)
3018 assert(false, "EA: unexpected node");
3019 continue;
3020 }
3021 // push allocation's users on appropriate worklist
3022 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3023 Node *use = n->fast_out(i);
3024 if (use->is_Mem() && use->in(MemNode::Address) == n) {
3025 // Load/store to instance's field
3026 memnode_worklist.append_if_missing(use);
3027 } else if (use->is_MemBar()) {
3028 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3029 memnode_worklist.append_if_missing(use);
3030 }
3031 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3032 Node* addp2 = find_second_addp(use, n);
3033 if (addp2 != NULL) {
3034 alloc_worklist.append_if_missing(addp2);
3035 }
3036 alloc_worklist.append_if_missing(use);
3037 } else if (use->is_Phi() ||
3038 use->is_CheckCastPP() ||
3039 use->is_EncodeNarrowPtr() ||
3040 use->is_DecodeNarrowPtr() ||
3041 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3042 alloc_worklist.append_if_missing(use);
3043 #ifdef ASSERT
3044 } else if (use->is_Mem()) {
3045 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3046 } else if (use->is_MergeMem()) {
3047 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3048 } else if (use->is_SafePoint()) {
3049 // Look for MergeMem nodes for calls which reference unique allocation
3050 // (through CheckCastPP nodes) even for debug info.
3051 Node* m = use->in(TypeFunc::Memory);
3052 if (m->is_MergeMem()) {
3053 assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3054 }
3055 } else if (use->Opcode() == Op_EncodeISOArray) {
3056 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3057 // EncodeISOArray overwrites destination array
3058 memnode_worklist.append_if_missing(use);
3059 }
3060 } else {
3061 uint op = use->Opcode();
3062 if (!(op == Op_CmpP || op == Op_Conv2B ||
3063 op == Op_CastP2X || op == Op_StoreCM ||
3064 op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
3065 op == Op_StrEquals || op == Op_StrIndexOf)) {
3066 n->dump();
3067 use->dump();
3068 assert(false, "EA: missing allocation reference path");
3069 }
3070 #endif
3071 }
3072 }
3074 }
3075 // New alias types were created in split_AddP().
3076 uint new_index_end = (uint) _compile->num_alias_types();
3077 assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");
3079 // Phase 2: Process MemNodes from memnode_worklist: compute the new address type
3080 // and new values for their Memory inputs (the Memory inputs are not
3081 // actually updated until Phase 4).
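// A load/store whose address points to a unique instance is redirected, via
// find_inst_mem() below, to the memory slice of that instance's alias index;
// the result is remembered with set_map() until Phase 4 installs it.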
3082 if (memnode_worklist.length() == 0)
3083 return; // nothing to do
3084 while (memnode_worklist.length() != 0) {
3085 Node *n = memnode_worklist.pop();
3086 if (visited.test_set(n->_idx))
3087 continue;
3088 if (n->is_Phi() || n->is_ClearArray()) {
3089 // we don't need to do anything, but the users must be pushed
3090 } else if (n->is_MemBar()) { // Initialize or MemBar nodes
3091 // we don't need to do anything, but the users must be pushed
3092 n = n->as_MemBar()->proj_out(TypeFunc::Memory);
3093 if (n == NULL)
3094 continue;
3095 } else if (n->Opcode() == Op_EncodeISOArray) {
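// EncodeISOArray stores into its destination array, so its memory effect
// is published through an SCMemProj user, like the LoadStore case below.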
3096 // get the memory projection
3097 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3098 Node *use = n->fast_out(i);
3099 if (use->Opcode() == Op_SCMemProj) {
3100 n = use;
3101 break;
3102 }
3103 }
3104 assert(n->Opcode() == Op_SCMemProj, "memory projection required");
3105 } else {
3106 assert(n->is_Mem(), "memory node required.");
3107 Node *addr = n->in(MemNode::Address);
3108 const Type *addr_t = igvn->type(addr);
3109 if (addr_t == Type::TOP)
3110 continue;
3111 assert(addr_t->isa_ptr() != NULL, "pointer type required.");
3112 int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
3113 assert((uint)alias_idx < new_index_end, "wrong alias index");
3114 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
3115 if (_compile->failing()) {
3116 return;
3117 }
3118 if (mem != n->in(MemNode::Memory)) {
3119 // We delay the memory edge update since we need the old one in
3120 // the MergeMem code below when instance memory slices are separated.
3121 set_map(n, mem);
3122 }
3123 if (n->is_Load()) {
3124 continue; // don't push users
3125 } else if (n->is_LoadStore()) {
3126 // get the memory projection
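// LoadStore nodes (e.g. compare-and-swap) expose their memory effect
// through an SCMemProj user; continue pushing users from that projection.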
3127 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3128 Node *use = n->fast_out(i);
3129 if (use->Opcode() == Op_SCMemProj) {
3130 n = use;
3131 break;
3132 }
3133 }
3134 assert(n->Opcode() == Op_SCMemProj, "memory projection required");
3135 }
3136 }
3137 // push users on the appropriate worklist
3138 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3139 Node *use = n->fast_out(i);
3140 if (use->is_Phi() || use->is_ClearArray()) {
3141 memnode_worklist.append_if_missing(use);
3142 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
3143 if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
3144 continue;
3145 memnode_worklist.append_if_missing(use);
3146 } else if (use->is_MemBar()) {
3147 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3148 memnode_worklist.append_if_missing(use);
3149 }
3150 #ifdef ASSERT
3151 } else if (use->is_Mem()) {
3152 assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
3153 } else if (use->is_MergeMem()) {
3154 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3155 } else if (use->Opcode() == Op_EncodeISOArray) {
3156 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3157 // EncodeISOArray overwrites destination array
3158 memnode_worklist.append_if_missing(use);
3159 }
3160 } else {
3161 uint op = use->Opcode();
3162 if (!(op == Op_StoreCM ||
3163 (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
3164 strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
3165 op == Op_AryEq || op == Op_StrComp ||
3166 op == Op_StrEquals || op == Op_StrIndexOf)) {
3167 n->dump();
3168 use->dump();
3169 assert(false, "EA: missing memory path");
3170 }
3171 #endif
3172 }
3173 }
3174 }
3176 // Phase 3: Process MergeMem nodes from _mergemem_worklist.
3177 // Walk each memory slice moving the first node encountered of each
3178 // instance type to the input corresponding to its alias index.
3179 uint length = _mergemem_worklist.length();
3180 for (uint next = 0; next < length; ++next) {
3181 MergeMemNode* nmm = _mergemem_worklist.at(next);
3182 assert(!visited.test_set(nmm->_idx), "should not be visited before");
3183 // Note: we don't want to use MergeMemStream here because we only want to
3184 // scan inputs which exist at the start, not ones we add during processing.
3185 // Note 2: MergeMem may already contain instance memory slices added
3186 // during the find_inst_mem() calls when memory nodes were processed above.
3187 igvn->hash_delete(nmm);
3188 uint nslices = MIN2(nmm->req(), new_index_start);
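// Only slices which existed when this MergeMem was created are scanned here;
// the instance slices (alias indices >= new_index_start) are filled in below.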
3189 for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
3190 Node* mem = nmm->in(i);
3191 Node* cur = NULL;
3192 if (mem == NULL || mem->is_top())
3193 continue;
3194 // First, update the MergeMem by moving memory nodes to their corresponding
3195 // slices if their type became more precise since this MergeMem was created.
3196 while (mem->is_Mem()) {
3197 const Type *at = igvn->type(mem->in(MemNode::Address));
3198 if (at != Type::TOP) {
3199 assert(at->isa_ptr() != NULL, "pointer type required.");
3200 uint idx = (uint)_compile->get_alias_index(at->is_ptr());
3201 if (idx == i) {
3202 if (cur == NULL)
3203 cur = mem;
3204 } else {
3205 if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
3206 nmm->set_memory_at(idx, mem);
3207 }
3208 }
3209 }
3210 mem = mem->in(MemNode::Memory);
3211 }
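// Install the first memory node found for this slice or, if none matched,
// the memory state reached past the end of the scanned chain.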
3212 nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
3213 // Find any instance of the current type if we haven't already
3214 // encountered a memory slice of the instance along the memory chain.
3215 for (uint ni = new_index_start; ni < new_index_end; ni++) {
3216 if ((uint)_compile->get_general_index(ni) == i) {
3217 Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
3218 if (nmm->is_empty_memory(m)) {
3219 Node* result = find_inst_mem(mem, ni, orig_phis);
3220 if (_compile->failing()) {
3221 return;
3222 }
3223 nmm->set_memory_at(ni, result);
3224 }
3225 }
3226 }
3227 }
3228 // Find the rest of the instance memory values
3229 for (uint ni = new_index_start; ni < new_index_end; ni++) {
3230 const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
3231 Node* result = step_through_mergemem(nmm, ni, tinst);
3232 if (result == nmm->base_memory()) {
3233 // Didn't find instance memory, search through general slice recursively.
3234 result = nmm->memory_at(_compile->get_general_index(ni));
3235 result = find_inst_mem(result, ni, orig_phis);
3236 if (_compile->failing()) {
3237 return;
3238 }
3239 nmm->set_memory_at(ni, result);
3240 }
3241 }
3242 igvn->hash_insert(nmm);
3243 record_for_optimizer(nmm);
3244 }
3246 // Phase 4: Update the inputs of non-instance memory Phis and
3247 // the Memory input of MemNodes.
3248 // First update the inputs of any non-instance Phi's from
3249 // which we split out an instance Phi. Note we don't have
3250 // to recursively process Phi's encountered on the input memory
3251 // chains as is done in split_memory_phi() since they will
3252 // also be processed here.
3253 for (int j = 0; j < orig_phis.length(); j++) {
3254 PhiNode *phi = orig_phis.at(j);
3255 int alias_idx = _compile->get_alias_index(phi->adr_type());
3256 igvn->hash_delete(phi);
3257 for (uint i = 1; i < phi->req(); i++) {
3258 Node *mem = phi->in(i);
3259 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
3260 if (_compile->failing()) {
3261 return;
3262 }
3263 if (mem != new_mem) {
3264 phi->set_req(i, new_mem);
3265 }
3266 }
3267 igvn->hash_insert(phi);
3268 record_for_optimizer(phi);
3269 }
3271 // Update the memory inputs of MemNodes with the value we computed
3272 // in Phase 2 and move stores' memory users to the corresponding memory slices.
3273 // Disable memory split verification code until the fix for 6984348.
3274 // Currently it produces false negative results since it does not cover all cases.
3275 #if 0 // ifdef ASSERT
3276 visited.Reset();
3277 Node_Stack old_mems(arena, _compile->unique() >> 2);
3278 #endif
3279 for (uint i = 0; i < ideal_nodes.size(); i++) {
3280 Node* n = ideal_nodes.at(i);
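// For memory nodes, get_map() returns the new memory input computed in
// Phase 2 (see set_map() above); it is installed on the node here.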
3281 Node* nmem = get_map(n->_idx);
3282 assert(nmem != NULL, "sanity");
3283 if (n->is_Mem()) {
3284 #if 0 // ifdef ASSERT
3285 Node* old_mem = n->in(MemNode::Memory);
3286 if (!visited.test_set(old_mem->_idx)) {
3287 old_mems.push(old_mem, old_mem->outcnt());
3288 }
3289 #endif
3290 assert(n->in(MemNode::Memory) != nmem, "sanity");
3291 if (!n->is_Load()) {
3292 // Move memory users of a store first.
3293 move_inst_mem(n, orig_phis);
3294 }
3295 // Now update memory input
3296 igvn->hash_delete(n);
3297 n->set_req(MemNode::Memory, nmem);
3298 igvn->hash_insert(n);
3299 record_for_optimizer(n);
3300 } else {
3301 assert(n->is_Allocate() || n->is_CheckCastPP() ||
3302 n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
3303 }
3304 }
3305 #if 0 // ifdef ASSERT
3306 // Verify that memory was split correctly
3307 while (old_mems.is_nonempty()) {
3308 Node* old_mem = old_mems.node();
3309 uint old_cnt = old_mems.index();
3310 old_mems.pop();
3311 assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
3312 }
3313 #endif
3314 }
3316 #ifndef PRODUCT
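// Indexed by PointsToNode::NodeType; keep in sync with that enum.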
3317 static const char *node_type_names[] = {
3318 "UnknownType",
3319 "JavaObject",
3320 "LocalVar",
3321 "Field",
3322 "Arraycopy"
3323 };
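// Indexed by PointsToNode::EscapeState; keep in sync with that enum.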
3325 static const char *esc_names[] = {
3326 "UnknownEscape",
3327 "NoEscape",
3328 "ArgEscape",
3329 "GlobalEscape"
3330 };
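// Print one points-to node, roughly as:
//   <type> <escape(fields escape)> [NSR] [oop +offset ( bases )] [ edges ] [ uses ]]
// followed by the ideal node; "P" tags a JavaObject, "F" a Field, "cp" an
// Arraycopy, and "b" marks a use as a field's base.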
3332 void PointsToNode::dump(bool print_state) const {
3333 NodeType nt = node_type();
3334 tty->print("%s ", node_type_names[(int)nt]);
3335 if (print_state) {
3336 EscapeState es = escape_state();
3337 EscapeState fields_es = fields_escape_state();
3338 tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
3339 if (nt == PointsToNode::JavaObject && !this->scalar_replaceable())
3340 tty->print("NSR ");
3341 }
3342 if (is_Field()) {
3343 FieldNode* f = (FieldNode*)this;
3344 if (f->is_oop())
3345 tty->print("oop ");
3346 if (f->offset() > 0)
3347 tty->print("+%d ", f->offset());
3348 tty->print("(");
3349 for (BaseIterator i(f); i.has_next(); i.next()) {
3350 PointsToNode* b = i.get();
3351 tty->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : ""));
3352 }
3353 tty->print(" )");
3354 }
3355 tty->print("[");
3356 for (EdgeIterator i(this); i.has_next(); i.next()) {
3357 PointsToNode* e = i.get();
3358 tty->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
3359 }
3360 tty->print(" [");
3361 for (UseIterator i(this); i.has_next(); i.next()) {
3362 PointsToNode* u = i.get();
3363 bool is_base = false;
3364 if (PointsToNode::is_base_use(u)) {
3365 is_base = true;
3366 u = PointsToNode::get_use_node(u)->as_Field();
3367 }
3368 tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
3369 }
3370 tty->print(" ]] ");
3371 if (_node == NULL)
3372 tty->print_cr("<null>");
3373 else
3374 _node->dump();
3375 }
3377 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
3378 bool first = true;
3379 int ptnodes_length = ptnodes_worklist.length();
3380 for (int i = 0; i < ptnodes_length; i++) {
3381 PointsToNode *ptn = ptnodes_worklist.at(i);
3382 if (ptn == NULL || !ptn->is_JavaObject())
3383 continue;
3384 PointsToNode::EscapeState es = ptn->escape_state();
3385 if ((es != PointsToNode::NoEscape) && !Verbose) {
3386 continue;
3387 }
3388 Node* n = ptn->ideal_node();
3389 if (n->is_Allocate() || (n->is_CallStaticJava() &&
3390 n->as_CallStaticJava()->is_boxing_method())) {
3391 if (first) {
3392 tty->cr();
3393 tty->print("======== Connection graph for ");
3394 _compile->method()->print_short_name();
3395 tty->cr();
3396 first = false;
3397 }
3398 ptn->dump();
3399 // Print all locals and fields which reference this allocation
3400 for (UseIterator j(ptn); j.has_next(); j.next()) {
3401 PointsToNode* use = j.get();
3402 if (use->is_LocalVar()) {
3403 use->dump(Verbose);
3404 } else if (Verbose) {
3405 use->dump();
3406 }
3407 }
3408 tty->cr();
3409 }
3410 }
3411 }
3412 #endif