Fri, 16 Oct 2009 02:05:46 -0700
6888898: CMS: ReduceInitialCardMarks unsafe in the presence of cms precleaning
6889757: G1: enable card mark elision for initializing writes from compiled code (ReduceInitialCardMarks)
Summary: Defer the (compiler-elided) card-mark upon a slow-path allocation until after the store and before the next subsequent safepoint; G1 now answers yes to can_elide_tlab_write_barriers().
Reviewed-by: jcoomes, kvn, never
/*
 * Copyright 2005-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_escape.cpp.incl"

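// Note: edges are stored packed into a single uint: the target node's index
// in the upper bits and the 2-bit edge type in the low bits (see EdgeShift
// and EdgeMask in escape.hpp). For example, assuming EdgeShift == 2, a
// PointsToEdge (et == 1) to node 5 would be encoded as (5 << 2) + 1 == 21.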
void PointsToNode::add_edge(uint targIdx, PointsToNode::EdgeType et) {
  uint v = (targIdx << EdgeShift) + ((uint) et);
  if (_edges == NULL) {
    Arena *a = Compile::current()->comp_arena();
    _edges = new(a) GrowableArray<uint>(a, INITIAL_EDGE_COUNT, 0, 0);
  }
  _edges->append_if_missing(v);
}

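// Precondition: add_edge() must already have been called for this node,
// since _edges is dereferenced here without a NULL check.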
void PointsToNode::remove_edge(uint targIdx, PointsToNode::EdgeType et) {
  uint v = (targIdx << EdgeShift) + ((uint) et);

  _edges->remove(v);
}

#ifndef PRODUCT
static const char *node_type_names[] = {
  "UnknownType",
  "JavaObject",
  "LocalVar",
  "Field"
};

static const char *esc_names[] = {
  "UnknownEscape",
  "NoEscape",
  "ArgEscape",
  "GlobalEscape"
};

static const char *edge_type_suffix[] = {
  "?", // UnknownEdge
  "P", // PointsToEdge
  "D", // DeferredEdge
  "F"  // FieldEdge
};

void PointsToNode::dump(bool print_state) const {
  NodeType nt = node_type();
  tty->print("%s ", node_type_names[(int) nt]);
  if (print_state) {
    EscapeState es = escape_state();
    tty->print("%s %s ", esc_names[(int) es], _scalar_replaceable ? "":"NSR");
  }
  tty->print("[[");
  for (uint i = 0; i < edge_count(); i++) {
    tty->print(" %d%s", edge_target(i), edge_type_suffix[(int) edge_type(i)]);
  }
  tty->print("]] ");
  if (_node == NULL)
    tty->print_cr("<null>");
  else
    _node->dump();
}
#endif

ConnectionGraph::ConnectionGraph(Compile * C) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), PointsToNode()),
  _processed(C->comp_arena()),
  _collecting(true),
  _compile(C),
  _node_map(C->comp_arena()) {
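
  // The phantom object represents any memory allocated outside this method;
  // it is modeled by C->top() and conservatively marked GlobalEscape below.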
  _phantom_object = C->top()->_idx;
  add_node(C->top(), PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);

  // Add ConP(#NULL) and ConN(#NULL) nodes.
  PhaseGVN* igvn = C->initial_gvn();
  Node* oop_null = igvn->zerocon(T_OBJECT);
  _oop_null = oop_null->_idx;
  assert(_oop_null < C->unique(), "should be created already");
  add_node(oop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);

  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    _noop_null = noop_null->_idx;
    assert(_noop_null < C->unique(), "should be created already");
    add_node(noop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);
  }
}

void ConnectionGraph::add_pointsto_edge(uint from_i, uint to_i) {
  PointsToNode *f = ptnode_adr(from_i);
  PointsToNode *t = ptnode_adr(to_i);

  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
  assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of PointsTo edge");
  assert(t->node_type() == PointsToNode::JavaObject, "invalid destination of PointsTo edge");
  f->add_edge(to_i, PointsToNode::PointsToEdge);
}

void ConnectionGraph::add_deferred_edge(uint from_i, uint to_i) {
  PointsToNode *f = ptnode_adr(from_i);
  PointsToNode *t = ptnode_adr(to_i);

  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
  assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of Deferred edge");
  assert(t->node_type() == PointsToNode::LocalVar || t->node_type() == PointsToNode::Field, "invalid destination of Deferred edge");
  // Don't add a self-referential edge; this can occur during removal of
  // deferred edges.
  if (from_i != to_i)
    f->add_edge(to_i, PointsToNode::DeferredEdge);
}

int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
  const Type *adr_type = phase->type(adr);
  if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
      adr->in(AddPNode::Address)->is_Proj() &&
      adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
    // We are computing a raw address for a store captured by an Initialize
    // node; compute an appropriate address type. AddP cases #3 and #5 (see below).
    int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
    assert(offs != Type::OffsetBot ||
           adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
           "offset must be a constant or it is initialization of array");
    return offs;
  }
  const TypePtr *t_ptr = adr_type->isa_ptr();
  assert(t_ptr != NULL, "must be a pointer type");
  return t_ptr->offset();
}

void ConnectionGraph::add_field_edge(uint from_i, uint to_i, int offset) {
  PointsToNode *f = ptnode_adr(from_i);
  PointsToNode *t = ptnode_adr(to_i);

  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
  assert(f->node_type() == PointsToNode::JavaObject, "invalid source of Field edge");
  assert(t->node_type() == PointsToNode::Field, "invalid destination of Field edge");
  assert (t->offset() == -1 || t->offset() == offset, "conflicting field offsets");
  t->set_offset(offset);

  f->add_edge(to_i, PointsToNode::FieldEdge);
}

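// Escape states are ordered (UnknownEscape < NoEscape < ArgEscape <
// GlobalEscape), so a node's state is only ever raised, never lowered.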
void ConnectionGraph::set_escape_state(uint ni, PointsToNode::EscapeState es) {
  PointsToNode *npt = ptnode_adr(ni);
  PointsToNode::EscapeState old_es = npt->escape_state();
  if (es > old_es)
    npt->set_escape_state(es);
}

void ConnectionGraph::add_node(Node *n, PointsToNode::NodeType nt,
                               PointsToNode::EscapeState es, bool done) {
  PointsToNode* ptadr = ptnode_adr(n->_idx);
  ptadr->_node = n;
  ptadr->set_node_type(nt);

  // inline set_escape_state(idx, es);
  PointsToNode::EscapeState old_es = ptadr->escape_state();
  if (es > old_es)
    ptadr->set_escape_state(es);

  if (done)
    _processed.set(n->_idx);
}

PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n, PhaseTransform *phase) {
  uint idx = n->_idx;
  PointsToNode::EscapeState es;

  // If we are still collecting or there were no non-escaping allocations,
  // we don't know the answer yet.
  if (_collecting)
    return PointsToNode::UnknownEscape;

  // If the node was created after the escape computation, return
  // UnknownEscape.
  if (idx >= nodes_size())
    return PointsToNode::UnknownEscape;

  es = ptnode_adr(idx)->escape_state();

  // If we have already computed a value, return it.
  if (es != PointsToNode::UnknownEscape &&
      ptnode_adr(idx)->node_type() == PointsToNode::JavaObject)
    return es;

  // PointsTo() calls n->uncast() which can return a new ideal node.
  if (n->uncast()->_idx >= nodes_size())
    return PointsToNode::UnknownEscape;

  // Compute the max escape state of anything this node could point to.
  VectorSet ptset(Thread::current()->resource_area());
  PointsTo(ptset, n, phase);
  for(VectorSetI i(&ptset); i.test() && es != PointsToNode::GlobalEscape; ++i) {
    uint pt = i.elem;
    PointsToNode::EscapeState pes = ptnode_adr(pt)->escape_state();
    if (pes > es)
      es = pes;
  }
  // Cache the computed escape state.
  assert(es != PointsToNode::UnknownEscape, "should have computed an escape state");
  ptnode_adr(idx)->set_escape_state(es);
  return es;
}

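// Fill ptset with the indices of all JavaObject nodes which "n" may point
// to, walking LocalVar and Field nodes through their PointsTo and Deferred
// edges. A node with no outgoing edges is assumed to be set outside this
// method and contributes the phantom object to the set.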
void ConnectionGraph::PointsTo(VectorSet &ptset, Node * n, PhaseTransform *phase) {
  VectorSet visited(Thread::current()->resource_area());
  GrowableArray<uint> worklist;

#ifdef ASSERT
  Node *orig_n = n;
#endif

  n = n->uncast();
  PointsToNode* npt = ptnode_adr(n->_idx);

  // If we have a JavaObject, return just that object.
  if (npt->node_type() == PointsToNode::JavaObject) {
    ptset.set(n->_idx);
    return;
  }
#ifdef ASSERT
  if (npt->_node == NULL) {
    if (orig_n != n)
      orig_n->dump();
    n->dump();
    assert(npt->_node != NULL, "unregistered node");
  }
#endif
  worklist.push(n->_idx);
  while(worklist.length() > 0) {
    int ni = worklist.pop();
    if (visited.test_set(ni))
      continue;

    PointsToNode* pn = ptnode_adr(ni);
    // Ensure that all inputs of a Phi have been processed.
    assert(!_collecting || !pn->_node->is_Phi() || _processed.test(ni),"");

    int edges_processed = 0;
    uint e_cnt = pn->edge_count();
    for (uint e = 0; e < e_cnt; e++) {
      uint etgt = pn->edge_target(e);
      PointsToNode::EdgeType et = pn->edge_type(e);
      if (et == PointsToNode::PointsToEdge) {
        ptset.set(etgt);
        edges_processed++;
      } else if (et == PointsToNode::DeferredEdge) {
        worklist.push(etgt);
        edges_processed++;
      } else {
        assert(false,"neither PointsToEdge nor DeferredEdge");
      }
    }
    if (edges_processed == 0) {
      // No deferred or PointsTo edges found. Assume the value was set
      // outside this method. Add the phantom object to the PointsTo set.
      ptset.set(_phantom_object);
    }
  }
}

void ConnectionGraph::remove_deferred(uint ni, GrowableArray<uint>* deferred_edges, VectorSet* visited) {
  // This method is most expensive during ConnectionGraph construction.
  // Reuse the visited VectorSet and an additional growable array for deferred edges.
  deferred_edges->clear();
  visited->Clear();

  visited->set(ni);
  PointsToNode *ptn = ptnode_adr(ni);

  // Mark current edges as visited and move deferred edges to a separate array.
  for (uint i = 0; i < ptn->edge_count(); ) {
    uint t = ptn->edge_target(i);
#ifdef ASSERT
    assert(!visited->test_set(t), "expecting no duplications");
#else
    visited->set(t);
#endif
    if (ptn->edge_type(i) == PointsToNode::DeferredEdge) {
      ptn->remove_edge(t, PointsToNode::DeferredEdge);
      deferred_edges->append(t);
    } else {
      i++;
    }
  }
  for (int next = 0; next < deferred_edges->length(); ++next) {
    uint t = deferred_edges->at(next);
    PointsToNode *ptt = ptnode_adr(t);
    uint e_cnt = ptt->edge_count();
    for (uint e = 0; e < e_cnt; e++) {
      uint etgt = ptt->edge_target(e);
      if (visited->test_set(etgt))
        continue;

      PointsToNode::EdgeType et = ptt->edge_type(e);
      if (et == PointsToNode::PointsToEdge) {
        add_pointsto_edge(ni, etgt);
        if(etgt == _phantom_object) {
          // Special case - field set outside (globally escaping).
          ptn->set_escape_state(PointsToNode::GlobalEscape);
        }
      } else if (et == PointsToNode::DeferredEdge) {
        deferred_edges->append(etgt);
      } else {
        assert(false,"invalid connection graph");
      }
    }
  }
}

// Add an edge to node given by "to_i" from any field of adr_i whose offset
// matches "offset". A deferred edge is added if to_i is a LocalVar, and
// a PointsTo edge is added if it is a JavaObject.

void ConnectionGraph::add_edge_from_fields(uint adr_i, uint to_i, int offs) {
  PointsToNode* an = ptnode_adr(adr_i);
  PointsToNode* to = ptnode_adr(to_i);
  bool deferred = (to->node_type() == PointsToNode::LocalVar);

  for (uint fe = 0; fe < an->edge_count(); fe++) {
    assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
    int fi = an->edge_target(fe);
    PointsToNode* pf = ptnode_adr(fi);
    int po = pf->offset();
    if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
      if (deferred)
        add_deferred_edge(fi, to_i);
      else
        add_pointsto_edge(fi, to_i);
    }
  }
}

// Add a deferred edge from node given by "from_i" to any field of adr_i
// whose offset matches "offset".
void ConnectionGraph::add_deferred_edge_to_fields(uint from_i, uint adr_i, int offs) {
  PointsToNode* an = ptnode_adr(adr_i);
  for (uint fe = 0; fe < an->edge_count(); fe++) {
    assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
    int fi = an->edge_target(fe);
    PointsToNode* pf = ptnode_adr(fi);
    int po = pf->offset();
    if (pf->edge_count() == 0) {
      // We have not seen any stores to this field; assume it was set outside this method.
      add_pointsto_edge(fi, _phantom_object);
    }
    if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
      add_deferred_edge(from_i, fi);
    }
  }
}

// Helper functions

static Node* get_addp_base(Node *addp) {
  assert(addp->is_AddP(), "must be AddP");
  //
  // AddP cases for Base and Address inputs:
  // case #1. Direct object's field reference:
  //     Allocate
  //       |
  //     Proj #5 ( oop result )
  //       |
  //     CheckCastPP (cast to instance type)
  //      | |
  //     AddP  ( base == address )
  //
  // case #2. Indirect object's field reference:
  //      Phi
  //       |
  //     CastPP (cast to instance type)
  //      | |
  //     AddP  ( base == address )
  //
  // case #3. Raw object's field reference for Initialize node:
  //     Allocate
  //       |
  //     Proj #5 ( oop result )
  //  top   |
  //     \  |
  //     AddP  ( base == top )
  //
  // case #4. Array's element reference:
  //   {CheckCastPP | CastPP}
  //     |  | |
  //     |  AddP ( array's element offset )
  //     |  |
  //     AddP ( array's offset )
  //
  // case #5. Raw object's field reference for arraycopy stub call:
  //          The inline_native_clone() case when the arraycopy stub is called
  //          after the allocation before Initialize and CheckCastPP nodes.
  //     Allocate
  //       |
  //     Proj #5 ( oop result )
  //       | |
  //     AddP  ( base == address )
  //
  // case #6. Constant Pool, ThreadLocal, CastX2P or
  //          Raw object's field reference:
  //   {ConP, ThreadLocal, CastX2P, raw Load}
  //  top   |
  //     \  |
  //     AddP  ( base == top )
  //
  // case #7. Klass's field reference:
  //     LoadKlass
  //      | |
  //     AddP  ( base == address )
  //
  // case #8. Narrow Klass's field reference:
  //     LoadNKlass
  //       |
  //     DecodeN
  //      | |
  //     AddP  ( base == address )
  //
  Node *base = addp->in(AddPNode::Base)->uncast();
  if (base->is_top()) { // The AddP case #3 and #6.
    base = addp->in(AddPNode::Address)->uncast();
    while (base->is_AddP()) {
      // Case #6 (unsafe access) may have several chained AddP nodes.
      assert(base->in(AddPNode::Base)->is_top(), "expected unsafe access address only");
      base = base->in(AddPNode::Address)->uncast();
    }
    assert(base->Opcode() == Op_ConP || base->Opcode() == Op_ThreadLocal ||
           base->Opcode() == Op_CastX2P || base->is_DecodeN() ||
           (base->is_Mem() && base->bottom_type() == TypeRawPtr::NOTNULL) ||
           (base->is_Proj() && base->in(0)->is_Allocate()), "sanity");
  }
  return base;
}
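
// Note: callers pass the returned base to PointsTo() to find the
// allocation(s) an AddP may address; see split_unique_types() and
// compute_escape() below.
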
static Node* find_second_addp(Node* addp, Node* n) {
  assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");

  Node* addp2 = addp->raw_out(0);
  if (addp->outcnt() == 1 && addp2->is_AddP() &&
      addp2->in(AddPNode::Base) == n &&
      addp2->in(AddPNode::Address) == addp) {

    assert(addp->in(AddPNode::Base) == n, "expecting the same base");
    //
    // Find the array's offset to push it on the worklist first, so that
    // the array's element offset (pushed second) is processed first,
    // avoiding a CastPP for the array's offset.
    // Otherwise the inserted CastPP (LocalVar) will point to what
    // the AddP (Field) points to, which would be wrong since
    // the algorithm expects the CastPP to have the same point as
    // AddP's base CheckCastPP (LocalVar).
    //
    //    ArrayAllocation
    //     |
    //    CheckCastPP
    //     |
    //    memProj (from ArrayAllocation CheckCastPP)
    //     |  ||
    //     |  ||   Int (element index)
    //     |  ||    |   ConI (log(element size))
    //     |  ||    |   /
    //     |  ||   LShift
    //     |  ||  /
    //     |  AddP (array's element offset)
    //     |  |
    //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
    //     | / /
    //     AddP (array's offset)
    //      |
    //     Load/Store (memory operation on array's element)
    //
    return addp2;
  }
  return NULL;
}

//
// Adjust the type and inputs of an AddP which computes the
// address of a field of an instance
//
bool ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
  const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
  assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
  const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
  if (t == NULL) {
    // We are computing a raw address for a store captured by an Initialize
    // node; compute an appropriate address type (cases #3 and #5).
    assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
    assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
    intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
    assert(offs != Type::OffsetBot, "offset must be a constant");
    t = base_t->add_offset(offs)->is_oopptr();
  }
  int inst_id = base_t->instance_id();
  assert(!t->is_known_instance() || t->instance_id() == inst_id,
         "old type must be non-instance or match new type");

  // The type 't' could be a subclass of 'base_t'.
  // As a result t->offset() could be larger than base_t's size, which will
  // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
  // constructor verifies the correctness of the offset.
  //
  // This can happen on a subclass branch (from type profiling
  // inlining) which was not eliminated during parsing, since the exactness
  // of the allocation type was not propagated to the subclass type check.
  //
  // Or the type 't' may be unrelated to 'base_t' altogether.
  // This can happen when the CHA type differs from the MDO type on a dead path
  // (for example, from an instanceof check) which is not collapsed during parsing.
  //
  // Do nothing for such an AddP node and don't process its users since
  // this code branch will go away.
  //
  if (!t->is_known_instance() &&
      !base_t->klass()->is_subtype_of(t->klass())) {
    return false; // bail out
  }

  const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
  // Do NOT remove the next call: it ensures a new alias index is allocated
  // for the instance type.
  int alias_idx = _compile->get_alias_index(tinst);
  igvn->set_type(addp, tinst);
  // record the allocation in the node map
  set_map(addp->_idx, get_map(base->_idx));

  // Set addp's Base and Address to 'base'.
  Node *abase = addp->in(AddPNode::Base);
  Node *adr   = addp->in(AddPNode::Address);
  if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
      adr->in(0)->_idx == (uint)inst_id) {
    // Skip AddP cases #3 and #5.
  } else {
    assert(!abase->is_top(), "sanity"); // AddP case #3
    if (abase != base) {
      igvn->hash_delete(addp);
      addp->set_req(AddPNode::Base, base);
      if (abase == adr) {
        addp->set_req(AddPNode::Address, base);
      } else {
        // AddP case #4 (adr is array's element offset AddP node)
#ifdef ASSERT
        const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
        assert(adr->is_AddP() && atype != NULL &&
               atype->instance_id() == inst_id, "array's element offset should be processed first");
#endif
      }
      igvn->hash_insert(addp);
    }
  }
  // Put on IGVN worklist since at least addp's type was changed above.
  record_for_optimizer(addp);
  return true;
}

//
// Create a new version of orig_phi if necessary. Returns either the newly
// created phi or an existing phi. Sets create_new to indicate whether a new
// phi was created. Cache the last newly created phi in the node map.
//
PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn, bool &new_created) {
  Compile *C = _compile;
  new_created = false;
  int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
  // nothing to do if orig_phi is bottom memory or matches alias_idx
  if (phi_alias_idx == alias_idx) {
    return orig_phi;
  }
  // Have we recently created a Phi for this alias index?
  PhiNode *result = get_map_phi(orig_phi->_idx);
  if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
    return result;
  }
  // Previous check may fail when the same wide memory Phi was split into Phis
  // for different memory slices. Search all Phis for this region.
  if (result != NULL) {
    Node* region = orig_phi->in(0);
    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
      Node* phi = region->fast_out(i);
      if (phi->is_Phi() &&
          C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
        assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
        return phi->as_Phi();
      }
    }
  }
  if ((int)C->unique() + 2*NodeLimitFudgeFactor > MaxNodeLimit) {
    if (C->do_escape_analysis() == true && !C->failing()) {
      // Retry compilation without escape analysis.
      // If this is the first failure, the sentinel string will "stick"
      // to the Compile object, and the C2Compiler will see it and retry.
      C->record_failure(C2Compiler::retry_no_escape_analysis());
    }
    return NULL;
  }
  orig_phi_worklist.append_if_missing(orig_phi);
  const TypePtr *atype = C->get_adr_type(alias_idx);
  result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
  C->copy_node_notes_to(result, orig_phi);
  set_map_phi(orig_phi->_idx, result);
  igvn->set_type(result, result->bottom_type());
  record_for_optimizer(result);
  new_created = true;
  return result;
}

//
// Return a new version of Memory Phi "orig_phi" with the inputs having the
// specified alias index.
//
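// To avoid deep recursion on long Phi chains, the loop below keeps an
// explicit stack of partially processed Phis (phi_list) together with the
// input position at which to resume (cur_input).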
PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn) {

  assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
  Compile *C = _compile;
  bool new_phi_created;
  PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, igvn, new_phi_created);
  if (!new_phi_created) {
    return result;
  }

  GrowableArray<PhiNode *> phi_list;
  GrowableArray<uint> cur_input;

  PhiNode *phi = orig_phi;
  uint idx = 1;
  bool finished = false;
  while(!finished) {
    while (idx < phi->req()) {
      Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, igvn);
      if (mem != NULL && mem->is_Phi()) {
        PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, igvn, new_phi_created);
        if (new_phi_created) {
          // Found a phi for which we created a new split; push the current
          // one on the worklist and begin processing the new one.
          phi_list.push(phi);
          cur_input.push(idx);
          phi = mem->as_Phi();
          result = newphi;
          idx = 1;
          continue;
        } else {
          mem = newphi;
        }
      }
      if (C->failing()) {
        return NULL;
      }
      result->set_req(idx++, mem);
    }
#ifdef ASSERT
    // verify that the new Phi has an input for each input of the original
    assert( phi->req() == result->req(), "must have same number of inputs.");
    assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
#endif
    // Check that all of the new phi's inputs correspond to the original's.
    for (uint i = 1; i < phi->req(); i++) {
      Node* in = result->in(i);
      assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");
    }
    // we have finished processing a Phi, see if there are any more to do
    finished = (phi_list.length() == 0 );
    if (!finished) {
      phi = phi_list.pop();
      idx = cur_input.pop();
      PhiNode *prev_result = get_map_phi(phi->_idx);
      prev_result->set_req(idx++, result);
      result = prev_result;
    }
  }
  return result;
}

//
// The next methods are derived from methods in MemNode.
//
static Node *step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *tinst) {
  Node *mem = mmem;
  // TypeInstPtr::NOTNULL+any is an OOP with unknown offset - generally
  // means an array I have not precisely typed yet. Do not do any
  // alias stuff with it any time soon.
  if( tinst->base() != Type::AnyPtr &&
      !(tinst->klass()->is_java_lang_Object() &&
        tinst->offset() == Type::OffsetBot) ) {
    mem = mmem->memory_at(alias_idx);
    // Update input if it is progress over what we have now
  }
  return mem;
}

//
// Search memory chain of "mem" to find a MemNode whose address
// matches the specified alias index.
//
Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis, PhaseGVN *phase) {
  if (orig_mem == NULL)
    return orig_mem;
  Compile* C = phase->C;
  const TypeOopPtr *tinst = C->get_adr_type(alias_idx)->isa_oopptr();
  bool is_instance = (tinst != NULL) && tinst->is_known_instance();
  Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
  Node *prev = NULL;
  Node *result = orig_mem;
  while (prev != result) {
    prev = result;
    if (result == start_mem)
      break;  // hit one of our sentinels
    if (result->is_Mem()) {
      const Type *at = phase->type(result->in(MemNode::Address));
      if (at != Type::TOP) {
        assert (at->isa_ptr() != NULL, "pointer type required.");
        int idx = C->get_alias_index(at->is_ptr());
        if (idx == alias_idx)
          break;
      }
      result = result->in(MemNode::Memory);
    }
    if (!is_instance)
      continue;  // don't search further for non-instance types
    // skip over a call which does not affect this memory slice
    if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
      Node *proj_in = result->in(0);
      if (proj_in->is_Allocate() && proj_in->_idx == (uint)tinst->instance_id()) {
        break;  // hit one of our sentinels
      } else if (proj_in->is_Call()) {
        CallNode *call = proj_in->as_Call();
        if (!call->may_modify(tinst, phase)) {
          result = call->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_Initialize()) {
        AllocateNode* alloc = proj_in->as_Initialize()->allocation();
        // Stop if this is the initialization for the object instance which
        // contains this memory slice, otherwise skip over it.
        if (alloc == NULL || alloc->_idx != (uint)tinst->instance_id()) {
          result = proj_in->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_MemBar()) {
        result = proj_in->in(TypeFunc::Memory);
      }
    } else if (result->is_MergeMem()) {
      MergeMemNode *mmem = result->as_MergeMem();
      result = step_through_mergemem(mmem, alias_idx, tinst);
      if (result == mmem->base_memory()) {
        // Didn't find instance memory, search through general slice recursively.
        result = mmem->memory_at(C->get_general_index(alias_idx));
        result = find_inst_mem(result, alias_idx, orig_phis, phase);
        if (C->failing()) {
          return NULL;
        }
        mmem->set_memory_at(alias_idx, result);
      }
    } else if (result->is_Phi() &&
               C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
      Node *un = result->as_Phi()->unique_input(phase);
      if (un != NULL) {
        result = un;
      } else {
        break;
      }
    } else if (result->Opcode() == Op_SCMemProj) {
      assert(result->in(0)->is_LoadStore(), "sanity");
      const Type *at = phase->type(result->in(0)->in(MemNode::Address));
      if (at != Type::TOP) {
        assert (at->isa_ptr() != NULL, "pointer type required.");
        int idx = C->get_alias_index(at->is_ptr());
        assert(idx != alias_idx, "Object is not scalar replaceable if a LoadStore node accesses its field");
        break;
      }
      result = result->in(0)->in(MemNode::Memory);
    }
  }
  if (result->is_Phi()) {
    PhiNode *mphi = result->as_Phi();
    assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
    const TypePtr *t = mphi->adr_type();
    if (C->get_alias_index(t) != alias_idx) {
      // Create a new Phi with the specified alias index type.
      result = split_memory_phi(mphi, alias_idx, orig_phis, phase);
    } else if (!is_instance) {
      // Push all non-instance Phis on the orig_phis worklist to update inputs
      // during Phase 4 if needed.
      orig_phis.append_if_missing(mphi);
    }
  }
  // The result is either a MemNode, a PhiNode or an InitializeNode.
  return result;
}

//
//  Convert the types of unescaped objects to instance types where possible,
//  propagate the new type information through the graph, and update memory
//  edges and MergeMem inputs to reflect the new type.
//
//  We start with allocations (and calls which may be allocations) on alloc_worklist.
//  The processing is done in 4 phases:
//
//  Phase 1:  Process possible allocations from alloc_worklist.  Create instance
//            types for the CheckCastPP for allocations where possible.
//            Propagate the new types through users as follows:
//               casts and Phi:  push users on alloc_worklist
//               AddP:  cast Base and Address inputs to the instance type
//                      push any AddP users on alloc_worklist and push any memnode
//                      users onto memnode_worklist.
//  Phase 2:  Process MemNodes from memnode_worklist. Compute the new address
//            type and search the Memory chain for a store with the appropriate
//            address type.  If a Phi is found, create a new version with
//            the appropriate memory slices from each of the Phi inputs.
//            For stores, process the users as follows:
//               MemNode:  push on memnode_worklist
//               MergeMem: push on mergemem_worklist
//  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory
//            slice, moving the first node encountered of each instance type to
//            the input corresponding to its alias index.
//  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
//
// In the following example, the CheckCastPP nodes are the cast of allocation
// results and the allocation of node 29 is unescaped and eligible to be an
// instance type.
//
// We start with:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"
//    30  AddP  _ 29 29 10  Foo+12  alias_index=4
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35  40  30   ... alias_index=4
//    60  StoreP  45  50  20   ... alias_index=4
//    70  LoadP    _  60  30   ... alias_index=4
//    80  Phi     75  50  60   Memory alias_index=4
//    90  LoadP    _  80  30   ... alias_index=4
//   100  LoadP    _  80  20   ... alias_index=4
//
//
// Phase 1 creates an instance type for node 29 assigning it an instance id of 24
// and creating a new alias index for node 30.  This gives:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"  iid=24
//    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35  40  30   ... alias_index=6
//    60  StoreP  45  50  20   ... alias_index=4
//    70  LoadP    _  60  30   ... alias_index=6
//    80  Phi     75  50  60   Memory alias_index=4
//    90  LoadP    _  80  30   ... alias_index=6
//   100  LoadP    _  80  20   ... alias_index=4
//
// In phase 2, new memory inputs are computed for the loads and stores,
// and a new version of the phi is created.  In phase 4, the inputs to
// node 80 are updated and then the memory nodes are updated with the
// values computed in phase 2.  This results in:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"  iid=24
//    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35   7  30   ... alias_index=6
//    60  StoreP  45  40  20   ... alias_index=4
//    70  LoadP    _  50  30   ... alias_index=6
//    80  Phi     75  40  60   Memory alias_index=4
//   120  Phi     75  50  50   Memory alias_index=6
//    90  LoadP    _ 120  30   ... alias_index=6
//   100  LoadP    _  80  20   ... alias_index=4
//
void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) {
  GrowableArray<Node *> memnode_worklist;
  GrowableArray<Node *> mergemem_worklist;
  GrowableArray<PhiNode *> orig_phis;
  PhaseGVN *igvn = _compile->initial_gvn();
  uint new_index_start = (uint) _compile->num_alias_types();
  VectorSet visited(Thread::current()->resource_area());
  VectorSet ptset(Thread::current()->resource_area());

  //  Phase 1:  Process possible allocations from alloc_worklist.
  //  Create instance types for the CheckCastPP for allocations where possible.
  //
  // (Note: don't forget to change the order of the second AddP node on
  //  the alloc_worklist if the order of the worklist processing is changed,
  //  see the comment in find_second_addp().)
  //
  while (alloc_worklist.length() != 0) {
    Node *n = alloc_worklist.pop();
    uint ni = n->_idx;
    const TypeOopPtr* tinst = NULL;
    if (n->is_Call()) {
      CallNode *alloc = n->as_Call();
      // copy escape information to call node
      PointsToNode* ptn = ptnode_adr(alloc->_idx);
      PointsToNode::EscapeState es = escape_state(alloc, igvn);
      // We have an allocation or call which returns a Java object,
      // see if it is unescaped.
      if (es != PointsToNode::NoEscape || !ptn->_scalar_replaceable)
        continue;

      // Find CheckCastPP for the allocate or for the return value of a call
      n = alloc->result_cast();
      if (n == NULL) {            // No uses except Initialize node
        if (alloc->is_Allocate()) {
          // Set the scalar_replaceable flag for allocation
          // so it could be eliminated if it has no uses.
          alloc->as_Allocate()->_is_scalar_replaceable = true;
        }
        continue;
      }
      if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
        assert(!alloc->is_Allocate(), "allocation should have unique type");
        continue;
      }

      // The inline code for Object.clone() casts the allocation result to
      // java.lang.Object and then to the actual type of the allocated
      // object. Detect this case and use the second cast.
      // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when
      // the allocation result is cast to java.lang.Object and then
      // to the actual Array type.
      if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
          && (alloc->is_AllocateArray() ||
              igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) {
        Node *cast2 = NULL;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node *use = n->fast_out(i);
          if (use->is_CheckCastPP()) {
            cast2 = use;
            break;
          }
        }
        if (cast2 != NULL) {
          n = cast2;
        } else {
          // Not scalar replaceable if the allocation type is not known
          // statically (reflection allocation): the object can't be restored
          // during deoptimization without the precise type.
          continue;
        }
      }
      if (alloc->is_Allocate()) {
        // Set the scalar_replaceable flag for allocation
        // so it could be eliminated.
        alloc->as_Allocate()->_is_scalar_replaceable = true;
      }
      set_escape_state(n->_idx, es);
      // in order for an object to be scalar-replaceable, it must be:
      //   - a direct allocation (not a call returning an object)
      //   - non-escaping
      //   - eligible to be a unique type
      //   - not determined to be ineligible by escape analysis
      set_map(alloc->_idx, n);
      set_map(n->_idx, alloc);
      const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
      if (t == NULL)
        continue;  // not a TypeInstPtr
      tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni);
      igvn->hash_delete(n);
      igvn->set_type(n, tinst);
      n->raise_bottom_type(tinst);
      igvn->hash_insert(n);
      record_for_optimizer(n);
      if (alloc->is_Allocate() && ptn->_scalar_replaceable &&
          (t->isa_instptr() || t->isa_aryptr())) {

        // First, put on the worklist all Field edges from the Connection
        // Graph, which is more accurate than putting the immediate users
        // from the Ideal Graph.
        for (uint e = 0; e < ptn->edge_count(); e++) {
          Node *use = ptnode_adr(ptn->edge_target(e))->_node;
          assert(ptn->edge_type(e) == PointsToNode::FieldEdge && use->is_AddP(),
                 "only AddP nodes are Field edges in CG");
          if (use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(),"array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          }
        }

        // An allocation may have an Initialize which has raw stores. Scan
        // the users of the raw allocation result and push AddP users
        // on alloc_worklist.
        Node *raw_result = alloc->proj_out(TypeFunc::Parms);
        assert (raw_result != NULL, "must have an allocation result");
        for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
          Node *use = raw_result->fast_out(i);
          if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, raw_result);
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(),"array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          } else if (use->is_Initialize()) {
            memnode_worklist.append_if_missing(use);
          }
        }
      }
    } else if (n->is_AddP()) {
      ptset.Clear();
      PointsTo(ptset, get_addp_base(n), igvn);
      assert(ptset.Size() == 1, "AddP address is unique");
      uint elem = ptset.getelem(); // Allocation node's index
      if (elem == _phantom_object)
        continue; // Assume the value was set outside this method.
      Node *base = get_map(elem);  // CheckCastPP node
      if (!split_AddP(n, base, igvn)) continue; // wrong type
      tinst = igvn->type(base)->isa_oopptr();
    } else if (n->is_Phi() ||
               n->is_CheckCastPP() ||
               n->is_EncodeP() ||
               n->is_DecodeN() ||
               (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
      if (visited.test_set(n->_idx)) {
        assert(n->is_Phi(), "loops only through Phi's");
        continue;  // already processed
      }
      ptset.Clear();
      PointsTo(ptset, n, igvn);
      if (ptset.Size() == 1) {
        uint elem = ptset.getelem(); // Allocation node's index
        if (elem == _phantom_object)
          continue; // Assume the value was set outside this method.
        Node *val = get_map(elem);   // CheckCastPP node
        TypeNode *tn = n->as_Type();
        tinst = igvn->type(val)->isa_oopptr();
        assert(tinst != NULL && tinst->is_known_instance() &&
               (uint)tinst->instance_id() == elem , "instance type expected.");

        const Type *tn_type = igvn->type(tn);
        const TypeOopPtr *tn_t;
        if (tn_type->isa_narrowoop()) {
          tn_t = tn_type->make_ptr()->isa_oopptr();
        } else {
          tn_t = tn_type->isa_oopptr();
        }

        if (tn_t != NULL &&
            tinst->cast_to_instance_id(TypeOopPtr::InstanceBot)->higher_equal(tn_t)) {
          if (tn_type->isa_narrowoop()) {
            tn_type = tinst->make_narrowoop();
          } else {
            tn_type = tinst;
          }
          igvn->hash_delete(tn);
          igvn->set_type(tn, tn_type);
          tn->set_type(tn_type);
          igvn->hash_insert(tn);
          record_for_optimizer(n);
        } else {
          continue; // wrong type
        }
      }
    } else {
      continue;
    }
    // push users on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Mem() && use->in(MemNode::Address) == n) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_Initialize()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MergeMem()) {
        mergemem_worklist.append_if_missing(use);
      } else if (use->is_SafePoint() && tinst != NULL) {
        // Look for MergeMem nodes for calls which reference unique allocation
        // (through CheckCastPP nodes) even for debug info.
        Node* m = use->in(TypeFunc::Memory);
        uint iid = tinst->instance_id();
        while (m->is_Proj() && m->in(0)->is_SafePoint() &&
               m->in(0) != use && m->in(0)->_idx != iid) {
          m = m->in(0)->in(TypeFunc::Memory);
        }
        if (m->is_MergeMem()) {
          mergemem_worklist.append_if_missing(m);
        }
      } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
        Node* addp2 = find_second_addp(use, n);
        if (addp2 != NULL) {
          alloc_worklist.append_if_missing(addp2);
        }
        alloc_worklist.append_if_missing(use);
      } else if (use->is_Phi() ||
                 use->is_CheckCastPP() ||
                 use->is_EncodeP() ||
                 use->is_DecodeN() ||
                 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
        alloc_worklist.append_if_missing(use);
      }
    }

  }
  // New alias types were created in split_AddP().
  uint new_index_end = (uint) _compile->num_alias_types();

  //  Phase 2:  Process MemNodes from memnode_worklist. Compute the new address
  //  types and the new values for the Memory inputs (the Memory inputs are not
  //  actually updated until Phase 4).
  if (memnode_worklist.length() == 0)
    return;  // nothing to do

  while (memnode_worklist.length() != 0) {
    Node *n = memnode_worklist.pop();
    if (visited.test_set(n->_idx))
      continue;
    if (n->is_Phi()) {
      assert(n->as_Phi()->adr_type() != TypePtr::BOTTOM, "narrow memory slice required");
      // we don't need to do anything, but the users must be pushed if we haven't processed
      // this Phi before
    } else if (n->is_Initialize()) {
      // we don't need to do anything, but the users of the memory projection must be pushed
      n = n->as_Initialize()->proj_out(TypeFunc::Memory);
      if (n == NULL)
        continue;
    } else {
      assert(n->is_Mem(), "memory node required.");
      Node *addr = n->in(MemNode::Address);
      assert(addr->is_AddP(), "AddP required");
      const Type *addr_t = igvn->type(addr);
      if (addr_t == Type::TOP)
        continue;
      assert (addr_t->isa_ptr() != NULL, "pointer type required.");
      int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
      assert ((uint)alias_idx < new_index_end, "wrong alias index");
      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis, igvn);
      if (_compile->failing()) {
        return;
      }
      if (mem != n->in(MemNode::Memory)) {
        set_map(n->_idx, mem);
        ptnode_adr(n->_idx)->_node = n;
      }
      if (n->is_Load()) {
        continue;  // don't push users
      } else if (n->is_LoadStore()) {
        // get the memory projection
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node *use = n->fast_out(i);
          if (use->Opcode() == Op_SCMemProj) {
            n = use;
            break;
          }
        }
        assert(n->Opcode() == Op_SCMemProj, "memory projection required");
      }
    }
    // push user on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Phi()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_Initialize()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MergeMem()) {
        mergemem_worklist.append_if_missing(use);
      }
    }
  }

  //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
  //  Walk each memory slice, moving the first node encountered of each
  //  instance type to the input corresponding to its alias index.
  while (mergemem_worklist.length() != 0) {
    Node *n = mergemem_worklist.pop();
    assert(n->is_MergeMem(), "MergeMem node required.");
    if (visited.test_set(n->_idx))
      continue;
    MergeMemNode *nmm = n->as_MergeMem();
    // Note: we don't want to use MergeMemStream here because we only want to
    // scan inputs which exist at the start, not ones we add during processing.
    uint nslices = nmm->req();
    igvn->hash_delete(nmm);
    for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
      Node* mem = nmm->in(i);
      Node* cur = NULL;
      if (mem == NULL || mem->is_top())
        continue;
      while (mem->is_Mem()) {
        const Type *at = igvn->type(mem->in(MemNode::Address));
        if (at != Type::TOP) {
          assert (at->isa_ptr() != NULL, "pointer type required.");
          uint idx = (uint)_compile->get_alias_index(at->is_ptr());
          if (idx == i) {
            if (cur == NULL)
              cur = mem;
          } else {
            if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
              nmm->set_memory_at(idx, mem);
            }
          }
        }
        mem = mem->in(MemNode::Memory);
      }
      nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
      // Find any instance of the current type if we haven't encountered
      // a value of the instance along the chain.
      for (uint ni = new_index_start; ni < new_index_end; ni++) {
        if ((uint)_compile->get_general_index(ni) == i) {
          Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
          if (nmm->is_empty_memory(m)) {
            Node* result = find_inst_mem(mem, ni, orig_phis, igvn);
            if (_compile->failing()) {
              return;
            }
            nmm->set_memory_at(ni, result);
          }
        }
      }
    }
    // Find the rest of the instance values.
    for (uint ni = new_index_start; ni < new_index_end; ni++) {
      const TypeOopPtr *tinst = igvn->C->get_adr_type(ni)->isa_oopptr();
      Node* result = step_through_mergemem(nmm, ni, tinst);
      if (result == nmm->base_memory()) {
        // Didn't find instance memory, search through general slice recursively.
        result = nmm->memory_at(igvn->C->get_general_index(ni));
        result = find_inst_mem(result, ni, orig_phis, igvn);
        if (_compile->failing()) {
          return;
        }
        nmm->set_memory_at(ni, result);
      }
    }
    igvn->hash_insert(nmm);
    record_for_optimizer(nmm);

    // Propagate new memory slices to following MergeMem nodes.
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Call()) {
        CallNode* in = use->as_Call();
        if (in->proj_out(TypeFunc::Memory) != NULL) {
          Node* m = in->proj_out(TypeFunc::Memory);
          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
            Node* mm = m->fast_out(j);
            if (mm->is_MergeMem()) {
              mergemem_worklist.append_if_missing(mm);
            }
          }
        }
        if (use->is_Allocate()) {
          use = use->as_Allocate()->initialization();
          if (use == NULL) {
            continue;
          }
        }
      }
      if (use->is_Initialize()) {
        InitializeNode* in = use->as_Initialize();
        if (in->proj_out(TypeFunc::Memory) != NULL) {
          Node* m = in->proj_out(TypeFunc::Memory);
          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
            Node* mm = m->fast_out(j);
            if (mm->is_MergeMem()) {
              mergemem_worklist.append_if_missing(mm);
            }
          }
        }
      }
    }
  }

  //  Phase 4:  Update the inputs of non-instance memory Phis and
  //            the Memory input of memnodes.
  // First update the inputs of any non-instance Phi's from
  // which we split out an instance Phi.  Note we don't have
  // to recursively process Phi's encountered on the input memory
  // chains as is done in split_memory_phi() since they will
  // also be processed here.
  for (int j = 0; j < orig_phis.length(); j++) {
    PhiNode *phi = orig_phis.at(j);
    int alias_idx = _compile->get_alias_index(phi->adr_type());
    igvn->hash_delete(phi);
    for (uint i = 1; i < phi->req(); i++) {
      Node *mem = phi->in(i);
      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis, igvn);
      if (_compile->failing()) {
        return;
      }
      if (mem != new_mem) {
        phi->set_req(i, new_mem);
      }
    }
    igvn->hash_insert(phi);
    record_for_optimizer(phi);
  }

  // Update the memory inputs of MemNodes with the value we computed
  // in Phase 2.
  for (uint i = 0; i < nodes_size(); i++) {
    Node *nmem = get_map(i);
    if (nmem != NULL) {
      Node *n = ptnode_adr(i)->_node;
      if (n != NULL && n->is_Mem()) {
        igvn->hash_delete(n);
        n->set_req(MemNode::Memory, nmem);
        igvn->hash_insert(n);
        record_for_optimizer(n);
      }
    }
  }
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
  int cnt = C->macro_count();
  for( int i=0; i < cnt; i++ ) {
    Node *n = C->macro_node(i);
    if ( n->is_Allocate() )
      return true;
    if( n->is_Lock() ) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if( !(obj->is_Parm() || obj->is_Con()) )
        return true;
    }
  }
  return false;
}

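// Main driver: build the connection graph in the six numbered steps below,
// then hand scalar replaceable allocations to split_unique_types().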
bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;

  // 1. Populate Connection Graph (CG) with Ideal nodes.

  Unique_Node_List worklist_init;
  worklist_init.map(C->unique(), NULL);  // preallocate space

  // Initialize worklist
  if (C->root() != NULL) {
    worklist_init.push(C->root());
  }

  GrowableArray<int> cg_worklist;
  PhaseGVN* igvn = C->initial_gvn();
  bool has_allocations = false;

  // Push all useful nodes onto CG list and set their type.
  for( uint next = 0; next < worklist_init.size(); ++next ) {
    Node* n = worklist_init.at(next);
    record_for_escape_analysis(n, igvn);
    // Only the results of allocations and Java static calls are checked
    // for an escape status. See process_call_result() below.
    if (n->is_Allocate() || (n->is_CallStaticJava() &&
        ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject)) {
      has_allocations = true;
    }
    if(n->is_AddP())
      cg_worklist.append(n->_idx);
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      worklist_init.push(m);
    }
  }

  if (!has_allocations) {
    _collecting = false;
    return false; // Nothing to do.
  }

  // 2. First pass to create simple CG edges (doesn't require walking the CG).
  uint delayed_size = _delayed_worklist.size();
  for( uint next = 0; next < delayed_size; ++next ) {
    Node* n = _delayed_worklist.at(next);
    build_connection_graph(n, igvn);
  }

  // 3. Pass to create field edges (Allocate -F-> AddP).
  uint cg_length = cg_worklist.length();
  for( uint next = 0; next < cg_length; ++next ) {
    int ni = cg_worklist.at(next);
    build_connection_graph(ptnode_adr(ni)->_node, igvn);
  }

  cg_worklist.clear();
  cg_worklist.append(_phantom_object);

  // 4. Build Connection Graph edges for the remaining nodes (Call, AddP,
  //    LoadP, StoreP), which requires walking the connection graph.
  for (uint ni = 0; ni < nodes_size(); ni++) {
    PointsToNode* ptn = ptnode_adr(ni);
    Node *n = ptn->_node;
    if (n != NULL) { // Call, AddP, LoadP, StoreP
      build_connection_graph(n, igvn);
      if (ptn->node_type() != PointsToNode::UnknownType)
        cg_worklist.append(n->_idx); // Collect CG nodes
    }
  }

  VectorSet ptset(Thread::current()->resource_area());
  GrowableArray<uint> deferred_edges;
  VectorSet visited(Thread::current()->resource_area());

  // 5. Remove deferred edges from the graph and collect
  //    information needed for type splitting.
  cg_length = cg_worklist.length();
  for( uint next = 0; next < cg_length; ++next ) {
    int ni = cg_worklist.at(next);
    PointsToNode* ptn = ptnode_adr(ni);
    PointsToNode::NodeType nt = ptn->node_type();
    if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
      remove_deferred(ni, &deferred_edges, &visited);
      Node *n = ptn->_node;
      if (n->is_AddP()) {
        // Search for objects which are not scalar replaceable.
        // Mark their escape state as ArgEscape to propagate the state
        // to referenced objects.
        // Note: currently there is no difference in compiler optimizations
        // between ArgEscape objects and NoEscape objects which are not
        // scalar replaceable.

        int offset = ptn->offset();
        Node *base = get_addp_base(n);
        ptset.Clear();
        PointsTo(ptset, base, igvn);
        int ptset_size = ptset.Size();

        // Check if a field's initializing value is recorded and add
        // a corresponding NULL value if it is not recorded.
        // The Connection Graph does not record a default initialization
        // by NULL captured by an Initialize node.
        //
        // Note: it will disable scalar replacement in some cases:
        //
        //    Point p[] = new Point[1];
        //    p[0] = new Point(); // will not be scalar replaced
        //
        // but it will save us from incorrect optimizations in the next cases:
        //
        //    Point p[] = new Point[1];
        //    if ( x ) p[0] = new Point(); // will not be scalar replaced
        //
        // Without a control flow analysis we can't distinguish the above cases.
        //
        if (offset != Type::OffsetBot && ptset_size == 1) {
          uint elem = ptset.getelem(); // Allocation node's index
          // It does not matter if it is not an Allocate node, since
          // only non-escaping allocations are scalar replaced.
          if (ptnode_adr(elem)->_node->is_Allocate() &&
              ptnode_adr(elem)->escape_state() == PointsToNode::NoEscape) {
            AllocateNode* alloc = ptnode_adr(elem)->_node->as_Allocate();
            InitializeNode* ini = alloc->initialization();
            Node* value = NULL;
            if (ini != NULL) {
              BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
              Node* store = ini->find_captured_store(offset, type2aelembytes(ft), igvn);
              if (store != NULL && store->is_Store())
                value = store->in(MemNode::ValueIn);
            }
            if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
              // A field's initializing value was not recorded. Add NULL.
              uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
              add_pointsto_edge(ni, null_idx);
            }
          }
        }

        // An object is not scalar replaceable if the field which may point
        // to it has an unknown offset (unknown element of an array of objects).
        //
        if (offset == Type::OffsetBot) {
          uint e_cnt = ptn->edge_count();
          for (uint ei = 0; ei < e_cnt; ei++) {
            uint npi = ptn->edge_target(ei);
            set_escape_state(npi, PointsToNode::ArgEscape);
            ptnode_adr(npi)->_scalar_replaceable = false;
          }
        }

        // Currently an object is not scalar replaceable if a LoadStore node
        // accesses its field, since the field's value is unknown after it.
        //
        bool has_LoadStore = false;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node *use = n->fast_out(i);
          if (use->is_LoadStore()) {
            has_LoadStore = true;
            break;
          }
        }
        // An object is not scalar replaceable if the address points
        // to an unknown field (unknown element for arrays, offset is OffsetBot).
        //
        // Or the address may point to more than one object. This may produce
        // a false positive result (set scalar_replaceable to false)
        // since the flow-insensitive escape analysis can't separate
        // the case when stores overwrite the field's value from the case
        // when stores happened on different control branches.
        //
        if (ptset_size > 1 || (ptset_size != 0 &&
            (has_LoadStore || offset == Type::OffsetBot))) {
          for( VectorSetI j(&ptset); j.test(); ++j ) {
            set_escape_state(j.elem, PointsToNode::ArgEscape);
            ptnode_adr(j.elem)->_scalar_replaceable = false;
          }
        }
      }
    }
  }

1537 // 6. Propagate escape states.
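// Note: escape states are ordered (NoEscape < ArgEscape < GlobalEscape,
// matching esc_names[]) and the passes below only ever raise a node's
// state, so each worklist loop terminates.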
1538 GrowableArray<int> worklist;
1539 bool has_non_escaping_obj = false;
1541 // push all GlobalEscape nodes on the worklist
1542 for( uint next = 0; next < cg_length; ++next ) {
1543 int nk = cg_worklist.at(next);
1544 if (ptnode_adr(nk)->escape_state() == PointsToNode::GlobalEscape)
1545 worklist.push(nk);
1546 }
1547 // mark all nodes reachable from GlobalEscape nodes
1548 while(worklist.length() > 0) {
1549 PointsToNode* ptn = ptnode_adr(worklist.pop());
1550 uint e_cnt = ptn->edge_count();
1551 for (uint ei = 0; ei < e_cnt; ei++) {
1552 uint npi = ptn->edge_target(ei);
1553 PointsToNode *np = ptnode_adr(npi);
1554 if (np->escape_state() < PointsToNode::GlobalEscape) {
1555 np->set_escape_state(PointsToNode::GlobalEscape);
1556 worklist.push(npi);
1557 }
1558 }
1559 }
1561 // push all ArgEscape nodes on the worklist
1562 for( uint next = 0; next < cg_length; ++next ) {
1563 int nk = cg_worklist.at(next);
1564 if (ptnode_adr(nk)->escape_state() == PointsToNode::ArgEscape)
1565 worklist.push(nk);
1566 }
1567 // mark all nodes reachable from ArgEscape nodes
1568 while(worklist.length() > 0) {
1569 PointsToNode* ptn = ptnode_adr(worklist.pop());
1570 if (ptn->node_type() == PointsToNode::JavaObject)
1571 has_non_escaping_obj = true; // Non GlobalEscape
1572 uint e_cnt = ptn->edge_count();
1573 for (uint ei = 0; ei < e_cnt; ei++) {
1574 uint npi = ptn->edge_target(ei);
1575 PointsToNode *np = ptnode_adr(npi);
1576 if (np->escape_state() < PointsToNode::ArgEscape) {
1577 np->set_escape_state(PointsToNode::ArgEscape);
1578 worklist.push(npi);
1579 }
1580 }
1581 }
1583 GrowableArray<Node*> alloc_worklist;
1585 // push all NoEscape nodes on the worklist
1586 for( uint next = 0; next < cg_length; ++next ) {
1587 int nk = cg_worklist.at(next);
1588 if (ptnode_adr(nk)->escape_state() == PointsToNode::NoEscape)
1589 worklist.push(nk);
1590 }
1591 // mark all nodes reachable from NoEscape nodes
1592 while(worklist.length() > 0) {
1593 PointsToNode* ptn = ptnode_adr(worklist.pop());
1594 if (ptn->node_type() == PointsToNode::JavaObject)
1595 has_non_escaping_obj = true; // Non GlobalEscape
1596 Node* n = ptn->_node;
1597 if (n->is_Allocate() && ptn->_scalar_replaceable ) {
1598 // Push scalar replaceable allocations on alloc_worklist
1599 // for processing in split_unique_types().
1600 alloc_worklist.append(n);
1601 }
1602 uint e_cnt = ptn->edge_count();
1603 for (uint ei = 0; ei < e_cnt; ei++) {
1604 uint npi = ptn->edge_target(ei);
1605 PointsToNode *np = ptnode_adr(npi);
1606 if (np->escape_state() < PointsToNode::NoEscape) {
1607 np->set_escape_state(PointsToNode::NoEscape);
1608 worklist.push(npi);
1609 }
1610 }
1611 }
1613 _collecting = false;
1614 assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build");
1616 bool has_scalar_replaceable_candidates = alloc_worklist.length() > 0;
1617 if ( has_scalar_replaceable_candidates &&
1618 C->AliasLevel() >= 3 && EliminateAllocations ) {
1620 // Now use the escape information to create unique types for
1621 // scalar replaceable objects.
1622 split_unique_types(alloc_worklist);
1624 if (C->failing()) return false;
1626 // Clean up after split unique types.
1627 ResourceMark rm;
1628 PhaseRemoveUseless pru(C->initial_gvn(), C->for_igvn());
1630 C->print_method("After Escape Analysis", 2);
1632 #ifdef ASSERT
1633 } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
1634 tty->print("=== No allocations eliminated for ");
1635 C->method()->print_short_name();
1636 if (!EliminateAllocations) {
1637 tty->print(" since EliminateAllocations is off ===");
1638 } else if (!has_scalar_replaceable_candidates) {
1639 tty->print(" since there are no scalar replaceable candidates ===");
1640 } else if (C->AliasLevel() < 3) {
1641 tty->print(" since AliasLevel < 3 ===");
1642 }
1643 tty->cr();
1644 #endif
1645 }
1646 return has_non_escaping_obj;
1647 }
1649 void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {
1651 switch (call->Opcode()) {
1652 #ifdef ASSERT
1653 case Op_Allocate:
1654 case Op_AllocateArray:
1655 case Op_Lock:
1656 case Op_Unlock:
1657 assert(false, "should be done already");
1658 break;
1659 #endif
1660 case Op_CallLeafNoFP:
1661 {
1662 // Stub calls: objects do not escape, but they are not scalar replaceable.
1663 // Adjust the escape state of outgoing arguments.
1664 const TypeTuple * d = call->tf()->domain();
1665 VectorSet ptset(Thread::current()->resource_area());
1666 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1667 const Type* at = d->field_at(i);
1668 Node *arg = call->in(i)->uncast();
1669 const Type *aat = phase->type(arg);
1670 if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr()) {
1671 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
1672 aat->isa_ptr() != NULL, "expecting a Ptr");
1673 set_escape_state(arg->_idx, PointsToNode::ArgEscape);
1674 if (arg->is_AddP()) {
1675 //
1676 // The inline_native_clone() case when the arraycopy stub is called
1677 // after the allocation before Initialize and CheckCastPP nodes.
1678 //
1679 // Set AddP's base (Allocate) as not scalar replaceable since
1680 // pointer to the base (with offset) is passed as argument.
1681 //
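// For example (an illustrative sketch):
//
//   int a2[] = a1.clone(); // The arraycopy stub receives a raw
//                          // pointer into the fresh allocation, so
//                          // the clone must not be scalar replaced.
//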
1682 arg = get_addp_base(arg);
1683 }
1684 ptset.Clear();
1685 PointsTo(ptset, arg, phase);
1686 for( VectorSetI j(&ptset); j.test(); ++j ) {
1687 uint pt = j.elem;
1688 set_escape_state(pt, PointsToNode::ArgEscape);
1689 }
1690 }
1691 }
1692 break;
1693 }
1695 case Op_CallStaticJava:
1696 // For a static call, we know exactly what method is being called.
1697 // Use bytecode estimator to record the call's escape effects
1698 {
1699 ciMethod *meth = call->as_CallJava()->method();
1700 BCEscapeAnalyzer *call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
1701 // fall-through if not a Java method or no analyzer information
1702 if (call_analyzer != NULL) {
1703 const TypeTuple * d = call->tf()->domain();
1704 VectorSet ptset(Thread::current()->resource_area());
1705 bool copy_dependencies = false;
1706 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1707 const Type* at = d->field_at(i);
1708 int k = i - TypeFunc::Parms;
1710 if (at->isa_oopptr() != NULL) {
1711 Node *arg = call->in(i)->uncast();
1713 bool global_escapes = false;
1714 bool fields_escapes = false;
1715 if (!call_analyzer->is_arg_stack(k)) {
1716 // The argument globally escapes; mark everything it could point to
1717 set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
1718 global_escapes = true;
1719 } else {
1720 if (!call_analyzer->is_arg_local(k)) {
1721 // The argument itself doesn't escape, but any fields might
1722 fields_escapes = true;
1723 }
1724 set_escape_state(arg->_idx, PointsToNode::ArgEscape);
1725 copy_dependencies = true;
1726 }
1728 ptset.Clear();
1729 PointsTo(ptset, arg, phase);
1730 for( VectorSetI j(&ptset); j.test(); ++j ) {
1731 uint pt = j.elem;
1732 if (global_escapes) {
1733 // The argument globally escapes; mark everything it could point to
1734 set_escape_state(pt, PointsToNode::GlobalEscape);
1735 } else {
1736 if (fields_escapes) {
1737 // The argument itself doesn't escape, but any fields might
1738 add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
1739 }
1740 set_escape_state(pt, PointsToNode::ArgEscape);
1741 }
1742 }
1743 }
1744 }
1745 if (copy_dependencies)
1746 call_analyzer->copy_dependencies(_compile->dependencies());
1747 break;
1748 }
1749 }
1751 default:
1752 // Fall through here if this is not a Java method, there is no
1753 // analyzer information, or this is some other type of call.
1754 // Assume the worst case: all arguments globally escape.
1755 {
1756 // adjust escape state for outgoing arguments
1757 const TypeTuple * d = call->tf()->domain();
1758 VectorSet ptset(Thread::current()->resource_area());
1759 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1760 const Type* at = d->field_at(i);
1761 if (at->isa_oopptr() != NULL) {
1762 Node *arg = call->in(i)->uncast();
1763 set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
1764 ptset.Clear();
1765 PointsTo(ptset, arg, phase);
1766 for( VectorSetI j(&ptset); j.test(); ++j ) {
1767 uint pt = j.elem;
1768 set_escape_state(pt, PointsToNode::GlobalEscape);
1769 }
1770 }
1771 }
1772 }
1773 }
1774 }
1775 void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *phase) {
1776 CallNode *call = resproj->in(0)->as_Call();
1777 uint call_idx = call->_idx;
1778 uint resproj_idx = resproj->_idx;
1780 switch (call->Opcode()) {
1781 case Op_Allocate:
1782 {
1783 Node *k = call->in(AllocateNode::KlassNode);
1784 const TypeKlassPtr *kt;
1785 if (k->Opcode() == Op_LoadKlass) {
1786 kt = k->as_Load()->type()->isa_klassptr();
1787 } else {
1788 // Also works for DecodeN(LoadNKlass).
1789 kt = k->as_Type()->type()->isa_klassptr();
1790 }
1791 assert(kt != NULL, "TypeKlassPtr required.");
1792 ciKlass* cik = kt->klass();
1793 ciInstanceKlass* ciik = cik->as_instance_klass();
1795 PointsToNode::EscapeState es;
1796 uint edge_to;
1797 if (cik->is_subclass_of(_compile->env()->Thread_klass()) || ciik->has_finalizer()) {
1798 es = PointsToNode::GlobalEscape;
1799 edge_to = _phantom_object; // Could not be worse
1800 } else {
1801 es = PointsToNode::NoEscape;
1802 edge_to = call_idx;
1803 }
1804 set_escape_state(call_idx, es);
1805 add_pointsto_edge(resproj_idx, edge_to);
1806 _processed.set(resproj_idx);
1807 break;
1808 }
1810 case Op_AllocateArray:
1811 {
1812 int length = call->in(AllocateNode::ALength)->find_int_con(-1);
1813 if (length < 0 || length > EliminateAllocationArraySizeLimit) {
1814 // Not scalar replaceable if the length is not constant or too big.
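// For example (illustrative): 'new Point[n]' with a non-constant 'n',
// or a constant length larger than EliminateAllocationArraySizeLimit.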
1815 ptnode_adr(call_idx)->_scalar_replaceable = false;
1816 }
1817 set_escape_state(call_idx, PointsToNode::NoEscape);
1818 add_pointsto_edge(resproj_idx, call_idx);
1819 _processed.set(resproj_idx);
1820 break;
1821 }
1823 case Op_CallStaticJava:
1824 // For a static call, we know exactly what method is being called.
1825 // Use bytecode estimator to record whether the call's return value escapes
1826 {
1827 bool done = true;
1828 const TypeTuple *r = call->tf()->range();
1829 const Type* ret_type = NULL;
1831 if (r->cnt() > TypeFunc::Parms)
1832 ret_type = r->field_at(TypeFunc::Parms);
1834 // Note: we use isa_ptr() instead of isa_oopptr() here because the
1835 // _multianewarray functions return a TypeRawPtr.
1836 if (ret_type == NULL || ret_type->isa_ptr() == NULL) {
1837 _processed.set(resproj_idx);
1838 break; // doesn't return a pointer type
1839 }
1840 ciMethod *meth = call->as_CallJava()->method();
1841 const TypeTuple * d = call->tf()->domain();
1842 if (meth == NULL) {
1843 // not a Java method, assume global escape
1844 set_escape_state(call_idx, PointsToNode::GlobalEscape);
1845 add_pointsto_edge(resproj_idx, _phantom_object);
1846 } else {
1847 BCEscapeAnalyzer *call_analyzer = meth->get_bcea();
1848 bool copy_dependencies = false;
1850 if (call_analyzer->is_return_allocated()) {
1851 // Returns a newly allocated unescaped object; simply
1852 // update dependency information.
1853 // Mark it as NoEscape so that objects referenced by
1854 // its fields will be marked as NoEscape at least.
1855 set_escape_state(call_idx, PointsToNode::NoEscape);
1856 add_pointsto_edge(resproj_idx, call_idx);
1857 copy_dependencies = true;
1858 } else if (call_analyzer->is_return_local()) {
1859 // determine whether any arguments are returned
1860 set_escape_state(call_idx, PointsToNode::NoEscape);
1861 bool ret_arg = false;
1862 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1863 const Type* at = d->field_at(i);
1865 if (at->isa_oopptr() != NULL) {
1866 Node *arg = call->in(i)->uncast();
1868 if (call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
1869 ret_arg = true;
1870 PointsToNode *arg_esp = ptnode_adr(arg->_idx);
1871 if (arg_esp->node_type() == PointsToNode::UnknownType)
1872 done = false;
1873 else if (arg_esp->node_type() == PointsToNode::JavaObject)
1874 add_pointsto_edge(resproj_idx, arg->_idx);
1875 else
1876 add_deferred_edge(resproj_idx, arg->_idx);
1877 arg_esp->_hidden_alias = true;
1878 }
1879 }
1880 }
1881 if (done && !ret_arg) {
1882 // Returns unknown object.
1883 set_escape_state(call_idx, PointsToNode::GlobalEscape);
1884 add_pointsto_edge(resproj_idx, _phantom_object);
1885 }
1886 copy_dependencies = true;
1887 } else {
1888 set_escape_state(call_idx, PointsToNode::GlobalEscape);
1889 add_pointsto_edge(resproj_idx, _phantom_object);
1890 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1891 const Type* at = d->field_at(i);
1892 if (at->isa_oopptr() != NULL) {
1893 Node *arg = call->in(i)->uncast();
1894 PointsToNode *arg_esp = ptnode_adr(arg->_idx);
1895 arg_esp->_hidden_alias = true;
1896 }
1897 }
1898 }
1899 if (copy_dependencies)
1900 call_analyzer->copy_dependencies(_compile->dependencies());
1901 }
1902 if (done)
1903 _processed.set(resproj_idx);
1904 break;
1905 }
1907 default:
1908 // Some other type of call; assume the worst case: the
1909 // returned value, if any, globally escapes.
1910 {
1911 const TypeTuple *r = call->tf()->range();
1912 if (r->cnt() > TypeFunc::Parms) {
1913 const Type* ret_type = r->field_at(TypeFunc::Parms);
1915 // Note: we use isa_ptr() instead of isa_oopptr() here because the
1916 // _multianewarray functions return a TypeRawPtr.
1917 if (ret_type->isa_ptr() != NULL) {
1918 set_escape_state(call_idx, PointsToNode::GlobalEscape);
1919 add_pointsto_edge(resproj_idx, _phantom_object);
1920 }
1921 }
1922 _processed.set(resproj_idx);
1923 }
1924 }
1925 }
1927 // Populate Connection Graph with Ideal nodes and create simple
1928 // connection graph edges (no need to check the node_type of inputs
1929 // or to call PointsTo() to walk the connection graph).
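// For example (illustrative): for n = CheckCastPP(x), a points-to edge
// n -> x is added if x is already known to be a JavaObject; otherwise a
// deferred edge is added (or n is delayed if x is not yet processed).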
1930 void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase) {
1931 if (_processed.test(n->_idx))
1932 return; // No need to redefine the node's state.
1934 if (n->is_Call()) {
1935 // Arguments to allocation and locking don't escape.
1936 if (n->is_Allocate()) {
1937 add_node(n, PointsToNode::JavaObject, PointsToNode::UnknownEscape, true);
1938 record_for_optimizer(n);
1939 } else if (n->is_Lock() || n->is_Unlock()) {
1940 // Put Lock and Unlock nodes on IGVN worklist to process them during
1941 // the first IGVN optimization when escape information is still available.
1942 record_for_optimizer(n);
1943 _processed.set(n->_idx);
1944 } else {
1945 // Have to process call's arguments first.
1946 PointsToNode::NodeType nt = PointsToNode::UnknownType;
1948 // Check if a call returns an object.
1949 const TypeTuple *r = n->as_Call()->tf()->range();
1950 if (n->is_CallStaticJava() && r->cnt() > TypeFunc::Parms &&
1951 n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
1952 // Note: use isa_ptr() instead of isa_oopptr() here because
1953 // the _multianewarray functions return a TypeRawPtr.
1954 if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
1955 nt = PointsToNode::JavaObject;
1956 }
1957 }
1958 add_node(n, nt, PointsToNode::UnknownEscape, false);
1959 }
1960 return;
1961 }
1963 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1964 // ThreadLocal has RawPtr type.
1965 switch (n->Opcode()) {
1966 case Op_AddP:
1967 {
1968 add_node(n, PointsToNode::Field, PointsToNode::UnknownEscape, false);
1969 break;
1970 }
1971 case Op_CastX2P:
1972 { // "Unsafe" memory access.
1973 add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
1974 break;
1975 }
1976 case Op_CastPP:
1977 case Op_CheckCastPP:
1978 case Op_EncodeP:
1979 case Op_DecodeN:
1980 {
1981 add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
1982 int ti = n->in(1)->_idx;
1983 PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
1984 if (nt == PointsToNode::UnknownType) {
1985 _delayed_worklist.push(n); // Process it later.
1986 break;
1987 } else if (nt == PointsToNode::JavaObject) {
1988 add_pointsto_edge(n->_idx, ti);
1989 } else {
1990 add_deferred_edge(n->_idx, ti);
1991 }
1992 _processed.set(n->_idx);
1993 break;
1994 }
1995 case Op_ConP:
1996 {
1997 // assume all pointer constants globally escape except for null
1998 PointsToNode::EscapeState es;
1999 if (phase->type(n) == TypePtr::NULL_PTR)
2000 es = PointsToNode::NoEscape;
2001 else
2002 es = PointsToNode::GlobalEscape;
2004 add_node(n, PointsToNode::JavaObject, es, true);
2005 break;
2006 }
2007 case Op_ConN:
2008 {
2009 // assume all narrow oop constants globally escape except for null
2010 PointsToNode::EscapeState es;
2011 if (phase->type(n) == TypeNarrowOop::NULL_PTR)
2012 es = PointsToNode::NoEscape;
2013 else
2014 es = PointsToNode::GlobalEscape;
2016 add_node(n, PointsToNode::JavaObject, es, true);
2017 break;
2018 }
2019 case Op_CreateEx:
2020 {
2021 // assume that all exception objects globally escape
2022 add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
2023 break;
2024 }
2025 case Op_LoadKlass:
2026 case Op_LoadNKlass:
2027 {
2028 add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
2029 break;
2030 }
2031 case Op_LoadP:
2032 case Op_LoadN:
2033 {
2034 const Type *t = phase->type(n);
2035 if (t->make_ptr() == NULL) {
2036 _processed.set(n->_idx);
2037 return;
2038 }
2039 add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
2040 break;
2041 }
2042 case Op_Parm:
2043 {
2044 _processed.set(n->_idx); // No need to redefine its state.
2045 uint con = n->as_Proj()->_con;
2046 if (con < TypeFunc::Parms)
2047 return;
2048 const Type *t = n->in(0)->as_Start()->_domain->field_at(con);
2049 if (t->isa_ptr() == NULL)
2050 return;
2051 // We have to assume all input parameters globally escape
2052 // (Note: passing 'false' since _processed is already set).
2053 add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, false);
2054 break;
2055 }
2056 case Op_Phi:
2057 {
2058 const Type *t = n->as_Phi()->type();
2059 if (t->make_ptr() == NULL) {
2060 // nothing to do if not an oop or narrow oop
2061 _processed.set(n->_idx);
2062 return;
2063 }
2064 add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
2065 uint i;
2066 for (i = 1; i < n->req() ; i++) {
2067 Node* in = n->in(i);
2068 if (in == NULL)
2069 continue; // ignore NULL
2070 in = in->uncast();
2071 if (in->is_top() || in == n)
2072 continue; // ignore top or inputs which go back to this node
2073 int ti = in->_idx;
2074 PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
2075 if (nt == PointsToNode::UnknownType) {
2076 break;
2077 } else if (nt == PointsToNode::JavaObject) {
2078 add_pointsto_edge(n->_idx, ti);
2079 } else {
2080 add_deferred_edge(n->_idx, ti);
2081 }
2082 }
2083 if (i >= n->req())
2084 _processed.set(n->_idx);
2085 else
2086 _delayed_worklist.push(n);
2087 break;
2088 }
2089 case Op_Proj:
2090 {
2091 // we are only interested in the result projection from a call
2092 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
2093 add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
2094 process_call_result(n->as_Proj(), phase);
2095 if (!_processed.test(n->_idx)) {
2096 // The call's result may need to be processed later if the call
2097 // returns its argument and the argument is not processed yet.
2098 _delayed_worklist.push(n);
2099 }
2100 } else {
2101 _processed.set(n->_idx);
2102 }
2103 break;
2104 }
2105 case Op_Return:
2106 {
2107 if( n->req() > TypeFunc::Parms &&
2108 phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
2109 // Treat Return value as LocalVar with GlobalEscape escape state.
2110 add_node(n, PointsToNode::LocalVar, PointsToNode::GlobalEscape, false);
2111 int ti = n->in(TypeFunc::Parms)->_idx;
2112 PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
2113 if (nt == PointsToNode::UnknownType) {
2114 _delayed_worklist.push(n); // Process it later.
2115 break;
2116 } else if (nt == PointsToNode::JavaObject) {
2117 add_pointsto_edge(n->_idx, ti);
2118 } else {
2119 add_deferred_edge(n->_idx, ti);
2120 }
2121 }
2122 _processed.set(n->_idx);
2123 break;
2124 }
2125 case Op_StoreP:
2126 case Op_StoreN:
2127 {
2128 const Type *adr_type = phase->type(n->in(MemNode::Address));
2129 adr_type = adr_type->make_ptr();
2130 if (adr_type->isa_oopptr()) {
2131 add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
2132 } else {
2133 Node* adr = n->in(MemNode::Address);
2134 if (adr->is_AddP() && phase->type(adr) == TypeRawPtr::NOTNULL &&
2135 adr->in(AddPNode::Address)->is_Proj() &&
2136 adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
2137 add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
2138 // We are computing a raw address for a store captured
2139 // by an Initialize; compute an appropriate address type.
2140 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2141 assert(offs != Type::OffsetBot, "offset must be a constant");
2142 } else {
2143 _processed.set(n->_idx);
2144 return;
2145 }
2146 }
2147 break;
2148 }
2149 case Op_StorePConditional:
2150 case Op_CompareAndSwapP:
2151 case Op_CompareAndSwapN:
2152 {
2153 const Type *adr_type = phase->type(n->in(MemNode::Address));
2154 adr_type = adr_type->make_ptr();
2155 if (adr_type->isa_oopptr()) {
2156 add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
2157 } else {
2158 _processed.set(n->_idx);
2159 return;
2160 }
2161 break;
2162 }
2163 case Op_ThreadLocal:
2164 {
2165 add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
2166 break;
2167 }
2168 default:
2169 ;
2170 // nothing to do
2171 }
2172 return;
2173 }
2175 void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
2176 uint n_idx = n->_idx;
2178 // Don't set processed bit for AddP, LoadP, StoreP since
2179 // they may need more than one pass to process.
2180 if (_processed.test(n_idx))
2181 return; // No need to redefine the node's state.
2183 if (n->is_Call()) {
2184 CallNode *call = n->as_Call();
2185 process_call_arguments(call, phase);
2186 _processed.set(n_idx);
2187 return;
2188 }
2190 switch (n->Opcode()) {
2191 case Op_AddP:
2192 {
2193 Node *base = get_addp_base(n);
2194 // Create a field edge to this node from everything base could point to.
2195 VectorSet ptset(Thread::current()->resource_area());
2196 PointsTo(ptset, base, phase);
2197 for( VectorSetI i(&ptset); i.test(); ++i ) {
2198 uint pt = i.elem;
2199 add_field_edge(pt, n_idx, address_offset(n, phase));
2200 }
2201 break;
2202 }
2203 case Op_CastX2P:
2204 {
2205 assert(false, "Op_CastX2P");
2206 break;
2207 }
2208 case Op_CastPP:
2209 case Op_CheckCastPP:
2210 case Op_EncodeP:
2211 case Op_DecodeN:
2212 {
2213 int ti = n->in(1)->_idx;
2214 if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
2215 add_pointsto_edge(n_idx, ti);
2216 } else {
2217 add_deferred_edge(n_idx, ti);
2218 }
2219 _processed.set(n_idx);
2220 break;
2221 }
2222 case Op_ConP:
2223 {
2224 assert(false, "Op_ConP");
2225 break;
2226 }
2227 case Op_ConN:
2228 {
2229 assert(false, "Op_ConN");
2230 break;
2231 }
2232 case Op_CreateEx:
2233 {
2234 assert(false, "Op_CreateEx");
2235 break;
2236 }
2237 case Op_LoadKlass:
2238 case Op_LoadNKlass:
2239 {
2240 assert(false, "Op_LoadKlass");
2241 break;
2242 }
2243 case Op_LoadP:
2244 case Op_LoadN:
2245 {
2246 const Type *t = phase->type(n);
2247 #ifdef ASSERT
2248 if (t->make_ptr() == NULL)
2249 assert(false, "Op_LoadP");
2250 #endif
2252 Node* adr = n->in(MemNode::Address)->uncast();
2253 const Type *adr_type = phase->type(adr);
2254 Node* adr_base;
2255 if (adr->is_AddP()) {
2256 adr_base = get_addp_base(adr);
2257 } else {
2258 adr_base = adr;
2259 }
2261 // For everything "adr_base" could point to, create a deferred edge from
2262 // this node to each field with the same offset.
2263 VectorSet ptset(Thread::current()->resource_area());
2264 PointsTo(ptset, adr_base, phase);
2265 int offset = address_offset(adr, phase);
2266 for( VectorSetI i(&ptset); i.test(); ++i ) {
2267 uint pt = i.elem;
2268 add_deferred_edge_to_fields(n_idx, pt, offset);
2269 }
2270 break;
2271 }
2272 case Op_Parm:
2273 {
2274 assert(false, "Op_Parm");
2275 break;
2276 }
2277 case Op_Phi:
2278 {
2279 #ifdef ASSERT
2280 const Type *t = n->as_Phi()->type();
2281 if (t->make_ptr() == NULL)
2282 assert(false, "Op_Phi");
2283 #endif
2284 for (uint i = 1; i < n->req() ; i++) {
2285 Node* in = n->in(i);
2286 if (in == NULL)
2287 continue; // ignore NULL
2288 in = in->uncast();
2289 if (in->is_top() || in == n)
2290 continue; // ignore top or inputs which go back to this node
2291 int ti = in->_idx;
2292 PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
2293 assert(nt != PointsToNode::UnknownType, "all nodes should be known");
2294 if (nt == PointsToNode::JavaObject) {
2295 add_pointsto_edge(n_idx, ti);
2296 } else {
2297 add_deferred_edge(n_idx, ti);
2298 }
2299 }
2300 _processed.set(n_idx);
2301 break;
2302 }
2303 case Op_Proj:
2304 {
2305 // we are only interested in the result projection from a call
2306 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
2307 process_call_result(n->as_Proj(), phase);
2308 assert(_processed.test(n_idx), "all call results should be processed");
2309 } else {
2310 assert(false, "Op_Proj");
2311 }
2312 break;
2313 }
2314 case Op_Return:
2315 {
2316 #ifdef ASSERT
2317 if( n->req() <= TypeFunc::Parms ||
2318 !phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
2319 assert(false, "Op_Return");
2320 }
2321 #endif
2322 int ti = n->in(TypeFunc::Parms)->_idx;
2323 if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
2324 add_pointsto_edge(n_idx, ti);
2325 } else {
2326 add_deferred_edge(n_idx, ti);
2327 }
2328 _processed.set(n_idx);
2329 break;
2330 }
2331 case Op_StoreP:
2332 case Op_StoreN:
2333 case Op_StorePConditional:
2334 case Op_CompareAndSwapP:
2335 case Op_CompareAndSwapN:
2336 {
2337 Node *adr = n->in(MemNode::Address);
2338 const Type *adr_type = phase->type(adr)->make_ptr();
2339 #ifdef ASSERT
2340 if (!adr_type->isa_oopptr())
2341 assert(phase->type(adr) == TypeRawPtr::NOTNULL, "Op_StoreP");
2342 #endif
2344 assert(adr->is_AddP(), "expecting an AddP");
2345 Node *adr_base = get_addp_base(adr);
2346 Node *val = n->in(MemNode::ValueIn)->uncast();
2347 // For everything "adr_base" could point to, create a deferred edge
2348 // to "val" from each field with the same offset.
2349 VectorSet ptset(Thread::current()->resource_area());
2350 PointsTo(ptset, adr_base, phase);
2351 for( VectorSetI i(&ptset); i.test(); ++i ) {
2352 uint pt = i.elem;
2353 add_edge_from_fields(pt, val->_idx, address_offset(adr, phase));
2354 }
2355 break;
2356 }
2357 case Op_ThreadLocal:
2358 {
2359 assert(false, "Op_ThreadLocal");
2360 break;
2361 }
2362 default:
2363 ;
2364 // nothing to do
2365 }
2366 }
2368 #ifndef PRODUCT
2369 void ConnectionGraph::dump() {
2370 PhaseGVN *igvn = _compile->initial_gvn();
2371 bool first = true;
2373 uint size = nodes_size();
2374 for (uint ni = 0; ni < size; ni++) {
2375 PointsToNode *ptn = ptnode_adr(ni);
2376 PointsToNode::NodeType ptn_type = ptn->node_type();
2378 if (ptn_type != PointsToNode::JavaObject || ptn->_node == NULL)
2379 continue;
2380 PointsToNode::EscapeState es = escape_state(ptn->_node, igvn);
2381 if (ptn->_node->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
2382 if (first) {
2383 tty->cr();
2384 tty->print("======== Connection graph for ");
2385 _compile->method()->print_short_name();
2386 tty->cr();
2387 first = false;
2388 }
2389 tty->print("%6d ", ni);
2390 ptn->dump();
2391 // Print all locals which reference this allocation
2392 for (uint li = ni; li < size; li++) {
2393 PointsToNode *ptn_loc = ptnode_adr(li);
2394 PointsToNode::NodeType ptn_loc_type = ptn_loc->node_type();
2395 if ( ptn_loc_type == PointsToNode::LocalVar && ptn_loc->_node != NULL &&
2396 ptn_loc->edge_count() == 1 && ptn_loc->edge_target(0) == ni ) {
2397 ptnode_adr(li)->dump(false);
2398 }
2399 }
2400 if (Verbose) {
2401 // Print all fields which reference this allocation
2402 for (uint i = 0; i < ptn->edge_count(); i++) {
2403 uint ei = ptn->edge_target(i);
2404 ptnode_adr(ei)->dump(false);
2405 }
2406 }
2407 tty->cr();
2408 }
2409 }
2410 }
2411 #endif