513 // The type 't' could be a subclass of 'base_t'. |
513 // The type 't' could be a subclass of 'base_t'. |
514 // As a result, t->offset() could be larger than base_t's size and it will |
514 // As a result, t->offset() could be larger than base_t's size and it will |
515 // cause the failure in add_offset() with narrow oops since TypeOopPtr() |
515 // cause the failure in add_offset() with narrow oops since TypeOopPtr() |
516 // constructor verifies correctness of the offset. |
516 // constructor verifies correctness of the offset. |
517 // |
517 // |
518 // It could happen on subclass's branch (from the type profiling |
518 // It could happen on subclass's branch (from the type profiling |
519 // inlining) which was not eliminated during parsing since the exactness |
519 // inlining) which was not eliminated during parsing since the exactness |
520 // of the allocation type was not propagated to the subclass type check. |
520 // of the allocation type was not propagated to the subclass type check. |
521 // |
521 // |
522 // Do nothing for such AddP node and don't process its users since |
522 // Do nothing for such AddP node and don't process its users since |
523 // this code branch will go away. |
523 // this code branch will go away. |
701 Node *prev = NULL; |
701 Node *prev = NULL; |
702 Node *result = orig_mem; |
702 Node *result = orig_mem; |
703 while (prev != result) { |
703 while (prev != result) { |
704 prev = result; |
704 prev = result; |
705 if (result == start_mem) |
705 if (result == start_mem) |
706 break; // hit one of our sentinels |
706 break; // hit one of our sentinels |
707 if (result->is_Mem()) { |
707 if (result->is_Mem()) { |
708 const Type *at = phase->type(result->in(MemNode::Address)); |
708 const Type *at = phase->type(result->in(MemNode::Address)); |
709 if (at != Type::TOP) { |
709 if (at != Type::TOP) { |
710 assert (at->isa_ptr() != NULL, "pointer type required."); |
710 assert (at->isa_ptr() != NULL, "pointer type required."); |
711 int idx = C->get_alias_index(at->is_ptr()); |
711 int idx = C->get_alias_index(at->is_ptr()); |
718 continue; // don't search further for non-instance types |
718 continue; // don't search further for non-instance types |
719 // skip over a call which does not affect this memory slice |
719 // skip over a call which does not affect this memory slice |
720 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) { |
720 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) { |
721 Node *proj_in = result->in(0); |
721 Node *proj_in = result->in(0); |
722 if (proj_in->is_Allocate() && proj_in->_idx == (uint)tinst->instance_id()) { |
722 if (proj_in->is_Allocate() && proj_in->_idx == (uint)tinst->instance_id()) { |
723 break; // hit one of our sentinels |
723 break; // hit one of our sentinels |
724 } else if (proj_in->is_Call()) { |
724 } else if (proj_in->is_Call()) { |
725 CallNode *call = proj_in->as_Call(); |
725 CallNode *call = proj_in->as_Call(); |
726 if (!call->may_modify(tinst, phase)) { |
726 if (!call->may_modify(tinst, phase)) { |
727 result = call->in(TypeFunc::Memory); |
727 result = call->in(TypeFunc::Memory); |
728 } |
728 } |
802 // push any AddP users on alloc_worklist and push any memnode |
802 // push any AddP users on alloc_worklist and push any memnode |
803 // users onto memnode_worklist. |
803 // users onto memnode_worklist. |
804 // Phase 2: Process MemNode's from memnode_worklist. Compute new address type and |
804 // Phase 2: Process MemNode's from memnode_worklist. Compute new address type and |
805 // search the Memory chain for a store with the appropriate type |
805 // search the Memory chain for a store with the appropriate type |
806 // address type. If a Phi is found, create a new version with |
806 // address type. If a Phi is found, create a new version with |
807 // the appropriate memory slices from each of the Phi inputs. |
807 // the appropriate memory slices from each of the Phi inputs. |
808 // For stores, process the users as follows: |
808 // For stores, process the users as follows: |
809 // MemNode: push on memnode_worklist |
809 // MemNode: push on memnode_worklist |
810 // MergeMem: push on mergemem_worklist |
810 // MergeMem: push on mergemem_worklist |
811 // Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice |
811 // Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice |
812 // moving the first node encountered of each instance type to the |
812 // moving the first node encountered of each instance type to the |
1556 PointsToNode* ptn = ptnode_adr(worklist.pop()); |
1556 PointsToNode* ptn = ptnode_adr(worklist.pop()); |
1557 if (ptn->node_type() == PointsToNode::JavaObject) |
1557 if (ptn->node_type() == PointsToNode::JavaObject) |
1558 has_non_escaping_obj = true; // Non GlobalEscape |
1558 has_non_escaping_obj = true; // Non GlobalEscape |
1559 Node* n = ptn->_node; |
1559 Node* n = ptn->_node; |
1560 if (n->is_Allocate() && ptn->_scalar_replaceable ) { |
1560 if (n->is_Allocate() && ptn->_scalar_replaceable ) { |
1561 // Push scalar replaceable allocations on alloc_worklist |
1561 // Push scalar replaceable allocations on alloc_worklist |
1562 // for processing in split_unique_types(). |
1562 // for processing in split_unique_types(). |
1563 alloc_worklist.append(n); |
1563 alloc_worklist.append(n); |
1564 } |
1564 } |
1565 uint e_cnt = ptn->edge_count(); |
1565 uint e_cnt = ptn->edge_count(); |
1566 for (uint ei = 0; ei < e_cnt; ei++) { |
1566 for (uint ei = 0; ei < e_cnt; ei++) { |