Wed, 21 May 2008 10:45:07 -0700
6695810: null oop passed to encode_heap_oop_not_null
Summary: fix several problems in C2 related to Escape Analysis and Compressed Oops.
Reviewed-by: never, jrose
1.1 --- a/src/cpu/sparc/vm/sparc.ad Tue May 20 06:32:58 2008 -0700 1.2 +++ b/src/cpu/sparc/vm/sparc.ad Wed May 21 10:45:07 2008 -0700 1.3 @@ -5471,7 +5471,7 @@ 1.4 // Load Klass Pointer 1.5 instruct loadKlass(iRegP dst, memory mem) %{ 1.6 match(Set dst (LoadKlass mem)); 1.7 - predicate(!n->in(MemNode::Address)->bottom_type()->is_narrow()); 1.8 + predicate(!n->in(MemNode::Address)->bottom_type()->is_ptr_to_narrowoop()); 1.9 ins_cost(MEMORY_REF_COST); 1.10 size(4); 1.11 1.12 @@ -5489,7 +5489,7 @@ 1.13 // Load Klass Pointer 1.14 instruct loadKlassComp(iRegP dst, memory mem) %{ 1.15 match(Set dst (LoadKlass mem)); 1.16 - predicate(n->in(MemNode::Address)->bottom_type()->is_narrow()); 1.17 + predicate(n->in(MemNode::Address)->bottom_type()->is_ptr_to_narrowoop()); 1.18 ins_cost(MEMORY_REF_COST); 1.19 1.20 format %{ "LDUW $mem,$dst\t! compressed klass ptr" %}
2.1 --- a/src/cpu/x86/vm/x86_64.ad Tue May 20 06:32:58 2008 -0700 2.2 +++ b/src/cpu/x86/vm/x86_64.ad Wed May 21 10:45:07 2008 -0700 2.3 @@ -6044,10 +6044,9 @@ 2.4 %} 2.5 2.6 // Load Compressed Pointer 2.7 -instruct loadN(rRegN dst, memory mem, rFlagsReg cr) 2.8 +instruct loadN(rRegN dst, memory mem) 2.9 %{ 2.10 match(Set dst (LoadN mem)); 2.11 - effect(KILL cr); 2.12 2.13 ins_cost(125); // XXX 2.14 format %{ "movl $dst, $mem\t# compressed ptr" %} 2.15 @@ -6064,7 +6063,7 @@ 2.16 instruct loadKlass(rRegP dst, memory mem) 2.17 %{ 2.18 match(Set dst (LoadKlass mem)); 2.19 - predicate(!n->in(MemNode::Address)->bottom_type()->is_narrow()); 2.20 + predicate(!n->in(MemNode::Address)->bottom_type()->is_ptr_to_narrowoop()); 2.21 2.22 ins_cost(125); // XXX 2.23 format %{ "movq $dst, $mem\t# class" %} 2.24 @@ -6074,10 +6073,11 @@ 2.25 %} 2.26 2.27 // Load Klass Pointer 2.28 -instruct loadKlassComp(rRegP dst, memory mem) 2.29 +instruct loadKlassComp(rRegP dst, memory mem, rFlagsReg cr) 2.30 %{ 2.31 match(Set dst (LoadKlass mem)); 2.32 - predicate(n->in(MemNode::Address)->bottom_type()->is_narrow()); 2.33 + predicate(n->in(MemNode::Address)->bottom_type()->is_ptr_to_narrowoop()); 2.34 + effect(KILL cr); 2.35 2.36 ins_cost(125); // XXX 2.37 format %{ "movl $dst, $mem\t# compressed class\n\t" 2.38 @@ -6358,8 +6358,9 @@ 2.39 ins_pipe(ialu_reg); 2.40 %} 2.41 2.42 -instruct loadConN(rRegN dst, immN src) %{ 2.43 +instruct loadConN(rRegN dst, immN src, rFlagsReg cr) %{ 2.44 match(Set dst src); 2.45 + effect(KILL cr); 2.46 2.47 ins_cost(125); 2.48 format %{ "movq $dst, $src\t# compressed ptr\n\t" 2.49 @@ -6633,10 +6634,9 @@ 2.50 %} 2.51 2.52 // Store Compressed Pointer 2.53 -instruct storeN(memory mem, rRegN src, rFlagsReg cr) 2.54 +instruct storeN(memory mem, rRegN src) 2.55 %{ 2.56 match(Set mem (StoreN mem src)); 2.57 - effect(KILL cr); 2.58 2.59 ins_cost(125); // XXX 2.60 format %{ "movl $mem, $src\t# ptr" %}
3.1 --- a/src/share/vm/opto/callnode.cpp Tue May 20 06:32:58 2008 -0700 3.2 +++ b/src/share/vm/opto/callnode.cpp Wed May 21 10:45:07 2008 -0700 3.3 @@ -637,7 +637,7 @@ 3.4 } 3.5 Compile *C = phase->C; 3.6 int offset = adrInst_t->offset(); 3.7 - assert(offset >= 0, "should be valid offset"); 3.8 + assert(adrInst_t->klass_is_exact() && offset >= 0, "should be valid offset"); 3.9 ciKlass* adr_k = adrInst_t->klass(); 3.10 assert(adr_k->is_loaded() && 3.11 adr_k->is_java_klass() && 3.12 @@ -674,12 +674,11 @@ 3.13 ciKlass* at_k = at_ptr->klass(); 3.14 if ((adrInst_t->base() == at_ptr->base()) && 3.15 at_k->is_loaded() && 3.16 - at_k->is_java_klass() && 3.17 - !at_k->is_interface()) { 3.18 + at_k->is_java_klass()) { 3.19 // If we have found an argument matching addr_t, check if the field 3.20 // at the specified offset is modified. 3.21 - int at_idx = C->get_alias_index(at_ptr->add_offset(offset)->isa_oopptr()); 3.22 - if (base_idx == at_idx && 3.23 + if ((at_k->is_interface() || adr_k == at_k || 3.24 + adr_k->is_subclass_of(at_k) && !at_ptr->klass_is_exact()) && 3.25 (bcea == NULL || 3.26 bcea->is_arg_modified(i - TypeFunc::Parms, offset, size))) { 3.27 return true;
4.1 --- a/src/share/vm/opto/cfgnode.cpp Tue May 20 06:32:58 2008 -0700 4.2 +++ b/src/share/vm/opto/cfgnode.cpp Wed May 21 10:45:07 2008 -0700 4.3 @@ -707,8 +707,14 @@ 4.4 //------------------------split_out_instance----------------------------------- 4.5 // Split out an instance type from a bottom phi. 4.6 PhiNode* PhiNode::split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) const { 4.7 - assert(type() == Type::MEMORY && (adr_type() == TypePtr::BOTTOM || 4.8 - adr_type() == TypeRawPtr::BOTTOM) , "bottom or raw memory required"); 4.9 + const TypeOopPtr *t_oop = at->isa_oopptr(); 4.10 + assert(t_oop != NULL && t_oop->is_instance(), "expecting instance oopptr"); 4.11 + const TypePtr *t = adr_type(); 4.12 + assert(type() == Type::MEMORY && 4.13 + (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM || 4.14 + t->isa_oopptr() && !t->is_oopptr()->is_instance() && 4.15 + t->is_oopptr()->cast_to_instance(t_oop->instance_id()) == t_oop), 4.16 + "bottom or raw memory required"); 4.17 4.18 // Check if an appropriate node already exists. 4.19 Node *region = in(0); 4.20 @@ -1342,7 +1348,7 @@ 4.21 Node *n = phi->in(i); 4.22 if( !n ) return NULL; 4.23 if( phase->type(n) == Type::TOP ) return NULL; 4.24 - if( n->Opcode() == Op_ConP ) 4.25 + if( n->Opcode() == Op_ConP || n->Opcode() == Op_ConN ) 4.26 break; 4.27 } 4.28 if( i >= phi->req() ) // Only split for constants
5.1 --- a/src/share/vm/opto/compile.cpp Tue May 20 06:32:58 2008 -0700 5.2 +++ b/src/share/vm/opto/compile.cpp Wed May 21 10:45:07 2008 -0700 5.3 @@ -368,7 +368,12 @@ 5.4 BufferBlob* blob = BufferBlob::create("Compile::scratch_buffer", size); 5.5 // Record the buffer blob for next time. 5.6 set_scratch_buffer_blob(blob); 5.7 - guarantee(scratch_buffer_blob() != NULL, "Need BufferBlob for code generation"); 5.8 + // Have we run out of code space? 5.9 + if (scratch_buffer_blob() == NULL) { 5.10 + // Let CompilerBroker disable further compilations. 5.11 + record_failure("Not enough space for scratch buffer in CodeCache"); 5.12 + return; 5.13 + } 5.14 5.15 // Initialize the relocation buffers 5.16 relocInfo* locs_buf = (relocInfo*) blob->instructions_end() - MAX_locs_size; 5.17 @@ -1065,6 +1070,8 @@ 5.18 // No constant oop pointers (such as Strings); they alias with 5.19 // unknown strings. 5.20 tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset); 5.21 + } else if( to->is_instance_field() ) { 5.22 + tj = to; // Keep NotNull and klass_is_exact for instance type 5.23 } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) { 5.24 // During the 2nd round of IterGVN, NotNull castings are removed. 5.25 // Make sure the Bottom and NotNull variants alias the same. 5.26 @@ -1084,7 +1091,7 @@ 5.27 } else { 5.28 ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset); 5.29 if (!k->equals(canonical_holder) || tj->offset() != offset) { 5.30 - tj = to = TypeInstPtr::make(TypePtr::BotPTR, canonical_holder, false, NULL, offset, to->instance_id()); 5.31 + tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset, to->instance_id()); 5.32 } 5.33 } 5.34 }
6.1 --- a/src/share/vm/opto/connode.cpp Tue May 20 06:32:58 2008 -0700 6.2 +++ b/src/share/vm/opto/connode.cpp Wed May 21 10:45:07 2008 -0700 6.3 @@ -578,8 +578,11 @@ 6.4 const Type* newtype = value->bottom_type(); 6.5 if (newtype == TypeNarrowOop::NULL_PTR) { 6.6 return phase->transform(new (phase->C, 1) ConPNode(TypePtr::NULL_PTR)); 6.7 + } else if (newtype->isa_narrowoop()) { 6.8 + return phase->transform(new (phase->C, 2) DecodeNNode(value, newtype->is_narrowoop()->make_oopptr())); 6.9 } else { 6.10 - return phase->transform(new (phase->C, 2) DecodeNNode(value, newtype->is_narrowoop()->make_oopptr())); 6.11 + ShouldNotReachHere(); 6.12 + return NULL; // to make C++ compiler happy. 6.13 } 6.14 } 6.15 6.16 @@ -617,6 +620,9 @@ 6.17 } 6.18 } 6.19 6.20 +Node *EncodePNode::Ideal_DU_postCCP( PhaseCCP *ccp ) { 6.21 + return MemNode::Ideal_common_DU_postCCP(ccp, this, in(1)); 6.22 +} 6.23 6.24 //============================================================================= 6.25 //------------------------------Identity---------------------------------------
7.1 --- a/src/share/vm/opto/connode.hpp Tue May 20 06:32:58 2008 -0700 7.2 +++ b/src/share/vm/opto/connode.hpp Wed May 21 10:45:07 2008 -0700 7.3 @@ -283,6 +283,7 @@ 7.4 virtual uint ideal_reg() const { return Op_RegN; } 7.5 7.6 static Node* encode(PhaseGVN* phase, Node* value); 7.7 + virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp ); 7.8 }; 7.9 7.10 //------------------------------DecodeN--------------------------------
8.1 --- a/src/share/vm/opto/escape.cpp Tue May 20 06:32:58 2008 -0700 8.2 +++ b/src/share/vm/opto/escape.cpp Wed May 21 10:45:07 2008 -0700 8.3 @@ -888,6 +888,23 @@ 8.4 record_for_optimizer(n); 8.5 if (alloc->is_Allocate() && ptn->_scalar_replaceable && 8.6 (t->isa_instptr() || t->isa_aryptr())) { 8.7 + 8.8 + // First, put on the worklist all Field edges from Connection Graph 8.9 + // which is more accurate than putting immediate users from Ideal Graph. 8.10 + for (uint e = 0; e < ptn->edge_count(); e++) { 8.11 + Node *use = _nodes->adr_at(ptn->edge_target(e))->_node; 8.12 + assert(ptn->edge_type(e) == PointsToNode::FieldEdge && use->is_AddP(), 8.13 + "only AddP nodes are Field edges in CG"); 8.14 + if (use->outcnt() > 0) { // Don't process dead nodes 8.15 + Node* addp2 = find_second_addp(use, use->in(AddPNode::Base)); 8.16 + if (addp2 != NULL) { 8.17 + assert(alloc->is_AllocateArray(),"array allocation was expected"); 8.18 + alloc_worklist.append_if_missing(addp2); 8.19 + } 8.20 + alloc_worklist.append_if_missing(use); 8.21 + } 8.22 + } 8.23 + 8.24 + // An allocation may have an Initialize which has raw stores. Scan 8.25 + // the users of the raw allocation result and push AddP users 8.26 + // on alloc_worklist. 
8.27 @@ -919,6 +936,8 @@ 8.28 tinst = igvn->type(base)->isa_oopptr(); 8.29 } else if (n->is_Phi() || 8.30 n->is_CheckCastPP() || 8.31 + n->Opcode() == Op_EncodeP || 8.32 + n->Opcode() == Op_DecodeN || 8.33 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) { 8.34 if (visited.test_set(n->_idx)) { 8.35 assert(n->is_Phi(), "loops only through Phi's"); 8.36 @@ -935,13 +954,25 @@ 8.37 tinst = igvn->type(val)->isa_oopptr(); 8.38 assert(tinst != NULL && tinst->is_instance() && 8.39 tinst->instance_id() == elem , "instance type expected."); 8.40 - const TypeOopPtr *tn_t = igvn->type(tn)->isa_oopptr(); 8.41 + 8.42 + const TypeOopPtr *tn_t = NULL; 8.43 + const Type *tn_type = igvn->type(tn); 8.44 + if (tn_type->isa_narrowoop()) { 8.45 + tn_t = tn_type->is_narrowoop()->make_oopptr()->isa_oopptr(); 8.46 + } else { 8.47 + tn_t = tn_type->isa_oopptr(); 8.48 + } 8.49 8.50 if (tn_t != NULL && 8.51 tinst->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE)->higher_equal(tn_t)) { 8.52 + if (tn_type->isa_narrowoop()) { 8.53 + tn_type = tinst->make_narrowoop(); 8.54 + } else { 8.55 + tn_type = tinst; 8.56 + } 8.57 igvn->hash_delete(tn); 8.58 - igvn->set_type(tn, tinst); 8.59 - tn->set_type(tinst); 8.60 + igvn->set_type(tn, tn_type); 8.61 + tn->set_type(tn_type); 8.62 igvn->hash_insert(tn); 8.63 record_for_optimizer(n); 8.64 } 8.65 @@ -978,6 +1009,8 @@ 8.66 alloc_worklist.append_if_missing(use); 8.67 } else if (use->is_Phi() || 8.68 use->is_CheckCastPP() || 8.69 + use->Opcode() == Op_EncodeP || 8.70 + use->Opcode() == Op_DecodeN || 8.71 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) { 8.72 alloc_worklist.append_if_missing(use); 8.73 } 8.74 @@ -1199,7 +1232,7 @@ 8.75 8.76 void ConnectionGraph::compute_escape() { 8.77 8.78 - // 1. Populate Connection Graph with Ideal nodes. 8.79 + // 1. Populate Connection Graph (CG) with Ideal nodes. 
8.80 8.81 Unique_Node_List worklist_init; 8.82 worklist_init.map(_compile->unique(), NULL); // preallocate space 8.83 @@ -1281,11 +1314,13 @@ 8.84 remove_deferred(ni, &deferred_edges, &visited); 8.85 if (n->is_AddP()) { 8.86 // If this AddP computes an address which may point to more that one 8.87 - // object, nothing the address points to can be scalar replaceable. 8.88 + // object or more than one field (array's element), nothing the address 8.89 + // points to can be scalar replaceable. 8.90 Node *base = get_addp_base(n); 8.91 ptset.Clear(); 8.92 PointsTo(ptset, base, igvn); 8.93 - if (ptset.Size() > 1) { 8.94 + if (ptset.Size() > 1 || 8.95 + (ptset.Size() != 0 && ptn->offset() == Type::OffsetBot)) { 8.96 for( VectorSetI j(&ptset); j.test(); ++j ) { 8.97 uint pt = j.elem; 8.98 ptnode_adr(pt)->_scalar_replaceable = false; 8.99 @@ -1979,6 +2014,11 @@ 8.100 assert(false, "Op_ConP"); 8.101 break; 8.102 } 8.103 + case Op_ConN: 8.104 + { 8.105 + assert(false, "Op_ConN"); 8.106 + break; 8.107 + } 8.108 case Op_CreateEx: 8.109 { 8.110 assert(false, "Op_CreateEx");
9.1 --- a/src/share/vm/opto/library_call.cpp Tue May 20 06:32:58 2008 -0700 9.2 +++ b/src/share/vm/opto/library_call.cpp Wed May 21 10:45:07 2008 -0700 9.3 @@ -2168,7 +2168,7 @@ 9.4 // (They don't if CAS fails, but it isn't worth checking.) 9.5 pre_barrier(control(), base, adr, alias_idx, newval, value_type, T_OBJECT); 9.6 #ifdef _LP64 9.7 - if (adr->bottom_type()->is_narrow()) { 9.8 + if (adr->bottom_type()->is_ptr_to_narrowoop()) { 9.9 cas = _gvn.transform(new (C, 5) CompareAndSwapNNode(control(), mem, adr, 9.10 EncodePNode::encode(&_gvn, newval), 9.11 EncodePNode::encode(&_gvn, oldval))); 9.12 @@ -2838,6 +2838,8 @@ 9.13 _sp += nargs; // set original stack for use by uncommon_trap 9.14 mirror = do_null_check(mirror, T_OBJECT); 9.15 _sp -= nargs; 9.16 + // If mirror or obj is dead, only null-path is taken. 9.17 + if (stopped()) return true; 9.18 9.19 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT }; 9.20 RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT); 9.21 @@ -3827,24 +3829,22 @@ 9.22 if (!stopped()) { 9.23 // Copy the fastest available way. 9.24 // (No need for PreserveJVMState, since we're using it all up now.) 9.25 + // TODO: generate fields/elements copies for small objects instead. 9.26 Node* src = obj; 9.27 Node* dest = raw_obj; 9.28 - Node* end = dest; 9.29 Node* size = _gvn.transform(alloc_siz); 9.30 9.31 // Exclude the header. 9.32 int base_off = instanceOopDesc::base_offset_in_bytes(); 9.33 if (UseCompressedOops) { 9.34 - // copy the header gap though. 
9.35 - Node* sptr = basic_plus_adr(src, base_off); 9.36 - Node* dptr = basic_plus_adr(dest, base_off); 9.37 - Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, raw_adr_type); 9.38 - store_to_memory(control(), dptr, sval, T_INT, raw_adr_type); 9.39 - base_off += sizeof(int); 9.40 + assert(base_off % BytesPerLong != 0, "base with compressed oops"); 9.41 + // With compressed oops base_offset_in_bytes is 12 which creates 9.42 + // the gap since countx is rounded by 8 bytes below. 9.43 + // Copy klass and the gap. 9.44 + base_off = instanceOopDesc::klass_offset_in_bytes(); 9.45 } 9.46 src = basic_plus_adr(src, base_off); 9.47 dest = basic_plus_adr(dest, base_off); 9.48 - end = basic_plus_adr(end, size); 9.49 9.50 // Compute the length also, if needed: 9.51 Node* countx = size;
10.1 --- a/src/share/vm/opto/macro.cpp Tue May 20 06:32:58 2008 -0700 10.2 +++ b/src/share/vm/opto/macro.cpp Wed May 21 10:45:07 2008 -0700 10.3 @@ -1282,12 +1282,6 @@ 10.4 } 10.5 rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS); 10.6 10.7 - if (UseCompressedOops) { 10.8 - Node *zeronode = makecon(TypeInt::ZERO); 10.9 - // store uncompressed 0 into klass ptr to zero out gap. The gap is 10.10 - // used for primitive fields and has to be zeroed. 10.11 - rawmem = make_store(control, rawmem, object, oopDesc::klass_gap_offset_in_bytes(), zeronode, T_INT); 10.12 - } 10.13 rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_OBJECT); 10.14 int header_size = alloc->minimum_header_size(); // conservatively small 10.15
11.1 --- a/src/share/vm/opto/matcher.cpp Tue May 20 06:32:58 2008 -0700 11.2 +++ b/src/share/vm/opto/matcher.cpp Wed May 21 10:45:07 2008 -0700 11.3 @@ -880,7 +880,7 @@ 11.4 Node *m = n->in(i); // Get input 11.5 int op = m->Opcode(); 11.6 assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites"); 11.7 - if( op == Op_ConI || op == Op_ConP || 11.8 + if( op == Op_ConI || op == Op_ConP || op == Op_ConN || 11.9 op == Op_ConF || op == Op_ConD || op == Op_ConL 11.10 // || op == Op_BoxLock // %%%% enable this and remove (+++) in chaitin.cpp 11.11 ) { 11.12 @@ -1726,6 +1726,14 @@ 11.13 } 11.14 break; 11.15 } 11.16 + case Op_ConN: { // Convert narrow pointers above the centerline to NUL 11.17 + TypeNode *tn = n->as_Type(); // Constants derive from type nodes 11.18 + const TypePtr* tp = tn->type()->is_narrowoop()->make_oopptr(); 11.19 + if (tp->_ptr == TypePtr::AnyNull) { 11.20 + tn->set_type(TypeNarrowOop::NULL_PTR); 11.21 + } 11.22 + break; 11.23 + } 11.24 case Op_Binary: // These are introduced in the Post_Visit state. 11.25 ShouldNotReachHere(); 11.26 break;
12.1 --- a/src/share/vm/opto/memnode.cpp Tue May 20 06:32:58 2008 -0700 12.2 +++ b/src/share/vm/opto/memnode.cpp Wed May 21 10:45:07 2008 -0700 12.3 @@ -133,7 +133,9 @@ 12.4 PhiNode *mphi = result->as_Phi(); 12.5 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); 12.6 const TypePtr *t = mphi->adr_type(); 12.7 - if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM) { 12.8 + if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM || 12.9 + t->isa_oopptr() && !t->is_oopptr()->is_instance() && 12.10 + t->is_oopptr()->cast_to_instance(t_oop->instance_id()) == t_oop) { 12.11 // clone the Phi with our address type 12.12 result = mphi->split_out_instance(t_adr, igvn); 12.13 } else { 12.14 @@ -263,7 +265,10 @@ 12.15 // of all its inputs dominate or equal to sub's control edge. 12.16 12.17 // Currently 'sub' is either Allocate, Initialize or Start nodes. 12.18 - assert(sub->is_Allocate() || sub->is_Initialize() || sub->is_Start(), "expecting only these nodes"); 12.19 + // Or Region for the check in LoadNode::Ideal(); 12.20 + // 'sub' should have sub->in(0) != NULL. 12.21 + assert(sub->is_Allocate() || sub->is_Initialize() || sub->is_Start() || 12.22 + sub->is_Region(), "expecting only these nodes"); 12.23 12.24 // Get control edge of 'sub'. 12.25 sub = sub->find_exact_control(sub->in(0)); 12.26 @@ -576,6 +581,9 @@ 12.27 // Find any cast-away of null-ness and keep its control. Null cast-aways are 12.28 // going away in this pass and we need to make this memory op depend on the 12.29 // gating null check. 12.30 +Node *MemNode::Ideal_DU_postCCP( PhaseCCP *ccp ) { 12.31 + return Ideal_common_DU_postCCP(ccp, this, in(MemNode::Address)); 12.32 +} 12.33 12.34 // I tried to leave the CastPP's in. This makes the graph more accurate in 12.35 // some sense; we get to keep around the knowledge that an oop is not-null 12.36 @@ -585,15 +593,14 @@ 12.37 // some of the more trivial cases in the optimizer. 
Removing more useless 12.38 // Phi's started allowing Loads to illegally float above null checks. I gave 12.39 // up on this approach. CNC 10/20/2000 12.40 -Node *MemNode::Ideal_DU_postCCP( PhaseCCP *ccp ) { 12.41 - Node *ctr = in(MemNode::Control); 12.42 - Node *mem = in(MemNode::Memory); 12.43 - Node *adr = in(MemNode::Address); 12.44 +// This static method may be called not from MemNode (EncodePNode calls it). 12.45 +// Only the control edge of the node 'n' might be updated. 12.46 +Node *MemNode::Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ) { 12.47 Node *skipped_cast = NULL; 12.48 // Need a null check? Regular static accesses do not because they are 12.49 // from constant addresses. Array ops are gated by the range check (which 12.50 // always includes a NULL check). Just check field ops. 12.51 - if( !ctr ) { 12.52 + if( n->in(MemNode::Control) == NULL ) { 12.53 // Scan upwards for the highest location we can place this memory op. 12.54 while( true ) { 12.55 switch( adr->Opcode() ) { 12.56 @@ -618,10 +625,10 @@ 12.57 } 12.58 // CastPP is going away in this pass! We need this memory op to be 12.59 // control-dependent on the test that is guarding the CastPP. 12.60 - ccp->hash_delete(this); 12.61 - set_req(MemNode::Control, adr->in(0)); 12.62 - ccp->hash_insert(this); 12.63 - return this; 12.64 + ccp->hash_delete(n); 12.65 + n->set_req(MemNode::Control, adr->in(0)); 12.66 + ccp->hash_insert(n); 12.67 + return n; 12.68 12.69 case Op_Phi: 12.70 // Attempt to float above a Phi to some dominating point. 12.71 @@ -652,10 +659,10 @@ 12.72 adr = adr->in(1); 12.73 continue; 12.74 } 12.75 - ccp->hash_delete(this); 12.76 - set_req(MemNode::Control, adr->in(0)); 12.77 - ccp->hash_insert(this); 12.78 - return this; 12.79 + ccp->hash_delete(n); 12.80 + n->set_req(MemNode::Control, adr->in(0)); 12.81 + ccp->hash_insert(n); 12.82 + return n; 12.83 12.84 // List of "safe" opcodes; those that implicitly block the memory 12.85 // op below any null check. 
12.86 @@ -665,6 +672,7 @@ 12.87 case Op_LoadN: // Loading from within a klass 12.88 case Op_LoadKlass: // Loading from within a klass 12.89 case Op_ConP: // Loading from a klass 12.90 + case Op_ConN: // Loading from a klass 12.91 case Op_CreateEx: // Sucking up the guts of an exception oop 12.92 case Op_Con: // Reading from TLS 12.93 case Op_CMoveP: // CMoveP is pinned 12.94 @@ -676,8 +684,8 @@ 12.95 { 12.96 assert(adr->as_Proj()->_con == TypeFunc::Parms, "must be return value"); 12.97 const Node* call = adr->in(0); 12.98 - if (call->is_CallStaticJava()) { 12.99 - const CallStaticJavaNode* call_java = call->as_CallStaticJava(); 12.100 + if (call->is_CallJava()) { 12.101 + const CallJavaNode* call_java = call->as_CallJava(); 12.102 const TypeTuple *r = call_java->tf()->range(); 12.103 assert(r->cnt() > TypeFunc::Parms, "must return value"); 12.104 const Type* ret_type = r->field_at(TypeFunc::Parms); 12.105 @@ -749,7 +757,7 @@ 12.106 case T_ADDRESS: return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr() ); 12.107 case T_OBJECT: 12.108 #ifdef _LP64 12.109 - if (adr->bottom_type()->is_narrow()) { 12.110 + if (adr->bottom_type()->is_ptr_to_narrowoop()) { 12.111 const TypeNarrowOop* narrowtype; 12.112 if (rt->isa_narrowoop()) { 12.113 narrowtype = rt->is_narrowoop(); 12.114 @@ -761,10 +769,10 @@ 12.115 return DecodeNNode::decode(&gvn, load); 12.116 } else 12.117 #endif 12.118 - { 12.119 - assert(!adr->bottom_type()->is_narrow(), "should have got back a narrow oop"); 12.120 - return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr()); 12.121 - } 12.122 + { 12.123 + assert(!adr->bottom_type()->is_ptr_to_narrowoop(), "should have got back a narrow oop"); 12.124 + return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr()); 12.125 + } 12.126 } 12.127 ShouldNotReachHere(); 12.128 return (LoadNode*)NULL; 12.129 @@ -1118,6 +1126,127 @@ 12.130 return NULL; 12.131 } 12.132 12.133 
+//------------------------------split_through_phi------------------------------ 12.134 +// Split instance field load through Phi. 12.135 +Node *LoadNode::split_through_phi(PhaseGVN *phase) { 12.136 + Node* mem = in(MemNode::Memory); 12.137 + Node* address = in(MemNode::Address); 12.138 + const TypePtr *addr_t = phase->type(address)->isa_ptr(); 12.139 + const TypeOopPtr *t_oop = addr_t->isa_oopptr(); 12.140 + 12.141 + assert(mem->is_Phi() && (t_oop != NULL) && 12.142 + t_oop->is_instance_field(), "invalide conditions"); 12.143 + 12.144 + Node *region = mem->in(0); 12.145 + if (region == NULL) { 12.146 + return NULL; // Wait stable graph 12.147 + } 12.148 + uint cnt = mem->req(); 12.149 + for( uint i = 1; i < cnt; i++ ) { 12.150 + Node *in = mem->in(i); 12.151 + if( in == NULL ) { 12.152 + return NULL; // Wait stable graph 12.153 + } 12.154 + } 12.155 + // Check for loop invariant. 12.156 + if (cnt == 3) { 12.157 + for( uint i = 1; i < cnt; i++ ) { 12.158 + Node *in = mem->in(i); 12.159 + Node* m = MemNode::optimize_memory_chain(in, addr_t, phase); 12.160 + if (m == mem) { 12.161 + set_req(MemNode::Memory, mem->in(cnt - i)); // Skip this phi. 12.162 + return this; 12.163 + } 12.164 + } 12.165 + } 12.166 + // Split through Phi (see original code in loopopts.cpp). 12.167 + assert(phase->C->have_alias_type(addr_t), "instance should have alias type"); 12.168 + 12.169 + // Do nothing here if Identity will find a value 12.170 + // (to avoid infinite chain of value phis generation). 12.171 + if ( !phase->eqv(this, this->Identity(phase)) ) 12.172 + return NULL; 12.173 + 12.174 + // Skip the split if the region dominates some control edge of the address. 
12.175 + if (cnt == 3 && !MemNode::all_controls_dominate(address, region)) 12.176 + return NULL; 12.177 + 12.178 + const Type* this_type = this->bottom_type(); 12.179 + int this_index = phase->C->get_alias_index(addr_t); 12.180 + int this_offset = addr_t->offset(); 12.181 + int this_iid = addr_t->is_oopptr()->instance_id(); 12.182 + int wins = 0; 12.183 + PhaseIterGVN *igvn = phase->is_IterGVN(); 12.184 + Node *phi = new (igvn->C, region->req()) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset); 12.185 + for( uint i = 1; i < region->req(); i++ ) { 12.186 + Node *x; 12.187 + Node* the_clone = NULL; 12.188 + if( region->in(i) == phase->C->top() ) { 12.189 + x = phase->C->top(); // Dead path? Use a dead data op 12.190 + } else { 12.191 + x = this->clone(); // Else clone up the data op 12.192 + the_clone = x; // Remember for possible deletion. 12.193 + // Alter data node to use pre-phi inputs 12.194 + if( this->in(0) == region ) { 12.195 + x->set_req( 0, region->in(i) ); 12.196 + } else { 12.197 + x->set_req( 0, NULL ); 12.198 + } 12.199 + for( uint j = 1; j < this->req(); j++ ) { 12.200 + Node *in = this->in(j); 12.201 + if( in->is_Phi() && in->in(0) == region ) 12.202 + x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone 12.203 + } 12.204 + } 12.205 + // Check for a 'win' on some paths 12.206 + const Type *t = x->Value(igvn); 12.207 + 12.208 + bool singleton = t->singleton(); 12.209 + 12.210 + // See comments in PhaseIdealLoop::split_thru_phi(). 12.211 + if( singleton && t == Type::TOP ) { 12.212 + singleton &= region->is_Loop() && (i != LoopNode::EntryControl); 12.213 + } 12.214 + 12.215 + if( singleton ) { 12.216 + wins++; 12.217 + x = igvn->makecon(t); 12.218 + } else { 12.219 + // We now call Identity to try to simplify the cloned node. 12.220 + // Note that some Identity methods call phase->type(this). 12.221 + // Make sure that the type array is big enough for 12.222 + // our new node, even though we may throw the node away. 
12.223 + // (This tweaking with igvn only works because x is a new node.) 12.224 + igvn->set_type(x, t); 12.225 + Node *y = x->Identity(igvn); 12.226 + if( y != x ) { 12.227 + wins++; 12.228 + x = y; 12.229 + } else { 12.230 + y = igvn->hash_find(x); 12.231 + if( y ) { 12.232 + wins++; 12.233 + x = y; 12.234 + } else { 12.235 + // Else x is a new node we are keeping 12.236 + // We do not need register_new_node_with_optimizer 12.237 + // because set_type has already been called. 12.238 + igvn->_worklist.push(x); 12.239 + } 12.240 + } 12.241 + } 12.242 + if (x != the_clone && the_clone != NULL) 12.243 + igvn->remove_dead_node(the_clone); 12.244 + phi->set_req(i, x); 12.245 + } 12.246 + if( wins > 0 ) { 12.247 + // Record Phi 12.248 + igvn->register_new_node_with_optimizer(phi); 12.249 + return phi; 12.250 + } 12.251 + igvn->remove_dead_node(phi); 12.252 + return NULL; 12.253 +} 12.254 12.255 //------------------------------Ideal------------------------------------------ 12.256 // If the load is from Field memory and the pointer is non-null, we can 12.257 @@ -1175,112 +1304,9 @@ 12.258 const TypeOopPtr *t_oop = addr_t->isa_oopptr(); 12.259 if (can_reshape && opt_mem->is_Phi() && 12.260 (t_oop != NULL) && t_oop->is_instance_field()) { 12.261 - assert(t_oop->offset() != Type::OffsetBot && t_oop->offset() != Type::OffsetTop, ""); 12.262 - Node *region = opt_mem->in(0); 12.263 - uint cnt = opt_mem->req(); 12.264 - for( uint i = 1; i < cnt; i++ ) { 12.265 - Node *in = opt_mem->in(i); 12.266 - if( in == NULL ) { 12.267 - region = NULL; // Wait stable graph 12.268 - break; 12.269 - } 12.270 - } 12.271 - if (region != NULL) { 12.272 - // Check for loop invariant. 12.273 - if (cnt == 3) { 12.274 - for( uint i = 1; i < cnt; i++ ) { 12.275 - Node *in = opt_mem->in(i); 12.276 - Node* m = MemNode::optimize_memory_chain(in, addr_t, phase); 12.277 - if (m == opt_mem) { 12.278 - set_req(MemNode::Memory, opt_mem->in(cnt - i)); // Skip this phi. 
12.279 - return this; 12.280 - } 12.281 - } 12.282 - } 12.283 - // Split through Phi (see original code in loopopts.cpp). 12.284 - assert(phase->C->have_alias_type(addr_t), "instance should have alias type"); 12.285 - 12.286 - // Do nothing here if Identity will find a value 12.287 - // (to avoid infinite chain of value phis generation). 12.288 - if ( !phase->eqv(this, this->Identity(phase)) ) 12.289 - return NULL; 12.290 - 12.291 - const Type* this_type = this->bottom_type(); 12.292 - int this_index = phase->C->get_alias_index(addr_t); 12.293 - int this_offset = addr_t->offset(); 12.294 - int this_iid = addr_t->is_oopptr()->instance_id(); 12.295 - int wins = 0; 12.296 - PhaseIterGVN *igvn = phase->is_IterGVN(); 12.297 - Node *phi = new (igvn->C, region->req()) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset); 12.298 - for( uint i = 1; i < region->req(); i++ ) { 12.299 - Node *x; 12.300 - Node* the_clone = NULL; 12.301 - if( region->in(i) == phase->C->top() ) { 12.302 - x = phase->C->top(); // Dead path? Use a dead data op 12.303 - } else { 12.304 - x = this->clone(); // Else clone up the data op 12.305 - the_clone = x; // Remember for possible deletion. 12.306 - // Alter data node to use pre-phi inputs 12.307 - if( this->in(0) == region ) { 12.308 - x->set_req( 0, region->in(i) ); 12.309 - } else { 12.310 - x->set_req( 0, NULL ); 12.311 - } 12.312 - for( uint j = 1; j < this->req(); j++ ) { 12.313 - Node *in = this->in(j); 12.314 - if( in->is_Phi() && in->in(0) == region ) 12.315 - x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone 12.316 - } 12.317 - } 12.318 - // Check for a 'win' on some paths 12.319 - const Type *t = x->Value(igvn); 12.320 - 12.321 - bool singleton = t->singleton(); 12.322 - 12.323 - // See comments in PhaseIdealLoop::split_thru_phi(). 
12.324 - if( singleton && t == Type::TOP ) { 12.325 - singleton &= region->is_Loop() && (i != LoopNode::EntryControl); 12.326 - } 12.327 - 12.328 - if( singleton ) { 12.329 - wins++; 12.330 - x = igvn->makecon(t); 12.331 - } else { 12.332 - // We now call Identity to try to simplify the cloned node. 12.333 - // Note that some Identity methods call phase->type(this). 12.334 - // Make sure that the type array is big enough for 12.335 - // our new node, even though we may throw the node away. 12.336 - // (This tweaking with igvn only works because x is a new node.) 12.337 - igvn->set_type(x, t); 12.338 - Node *y = x->Identity(igvn); 12.339 - if( y != x ) { 12.340 - wins++; 12.341 - x = y; 12.342 - } else { 12.343 - y = igvn->hash_find(x); 12.344 - if( y ) { 12.345 - wins++; 12.346 - x = y; 12.347 - } else { 12.348 - // Else x is a new node we are keeping 12.349 - // We do not need register_new_node_with_optimizer 12.350 - // because set_type has already been called. 12.351 - igvn->_worklist.push(x); 12.352 - } 12.353 - } 12.354 - } 12.355 - if (x != the_clone && the_clone != NULL) 12.356 - igvn->remove_dead_node(the_clone); 12.357 - phi->set_req(i, x); 12.358 - } 12.359 - if( wins > 0 ) { 12.360 - // Record Phi 12.361 - igvn->register_new_node_with_optimizer(phi); 12.362 - return phi; 12.363 - } else { 12.364 - igvn->remove_dead_node(phi); 12.365 - } 12.366 - } 12.367 + // Split instance field load through Phi. 12.368 + Node* result = split_through_phi(phase); 12.369 + if (result != NULL) return result; 12.370 } 12.371 } 12.372 12.373 @@ -1835,7 +1861,7 @@ 12.374 case T_ADDRESS: 12.375 case T_OBJECT: 12.376 #ifdef _LP64 12.377 - if (adr->bottom_type()->is_narrow() || 12.378 + if (adr->bottom_type()->is_ptr_to_narrowoop() || 12.379 (UseCompressedOops && val->bottom_type()->isa_klassptr() && 12.380 adr->bottom_type()->isa_rawptr())) { 12.381 const TypePtr* type = val->bottom_type()->is_ptr();
13.1 --- a/src/share/vm/opto/memnode.hpp Tue May 20 06:32:58 2008 -0700 13.2 +++ b/src/share/vm/opto/memnode.hpp Wed May 21 10:45:07 2008 -0700 13.3 @@ -72,7 +72,8 @@ 13.4 // This one should probably be a phase-specific function: 13.5 static bool all_controls_dominate(Node* dom, Node* sub); 13.6 13.7 - // Is this Node a MemNode or some descendent? Default is YES. 13.8 + // Find any cast-away of null-ness and keep its control. 13.9 + static Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ); 13.10 virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp ); 13.11 13.12 virtual const class TypePtr *adr_type() const; // returns bottom_type of address 13.13 @@ -150,6 +151,9 @@ 13.14 // zero out the control input. 13.15 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 13.16 13.17 + // Split instance field load through Phi. 13.18 + Node* split_through_phi(PhaseGVN *phase); 13.19 + 13.20 // Recover original value from boxed values 13.21 Node *eliminate_autobox(PhaseGVN *phase); 13.22
14.1 --- a/src/share/vm/opto/node.cpp Tue May 20 06:32:58 2008 -0700 14.2 +++ b/src/share/vm/opto/node.cpp Wed May 21 10:45:07 2008 -0700 14.3 @@ -1049,51 +1049,80 @@ 14.4 Node* orig_sub = sub; 14.5 nlist.clear(); 14.6 bool this_dominates = false; 14.7 - uint region_input = 0; 14.8 + bool result = false; // Conservative answer 14.9 + 14.10 while (sub != NULL) { // walk 'sub' up the chain to 'this' 14.11 if (sub == this) { 14.12 if (nlist.size() == 0) { 14.13 // No Region nodes except loops were visited before and the EntryControl 14.14 // path was taken for loops: it did not walk in a cycle. 14.15 - return true; 14.16 - } else if (!this_dominates) { 14.17 + result = true; 14.18 + break; 14.19 + } else if (this_dominates) { 14.20 + result = false; // already met before: walk in a cycle 14.21 + break; 14.22 + } else { 14.23 // Region nodes were visited. Continue walk up to Start or Root 14.24 // to make sure that it did not walk in a cycle. 14.25 this_dominates = true; // first time meet 14.26 iterations_without_region_limit = DominatorSearchLimit; // Reset 14.27 - } else { 14.28 - return false; // already met before: walk in a cycle 14.29 - } 14.30 + } 14.31 } 14.32 - if (sub->is_Start() || sub->is_Root()) 14.33 - return this_dominates; 14.34 + if (sub->is_Start() || sub->is_Root()) { 14.35 + result = this_dominates; 14.36 + break; 14.37 + } 14.38 + Node* up = sub->find_exact_control(sub->in(0)); 14.39 + if (up == NULL || up->is_top()) { 14.40 + result = false; // Conservative answer for dead code 14.41 + break; 14.42 + } 14.43 + if (sub == up && (sub->is_Loop() || sub->is_Region() && sub->req() != 3)) { 14.44 + // Take first valid path on the way up to 'this'. 
14.45 + up = sub->in(1); // in(LoopNode::EntryControl); 14.46 + } else if (sub == up && sub->is_Region()) { 14.47 + assert(sub->req() == 3, "sanity"); 14.48 + iterations_without_region_limit = DominatorSearchLimit; // Reset 14.49 14.50 - Node* up = sub->find_exact_control(sub->in(0)); 14.51 - if (up == NULL || up->is_top()) 14.52 - return false; // Conservative answer for dead code 14.53 - 14.54 - if (sub == up && sub->is_Loop()) { 14.55 - up = sub->in(1); // in(LoopNode::EntryControl); 14.56 - } else if (sub == up && sub->is_Region() && sub->req() == 3) { 14.57 - iterations_without_region_limit = DominatorSearchLimit; // Reset 14.58 + // Try both paths for such Regions. 14.59 + // It is not accurate without regions dominating information. 14.60 + // With such information the other path should be checked for 14.61 + // the most dominating Region which was visited before. 14.62 + bool region_was_visited_before = false; 14.63 uint i = 1; 14.64 uint size = nlist.size(); 14.65 if (size == 0) { 14.66 - // No Region nodes (except Loops) were visited before. 14.67 + // No such Region nodes were visited before. 14.68 // Take first valid path on the way up to 'this'. 14.69 - } else if (nlist.at(size - 1) == sub) { 14.70 - // This Region node was just visited. Take other path. 14.71 - i = region_input + 1; 14.72 - nlist.pop(); 14.73 } else { 14.74 // Was this Region node visited before? 14.75 - for (uint j = 0; j < size; j++) { 14.76 - if (nlist.at(j) == sub) { 14.77 - return false; // The Region node was visited before. Give up. 14.78 + intptr_t ni; 14.79 + int j = size - 1; 14.80 + for (; j >= 0; j--) { 14.81 + ni = (intptr_t)nlist.at(j); 14.82 + if ((Node*)(ni & ~1) == sub) { 14.83 + if ((ni & 1) != 0) { 14.84 + break; // Visited 2 paths. Give up. 14.85 + } else { 14.86 + // The Region node was visited before only once. 
14.87 + nlist.remove(j); 14.88 + region_was_visited_before = true; 14.89 + for (; i < sub->req(); i++) { 14.90 + Node* in = sub->in(i); 14.91 + if (in != NULL && !in->is_top() && in != sub) { 14.92 + break; 14.93 + } 14.94 + } 14.95 + i++; // Take other path. 14.96 + break; 14.97 + } 14.98 } 14.99 } 14.100 + if (j >= 0 && (ni & 1) != 0) { 14.101 + result = false; // Visited 2 paths. Give up. 14.102 + break; 14.103 + } 14.104 // The Region node was not visited before. 14.105 - // Take first valid path on the way up to 'this'. 14.106 } 14.107 for (; i < sub->req(); i++) { 14.108 Node* in = sub->in(i); 14.109 @@ -1102,20 +1131,26 @@ 14.110 } 14.111 } 14.112 if (i < sub->req()) { 14.113 - nlist.push(sub); 14.114 up = sub->in(i); 14.115 - region_input = i; 14.116 + if (region_was_visited_before && sub != up) { 14.117 + // Set 0 bit to indicate that both paths were taken. 14.118 + nlist.push((Node*)((intptr_t)sub + 1)); 14.119 + } else { 14.120 + nlist.push(sub); 14.121 + } 14.122 } 14.123 } 14.124 - if (sub == up) 14.125 - return false; // some kind of tight cycle 14.126 - 14.127 - if (--iterations_without_region_limit < 0) 14.128 - return false; // dead cycle 14.129 - 14.130 + if (sub == up) { 14.131 + result = false; // some kind of tight cycle 14.132 + break; 14.133 + } 14.134 + if (--iterations_without_region_limit < 0) { 14.135 + result = false; // dead cycle 14.136 + break; 14.137 + } 14.138 sub = up; 14.139 } 14.140 - return false; 14.141 + return result; 14.142 } 14.143 14.144 //------------------------------remove_dead_region-----------------------------
15.1 --- a/src/share/vm/opto/output.cpp Tue May 20 06:32:58 2008 -0700 15.2 +++ b/src/share/vm/opto/output.cpp Wed May 21 10:45:07 2008 -0700 15.3 @@ -48,6 +48,7 @@ 15.4 // Initialize the space for the BufferBlob used to find and verify 15.5 // instruction size in MachNode::emit_size() 15.6 init_scratch_buffer_blob(); 15.7 + if (failing()) return; // Out of memory 15.8 15.9 // Make sure I can find the Start Node 15.10 Block_Array& bbs = _cfg->_bbs;
16.1 --- a/src/share/vm/opto/type.cpp Tue May 20 06:32:58 2008 -0700 16.2 +++ b/src/share/vm/opto/type.cpp Wed May 21 10:45:07 2008 -0700 16.3 @@ -311,8 +311,18 @@ 16.4 mreg2type[Op_RegFlags] = TypeInt::CC; 16.5 16.6 TypeAryPtr::RANGE = TypeAryPtr::make( TypePtr::BotPTR, TypeAry::make(Type::BOTTOM,TypeInt::POS), current->env()->Object_klass(), false, arrayOopDesc::length_offset_in_bytes()); 16.7 - // There is no shared klass for Object[]. See note in TypeAryPtr::klass(). 16.8 - TypeAryPtr::OOPS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInstPtr::BOTTOM,TypeInt::POS), NULL /*ciArrayKlass::make(o)*/, false, Type::OffsetBot); 16.9 + 16.10 + TypeAryPtr::NARROWOOPS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeNarrowOop::BOTTOM, TypeInt::POS), NULL /*ciArrayKlass::make(o)*/, false, Type::OffsetBot); 16.11 + 16.12 +#ifdef _LP64 16.13 + if (UseCompressedOops) { 16.14 + TypeAryPtr::OOPS = TypeAryPtr::NARROWOOPS; 16.15 + } else 16.16 +#endif 16.17 + { 16.18 + // There is no shared klass for Object[]. See note in TypeAryPtr::klass(). 
16.19 + TypeAryPtr::OOPS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInstPtr::BOTTOM,TypeInt::POS), NULL /*ciArrayKlass::make(o)*/, false, Type::OffsetBot); 16.20 + } 16.21 TypeAryPtr::BYTES = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInt::BYTE ,TypeInt::POS), ciTypeArrayKlass::make(T_BYTE), true, Type::OffsetBot); 16.22 TypeAryPtr::SHORTS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInt::SHORT ,TypeInt::POS), ciTypeArrayKlass::make(T_SHORT), true, Type::OffsetBot); 16.23 TypeAryPtr::CHARS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInt::CHAR ,TypeInt::POS), ciTypeArrayKlass::make(T_CHAR), true, Type::OffsetBot); 16.24 @@ -321,9 +331,10 @@ 16.25 TypeAryPtr::FLOATS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(Type::FLOAT ,TypeInt::POS), ciTypeArrayKlass::make(T_FLOAT), true, Type::OffsetBot); 16.26 TypeAryPtr::DOUBLES = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(Type::DOUBLE ,TypeInt::POS), ciTypeArrayKlass::make(T_DOUBLE), true, Type::OffsetBot); 16.27 16.28 - TypeAryPtr::_array_body_type[T_NARROWOOP] = NULL; // what should this be? 16.29 + // Nobody should ask _array_body_type[T_NARROWOOP]. Use NULL as assert. 
16.30 + TypeAryPtr::_array_body_type[T_NARROWOOP] = NULL; 16.31 TypeAryPtr::_array_body_type[T_OBJECT] = TypeAryPtr::OOPS; 16.32 - TypeAryPtr::_array_body_type[T_ARRAY] = TypeAryPtr::OOPS; // arrays are stored in oop arrays 16.33 + TypeAryPtr::_array_body_type[T_ARRAY] = TypeAryPtr::OOPS; // arrays are stored in oop arrays 16.34 TypeAryPtr::_array_body_type[T_BYTE] = TypeAryPtr::BYTES; 16.35 TypeAryPtr::_array_body_type[T_BOOLEAN] = TypeAryPtr::BYTES; // boolean[] is a byte array 16.36 TypeAryPtr::_array_body_type[T_SHORT] = TypeAryPtr::SHORTS; 16.37 @@ -696,7 +707,7 @@ 16.38 ResourceMark rm; 16.39 Dict d(cmpkey,hashkey); // Stop recursive type dumping 16.40 dump2(d,1, st); 16.41 - if (isa_ptr() && is_ptr()->is_narrow()) { 16.42 + if (is_ptr_to_narrowoop()) { 16.43 st->print(" [narrow]"); 16.44 } 16.45 } 16.46 @@ -2146,6 +2157,67 @@ 16.47 // Convenience common pre-built type. 16.48 const TypeOopPtr *TypeOopPtr::BOTTOM; 16.49 16.50 +//------------------------------TypeOopPtr------------------------------------- 16.51 +TypeOopPtr::TypeOopPtr( TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id ) 16.52 + : TypePtr(t, ptr, offset), 16.53 + _const_oop(o), _klass(k), 16.54 + _klass_is_exact(xk), 16.55 + _is_ptr_to_narrowoop(false), 16.56 + _instance_id(instance_id) { 16.57 +#ifdef _LP64 16.58 + if (UseCompressedOops && _offset != 0) { 16.59 + if (klass() == NULL) { 16.60 + assert(this->isa_aryptr(), "only arrays without klass"); 16.61 + _is_ptr_to_narrowoop = true; 16.62 + } else if (_offset == oopDesc::klass_offset_in_bytes()) { 16.63 + _is_ptr_to_narrowoop = true; 16.64 + } else if (this->isa_aryptr()) { 16.65 + _is_ptr_to_narrowoop = (klass()->is_obj_array_klass() && 16.66 + _offset != arrayOopDesc::length_offset_in_bytes()); 16.67 + } else if (klass() == ciEnv::current()->Class_klass() && 16.68 + (_offset == java_lang_Class::klass_offset_in_bytes() || 16.69 + _offset == java_lang_Class::array_klass_offset_in_bytes())) { 16.70 + // Special 
hidden fields from the Class. 16.71 + assert(this->isa_instptr(), "must be an instance ptr."); 16.72 + _is_ptr_to_narrowoop = true; 16.73 + } else if (klass()->is_instance_klass()) { 16.74 + ciInstanceKlass* ik = klass()->as_instance_klass(); 16.75 + ciField* field = NULL; 16.76 + if (this->isa_klassptr()) { 16.77 + // Perm objects don't use compressed references, except for 16.78 + // static fields which are currently compressed. 16.79 + field = ik->get_field_by_offset(_offset, true); 16.80 + if (field != NULL) { 16.81 + BasicType basic_elem_type = field->layout_type(); 16.82 + _is_ptr_to_narrowoop = (basic_elem_type == T_OBJECT || 16.83 + basic_elem_type == T_ARRAY); 16.84 + } 16.85 + } else if (_offset == OffsetBot || _offset == OffsetTop) { 16.86 + // unsafe access 16.87 + _is_ptr_to_narrowoop = true; 16.88 + } else { // exclude unsafe ops 16.89 + assert(this->isa_instptr(), "must be an instance ptr."); 16.90 + // Field which contains a compressed oop references. 16.91 + field = ik->get_field_by_offset(_offset, false); 16.92 + if (field != NULL) { 16.93 + BasicType basic_elem_type = field->layout_type(); 16.94 + _is_ptr_to_narrowoop = (basic_elem_type == T_OBJECT || 16.95 + basic_elem_type == T_ARRAY); 16.96 + } else if (klass()->equals(ciEnv::current()->Object_klass())) { 16.97 + // Compile::find_alias_type() cast exactness on all types to verify 16.98 + // that it does not affect alias type. 16.99 + _is_ptr_to_narrowoop = true; 16.100 + } else { 16.101 + // Type for the copy start in LibraryCallKit::inline_native_clone(). 
16.102 + assert(!klass_is_exact(), "only non-exact klass"); 16.103 + _is_ptr_to_narrowoop = true; 16.104 + } 16.105 + } 16.106 + } 16.107 + } 16.108 +#endif 16.109 +} 16.110 + 16.111 //------------------------------make------------------------------------------- 16.112 const TypeOopPtr *TypeOopPtr::make(PTR ptr, 16.113 int offset) { 16.114 @@ -2593,9 +2665,13 @@ 16.115 //-----------------------------cast_to_instance------------------------------- 16.116 const TypeOopPtr *TypeInstPtr::cast_to_instance(int instance_id) const { 16.117 if( instance_id == _instance_id) return this; 16.118 - bool exact = (instance_id == UNKNOWN_INSTANCE) ? _klass_is_exact : true; 16.119 - 16.120 - return make(ptr(), klass(), exact, const_oop(), _offset, instance_id); 16.121 + bool exact = true; 16.122 + PTR ptr_t = NotNull; 16.123 + if (instance_id == UNKNOWN_INSTANCE) { 16.124 + exact = _klass_is_exact; 16.125 + ptr_t = _ptr; 16.126 + } 16.127 + return make(ptr_t, klass(), exact, const_oop(), _offset, instance_id); 16.128 } 16.129 16.130 //------------------------------xmeet_unloaded--------------------------------- 16.131 @@ -3014,6 +3090,7 @@ 16.132 // Convenience common pre-built types. 16.133 const TypeAryPtr *TypeAryPtr::RANGE; 16.134 const TypeAryPtr *TypeAryPtr::OOPS; 16.135 +const TypeAryPtr *TypeAryPtr::NARROWOOPS; 16.136 const TypeAryPtr *TypeAryPtr::BYTES; 16.137 const TypeAryPtr *TypeAryPtr::SHORTS; 16.138 const TypeAryPtr *TypeAryPtr::CHARS; 16.139 @@ -3063,8 +3140,13 @@ 16.140 //-----------------------------cast_to_instance------------------------------- 16.141 const TypeOopPtr *TypeAryPtr::cast_to_instance(int instance_id) const { 16.142 if( instance_id == _instance_id) return this; 16.143 - bool exact = (instance_id == UNKNOWN_INSTANCE) ? 
_klass_is_exact : true; 16.144 - return make(ptr(), const_oop(), _ary, klass(), exact, _offset, instance_id); 16.145 + bool exact = true; 16.146 + PTR ptr_t = NotNull; 16.147 + if (instance_id == UNKNOWN_INSTANCE) { 16.148 + exact = _klass_is_exact; 16.149 + ptr_t = _ptr; 16.150 + } 16.151 + return make(ptr_t, const_oop(), _ary, klass(), exact, _offset, instance_id); 16.152 } 16.153 16.154 //-----------------------------narrow_size_type------------------------------- 16.155 @@ -3547,7 +3629,7 @@ 16.156 k_ary = ciTypeArrayKlass::make(el->basic_type()); 16.157 } 16.158 16.159 - if( this != TypeAryPtr::OOPS ) 16.160 + if( this != TypeAryPtr::OOPS ) { 16.161 // The _klass field acts as a cache of the underlying 16.162 // ciKlass for this array type. In order to set the field, 16.163 // we need to cast away const-ness. 16.164 @@ -3562,6 +3644,11 @@ 16.165 // a bit less efficient than caching, but calls to 16.166 // TypeAryPtr::OOPS->klass() are not common enough to matter. 16.167 ((TypeAryPtr*)this)->_klass = k_ary; 16.168 + if (UseCompressedOops && k_ary != NULL && k_ary->is_obj_array_klass() && 16.169 + _offset != 0 && _offset != arrayOopDesc::length_offset_in_bytes()) { 16.170 + ((TypeAryPtr*)this)->_is_ptr_to_narrowoop = true; 16.171 + } 16.172 + } 16.173 return k_ary; 16.174 } 16.175
17.1 --- a/src/share/vm/opto/type.hpp Tue May 20 06:32:58 2008 -0700 17.2 +++ b/src/share/vm/opto/type.hpp Wed May 21 10:45:07 2008 -0700 17.3 @@ -191,9 +191,8 @@ 17.4 virtual const Type *filter( const Type *kills ) const; 17.5 17.6 // Returns true if this pointer points at memory which contains a 17.7 - // compressed oop references. In 32-bit builds it's non-virtual 17.8 - // since we don't support compressed oops at all in the mode. 17.9 - LP64_ONLY(virtual) bool is_narrow() const { return false; } 17.10 + // compressed oop references. 17.11 + bool is_ptr_to_narrowoop() const; 17.12 17.13 // Convenience access 17.14 float getf() const; 17.15 @@ -213,8 +212,8 @@ 17.16 const TypePtr *isa_ptr() const; // Returns NULL if not ptr type 17.17 const TypeRawPtr *isa_rawptr() const; // NOT Java oop 17.18 const TypeRawPtr *is_rawptr() const; // Asserts is rawptr 17.19 - const TypeNarrowOop *is_narrowoop() const; // Java-style GC'd pointer 17.20 - const TypeNarrowOop *isa_narrowoop() const; // Returns NULL if not oop ptr type 17.21 + const TypeNarrowOop *is_narrowoop() const; // Java-style GC'd pointer 17.22 + const TypeNarrowOop *isa_narrowoop() const; // Returns NULL if not oop ptr type 17.23 const TypeOopPtr *isa_oopptr() const; // Returns NULL if not oop ptr type 17.24 const TypeOopPtr *is_oopptr() const; // Java-style GC'd pointer 17.25 const TypeKlassPtr *isa_klassptr() const; // Returns NULL if not KlassPtr 17.26 @@ -643,7 +642,7 @@ 17.27 // Some kind of oop (Java pointer), either klass or instance or array. 
17.28 class TypeOopPtr : public TypePtr { 17.29 protected: 17.30 - TypeOopPtr( TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id ) : TypePtr(t, ptr, offset), _const_oop(o), _klass(k), _klass_is_exact(xk), _instance_id(instance_id) { } 17.31 + TypeOopPtr( TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id ); 17.32 public: 17.33 virtual bool eq( const Type *t ) const; 17.34 virtual int hash() const; // Type specific hashing 17.35 @@ -660,8 +659,9 @@ 17.36 ciKlass* _klass; // Klass object 17.37 // Does the type exclude subclasses of the klass? (Inexact == polymorphic.) 17.38 bool _klass_is_exact; 17.39 + bool _is_ptr_to_narrowoop; 17.40 17.41 - int _instance_id; // if not UNKNOWN_INSTANCE, indicates that this is a particular instance 17.42 + int _instance_id; // if not UNKNOWN_INSTANCE, indicates that this is a particular instance 17.43 // of this type which is distinct. This is the the node index of the 17.44 // node creating this instance 17.45 17.46 @@ -696,6 +696,11 @@ 17.47 ciObject* const_oop() const { return _const_oop; } 17.48 virtual ciKlass* klass() const { return _klass; } 17.49 bool klass_is_exact() const { return _klass_is_exact; } 17.50 + 17.51 + // Returns true if this pointer points at memory which contains a 17.52 + // compressed oop references. 
17.53 + bool is_ptr_to_narrowoop_nv() const { return _is_ptr_to_narrowoop; } 17.54 + 17.55 bool is_instance() const { return _instance_id != UNKNOWN_INSTANCE; } 17.56 uint instance_id() const { return _instance_id; } 17.57 bool is_instance_field() const { return _instance_id != UNKNOWN_INSTANCE && _offset >= 0; } 17.58 @@ -716,12 +721,6 @@ 17.59 // returns the equivalent compressed version of this pointer type 17.60 virtual const TypeNarrowOop* make_narrowoop() const; 17.61 17.62 -#ifdef _LP64 17.63 - virtual bool is_narrow() const { 17.64 - return (UseCompressedOops && _offset != 0); 17.65 - } 17.66 -#endif 17.67 - 17.68 virtual const Type *xmeet( const Type *t ) const; 17.69 virtual const Type *xdual() const; // Compute dual right now. 17.70 17.71 @@ -843,15 +842,10 @@ 17.72 virtual const Type *xmeet( const Type *t ) const; 17.73 virtual const Type *xdual() const; // Compute dual right now. 17.74 17.75 -#ifdef _LP64 17.76 - virtual bool is_narrow() const { 17.77 - return (UseCompressedOops && klass() != NULL && _offset != 0); 17.78 - } 17.79 -#endif 17.80 - 17.81 // Convenience common pre-built types. 17.82 static const TypeAryPtr *RANGE; 17.83 static const TypeAryPtr *OOPS; 17.84 + static const TypeAryPtr *NARROWOOPS; 17.85 static const TypeAryPtr *BYTES; 17.86 static const TypeAryPtr *SHORTS; 17.87 static const TypeAryPtr *CHARS; 17.88 @@ -901,18 +895,6 @@ 17.89 virtual const Type *xmeet( const Type *t ) const; 17.90 virtual const Type *xdual() const; // Compute dual right now. 
17.91 17.92 -#ifdef _LP64 17.93 - // Perm objects don't use compressed references, except for static fields 17.94 - // which are currently compressed 17.95 - virtual bool is_narrow() const { 17.96 - if (UseCompressedOops && _offset != 0 && _klass->is_instance_klass()) { 17.97 - ciInstanceKlass* ik = _klass->as_instance_klass(); 17.98 - return ik != NULL && ik->get_field_by_offset(_offset, true) != NULL; 17.99 - } 17.100 - return false; 17.101 - } 17.102 -#endif 17.103 - 17.104 // Convenience common pre-built types. 17.105 static const TypeKlassPtr* OBJECT; // Not-null object klass or below 17.106 static const TypeKlassPtr* OBJECT_OR_NULL; // Maybe-null version of same 17.107 @@ -921,7 +903,7 @@ 17.108 #endif 17.109 }; 17.110 17.111 -//------------------------------TypeNarrowOop---------------------------------------- 17.112 +//------------------------------TypeNarrowOop---------------------------------- 17.113 // A compressed reference to some kind of Oop. This type wraps around 17.114 // a preexisting TypeOopPtr and forwards most of it's operations to 17.115 // the underlying type. It's only real purpose is to track the 17.116 @@ -1013,6 +995,14 @@ 17.117 }; 17.118 17.119 //------------------------------accessors-------------------------------------- 17.120 +inline bool Type::is_ptr_to_narrowoop() const { 17.121 +#ifdef _LP64 17.122 + return (isa_oopptr() != NULL && is_oopptr()->is_ptr_to_narrowoop_nv()); 17.123 +#else 17.124 + return false; 17.125 +#endif 17.126 +} 17.127 + 17.128 inline float Type::getf() const { 17.129 assert( _base == FloatCon, "Not a FloatCon" ); 17.130 return ((TypeF*)this)->_f;
18.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 18.2 +++ b/test/compiler/6689060/Test.java Wed May 21 10:45:07 2008 -0700 18.3 @@ -0,0 +1,577 @@ 18.4 +/* 18.5 + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 18.6 + * SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. 18.7 + * 18.8 + * 18.9 + * 18.10 + * 18.11 + * 18.12 + * 18.13 + * 18.14 + * 18.15 + * 18.16 + * 18.17 + * 18.18 + * 18.19 + * 18.20 + * 18.21 + * 18.22 + * 18.23 + * 18.24 + * 18.25 + * 18.26 + */ 18.27 + 18.28 +/* 18.29 + * @test 18.30 + * @bug 6689060 18.31 + * @summary Escape Analysis does not work with Compressed Oops 18.32 + * @run main/othervm -Xbatch -XX:CompileCommand=exclude,Test.dummy -XX:+AggressiveOpts Test 18.33 + */ 18.34 + 18.35 +import java.lang.reflect.Array; 18.36 + 18.37 +class Point { 18.38 + int x; 18.39 + int y; 18.40 + Point next; 18.41 + int ax[]; 18.42 + int ay[]; 18.43 + Point pax[]; 18.44 + Point pay[]; 18.45 + public Point getNext() { 18.46 + return next; 18.47 + } 18.48 +} 18.49 + 18.50 +public class Test { 18.51 + 18.52 + void dummy() { 18.53 + // Empty method to verify correctness of DebugInfo. 
18.54 + // Use -XX:CompileCommand=exclude,Test.dummy 18.55 + } 18.56 + 18.57 + int ival(int i) { 18.58 + return i*2; 18.59 + } 18.60 + 18.61 + int test80(int y, int l, int i) { 18.62 + Point p = new Point(); 18.63 + p.ax = new int[2]; 18.64 + p.ay = new int[2]; 18.65 + int x = 3; 18.66 + p.ax[0] = x; 18.67 + p.ay[1] = 3 * x + y; 18.68 + dummy(); 18.69 + return p.ax[0] * p.ay[1]; 18.70 + } 18.71 + 18.72 + int test81(int y, int l, int i) { 18.73 + Point p = new Point(); 18.74 + p.ax = new int[2]; 18.75 + p.ay = new int[2]; 18.76 + int x = 3; 18.77 + p.ax[0] = x; 18.78 + p.ay[1] = 3 * x + y; 18.79 + dummy(); 18.80 + return p.ax[0] * p.ay[1]; 18.81 + } 18.82 + 18.83 + 18.84 + int test44(int y) { 18.85 + Point p1 = new Point(); 18.86 + p1.x = ival(3); 18.87 + dummy(); 18.88 + p1.y = 3 * p1.x + y; 18.89 + return p1.y; 18.90 + } 18.91 + 18.92 + int test43(int y) { 18.93 + Point p1 = new Point(); 18.94 + if ( (y & 1) == 1 ) { 18.95 + p1.x = ival(3); 18.96 + } else { 18.97 + p1.x = ival(5); 18.98 + } 18.99 + dummy(); 18.100 + p1.y = 3 * p1.x + y; 18.101 + return p1.y; 18.102 + } 18.103 + 18.104 + int test42(int y) { 18.105 + Point p1 = new Point(); 18.106 + p1.x = 3; 18.107 + for (int i = 0; i < y; i++) { 18.108 + if ( (i & 1) == 1 ) { 18.109 + p1.x += 4; 18.110 + } 18.111 + } 18.112 + p1.y = 3 * y + p1.x; 18.113 + return p1.y; 18.114 + } 18.115 + 18.116 + int test40(int y) { 18.117 + Point p1 = new Point(); 18.118 + if ( (y & 1) == 1 ) { 18.119 + p1.x = 3; 18.120 + } else { 18.121 + p1.x = 5; 18.122 + } 18.123 + p1.y = 3 * p1.x + y; 18.124 + return p1.y; 18.125 + } 18.126 + 18.127 + int test41(int y) { 18.128 + Point p1 = new Point(); 18.129 + if ( (y & 1) == 1 ) { 18.130 + p1.x += 4; 18.131 + } else { 18.132 + p1.x += 5; 18.133 + } 18.134 + p1.y = 3 * p1.x + y; 18.135 + return p1.y; 18.136 + } 18.137 + 18.138 + Point test00(int y) { 18.139 + int x = 3; 18.140 + Point p = new Point(); 18.141 + p.x = x; 18.142 + p.y = 3 * x + y; 18.143 + return p; 18.144 + } 18.145 + 18.146 
+ Point test01(int y) { 18.147 + int x = 3; 18.148 + Point p = new Point(); 18.149 + p.x = x; 18.150 + p.y = 3 * x + y; 18.151 + dummy(); 18.152 + return p; 18.153 + } 18.154 + 18.155 + Point test02(int y) { 18.156 + int x = 3; 18.157 + Point p1 = null; 18.158 + for (int i = 0; i < y; i++) { 18.159 + Point p2 = new Point(); 18.160 + p2.x = x; 18.161 + p2.y = 3 * y + x; 18.162 + p2.next = p1; 18.163 + p1 = p2; 18.164 + } 18.165 + return p1; 18.166 + } 18.167 + 18.168 + Point test03(int y) { 18.169 + int x = 3; 18.170 + Point p1 = null; 18.171 + for (int i = 0; i < y; i++) { 18.172 + Point p2 = new Point(); 18.173 + p2.x = x; 18.174 + p2.y = 3 * y + x; 18.175 + p2.next = p1; 18.176 + p1 = p2; 18.177 + } 18.178 + dummy(); 18.179 + return p1; 18.180 + } 18.181 + 18.182 + Point test04(int y) { 18.183 + int x = 3; 18.184 + Point p1 = null; 18.185 + for (int i = 0; i < y; i++) { 18.186 + Point p2 = new Point(); 18.187 + p2.x = x; 18.188 + p2.y = 3 * y + x; 18.189 + p2.next = p1; 18.190 + dummy(); 18.191 + p1 = p2; 18.192 + } 18.193 + return p1; 18.194 + } 18.195 + 18.196 + int test05(int y) { 18.197 + int x = 3; 18.198 + Point p1 = new Point(); 18.199 + for (int i = 0; i < y; i++) { 18.200 + Point p2 = new Point(); 18.201 + p2.x = x; 18.202 + p2.y = 3 * y + x; 18.203 + p1.next = p2; 18.204 + p1 = p2; 18.205 + } 18.206 + return p1.y; 18.207 + } 18.208 + 18.209 + int test0(int y) { 18.210 + int x = 3; 18.211 + Point p = new Point(); 18.212 + p.x = x; 18.213 + p.y = 3 * x + y; 18.214 + dummy(); 18.215 + return p.x * p.y; 18.216 + } 18.217 + 18.218 + int test1(int y) { 18.219 + Point p = new Point(); 18.220 + if ( (y & 1) == 1 ) { 18.221 + p = new Point(); // Kill previous 18.222 + } 18.223 + int x = 3; 18.224 + p.x = x; 18.225 + p.y = 3 * x + y; 18.226 + dummy(); 18.227 + return p.x * p.y; 18.228 + } 18.229 + 18.230 + int test2(int y) { 18.231 + Point p1 = new Point(); 18.232 + Point p2 = new Point(); 18.233 + p1.x = 3; 18.234 + p2.x = 4; 18.235 + p1.y = 3 * p2.x + y; 18.236 
+ p2.y = 3 * p1.x + y; 18.237 + dummy(); 18.238 + return p1.y * p2.y; 18.239 + } 18.240 + 18.241 + int test3(int y, Point p1) { 18.242 + Point p2 = new Point(); 18.243 + p1.x = 3; 18.244 + p2.x = 4; 18.245 + p1.y = 3 * p2.x + y; 18.246 + p2.y = 3 * p1.x + y; 18.247 + dummy(); 18.248 + return p1.y * p2.y; 18.249 + } 18.250 + 18.251 + int test4(int y) { 18.252 + Point p1 = new Point(); 18.253 + Point p2 = new Point(); 18.254 + if ( (y & 1) == 1 ) { 18.255 + p1.x = 3; 18.256 + p2.x = 4; 18.257 + } else { 18.258 + p1.x = 5; 18.259 + p2.x = 6; 18.260 + } 18.261 + p1.y = 3 * p2.x + y; 18.262 + p2.y = 3 * p1.x + y; 18.263 + dummy(); 18.264 + return p1.y * p2.y; 18.265 + } 18.266 + 18.267 + int test5(int y, Point p1) { 18.268 + Point p2 = new Point(); 18.269 + if ( (y & 1) == 1 ) { 18.270 + p1.x = 3; 18.271 + p2.x = 4; 18.272 + } else { 18.273 + p1.x = 5; 18.274 + p2.x = 6; 18.275 + } 18.276 + p1.y = 3 * p2.x + y; 18.277 + p2.y = 3 * p1.x + y; 18.278 + dummy(); 18.279 + return p1.y * p2.y; 18.280 + } 18.281 + 18.282 + int test6(int y) { 18.283 + Point p1 = new Point(); 18.284 + Point p2 = new Point(); 18.285 + p1.next = p2; 18.286 + if ( (y & 1) == 1 ) { 18.287 + p1.x = 3; 18.288 + p1.getNext().x = 4; 18.289 + } else { 18.290 + p1.x = 5; 18.291 + p1.getNext().x = 6; 18.292 + } 18.293 + p1.y = 3 * p2.x + y; 18.294 + p2.y = 3 * p1.x + y; 18.295 + dummy(); 18.296 + return p1.y * p2.y; 18.297 + } 18.298 + 18.299 + int test7(int y, Point p1) { 18.300 + Point p2 = new Point(); 18.301 + p1.next = p2; 18.302 + if ( (y & 1) == 1 ) { 18.303 + p1.x = 3; 18.304 + p1.getNext().x = 4; 18.305 + } else { 18.306 + p1.x = 5; 18.307 + p1.getNext().x = 6; 18.308 + } 18.309 + p1.y = 3 * p2.x + y; 18.310 + p2.y = 3 * p1.x + y; 18.311 + dummy(); 18.312 + return p1.y * p2.y; 18.313 + } 18.314 + 18.315 + int test8(int y, int l, int i) { 18.316 + Point p = new Point(); 18.317 + p.ax = new int[l]; 18.318 + p.ay = new int[l]; 18.319 + int x = 3; 18.320 + p.ax[i] = x; 18.321 + p.ay[i] = 3 * x + y; 
18.322 + dummy(); 18.323 + return p.ax[i] * p.ay[i]; 18.324 + } 18.325 + 18.326 + int test9(int y, int l, int i) { 18.327 + Point p = new Point(); 18.328 + p.pax = new Point[l]; 18.329 + p.pay = new Point[l]; 18.330 + p.pax[i] = new Point(); 18.331 + p.pay[i] = new Point(); 18.332 + p.pax[i].x = 3; 18.333 + p.pay[i].x = 4; 18.334 + p.pax[i].y = 3 * p.pay[i].x + y; 18.335 + p.pay[i].y = 3 * p.pax[i].x + y; 18.336 + dummy(); 18.337 + return p.pax[i].y * p.pay[i].y; 18.338 + } 18.339 + 18.340 + int test10(int y, int l, int i, Class cls) { 18.341 + Point p = new Point(); 18.342 + try { 18.343 + p.pax = (Point[])Array.newInstance(cls, l); 18.344 + p.pax[i] = (Point)cls.newInstance(); 18.345 + } 18.346 + catch(java.lang.InstantiationException ex) { 18.347 + return 0; 18.348 + } 18.349 + catch(java.lang.IllegalAccessException ex) { 18.350 + return 0; 18.351 + } 18.352 + p.pax[i].x = 3; 18.353 + p.pax[i].y = 3 * p.pax[i].x + y; 18.354 + dummy(); 18.355 + return p.pax[i].x * p.pax[i].y; 18.356 + } 18.357 + 18.358 + int test11(int y) { 18.359 + Point p1 = new Point(); 18.360 + Point p2 = new Point(); 18.361 + p1.next = p2; 18.362 + if ( (y & 1) == 1 ) { 18.363 + p1.x = 3; 18.364 + p1.next.x = 4; 18.365 + } else { 18.366 + p1.x = 5; 18.367 + p1.next.x = 6; 18.368 + } 18.369 + p1.y = 3 * p1.next.x + y; 18.370 + p1.next.y = 3 * p1.x + y; 18.371 + dummy(); 18.372 + return p1.y * p1.next.y; 18.373 + } 18.374 + 18.375 + int test12(int y) { 18.376 + Point p1 = new Point(); 18.377 + p1.next = p1; 18.378 + if ( (y & 1) == 1 ) { 18.379 + p1.x = 3; 18.380 + p1.next.x = 4; 18.381 + } else { 18.382 + p1.x = 5; 18.383 + p1.next.x = 6; 18.384 + } 18.385 + p1.y = 3 * p1.next.x + y; 18.386 + p1.next.y = 3 * p1.x + y; 18.387 + dummy(); 18.388 + return p1.y * p1.next.y; 18.389 + } 18.390 + 18.391 + 18.392 + public static void main(String args[]) { 18.393 + Test tsr = new Test(); 18.394 + Point p = new Point(); 18.395 + Point ptmp = p; 18.396 + Class cls = Point.class; 18.397 + int y = 0; 
18.398 + for (int i=0; i<10000; i++) { 18.399 + ptmp.next = tsr.test00(1); 18.400 + ptmp.next = tsr.test01(1); 18.401 + ptmp.next = tsr.test02(1); 18.402 + ptmp.next = tsr.test03(1); 18.403 + ptmp.next = tsr.test04(1); 18.404 + 18.405 + y = tsr.test05(1); 18.406 + 18.407 + y = tsr.test80(y, 1, 0); 18.408 + y = tsr.test81(y, 1, 0); 18.409 + 18.410 + y = tsr.test44(y); 18.411 + y = tsr.test43(y); 18.412 + y = tsr.test42(y); 18.413 + y = tsr.test40(y); 18.414 + y = tsr.test41(y); 18.415 + 18.416 + y = tsr.test0(y); 18.417 + y = tsr.test1(y); 18.418 + y = tsr.test2(y); 18.419 + y = tsr.test3(y, p); 18.420 + y = tsr.test4(y); 18.421 + y = tsr.test5(y, p); 18.422 + y = tsr.test6(y); 18.423 + y = tsr.test7(y, p); 18.424 + y = tsr.test8(y, 1, 0); 18.425 + y = tsr.test9(y, 1, 0); 18.426 + y = tsr.test10(y, 1, 0, cls); 18.427 + y = tsr.test11(y); 18.428 + y = tsr.test12(y); 18.429 + } 18.430 + for (int i=0; i<10000; i++) { 18.431 + ptmp.next = tsr.test00(1); 18.432 + ptmp.next = tsr.test01(1); 18.433 + ptmp.next = tsr.test02(1); 18.434 + ptmp.next = tsr.test03(1); 18.435 + ptmp.next = tsr.test04(1); 18.436 + 18.437 + y = tsr.test05(1); 18.438 + 18.439 + y = tsr.test80(y, 1, 0); 18.440 + y = tsr.test81(y, 1, 0); 18.441 + 18.442 + y = tsr.test44(y); 18.443 + y = tsr.test43(y); 18.444 + y = tsr.test42(y); 18.445 + y = tsr.test40(y); 18.446 + y = tsr.test41(y); 18.447 + 18.448 + y = tsr.test0(y); 18.449 + y = tsr.test1(y); 18.450 + y = tsr.test2(y); 18.451 + y = tsr.test3(y, p); 18.452 + y = tsr.test4(y); 18.453 + y = tsr.test5(y, p); 18.454 + y = tsr.test6(y); 18.455 + y = tsr.test7(y, p); 18.456 + y = tsr.test8(y, 1, 0); 18.457 + y = tsr.test9(y, 1, 0); 18.458 + y = tsr.test10(y, 1, 0, cls); 18.459 + y = tsr.test11(y); 18.460 + y = tsr.test12(y); 18.461 + } 18.462 + for (int i=0; i<10000; i++) { 18.463 + ptmp.next = tsr.test00(1); 18.464 + ptmp.next = tsr.test01(1); 18.465 + ptmp.next = tsr.test02(1); 18.466 + ptmp.next = tsr.test03(1); 18.467 + ptmp.next = tsr.test04(1); 
18.468 + 18.469 + y = tsr.test05(1); 18.470 + 18.471 + y = tsr.test80(y, 1, 0); 18.472 + y = tsr.test81(y, 1, 0); 18.473 + 18.474 + y = tsr.test44(y); 18.475 + y = tsr.test43(y); 18.476 + y = tsr.test42(y); 18.477 + y = tsr.test40(y); 18.478 + y = tsr.test41(y); 18.479 + 18.480 + y = tsr.test0(y); 18.481 + y = tsr.test1(y); 18.482 + y = tsr.test2(y); 18.483 + y = tsr.test3(y, p); 18.484 + y = tsr.test4(y); 18.485 + y = tsr.test5(y, p); 18.486 + y = tsr.test6(y); 18.487 + y = tsr.test7(y, p); 18.488 + y = tsr.test8(y, 1, 0); 18.489 + y = tsr.test9(y, 1, 0); 18.490 + y = tsr.test10(y, 1, 0, cls); 18.491 + y = tsr.test11(y); 18.492 + y = tsr.test12(y); 18.493 + } 18.494 + 18.495 + int z = 0; 18.496 + y = tsr.test80(0, 1, 0); 18.497 + z += y; 18.498 + System.out.println("After 'test80' y=" + y); 18.499 + y = tsr.test81(0, 1, 0); 18.500 + z += y; 18.501 + System.out.println("After 'test81' y=" + y); 18.502 + 18.503 + y = tsr.test44(0); 18.504 + z += y; 18.505 + System.out.println("After 'test44' y=" + y); 18.506 + y = tsr.test43(0); 18.507 + z += y; 18.508 + System.out.println("After 'test43' y=" + y); 18.509 + y = tsr.test42(0); 18.510 + z += y; 18.511 + System.out.println("After 'test42' y=" + y); 18.512 + y = tsr.test40(0); 18.513 + z += y; 18.514 + System.out.println("After 'test40' y=" + y); 18.515 + y = tsr.test41(0); 18.516 + z += y; 18.517 + System.out.println("After 'test41' y=" + y); 18.518 + 18.519 + ptmp.next = tsr.test00(1); 18.520 + z += y; 18.521 + System.out.println("After 'test00' p.y=" + ptmp.next.y); 18.522 + ptmp.next = tsr.test01(1); 18.523 + z += y; 18.524 + System.out.println("After 'test01' p.y=" + ptmp.next.y); 18.525 + ptmp.next = tsr.test02(1); 18.526 + z += y; 18.527 + System.out.println("After 'test02' p.y=" + ptmp.next.y); 18.528 + ptmp.next = tsr.test03(1); 18.529 + z += y; 18.530 + System.out.println("After 'test03' p.y=" + ptmp.next.y); 18.531 + ptmp.next = tsr.test04(1); 18.532 + z += y; 18.533 + System.out.println("After 'test04' p.y=" 
+ ptmp.next.y); 18.534 + 18.535 + y = tsr.test05(1); 18.536 + z += y; 18.537 + System.out.println("After 'test05' y=" + y); 18.538 + 18.539 + y = tsr.test0(0); 18.540 + z += y; 18.541 + System.out.println("After 'test0' y=" + y); 18.542 + y = tsr.test1(0); 18.543 + z += y; 18.544 + System.out.println("After 'test1' y=" + y); 18.545 + y = tsr.test2(0); 18.546 + z += y; 18.547 + System.out.println("After 'test2' y=" + y); 18.548 + y = tsr.test3(0, new Point()); 18.549 + z += y; 18.550 + System.out.println("After 'test3' y=" + y); 18.551 + y = tsr.test4(0); 18.552 + z += y; 18.553 + System.out.println("After 'test4' y=" + y); 18.554 + y = tsr.test5(0, new Point()); 18.555 + z += y; 18.556 + System.out.println("After 'test5' y=" + y); 18.557 + y = tsr.test6(0); 18.558 + z += y; 18.559 + System.out.println("After 'test6' y=" + y); 18.560 + y = tsr.test7(0, new Point()); 18.561 + z += y; 18.562 + System.out.println("After 'test7' y=" + y); 18.563 + y = tsr.test8(0, 1, 0); 18.564 + z += y; 18.565 + System.out.println("After 'test8' y=" + y); 18.566 + y = tsr.test9(0, 1, 0); 18.567 + z += y; 18.568 + System.out.println("After 'test9' y=" + y); 18.569 + y = tsr.test10(0, 1, 0, cls); 18.570 + z += y; 18.571 + System.out.println("After 'test10' y=" + y); 18.572 + y = tsr.test11(0); 18.573 + z += y; 18.574 + System.out.println("After 'test11' y=" + y); 18.575 + y = tsr.test12(0); 18.576 + z += y; 18.577 + System.out.println("After 'test12' y=" + y); 18.578 + System.out.println("Sum of y =" + z); 18.579 + } 18.580 +}
19.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 19.2 +++ b/test/compiler/6695810/Test.java Wed May 21 10:45:07 2008 -0700 19.3 @@ -0,0 +1,56 @@ 19.4 +/* 19.5 + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 19.6 + * SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. 19.7 + * 19.8 + * 19.9 + * 19.10 + * 19.11 + * 19.12 + * 19.13 + * 19.14 + * 19.15 + * 19.16 + * 19.17 + * 19.18 + * 19.19 + * 19.20 + * 19.21 + * 19.22 + * 19.23 + * 19.24 + * 19.25 + * 19.26 + */ 19.27 + 19.28 +/* 19.29 + * @test 19.30 + * @bug 6695810 19.31 + * @summary null oop passed to encode_heap_oop_not_null 19.32 + * @run main/othervm -Xbatch Test 19.33 + */ 19.34 + 19.35 +public class Test { 19.36 + Test _t; 19.37 + 19.38 + static void test(Test t1, Test t2) { 19.39 + if (t2 != null) 19.40 + t1._t = t2; 19.41 + 19.42 + if (t2 != null) 19.43 + t1._t = t2; 19.44 + } 19.45 + 19.46 + public static void main(String[] args) { 19.47 + Test t = new Test(); 19.48 + for (int i = 0; i < 50; i++) { 19.49 + for (int j = 0; j < 100; j++) { 19.50 + test(t, t); 19.51 + } 19.52 + test(t, null); 19.53 + } 19.54 + for (int i = 0; i < 10000; i++) { 19.55 + test(t, t); 19.56 + } 19.57 + test(t, null); 19.58 + } 19.59 +}