Wed, 20 Apr 2011 18:29:35 -0700
7026700: regression in 6u24-rev-b23: Crash in C2 compiler in PhaseIdealLoop::build_loop_late_post
Summary: memory slices should always be created for non-static fields after allocation
Reviewed-by: never
1.1 --- a/src/share/vm/opto/escape.cpp Wed Apr 20 09:29:00 2011 -0700 1.2 +++ b/src/share/vm/opto/escape.cpp Wed Apr 20 18:29:35 2011 -0700 1.3 @@ -1437,7 +1437,10 @@ 1.4 1.5 // Update the memory inputs of MemNodes with the value we computed 1.6 // in Phase 2 and move stores memory users to corresponding memory slices. 1.7 -#ifdef ASSERT 1.8 + 1.9 + // Disable memory split verification code until the fix for 6984348. 1.10 + // Currently it produces false negative results since it does not cover all cases. 1.11 +#if 0 // ifdef ASSERT 1.12 visited.Reset(); 1.13 Node_Stack old_mems(arena, _compile->unique() >> 2); 1.14 #endif 1.15 @@ -1447,7 +1450,7 @@ 1.16 Node *n = ptnode_adr(i)->_node; 1.17 assert(n != NULL, "sanity"); 1.18 if (n->is_Mem()) { 1.19 -#ifdef ASSERT 1.20 +#if 0 // ifdef ASSERT 1.21 Node* old_mem = n->in(MemNode::Memory); 1.22 if (!visited.test_set(old_mem->_idx)) { 1.23 old_mems.push(old_mem, old_mem->outcnt()); 1.24 @@ -1469,13 +1472,13 @@ 1.25 } 1.26 } 1.27 } 1.28 -#ifdef ASSERT 1.29 +#if 0 // ifdef ASSERT 1.30 // Verify that memory was split correctly 1.31 while (old_mems.is_nonempty()) { 1.32 Node* old_mem = old_mems.node(); 1.33 uint old_cnt = old_mems.index(); 1.34 old_mems.pop(); 1.35 - assert(old_cnt = old_mem->outcnt(), "old mem could be lost"); 1.36 + assert(old_cnt == old_mem->outcnt(), "old mem could be lost"); 1.37 } 1.38 #endif 1.39 }
2.1 --- a/src/share/vm/opto/graphKit.cpp Wed Apr 20 09:29:00 2011 -0700 2.2 +++ b/src/share/vm/opto/graphKit.cpp Wed Apr 20 18:29:35 2011 -0700 2.3 @@ -2950,8 +2950,7 @@ 2.4 2.5 //---------------------------set_output_for_allocation------------------------- 2.6 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc, 2.7 - const TypeOopPtr* oop_type, 2.8 - bool raw_mem_only) { 2.9 + const TypeOopPtr* oop_type) { 2.10 int rawidx = Compile::AliasIdxRaw; 2.11 alloc->set_req( TypeFunc::FramePtr, frameptr() ); 2.12 add_safepoint_edges(alloc); 2.13 @@ -2975,7 +2974,7 @@ 2.14 rawoop)->as_Initialize(); 2.15 assert(alloc->initialization() == init, "2-way macro link must work"); 2.16 assert(init ->allocation() == alloc, "2-way macro link must work"); 2.17 - if (ReduceFieldZeroing && !raw_mem_only) { 2.18 + { 2.19 // Extract memory strands which may participate in the new object's 2.20 // initialization, and source them from the new InitializeNode. 2.21 // This will allow us to observe initializations when they occur, 2.22 @@ -3036,11 +3035,9 @@ 2.23 // the type to a constant. 2.24 // The optional arguments are for specialized use by intrinsics: 2.25 // - If 'extra_slow_test' if not null is an extra condition for the slow-path. 2.26 -// - If 'raw_mem_only', do not cast the result to an oop. 2.27 // - If 'return_size_val', report the the total object size to the caller. 
2.28 Node* GraphKit::new_instance(Node* klass_node, 2.29 Node* extra_slow_test, 2.30 - bool raw_mem_only, // affect only raw memory 2.31 Node* *return_size_val) { 2.32 // Compute size in doublewords 2.33 // The size is always an integral number of doublewords, represented 2.34 @@ -3111,7 +3108,7 @@ 2.35 size, klass_node, 2.36 initial_slow_test); 2.37 2.38 - return set_output_for_allocation(alloc, oop_type, raw_mem_only); 2.39 + return set_output_for_allocation(alloc, oop_type); 2.40 } 2.41 2.42 //-------------------------------new_array------------------------------------- 2.43 @@ -3121,7 +3118,6 @@ 2.44 Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) 2.45 Node* length, // number of array elements 2.46 int nargs, // number of arguments to push back for uncommon trap 2.47 - bool raw_mem_only, // affect only raw memory 2.48 Node* *return_size_val) { 2.49 jint layout_con = Klass::_lh_neutral_value; 2.50 Node* layout_val = get_layout_helper(klass_node, layout_con); 2.51 @@ -3266,7 +3262,7 @@ 2.52 ary_type = ary_type->is_aryptr()->cast_to_size(length_type); 2.53 } 2.54 2.55 - Node* javaoop = set_output_for_allocation(alloc, ary_type, raw_mem_only); 2.56 + Node* javaoop = set_output_for_allocation(alloc, ary_type); 2.57 2.58 // Cast length on remaining path to be as narrow as possible 2.59 if (map()->find_edge(length) >= 0) {
3.1 --- a/src/share/vm/opto/graphKit.hpp Wed Apr 20 09:29:00 2011 -0700 3.2 +++ b/src/share/vm/opto/graphKit.hpp Wed Apr 20 18:29:35 2011 -0700 3.3 @@ -769,15 +769,13 @@ 3.4 3.5 // implementation of object creation 3.6 Node* set_output_for_allocation(AllocateNode* alloc, 3.7 - const TypeOopPtr* oop_type, 3.8 - bool raw_mem_only); 3.9 + const TypeOopPtr* oop_type); 3.10 Node* get_layout_helper(Node* klass_node, jint& constant_value); 3.11 Node* new_instance(Node* klass_node, 3.12 Node* slow_test = NULL, 3.13 - bool raw_mem_only = false, 3.14 Node* *return_size_val = NULL); 3.15 Node* new_array(Node* klass_node, Node* count_val, int nargs, 3.16 - bool raw_mem_only = false, Node* *return_size_val = NULL); 3.17 + Node* *return_size_val = NULL); 3.18 3.19 // Handy for making control flow 3.20 IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
4.1 --- a/src/share/vm/opto/library_call.cpp Wed Apr 20 09:29:00 2011 -0700 4.2 +++ b/src/share/vm/opto/library_call.cpp Wed Apr 20 18:29:35 2011 -0700 4.3 @@ -3376,8 +3376,7 @@ 4.4 Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) ); 4.5 Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length); 4.6 4.7 - const bool raw_mem_only = true; 4.8 - newcopy = new_array(klass_node, length, 0, raw_mem_only); 4.9 + newcopy = new_array(klass_node, length, 0); 4.10 4.11 // Generate a direct call to the right arraycopy function(s). 4.12 // We know the copy is disjoint but we might not know if the 4.13 @@ -4174,8 +4173,6 @@ 4.14 4.15 const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; 4.16 int raw_adr_idx = Compile::AliasIdxRaw; 4.17 - const bool raw_mem_only = true; 4.18 - 4.19 4.20 Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL); 4.21 if (array_ctl != NULL) { 4.22 @@ -4184,8 +4181,7 @@ 4.23 set_control(array_ctl); 4.24 Node* obj_length = load_array_length(obj); 4.25 Node* obj_size = NULL; 4.26 - Node* alloc_obj = new_array(obj_klass, obj_length, 0, 4.27 - raw_mem_only, &obj_size); 4.28 + Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size); 4.29 4.30 if (!use_ReduceInitialCardMarks()) { 4.31 // If it is an oop array, it requires very special treatment, 4.32 @@ -4257,7 +4253,7 @@ 4.33 // It's an instance, and it passed the slow-path tests. 4.34 PreserveJVMState pjvms(this); 4.35 Node* obj_size = NULL; 4.36 - Node* alloc_obj = new_instance(obj_klass, NULL, raw_mem_only, &obj_size); 4.37 + Node* alloc_obj = new_instance(obj_klass, NULL, &obj_size); 4.38 4.39 copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks()); 4.40
5.1 --- a/src/share/vm/opto/memnode.cpp Wed Apr 20 09:29:00 2011 -0700 5.2 +++ b/src/share/vm/opto/memnode.cpp Wed Apr 20 18:29:35 2011 -0700 5.3 @@ -1259,15 +1259,18 @@ 5.4 return NULL; // Wait stable graph 5.5 } 5.6 uint cnt = mem->req(); 5.7 - for( uint i = 1; i < cnt; i++ ) { 5.8 + for (uint i = 1; i < cnt; i++) { 5.9 + Node* rc = region->in(i); 5.10 + if (rc == NULL || phase->type(rc) == Type::TOP) 5.11 + return NULL; // Wait stable graph 5.12 Node *in = mem->in(i); 5.13 - if( in == NULL ) { 5.14 + if (in == NULL) { 5.15 return NULL; // Wait stable graph 5.16 } 5.17 } 5.18 // Check for loop invariant. 5.19 if (cnt == 3) { 5.20 - for( uint i = 1; i < cnt; i++ ) { 5.21 + for (uint i = 1; i < cnt; i++) { 5.22 Node *in = mem->in(i); 5.23 Node* m = MemNode::optimize_memory_chain(in, addr_t, phase); 5.24 if (m == mem) { 5.25 @@ -1281,38 +1284,37 @@ 5.26 5.27 // Do nothing here if Identity will find a value 5.28 // (to avoid infinite chain of value phis generation). 5.29 - if ( !phase->eqv(this, this->Identity(phase)) ) 5.30 + if (!phase->eqv(this, this->Identity(phase))) 5.31 return NULL; 5.32 5.33 // Skip the split if the region dominates some control edge of the address. 
5.34 - if (cnt == 3 && !MemNode::all_controls_dominate(address, region)) 5.35 + if (!MemNode::all_controls_dominate(address, region)) 5.36 return NULL; 5.37 5.38 const Type* this_type = this->bottom_type(); 5.39 int this_index = phase->C->get_alias_index(addr_t); 5.40 int this_offset = addr_t->offset(); 5.41 int this_iid = addr_t->is_oopptr()->instance_id(); 5.42 - int wins = 0; 5.43 PhaseIterGVN *igvn = phase->is_IterGVN(); 5.44 Node *phi = new (igvn->C, region->req()) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset); 5.45 - for( uint i = 1; i < region->req(); i++ ) { 5.46 + for (uint i = 1; i < region->req(); i++) { 5.47 Node *x; 5.48 Node* the_clone = NULL; 5.49 - if( region->in(i) == phase->C->top() ) { 5.50 + if (region->in(i) == phase->C->top()) { 5.51 x = phase->C->top(); // Dead path? Use a dead data op 5.52 } else { 5.53 x = this->clone(); // Else clone up the data op 5.54 the_clone = x; // Remember for possible deletion. 5.55 // Alter data node to use pre-phi inputs 5.56 - if( this->in(0) == region ) { 5.57 - x->set_req( 0, region->in(i) ); 5.58 + if (this->in(0) == region) { 5.59 + x->set_req(0, region->in(i)); 5.60 } else { 5.61 - x->set_req( 0, NULL ); 5.62 + x->set_req(0, NULL); 5.63 } 5.64 - for( uint j = 1; j < this->req(); j++ ) { 5.65 + for (uint j = 1; j < this->req(); j++) { 5.66 Node *in = this->in(j); 5.67 - if( in->is_Phi() && in->in(0) == region ) 5.68 - x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone 5.69 + if (in->is_Phi() && in->in(0) == region) 5.70 + x->set_req(j, in->in(i)); // Use pre-Phi input for the clone 5.71 } 5.72 } 5.73 // Check for a 'win' on some paths 5.74 @@ -1321,12 +1323,11 @@ 5.75 bool singleton = t->singleton(); 5.76 5.77 // See comments in PhaseIdealLoop::split_thru_phi(). 
5.78 - if( singleton && t == Type::TOP ) { 5.79 + if (singleton && t == Type::TOP) { 5.80 singleton &= region->is_Loop() && (i != LoopNode::EntryControl); 5.81 } 5.82 5.83 - if( singleton ) { 5.84 - wins++; 5.85 + if (singleton) { 5.86 x = igvn->makecon(t); 5.87 } else { 5.88 // We now call Identity to try to simplify the cloned node. 5.89 @@ -1340,13 +1341,11 @@ 5.90 // igvn->type(x) is set to x->Value() already. 5.91 x->raise_bottom_type(t); 5.92 Node *y = x->Identity(igvn); 5.93 - if( y != x ) { 5.94 - wins++; 5.95 + if (y != x) { 5.96 x = y; 5.97 } else { 5.98 y = igvn->hash_find(x); 5.99 - if( y ) { 5.100 - wins++; 5.101 + if (y) { 5.102 x = y; 5.103 } else { 5.104 // Else x is a new node we are keeping 5.105 @@ -1360,13 +1359,9 @@ 5.106 igvn->remove_dead_node(the_clone); 5.107 phi->set_req(i, x); 5.108 } 5.109 - if( wins > 0 ) { 5.110 - // Record Phi 5.111 - igvn->register_new_node_with_optimizer(phi); 5.112 - return phi; 5.113 - } 5.114 - igvn->remove_dead_node(phi); 5.115 - return NULL; 5.116 + // Record Phi 5.117 + igvn->register_new_node_with_optimizer(phi); 5.118 + return phi; 5.119 } 5.120 5.121 //------------------------------Ideal------------------------------------------ 5.122 @@ -1677,14 +1672,15 @@ 5.123 // If we are loading from a freshly-allocated object, produce a zero, 5.124 // if the load is provably beyond the header of the object. 5.125 // (Also allow a variable load from a fresh array to produce zero.) 
5.126 - if (ReduceFieldZeroing) { 5.127 + const TypeOopPtr *tinst = tp->isa_oopptr(); 5.128 + bool is_instance = (tinst != NULL) && tinst->is_known_instance_field(); 5.129 + if (ReduceFieldZeroing || is_instance) { 5.130 Node* value = can_see_stored_value(mem,phase); 5.131 if (value != NULL && value->is_Con()) 5.132 return value->bottom_type(); 5.133 } 5.134 5.135 - const TypeOopPtr *tinst = tp->isa_oopptr(); 5.136 - if (tinst != NULL && tinst->is_known_instance_field()) { 5.137 + if (is_instance) { 5.138 // If we have an instance type and our memory input is the 5.139 // programs's initial memory state, there is no matching store, 5.140 // so just return a zero of the appropriate type