--- a/src/share/vm/opto/matcher.cpp	Wed May 28 21:06:24 2008 -0700
+++ b/src/share/vm/opto/matcher.cpp	Thu May 29 12:04:14 2008 -0700
@@ -52,7 +52,7 @@
 #ifdef ASSERT
   _old2new_map(C->comp_arena()),
 #endif
-  _shared_constants(C->comp_arena()),
+  _shared_nodes(C->comp_arena()),
   _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
   _swallowed(swallowed),
   _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
@@ -1191,7 +1191,7 @@
   uint cnt = n->req();
   uint start = 1;
   if( mem != (Node*)1 ) start = MemNode::Memory+1;
-  if( n->Opcode() == Op_AddP ) {
+  if( n->is_AddP() ) {
     assert( mem == (Node*)1, "" );
     start = AddPNode::Base+1;
   }
@@ -1219,7 +1219,7 @@
   if( t->singleton() ) {
     // Never force constants into registers.  Allow them to match as
     // constants or registers.  Copies of the same value will share
-    // the same register.  See find_shared_constant.
+    // the same register.  See find_shared_node.
     return false;
   } else {                      // Not a constant
     // Stop recursion if they have different Controls.
@@ -1243,12 +1243,10 @@
       if( j == max_scan )       // No post-domination before scan end?
         return true;            // Then break the match tree up
     }
-
-    if (m->Opcode() == Op_DecodeN && m->outcnt() == 2) {
+    if (m->is_DecodeN() && Matcher::clone_shift_expressions) {
       // These are commonly used in address expressions and can
-      // efficiently fold into them in some cases but because they are
-      // consumed by AddP they commonly have two users.
-      if (m->raw_out(0) == m->raw_out(1) && m->raw_out(0)->Opcode() == Op_AddP) return false;
+      // efficiently fold into them on X64 in some cases.
+      return false;
     }
   }

@@ -1368,13 +1366,16 @@
 // which reduces the number of copies of a constant in the final
 // program.  The register allocator is free to split uses later to
 // split live ranges.
-MachNode* Matcher::find_shared_constant(Node* leaf, uint rule) {
-  if (!leaf->is_Con()) return NULL;
+MachNode* Matcher::find_shared_node(Node* leaf, uint rule) {
+  if (!leaf->is_Con() && !leaf->is_DecodeN()) return NULL;

   // See if this Con has already been reduced using this rule.
-  if (_shared_constants.Size() <= leaf->_idx) return NULL;
-  MachNode* last = (MachNode*)_shared_constants.at(leaf->_idx);
+  if (_shared_nodes.Size() <= leaf->_idx) return NULL;
+  MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx);
   if (last != NULL && rule == last->rule()) {
+    // Don't expect control change for DecodeN
+    if (leaf->is_DecodeN())
+      return last;
     // Get the new space root.
     Node* xroot = new_node(C->root());
     if (xroot == NULL) {
@@ -1420,9 +1421,9 @@
 MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
   assert( rule >= NUM_OPERANDS, "called with operand rule" );

-  MachNode* shared_con = find_shared_constant(s->_leaf, rule);
-  if (shared_con != NULL) {
-    return shared_con;
+  MachNode* shared_node = find_shared_node(s->_leaf, rule);
+  if (shared_node != NULL) {
+    return shared_node;
   }

   // Build the object to represent this state & prepare for recursive calls
@@ -1447,7 +1448,7 @@
     mach->ins_req(MemNode::Memory,mem);

   // If the _leaf is an AddP, insert the base edge
-  if( leaf->Opcode() == Op_AddP )
+  if( leaf->is_AddP() )
     mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base));

   uint num_proj = _proj_list.size();
@@ -1475,9 +1476,9 @@
     guarantee(_proj_list.size() == num_proj, "no allocation during spill generation");
   }

-  if (leaf->is_Con()) {
+  if (leaf->is_Con() || leaf->is_DecodeN()) {
     // Record the con for sharing
-    _shared_constants.map(leaf->_idx, ex);
+    _shared_nodes.map(leaf->_idx, ex);
   }

   return ex;
@@ -1826,7 +1827,7 @@
     Node *adr = m->in(AddPNode::Address);

     // Intel, ARM and friends can handle 2 adds in addressing mode
-    if( clone_shift_expressions && adr->Opcode() == Op_AddP &&
+    if( clone_shift_expressions && adr->is_AddP() &&
         // AtomicAdd is not an addressing expression.
         // Cheap to find it by looking for screwy base.
         !adr->in(AddPNode::Base)->is_top() ) {