src/share/vm/opto/matcher.cpp

changeset 670
9c2ecc2ffb12
parent 631
d1605aabd0a1
parent 657
2a1a77d3458f
child 744
eaf496ad4a14
     1.1 --- a/src/share/vm/opto/matcher.cpp	Thu Jul 03 11:01:32 2008 -0700
     1.2 +++ b/src/share/vm/opto/matcher.cpp	Fri Jul 11 01:14:44 2008 -0700
     1.3 @@ -51,6 +51,7 @@
     1.4    PhaseTransform( Phase::Ins_Select ),
     1.5  #ifdef ASSERT
     1.6    _old2new_map(C->comp_arena()),
     1.7 +  _new2old_map(C->comp_arena()),
     1.8  #endif
     1.9    _shared_nodes(C->comp_arena()),
    1.10    _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
    1.11 @@ -82,6 +83,7 @@
    1.12    idealreg2debugmask[Op_RegF] = NULL;
    1.13    idealreg2debugmask[Op_RegD] = NULL;
    1.14    idealreg2debugmask[Op_RegP] = NULL;
    1.15 +  debug_only(_mem_node = NULL;)   // Ideal memory node consumed by mach node
    1.16  }
    1.17  
    1.18  //------------------------------warp_incoming_stk_arg------------------------
    1.19 @@ -834,10 +836,16 @@
    1.20              if( n->is_Proj() && n->in(0)->is_Multi()) {       // Projections?
    1.21                // Convert to machine-dependent projection
    1.22                m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
    1.23 +#ifdef ASSERT
    1.24 +              _new2old_map.map(m->_idx, n);
    1.25 +#endif
    1.26                if (m->in(0) != NULL) // m might be top
    1.27                  collect_null_checks(m);
    1.28              } else {                // Else just a regular 'ol guy
    1.29                m = n->clone();       // So just clone into new-space
    1.30 +#ifdef ASSERT
    1.31 +              _new2old_map.map(m->_idx, n);
    1.32 +#endif
    1.33                // Def-Use edges will be added incrementally as Uses
    1.34                // of this node are matched.
    1.35                assert(m->outcnt() == 0, "no Uses of this clone yet");
    1.36 @@ -886,6 +894,9 @@
    1.37              // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
    1.38              ) {
    1.39            m = m->clone();
    1.40 +#ifdef ASSERT
    1.41 +          _new2old_map.map(m->_idx, n);
    1.42 +#endif
     1.43 +          mstack.push(m, Post_Visit, n, i); // Don't need to visit
    1.44            mstack.push(m->in(0), Visit, m, 0);
    1.45          } else {
    1.46 @@ -1153,7 +1164,10 @@
    1.47  
    1.48    // StoreNodes require their Memory input to match any LoadNodes
    1.49    Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ;
    1.50 -
    1.51 +#ifdef ASSERT
    1.52 +  Node* save_mem_node = _mem_node;
    1.53 +  _mem_node = n->is_Store() ? (Node*)n : NULL;
    1.54 +#endif
    1.55    // State object for root node of match tree
    1.56    // Allocate it on _states_arena - stack allocation can cause stack overflow.
    1.57    State *s = new (&_states_arena) State;
    1.58 @@ -1186,6 +1200,7 @@
    1.59    MachNode *m = ReduceInst( s, s->_rule[mincost], mem );
    1.60  #ifdef ASSERT
    1.61    _old2new_map.map(n->_idx, m);
    1.62 +  _new2old_map.map(m->_idx, (Node*)n);
    1.63  #endif
    1.64  
    1.65    // Add any Matcher-ignored edges
    1.66 @@ -1205,6 +1220,7 @@
    1.67      }
    1.68    }
    1.69  
    1.70 +  debug_only( _mem_node = save_mem_node; )
    1.71    return m;
    1.72  }
    1.73  
    1.74 @@ -1445,8 +1461,30 @@
    1.75    }
    1.76  
    1.77    // If a Memory was used, insert a Memory edge
    1.78 -  if( mem != (Node*)1 )
    1.79 +  if( mem != (Node*)1 ) {
    1.80      mach->ins_req(MemNode::Memory,mem);
    1.81 +#ifdef ASSERT
    1.82 +    // Verify adr type after matching memory operation
    1.83 +    const MachOper* oper = mach->memory_operand();
    1.84 +    if (oper != NULL && oper != (MachOper*)-1 &&
    1.85 +        mach->adr_type() != TypeRawPtr::BOTTOM) { // non-direct addressing mode
    1.86 +      // It has a unique memory operand.  Find corresponding ideal mem node.
    1.87 +      Node* m = NULL;
    1.88 +      if (leaf->is_Mem()) {
    1.89 +        m = leaf;
    1.90 +      } else {
    1.91 +        m = _mem_node;
    1.92 +        assert(m != NULL && m->is_Mem(), "expecting memory node");
    1.93 +      }
    1.94 +      if (m->adr_type() != mach->adr_type()) {
    1.95 +        m->dump();
    1.96 +        tty->print_cr("mach:");
    1.97 +        mach->dump(1);
    1.98 +      }
    1.99 +      assert(m->adr_type() == mach->adr_type(), "matcher should not change adr type");
   1.100 +    }
   1.101 +#endif
   1.102 +  }
   1.103  
   1.104    // If the _leaf is an AddP, insert the base edge
   1.105    if( leaf->is_AddP() )
   1.106 @@ -1464,6 +1502,9 @@
   1.107      for( uint i=0; i<mach->req(); i++ ) {
   1.108        mach->set_req(i,NULL);
   1.109      }
   1.110 +#ifdef ASSERT
   1.111 +    _new2old_map.map(ex->_idx, s->_leaf);
   1.112 +#endif
   1.113    }
   1.114  
   1.115    // PhaseChaitin::fixup_spills will sometimes generate spill code
   1.116 @@ -1510,7 +1551,9 @@
   1.117      assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
   1.118      mach->_opnds[1] = s->MachOperGenerator( _reduceOp[catch_op], C );
   1.119      Node *mem1 = (Node*)1;
   1.120 +    debug_only(Node *save_mem_node = _mem_node;)
   1.121      mach->add_req( ReduceInst(s, newrule, mem1) );
   1.122 +    debug_only(_mem_node = save_mem_node;)
   1.123    }
   1.124    return;
   1.125  }
   1.126 @@ -1520,6 +1563,7 @@
   1.127    if( s->_leaf->is_Load() ) {
   1.128      Node *mem2 = s->_leaf->in(MemNode::Memory);
   1.129      assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
   1.130 +    debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
   1.131      mem = mem2;
   1.132    }
   1.133    if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
   1.134 @@ -1563,7 +1607,9 @@
   1.135          //             --> ReduceInst( newrule )
   1.136          mach->_opnds[num_opnds++] = s->MachOperGenerator( _reduceOp[catch_op], C );
   1.137          Node *mem1 = (Node*)1;
   1.138 +        debug_only(Node *save_mem_node = _mem_node;)
   1.139          mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
   1.140 +        debug_only(_mem_node = save_mem_node;)
   1.141        }
   1.142      }
   1.143      assert( mach->_opnds[num_opnds-1], "" );
   1.144 @@ -1594,6 +1640,7 @@
   1.145    if( s->_leaf->is_Load() ) {
   1.146      assert( mem == (Node*)1, "multiple Memories being matched at once?" );
   1.147      mem = s->_leaf->in(MemNode::Memory);
   1.148 +    debug_only(_mem_node = s->_leaf;)
   1.149    }
   1.150    if( s->_leaf->in(0) && s->_leaf->req() > 1) {
   1.151      if( !mach->in(0) )
   1.152 @@ -1618,7 +1665,9 @@
   1.153        // Reduce the instruction, and add a direct pointer from this
   1.154        // machine instruction to the newly reduced one.
   1.155        Node *mem1 = (Node*)1;
   1.156 +      debug_only(Node *save_mem_node = _mem_node;)
   1.157        mach->add_req( ReduceInst( kid, newrule, mem1 ) );
   1.158 +      debug_only(_mem_node = save_mem_node;)
   1.159      }
   1.160    }
   1.161  }
   1.162 @@ -1731,8 +1780,8 @@
   1.163        }
     1.164        case Op_ConN: {  // Convert narrow pointers above the centerline to NULL
   1.165          TypeNode *tn = n->as_Type(); // Constants derive from type nodes
   1.166 -        const TypePtr* tp = tn->type()->is_narrowoop()->make_oopptr();
   1.167 -        if (tp->_ptr == TypePtr::AnyNull) {
   1.168 +        const TypePtr* tp = tn->type()->make_ptr();
   1.169 +        if (tp && tp->_ptr == TypePtr::AnyNull) {
   1.170            tn->set_type(TypeNarrowOop::NULL_PTR);
   1.171          }
   1.172          break;

mercurial