src/share/vm/opto/gcm.cpp

changeset 5509:d1034bd8cefc
parent    4691:571076d3c79d
child     5539:adb9a7d94cb5
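
This changeset replaces direct indexing into the PhaseCFG node-to-block array
(_bbs) with accessor calls (get_block_for_node, map_node_to_block, has_block,
unmap_node_from_block) and passes the PhaseCFG itself to helper functions in
place of the raw Block_Array. A minimal sketch of the access pattern being
swapped, using a hypothetical node n and block b purely for illustration:

    // Old form (removed by this changeset): call sites index the raw Block_Array.
    Block* blk_old = _bbs[n->_idx];          // look up the block holding node n
    _bbs.map(n->_idx, b);                    // (re)assign node n to block b

    // New form (introduced by this changeset): the mapping stays inside PhaseCFG.
    Block* blk_new = get_block_for_node(n);  // look up the block holding node n
    map_node_to_block(n, b);                 // (re)assign node n to block b
    if (!has_block(n)) {
      // node n has no block assigned yet
    }
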
     1.1 --- a/src/share/vm/opto/gcm.cpp	Mon Aug 05 15:03:40 2013 -0700
     1.2 +++ b/src/share/vm/opto/gcm.cpp	Wed Aug 07 17:56:19 2013 +0200
     1.3 @@ -66,7 +66,7 @@
     1.4  // are in b also.
     1.5  void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
     1.6    // Set basic block of n, Add n to b,
     1.7 -  _bbs.map(n->_idx, b);
     1.8 +  map_node_to_block(n, b);
     1.9    b->add_inst(n);
    1.10  
    1.11    // After Matching, nearly any old Node may have projections trailing it.
    1.12 @@ -75,11 +75,12 @@
    1.13    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    1.14      Node*  use  = n->fast_out(i);
    1.15      if (use->is_Proj()) {
    1.16 -      Block* buse = _bbs[use->_idx];
    1.17 +      Block* buse = get_block_for_node(use);
    1.18        if (buse != b) {              // In wrong block?
    1.19 -        if (buse != NULL)
    1.20 +        if (buse != NULL) {
    1.21            buse->find_remove(use);   // Remove from wrong block
    1.22 -        _bbs.map(use->_idx, b);     // Re-insert in this block
    1.23 +        }
    1.24 +        map_node_to_block(use, b);
    1.25          b->add_inst(use);
    1.26        }
    1.27      }
    1.28 @@ -97,7 +98,7 @@
    1.29    if (p != NULL && p != n) {    // Control from a block projection?
    1.30      assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
    1.31      // Find trailing Region
    1.32 -    Block *pb = _bbs[in0->_idx]; // Block-projection already has basic block
    1.33 +    Block *pb = get_block_for_node(in0); // Block-projection already has basic block
    1.34      uint j = 0;
    1.35      if (pb->_num_succs != 1) {  // More then 1 successor?
    1.36        // Search for successor
    1.37 @@ -127,14 +128,15 @@
    1.38    while ( spstack.is_nonempty() ) {
    1.39      Node *n = spstack.pop();
    1.40      if( !visited.test_set(n->_idx) ) { // Test node and flag it as visited
    1.41 -      if( n->pinned() && !_bbs.lookup(n->_idx) ) {  // Pinned?  Nail it down!
    1.42 +      if( n->pinned() && !has_block(n)) {  // Pinned?  Nail it down!
    1.43          assert( n->in(0), "pinned Node must have Control" );
    1.44          // Before setting block replace block_proj control edge
    1.45          replace_block_proj_ctrl(n);
    1.46          Node *input = n->in(0);
    1.47 -        while( !input->is_block_start() )
    1.48 +        while (!input->is_block_start()) {
    1.49            input = input->in(0);
    1.50 -        Block *b = _bbs[input->_idx];  // Basic block of controlling input
    1.51 +        }
    1.52 +        Block *b = get_block_for_node(input); // Basic block of controlling input
    1.53          schedule_node_into_block(n, b);
    1.54        }
    1.55        for( int i = n->req() - 1; i >= 0; --i ) {  // For all inputs
    1.56 @@ -149,7 +151,7 @@
    1.57  // Assert that new input b2 is dominated by all previous inputs.
    1.58  // Check this by by seeing that it is dominated by b1, the deepest
    1.59  // input observed until b2.
    1.60 -static void assert_dom(Block* b1, Block* b2, Node* n, Block_Array &bbs) {
    1.61 +static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
    1.62    if (b1 == NULL)  return;
    1.63    assert(b1->_dom_depth < b2->_dom_depth, "sanity");
    1.64    Block* tmp = b2;
    1.65 @@ -162,7 +164,7 @@
    1.66      for (uint j=0; j<n->len(); j++) { // For all inputs
    1.67        Node* inn = n->in(j); // Get input
    1.68        if (inn == NULL)  continue;  // Ignore NULL, missing inputs
    1.69 -      Block* inb = bbs[inn->_idx];
    1.70 +      Block* inb = cfg->get_block_for_node(inn);
    1.71        tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
    1.72                   inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
    1.73        inn->dump();
    1.74 @@ -174,20 +176,20 @@
    1.75  }
    1.76  #endif
    1.77  
    1.78 -static Block* find_deepest_input(Node* n, Block_Array &bbs) {
    1.79 +static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
    1.80    // Find the last input dominated by all other inputs.
    1.81    Block* deepb           = NULL;        // Deepest block so far
    1.82    int    deepb_dom_depth = 0;
    1.83    for (uint k = 0; k < n->len(); k++) { // For all inputs
    1.84      Node* inn = n->in(k);               // Get input
    1.85      if (inn == NULL)  continue;         // Ignore NULL, missing inputs
    1.86 -    Block* inb = bbs[inn->_idx];
    1.87 +    Block* inb = cfg->get_block_for_node(inn);
    1.88      assert(inb != NULL, "must already have scheduled this input");
    1.89      if (deepb_dom_depth < (int) inb->_dom_depth) {
    1.90        // The new inb must be dominated by the previous deepb.
    1.91        // The various inputs must be linearly ordered in the dom
    1.92        // tree, or else there will not be a unique deepest block.
    1.93 -      DEBUG_ONLY(assert_dom(deepb, inb, n, bbs));
    1.94 +      DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
    1.95        deepb = inb;                      // Save deepest block
    1.96        deepb_dom_depth = deepb->_dom_depth;
    1.97      }
    1.98 @@ -243,7 +245,7 @@
    1.99          ++i;
   1.100          if (in == NULL) continue;    // Ignore NULL, missing inputs
   1.101          int is_visited = visited.test_set(in->_idx);
   1.102 -        if (!_bbs.lookup(in->_idx)) { // Missing block selection?
   1.103 +        if (!has_block(in)) { // Missing block selection?
   1.104            if (is_visited) {
   1.105              // assert( !visited.test(in->_idx), "did not schedule early" );
   1.106              return false;
   1.107 @@ -265,9 +267,9 @@
   1.108          // any projections which depend on them.
   1.109          if (!n->pinned()) {
   1.110            // Set earliest legal block.
   1.111 -          _bbs.map(n->_idx, find_deepest_input(n, _bbs));
   1.112 +          map_node_to_block(n, find_deepest_input(n, this));
   1.113          } else {
   1.114 -          assert(_bbs[n->_idx] == _bbs[n->in(0)->_idx], "Pinned Node should be at the same block as its control edge");
   1.115 +          assert(get_block_for_node(n) == get_block_for_node(n->in(0)), "Pinned Node should be at the same block as its control edge");
   1.116          }
   1.117  
   1.118          if (nstack.is_empty()) {
   1.119 @@ -313,8 +315,8 @@
   1.120  // The definition must dominate the use, so move the LCA upward in the
   1.121  // dominator tree to dominate the use.  If the use is a phi, adjust
   1.122  // the LCA only with the phi input paths which actually use this def.
   1.123 -static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, Block_Array &bbs) {
   1.124 -  Block* buse = bbs[use->_idx];
   1.125 +static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
   1.126 +  Block* buse = cfg->get_block_for_node(use);
   1.127    if (buse == NULL)    return LCA;   // Unused killing Projs have no use block
   1.128    if (!use->is_Phi())  return buse->dom_lca(LCA);
   1.129    uint pmax = use->req();       // Number of Phi inputs
   1.130 @@ -329,7 +331,7 @@
   1.131    // more than once.
   1.132    for (uint j=1; j<pmax; j++) { // For all inputs
   1.133      if (use->in(j) == def) {    // Found matching input?
   1.134 -      Block* pred = bbs[buse->pred(j)->_idx];
   1.135 +      Block* pred = cfg->get_block_for_node(buse->pred(j));
   1.136        LCA = pred->dom_lca(LCA);
   1.137      }
   1.138    }
   1.139 @@ -342,8 +344,7 @@
   1.140  // which are marked with the given index.  Return the LCA (in the dom tree)
   1.141  // of all marked blocks.  If there are none marked, return the original
   1.142  // LCA.
   1.143 -static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark,
   1.144 -                                    Block* early, Block_Array &bbs) {
   1.145 +static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
   1.146    Block_List worklist;
   1.147    worklist.push(LCA);
   1.148    while (worklist.size() > 0) {
   1.149 @@ -366,7 +367,7 @@
   1.150      } else {
   1.151        // Keep searching through this block's predecessors.
   1.152        for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
   1.153 -        Block* mid_parent = bbs[ mid->pred(j)->_idx ];
   1.154 +        Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
   1.155          worklist.push(mid_parent);
   1.156        }
   1.157      }
   1.158 @@ -384,7 +385,7 @@
   1.159  // be earlier (at a shallower dom_depth) than the true schedule_early
   1.160  // point of the node. We compute this earlier block as a more permissive
   1.161  // site for anti-dependency insertion, but only if subsume_loads is enabled.
   1.162 -static Block* memory_early_block(Node* load, Block* early, Block_Array &bbs) {
   1.163 +static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
   1.164    Node* base;
   1.165    Node* index;
   1.166    Node* store = load->in(MemNode::Memory);
   1.167 @@ -412,12 +413,12 @@
   1.168      Block* deepb           = NULL;        // Deepest block so far
   1.169      int    deepb_dom_depth = 0;
   1.170      for (int i = 0; i < mem_inputs_length; i++) {
   1.171 -      Block* inb = bbs[mem_inputs[i]->_idx];
   1.172 +      Block* inb = cfg->get_block_for_node(mem_inputs[i]);
   1.173        if (deepb_dom_depth < (int) inb->_dom_depth) {
   1.174          // The new inb must be dominated by the previous deepb.
   1.175          // The various inputs must be linearly ordered in the dom
   1.176          // tree, or else there will not be a unique deepest block.
   1.177 -        DEBUG_ONLY(assert_dom(deepb, inb, load, bbs));
   1.178 +        DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
   1.179          deepb = inb;                      // Save deepest block
   1.180          deepb_dom_depth = deepb->_dom_depth;
   1.181        }
   1.182 @@ -488,14 +489,14 @@
   1.183    // and other inputs are first available.  (Computed by schedule_early.)
   1.184    // For normal loads, 'early' is the shallowest place (dom graph wise)
   1.185    // to look for anti-deps between this load and any store.
   1.186 -  Block* early = _bbs[load_index];
   1.187 +  Block* early = get_block_for_node(load);
   1.188  
   1.189    // If we are subsuming loads, compute an "early" block that only considers
   1.190    // memory or address inputs. This block may be different than the
   1.191    // schedule_early block in that it could be at an even shallower depth in the
   1.192    // dominator tree, and allow for a broader discovery of anti-dependences.
   1.193    if (C->subsume_loads()) {
   1.194 -    early = memory_early_block(load, early, _bbs);
   1.195 +    early = memory_early_block(load, early, this);
   1.196    }
   1.197  
   1.198    ResourceArea *area = Thread::current()->resource_area();
   1.199 @@ -619,7 +620,7 @@
   1.200      // or else observe that 'store' is all the way up in the
   1.201      // earliest legal block for 'load'.  In the latter case,
   1.202      // immediately insert an anti-dependence edge.
   1.203 -    Block* store_block = _bbs[store->_idx];
   1.204 +    Block* store_block = get_block_for_node(store);
   1.205      assert(store_block != NULL, "unused killing projections skipped above");
   1.206  
   1.207      if (store->is_Phi()) {
   1.208 @@ -637,7 +638,7 @@
   1.209        for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
   1.210          if (store->in(j) == mem) {   // Found matching input?
   1.211            DEBUG_ONLY(found_match = true);
   1.212 -          Block* pred_block = _bbs[store_block->pred(j)->_idx];
   1.213 +          Block* pred_block = get_block_for_node(store_block->pred(j));
   1.214            if (pred_block != early) {
   1.215              // If any predecessor of the Phi matches the load's "early block",
   1.216              // we do not need a precedence edge between the Phi and 'load'
   1.217 @@ -711,7 +712,7 @@
   1.218    // preventing the load from sinking past any block containing
   1.219    // a store that may invalidate the memory state required by 'load'.
   1.220    if (must_raise_LCA)
   1.221 -    LCA = raise_LCA_above_marks(LCA, load->_idx, early, _bbs);
   1.222 +    LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
   1.223    if (LCA == early)  return LCA;
   1.224  
   1.225    // Insert anti-dependence edges from 'load' to each store
   1.226 @@ -720,7 +721,7 @@
   1.227    if (LCA->raise_LCA_mark() == load_index) {
   1.228      while (non_early_stores.size() > 0) {
   1.229        Node* store = non_early_stores.pop();
   1.230 -      Block* store_block = _bbs[store->_idx];
   1.231 +      Block* store_block = get_block_for_node(store);
   1.232        if (store_block == LCA) {
   1.233          // add anti_dependence from store to load in its own block
   1.234          assert(store != load->in(0), "dependence cycle found");
   1.235 @@ -754,7 +755,7 @@
   1.236  
   1.237  public:
   1.238    // Constructor for the iterator
   1.239 -  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs);
   1.240 +  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg);
   1.241  
   1.242    // Postincrement operator to iterate over the nodes
   1.243    Node *next();
   1.244 @@ -762,12 +763,12 @@
   1.245  private:
   1.246    VectorSet   &_visited;
   1.247    Node_List   &_stack;
   1.248 -  Block_Array &_bbs;
   1.249 +  PhaseCFG &_cfg;
   1.250  };
   1.251  
   1.252  // Constructor for the Node_Backward_Iterator
   1.253 -Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs )
   1.254 -  : _visited(visited), _stack(stack), _bbs(bbs) {
   1.255 +Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg)
   1.256 +  : _visited(visited), _stack(stack), _cfg(cfg) {
   1.257    // The stack should contain exactly the root
   1.258    stack.clear();
   1.259    stack.push(root);
   1.260 @@ -797,8 +798,8 @@
   1.261      _visited.set(self->_idx);
   1.262  
   1.263      // Now schedule all uses as late as possible.
   1.264 -    uint src     = self->is_Proj() ? self->in(0)->_idx : self->_idx;
   1.265 -    uint src_rpo = _bbs[src]->_rpo;
   1.266 +    const Node* src = self->is_Proj() ? self->in(0) : self;
   1.267 +    uint src_rpo = _cfg.get_block_for_node(src)->_rpo;
   1.268  
   1.269      // Schedule all nodes in a post-order visit
   1.270      Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any
   1.271 @@ -814,7 +815,7 @@
   1.272  
   1.273        // do not traverse backward control edges
   1.274        Node *use = n->is_Proj() ? n->in(0) : n;
   1.275 -      uint use_rpo = _bbs[use->_idx]->_rpo;
   1.276 +      uint use_rpo = _cfg.get_block_for_node(use)->_rpo;
   1.277  
   1.278        if ( use_rpo < src_rpo )
   1.279          continue;
   1.280 @@ -852,7 +853,7 @@
   1.281      tty->print("\n#---- ComputeLatenciesBackwards ----\n");
   1.282  #endif
   1.283  
   1.284 -  Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
   1.285 +  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
   1.286    Node *n;
   1.287  
   1.288    // Walk over all the nodes from last to first
   1.289 @@ -883,7 +884,7 @@
   1.290  
   1.291    uint nlen = n->len();
   1.292    uint use_latency = _node_latency->at_grow(n->_idx);
   1.293 -  uint use_pre_order = _bbs[n->_idx]->_pre_order;
   1.294 +  uint use_pre_order = get_block_for_node(n)->_pre_order;
   1.295  
   1.296    for ( uint j=0; j<nlen; j++ ) {
   1.297      Node *def = n->in(j);
   1.298 @@ -903,7 +904,7 @@
   1.299  #endif
   1.300  
   1.301      // If the defining block is not known, assume it is ok
   1.302 -    Block *def_block = _bbs[def->_idx];
   1.303 +    Block *def_block = get_block_for_node(def);
   1.304      uint def_pre_order = def_block ? def_block->_pre_order : 0;
   1.305  
   1.306      if ( (use_pre_order <  def_pre_order) ||
   1.307 @@ -931,10 +932,11 @@
   1.308  // Compute the latency of a specific use
   1.309  int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
   1.310    // If self-reference, return no latency
   1.311 -  if (use == n || use->is_Root())
   1.312 +  if (use == n || use->is_Root()) {
   1.313      return 0;
   1.314 +  }
   1.315  
   1.316 -  uint def_pre_order = _bbs[def->_idx]->_pre_order;
   1.317 +  uint def_pre_order = get_block_for_node(def)->_pre_order;
   1.318    uint latency = 0;
   1.319  
   1.320    // If the use is not a projection, then it is simple...
   1.321 @@ -946,7 +948,7 @@
   1.322      }
   1.323  #endif
   1.324  
   1.325 -    uint use_pre_order = _bbs[use->_idx]->_pre_order;
   1.326 +    uint use_pre_order = get_block_for_node(use)->_pre_order;
   1.327  
   1.328      if (use_pre_order < def_pre_order)
   1.329        return 0;
   1.330 @@ -1018,7 +1020,7 @@
   1.331    uint start_latency = _node_latency->at_grow(LCA->_nodes[0]->_idx);
   1.332    uint end_latency   = _node_latency->at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
   1.333    bool in_latency    = (target <= start_latency);
   1.334 -  const Block* root_block = _bbs[_root->_idx];
   1.335 +  const Block* root_block = get_block_for_node(_root);
   1.336  
   1.337    // Turn off latency scheduling if scheduling is just plain off
   1.338    if (!C->do_scheduling())
   1.339 @@ -1126,12 +1128,12 @@
   1.340      tty->print("\n#---- schedule_late ----\n");
   1.341  #endif
   1.342  
   1.343 -  Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
   1.344 +  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
   1.345    Node *self;
   1.346  
   1.347    // Walk over all the nodes from last to first
   1.348    while (self = iter.next()) {
   1.349 -    Block* early = _bbs[self->_idx];   // Earliest legal placement
   1.350 +    Block* early = get_block_for_node(self); // Earliest legal placement
   1.351  
   1.352      if (self->is_top()) {
   1.353        // Top node goes in bb #2 with other constants.
   1.354 @@ -1179,7 +1181,7 @@
   1.355        for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
   1.356          // For all uses, find LCA
   1.357          Node* use = self->fast_out(i);
   1.358 -        LCA = raise_LCA_above_use(LCA, use, self, _bbs);
   1.359 +        LCA = raise_LCA_above_use(LCA, use, self, this);
   1.360        }
   1.361      }  // (Hide defs of imax, i from rest of block.)
   1.362  
   1.363 @@ -1187,7 +1189,7 @@
   1.364      // requirement for correctness but it reduces useless
   1.365      // interference between temps and other nodes.
   1.366      if (mach != NULL && mach->is_MachTemp()) {
   1.367 -      _bbs.map(self->_idx, LCA);
   1.368 +      map_node_to_block(self, LCA);
   1.369        LCA->add_inst(self);
   1.370        continue;
   1.371      }
   1.372 @@ -1262,10 +1264,10 @@
   1.373    }
   1.374  #endif
   1.375  
   1.376 -  // Initialize the bbs.map for things on the proj_list
   1.377 -  uint i;
   1.378 -  for( i=0; i < proj_list.size(); i++ )
   1.379 -    _bbs.map(proj_list[i]->_idx, NULL);
   1.380 +  // Initialize the node to block mapping for things on the proj_list
   1.381 +  for (uint i = 0; i < proj_list.size(); i++) {
   1.382 +    unmap_node_from_block(proj_list[i]);
   1.383 +  }
   1.384  
   1.385    // Set the basic block for Nodes pinned into blocks
   1.386    Arena *a = Thread::current()->resource_area();
   1.387 @@ -1333,7 +1335,7 @@
   1.388      for( int i= matcher._null_check_tests.size()-2; i>=0; i-=2 ) {
   1.389        Node *proj = matcher._null_check_tests[i  ];
   1.390        Node *val  = matcher._null_check_tests[i+1];
   1.391 -      _bbs[proj->_idx]->implicit_null_check(this, proj, val, allowed_reasons);
   1.392 +      get_block_for_node(proj)->implicit_null_check(this, proj, val, allowed_reasons);
   1.393        // The implicit_null_check will only perform the transformation
   1.394        // if the null branch is truly uncommon, *and* it leads to an
   1.395        // uncommon trap.  Combined with the too_many_traps guards
   1.396 @@ -1353,7 +1355,7 @@
   1.397    uint max_idx = C->unique();
   1.398    GrowableArray<int> ready_cnt(max_idx, max_idx, -1);
   1.399    visited.Clear();
   1.400 -  for (i = 0; i < _num_blocks; i++) {
   1.401 +  for (uint i = 0; i < _num_blocks; i++) {
   1.402      if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) {
   1.403        if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
   1.404          C->record_method_not_compilable("local schedule failed");
   1.405 @@ -1364,8 +1366,9 @@
   1.406  
   1.407    // If we inserted any instructions between a Call and his CatchNode,
   1.408    // clone the instructions on all paths below the Catch.
   1.409 -  for( i=0; i < _num_blocks; i++ )
   1.410 -    _blocks[i]->call_catch_cleanup(_bbs, C);
   1.411 +  for (uint i = 0; i < _num_blocks; i++) {
   1.412 +    _blocks[i]->call_catch_cleanup(this, C);
   1.413 +  }
   1.414  
   1.415  #ifndef PRODUCT
   1.416    if (trace_opto_pipelining()) {
   1.417 @@ -1392,7 +1395,7 @@
   1.418      Block_List worklist;
   1.419      Block* root_blk = _blocks[0];
   1.420      for (uint i = 1; i < root_blk->num_preds(); i++) {
   1.421 -      Block *pb = _bbs[root_blk->pred(i)->_idx];
   1.422 +      Block *pb = get_block_for_node(root_blk->pred(i));
   1.423        if (pb->has_uncommon_code()) {
   1.424          worklist.push(pb);
   1.425        }
   1.426 @@ -1401,7 +1404,7 @@
   1.427        Block* uct = worklist.pop();
   1.428        if (uct == _broot) continue;
   1.429        for (uint i = 1; i < uct->num_preds(); i++) {
   1.430 -        Block *pb = _bbs[uct->pred(i)->_idx];
   1.431 +        Block *pb = get_block_for_node(uct->pred(i));
   1.432          if (pb->_num_succs == 1) {
   1.433            worklist.push(pb);
   1.434          } else if (pb->num_fall_throughs() == 2) {
   1.435 @@ -1430,7 +1433,7 @@
   1.436      Block_List worklist;
   1.437      Block* root_blk = _blocks[0];
   1.438      for (uint i = 1; i < root_blk->num_preds(); i++) {
   1.439 -      Block *pb = _bbs[root_blk->pred(i)->_idx];
   1.440 +      Block *pb = get_block_for_node(root_blk->pred(i));
   1.441        if (pb->has_uncommon_code()) {
   1.442          worklist.push(pb);
   1.443        }
   1.444 @@ -1439,7 +1442,7 @@
   1.445        Block* uct = worklist.pop();
   1.446        uct->_freq = PROB_MIN;
   1.447        for (uint i = 1; i < uct->num_preds(); i++) {
   1.448 -        Block *pb = _bbs[uct->pred(i)->_idx];
   1.449 +        Block *pb = get_block_for_node(uct->pred(i));
   1.450          if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
   1.451            worklist.push(pb);
   1.452          }
   1.453 @@ -1499,7 +1502,7 @@
   1.454        Block* loop_head = b;
   1.455        assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
   1.456        Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
   1.457 -      Block* tail = _bbs[tail_n->_idx];
   1.458 +      Block* tail = get_block_for_node(tail_n);
   1.459  
   1.460        // Defensively filter out Loop nodes for non-single-entry loops.
   1.461        // For all reasonable loops, the head occurs before the tail in RPO.
   1.462 @@ -1514,13 +1517,13 @@
   1.463          loop_head->_loop = nloop;
   1.464          // Add to nloop so push_pred() will skip over inner loops
   1.465          nloop->add_member(loop_head);
   1.466 -        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, _bbs);
   1.467 +        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);
   1.468  
   1.469          while (worklist.size() > 0) {
   1.470            Block* member = worklist.pop();
   1.471            if (member != loop_head) {
   1.472              for (uint j = 1; j < member->num_preds(); j++) {
   1.473 -              nloop->push_pred(member, j, worklist, _bbs);
   1.474 +              nloop->push_pred(member, j, worklist, this);
   1.475              }
   1.476            }
   1.477          }
   1.478 @@ -1557,9 +1560,9 @@
   1.479  }
   1.480  
   1.481  //------------------------------push_pred--------------------------------------
   1.482 -void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk) {
   1.483 +void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
   1.484    Node* pred_n = blk->pred(i);
   1.485 -  Block* pred = node_to_blk[pred_n->_idx];
   1.486 +  Block* pred = cfg->get_block_for_node(pred_n);
   1.487    CFGLoop *pred_loop = pred->_loop;
   1.488    if (pred_loop == NULL) {
   1.489      // Filter out blocks for non-single-entry loops.
   1.490 @@ -1580,7 +1583,7 @@
   1.491        Block* pred_head = pred_loop->head();
   1.492        assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
   1.493        assert(pred_head != head(), "loop head in only one loop");
   1.494 -      push_pred(pred_head, LoopNode::EntryControl, worklist, node_to_blk);
   1.495 +      push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
   1.496      } else {
   1.497        assert(pred_loop->_parent == this && _parent == NULL, "just checking");
   1.498      }
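
For reference, the PhaseCFG accessors used throughout the hunks above are
declared in block.hpp rather than in this file, so they do not appear in the
diff. The sketch below shows roughly how such accessors could be shaped; the
backing member name (_node_to_block_mapping) and the method bodies are
assumptions inferred from the call sites, not text from this changeset:

    class PhaseCFG : public Phase {
      Block_Array _node_to_block_mapping;   // assumed private node -> block map

     public:
      Block* get_block_for_node(const Node* node) const {
        return _node_to_block_mapping[node->_idx];
      }
      void map_node_to_block(const Node* node, Block* block) {
        _node_to_block_mapping.map(node->_idx, block);
      }
      bool has_block(const Node* node) const {
        return (_node_to_block_mapping.lookup(node->_idx) != NULL);
      }
      void unmap_node_from_block(const Node* node) {
        _node_to_block_mapping.map(node->_idx, NULL);
      }
      // ... remaining PhaseCFG members unchanged ...
    };
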
