8011621: live_ranges_in_separate_class.patch

Tue, 16 Apr 2013 10:08:41 +0200

author
neliasso
date
Tue, 16 Apr 2013 10:08:41 +0200
changeset 4949
8373c19be854
parent 4946
1c6887c9afaa
child 4950
c89eab0b6b30

8011621: live_ranges_in_separate_class.patch
Reviewed-by: kvn, roland
Contributed-by: niclas.adlertz@oracle.com

make/bsd/makefiles/vm.make file | annotate | diff | comparison | revisions
make/linux/makefiles/vm.make file | annotate | diff | comparison | revisions
make/solaris/makefiles/vm.make file | annotate | diff | comparison | revisions
make/windows/create_obj_files.sh file | annotate | diff | comparison | revisions
src/os/bsd/vm/chaitin_bsd.cpp file | annotate | diff | comparison | revisions
src/os/linux/vm/chaitin_linux.cpp file | annotate | diff | comparison | revisions
src/os/solaris/vm/chaitin_solaris.cpp file | annotate | diff | comparison | revisions
src/os/windows/vm/chaitin_windows.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/chaitin.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/chaitin.hpp file | annotate | diff | comparison | revisions
src/share/vm/opto/coalesce.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/coalesce.hpp file | annotate | diff | comparison | revisions
src/share/vm/opto/compile.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/idealGraphPrinter.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/ifg.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/live.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/live.hpp file | annotate | diff | comparison | revisions
src/share/vm/opto/postaloc.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/reg_split.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/regalloc.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/vmStructs.cpp file | annotate | diff | comparison | revisions
     1.1 --- a/make/bsd/makefiles/vm.make	Mon Apr 15 16:20:05 2013 -0700
     1.2 +++ b/make/bsd/makefiles/vm.make	Tue Apr 16 10:08:41 2013 +0200
     1.3 @@ -187,7 +187,7 @@
     1.4  Src_Dirs/SHARK     := $(CORE_PATHS) $(SHARK_PATHS)
     1.5  Src_Dirs := $(Src_Dirs/$(TYPE))
     1.6  
     1.7 -COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp chaitin\* c2_\* runtime_\*
     1.8 +COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\*
     1.9  COMPILER1_SPECIFIC_FILES := c1_\*
    1.10  SHARK_SPECIFIC_FILES     := shark
    1.11  ZERO_SPECIFIC_FILES      := zero
     2.1 --- a/make/linux/makefiles/vm.make	Mon Apr 15 16:20:05 2013 -0700
     2.2 +++ b/make/linux/makefiles/vm.make	Tue Apr 16 10:08:41 2013 +0200
     2.3 @@ -189,7 +189,7 @@
     2.4  Src_Dirs/SHARK     := $(CORE_PATHS) $(SHARK_PATHS)
     2.5  Src_Dirs := $(Src_Dirs/$(TYPE))
     2.6  
     2.7 -COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp chaitin\* c2_\* runtime_\*
     2.8 +COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\*
     2.9  COMPILER1_SPECIFIC_FILES := c1_\*
    2.10  SHARK_SPECIFIC_FILES     := shark
    2.11  ZERO_SPECIFIC_FILES      := zero
     3.1 --- a/make/solaris/makefiles/vm.make	Mon Apr 15 16:20:05 2013 -0700
     3.2 +++ b/make/solaris/makefiles/vm.make	Tue Apr 16 10:08:41 2013 +0200
     3.3 @@ -202,7 +202,7 @@
     3.4  Src_Dirs/SHARK     := $(CORE_PATHS)
     3.5  Src_Dirs := $(Src_Dirs/$(TYPE))
     3.6  
     3.7 -COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp chaitin\* c2_\* runtime_\*
     3.8 +COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\*
     3.9  COMPILER1_SPECIFIC_FILES := c1_\*
    3.10  SHARK_SPECIFIC_FILES     := shark
    3.11  ZERO_SPECIFIC_FILES      := zero
     4.1 --- a/make/windows/create_obj_files.sh	Mon Apr 15 16:20:05 2013 -0700
     4.2 +++ b/make/windows/create_obj_files.sh	Tue Apr 16 10:08:41 2013 +0200
     4.3 @@ -114,7 +114,7 @@
     4.4      "shark")     Src_Dirs="${CORE_PATHS}" ;;
     4.5  esac
     4.6  
     4.7 -COMPILER2_SPECIFIC_FILES="opto libadt bcEscapeAnalyzer.cpp chaitin* c2_* runtime_*"
     4.8 +COMPILER2_SPECIFIC_FILES="opto libadt bcEscapeAnalyzer.cpp c2_* runtime_*"
     4.9  COMPILER1_SPECIFIC_FILES="c1_*"
    4.10  SHARK_SPECIFIC_FILES="shark"
    4.11  ZERO_SPECIFIC_FILES="zero"
     5.1 --- a/src/os/bsd/vm/chaitin_bsd.cpp	Mon Apr 15 16:20:05 2013 -0700
     5.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.3 @@ -1,42 +0,0 @@
     5.4 -/*
     5.5 - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
     5.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     5.7 - *
     5.8 - * This code is free software; you can redistribute it and/or modify it
     5.9 - * under the terms of the GNU General Public License version 2 only, as
    5.10 - * published by the Free Software Foundation.
    5.11 - *
    5.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
    5.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    5.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    5.15 - * version 2 for more details (a copy is included in the LICENSE file that
    5.16 - * accompanied this code).
    5.17 - *
    5.18 - * You should have received a copy of the GNU General Public License version
    5.19 - * 2 along with this work; if not, write to the Free Software Foundation,
    5.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    5.21 - *
    5.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    5.23 - * or visit www.oracle.com if you need additional information or have any
    5.24 - * questions.
    5.25 - *
    5.26 - */
    5.27 -
    5.28 -#include "precompiled.hpp"
    5.29 -#include "opto/chaitin.hpp"
    5.30 -#include "opto/machnode.hpp"
    5.31 -
    5.32 -void PhaseRegAlloc::pd_preallocate_hook() {
    5.33 -  // no action
    5.34 -}
    5.35 -
    5.36 -#ifdef ASSERT
    5.37 -void PhaseRegAlloc::pd_postallocate_verify_hook() {
    5.38 -  // no action
    5.39 -}
    5.40 -#endif
    5.41 -
    5.42 -
    5.43 -// Reconciliation History
    5.44 -// chaitin_solaris.cpp  1.7 99/07/12 23:54:22
    5.45 -// End
     6.1 --- a/src/os/linux/vm/chaitin_linux.cpp	Mon Apr 15 16:20:05 2013 -0700
     6.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.3 @@ -1,42 +0,0 @@
     6.4 -/*
     6.5 - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
     6.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     6.7 - *
     6.8 - * This code is free software; you can redistribute it and/or modify it
     6.9 - * under the terms of the GNU General Public License version 2 only, as
    6.10 - * published by the Free Software Foundation.
    6.11 - *
    6.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
    6.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    6.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    6.15 - * version 2 for more details (a copy is included in the LICENSE file that
    6.16 - * accompanied this code).
    6.17 - *
    6.18 - * You should have received a copy of the GNU General Public License version
    6.19 - * 2 along with this work; if not, write to the Free Software Foundation,
    6.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    6.21 - *
    6.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    6.23 - * or visit www.oracle.com if you need additional information or have any
    6.24 - * questions.
    6.25 - *
    6.26 - */
    6.27 -
    6.28 -#include "precompiled.hpp"
    6.29 -#include "opto/chaitin.hpp"
    6.30 -#include "opto/machnode.hpp"
    6.31 -
    6.32 -void PhaseRegAlloc::pd_preallocate_hook() {
    6.33 -  // no action
    6.34 -}
    6.35 -
    6.36 -#ifdef ASSERT
    6.37 -void PhaseRegAlloc::pd_postallocate_verify_hook() {
    6.38 -  // no action
    6.39 -}
    6.40 -#endif
    6.41 -
    6.42 -
    6.43 -// Reconciliation History
    6.44 -// chaitin_solaris.cpp  1.7 99/07/12 23:54:22
    6.45 -// End
     7.1 --- a/src/os/solaris/vm/chaitin_solaris.cpp	Mon Apr 15 16:20:05 2013 -0700
     7.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.3 @@ -1,46 +0,0 @@
     7.4 -/*
     7.5 - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
     7.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     7.7 - *
     7.8 - * This code is free software; you can redistribute it and/or modify it
     7.9 - * under the terms of the GNU General Public License version 2 only, as
    7.10 - * published by the Free Software Foundation.
    7.11 - *
    7.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
    7.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    7.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    7.15 - * version 2 for more details (a copy is included in the LICENSE file that
    7.16 - * accompanied this code).
    7.17 - *
    7.18 - * You should have received a copy of the GNU General Public License version
    7.19 - * 2 along with this work; if not, write to the Free Software Foundation,
    7.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    7.21 - *
    7.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    7.23 - * or visit www.oracle.com if you need additional information or have any
    7.24 - * questions.
    7.25 - *
    7.26 - */
    7.27 -
    7.28 -#include "precompiled.hpp"
    7.29 -#include "opto/chaitin.hpp"
    7.30 -#include "opto/machnode.hpp"
    7.31 -
    7.32 -void PhaseRegAlloc::pd_preallocate_hook() {
    7.33 -  // no action
    7.34 -}
    7.35 -
    7.36 -#ifdef ASSERT
    7.37 -void PhaseRegAlloc::pd_postallocate_verify_hook() {
    7.38 -  // no action
    7.39 -}
    7.40 -#endif
    7.41 -
    7.42 -
    7.43 -//Reconciliation History
    7.44 -// 1.1 99/02/12 15:35:26 chaitin_win32.cpp
    7.45 -// 1.2 99/02/18 15:38:56 chaitin_win32.cpp
    7.46 -// 1.4 99/03/09 10:37:48 chaitin_win32.cpp
    7.47 -// 1.6 99/03/25 11:07:44 chaitin_win32.cpp
    7.48 -// 1.8 99/06/22 16:38:58 chaitin_win32.cpp
    7.49 -//End
     8.1 --- a/src/os/windows/vm/chaitin_windows.cpp	Mon Apr 15 16:20:05 2013 -0700
     8.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.3 @@ -1,78 +0,0 @@
     8.4 -/*
     8.5 - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
     8.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     8.7 - *
     8.8 - * This code is free software; you can redistribute it and/or modify it
     8.9 - * under the terms of the GNU General Public License version 2 only, as
    8.10 - * published by the Free Software Foundation.
    8.11 - *
    8.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
    8.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    8.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    8.15 - * version 2 for more details (a copy is included in the LICENSE file that
    8.16 - * accompanied this code).
    8.17 - *
    8.18 - * You should have received a copy of the GNU General Public License version
    8.19 - * 2 along with this work; if not, write to the Free Software Foundation,
    8.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    8.21 - *
    8.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    8.23 - * or visit www.oracle.com if you need additional information or have any
    8.24 - * questions.
    8.25 - *
    8.26 - */
    8.27 -
    8.28 -#include "precompiled.hpp"
    8.29 -#include "opto/chaitin.hpp"
    8.30 -#include "opto/machnode.hpp"
    8.31 -
    8.32 -// Disallow the use of the frame pointer (EBP) for implicit null exceptions
    8.33 -// on win95/98.  If we do not do this, the OS gets confused and gives a stack
    8.34 -// error.
    8.35 -void PhaseRegAlloc::pd_preallocate_hook() {
    8.36 -#ifndef _WIN64
    8.37 -  if (ImplicitNullChecks && !os::win32::is_nt()) {
    8.38 -    for (uint block_num=1; block_num<_cfg._num_blocks; block_num++) {
    8.39 -      Block *block = _cfg._blocks[block_num];
    8.40 -
    8.41 -      Node *block_end = block->end();
    8.42 -      if (block_end->is_MachNullCheck() &&
    8.43 -          block_end->as_Mach()->ideal_Opcode() != Op_Con) {
    8.44 -        // The last instruction in the block is an implicit null check.
    8.45 -        // Fix its input so that it does not load into the frame pointer.
    8.46 -        _matcher.pd_implicit_null_fixup(block_end->in(1)->as_Mach(),
    8.47 -                                        block_end->as_MachNullCheck()->_vidx);
    8.48 -      }
    8.49 -    }
    8.50 -  }
    8.51 -#else
    8.52 -  // WIN64==itanium on XP
    8.53 -#endif
    8.54 -}
    8.55 -
    8.56 -#ifdef ASSERT
    8.57 -// Verify that no implicit null check uses the frame pointer (EBP) as
    8.58 -// its register on win95/98.  Use of the frame pointer in an implicit
    8.59 -// null check confuses the OS, yielding a stack error.
    8.60 -void PhaseRegAlloc::pd_postallocate_verify_hook() {
    8.61 -#ifndef _WIN64
    8.62 -  if (ImplicitNullChecks && !os::win32::is_nt()) {
    8.63 -    for (uint block_num=1; block_num<_cfg._num_blocks; block_num++) {
    8.64 -      Block *block = _cfg._blocks[block_num];
    8.65 -
    8.66 -      Node *block_end = block->_nodes[block->_nodes.size()-1];
    8.67 -      if (block_end->is_MachNullCheck() && block_end->as_Mach()->ideal_Opcode() != Op_Con) {
    8.68 -        // The last instruction in the block is an implicit
    8.69 -        // null check.  Verify that this instruction does not
    8.70 -        // use the frame pointer.
    8.71 -        int reg = get_reg_first(block_end->in(1)->in(block_end->as_MachNullCheck()->_vidx));
    8.72 -        assert(reg != EBP_num,
    8.73 -               "implicit null check using frame pointer on win95/98");
    8.74 -      }
    8.75 -    }
    8.76 -  }
    8.77 -#else
    8.78 -  // WIN64==itanium on XP
    8.79 -#endif
    8.80 -}
    8.81 -#endif
     9.1 --- a/src/share/vm/opto/chaitin.cpp	Mon Apr 15 16:20:05 2013 -0700
     9.2 +++ b/src/share/vm/opto/chaitin.cpp	Tue Apr 16 10:08:41 2013 +0200
     9.3 @@ -145,6 +145,72 @@
     9.4  
     9.5  #define NUMBUCKS 3
     9.6  
     9.7 +// Straight out of Tarjan's union-find algorithm
     9.8 +uint LiveRangeMap::find_compress(uint lrg) {
     9.9 +  uint cur = lrg;
    9.10 +  uint next = _uf_map[cur];
    9.11 +  while (next != cur) { // Scan chain of equivalences
    9.12 +    assert( next < cur, "always union smaller");
    9.13 +    cur = next; // until find a fixed-point
    9.14 +    next = _uf_map[cur];
    9.15 +  }
    9.16 +
    9.17 +  // Core of union-find algorithm: update chain of
    9.18 +  // equivalences to be equal to the root.
    9.19 +  while (lrg != next) {
    9.20 +    uint tmp = _uf_map[lrg];
    9.21 +    _uf_map.map(lrg, next);
    9.22 +    lrg = tmp;
    9.23 +  }
    9.24 +  return lrg;
    9.25 +}
    9.26 +
    9.27 +// Reset the Union-Find map to identity
    9.28 +void LiveRangeMap::reset_uf_map(uint max_lrg_id) {
    9.29 +  _max_lrg_id= max_lrg_id;
    9.30 +  // Force the Union-Find mapping to be at least this large
    9.31 +  _uf_map.extend(_max_lrg_id, 0);
    9.32 +  // Initialize it to be the ID mapping.
    9.33 +  for (uint i = 0; i < _max_lrg_id; ++i) {
    9.34 +    _uf_map.map(i, i);
    9.35 +  }
    9.36 +}
    9.37 +
    9.38 +// Make all Nodes map directly to their final live range; no need for
    9.39 +// the Union-Find mapping after this call.
    9.40 +void LiveRangeMap::compress_uf_map_for_nodes() {
    9.41 +  // For all Nodes, compress mapping
    9.42 +  uint unique = _names.Size();
    9.43 +  for (uint i = 0; i < unique; ++i) {
    9.44 +    uint lrg = _names[i];
    9.45 +    uint compressed_lrg = find(lrg);
    9.46 +    if (lrg != compressed_lrg) {
    9.47 +      _names.map(i, compressed_lrg);
    9.48 +    }
    9.49 +  }
    9.50 +}
    9.51 +
    9.52 +// Like Find above, but no path compress, so bad asymptotic behavior
    9.53 +uint LiveRangeMap::find_const(uint lrg) const {
    9.54 +  if (!lrg) {
    9.55 +    return lrg; // Ignore the zero LRG
    9.56 +  }
    9.57 +
    9.58 +  // Off the end?  This happens during debugging dumps when you got
    9.59 +  // brand new live ranges but have not told the allocator yet.
    9.60 +  if (lrg >= _max_lrg_id) {
    9.61 +    return lrg;
    9.62 +  }
    9.63 +
    9.64 +  uint next = _uf_map[lrg];
    9.65 +  while (next != lrg) { // Scan chain of equivalences
    9.66 +    assert(next < lrg, "always union smaller");
    9.67 +    lrg = next; // until find a fixed-point
    9.68 +    next = _uf_map[lrg];
    9.69 +  }
    9.70 +  return next;
    9.71 +}
    9.72 +
    9.73  //------------------------------Chaitin----------------------------------------
    9.74  PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher)
    9.75    : PhaseRegAlloc(unique, cfg, matcher,
    9.76 @@ -153,13 +219,13 @@
    9.77  #else
    9.78         NULL
    9.79  #endif
    9.80 -       ),
    9.81 -    _names(unique), _uf_map(unique),
    9.82 -    _maxlrg(0), _live(0),
    9.83 -    _spilled_once(Thread::current()->resource_area()),
    9.84 -    _spilled_twice(Thread::current()->resource_area()),
    9.85 -    _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0),
    9.86 -    _oldphi(unique)
    9.87 +       )
    9.88 +  , _lrg_map(unique)
    9.89 +  , _live(0)
    9.90 +  , _spilled_once(Thread::current()->resource_area())
    9.91 +  , _spilled_twice(Thread::current()->resource_area())
    9.92 +  , _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0)
    9.93 +  , _oldphi(unique)
    9.94  #ifndef PRODUCT
    9.95    , _trace_spilling(TraceSpilling || C->method_has_option("TraceSpilling"))
    9.96  #endif
    9.97 @@ -168,7 +234,6 @@
    9.98  
    9.99    _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg._outer_loop_freq);
   9.100  
   9.101 -  uint i,j;
   9.102    // Build a list of basic blocks, sorted by frequency
   9.103    _blks = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks );
   9.104    // Experiment with sorting strategies to speed compilation
   9.105 @@ -176,30 +241,30 @@
   9.106    Block **buckets[NUMBUCKS];             // Array of buckets
   9.107    uint    buckcnt[NUMBUCKS];             // Array of bucket counters
   9.108    double  buckval[NUMBUCKS];             // Array of bucket value cutoffs
   9.109 -  for( i = 0; i < NUMBUCKS; i++ ) {
   9.110 -    buckets[i] = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks );
   9.111 +  for (uint i = 0; i < NUMBUCKS; i++) {
   9.112 +    buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg._num_blocks);
   9.113      buckcnt[i] = 0;
   9.114      // Bump by three orders of magnitude each time
   9.115      cutoff *= 0.001;
   9.116      buckval[i] = cutoff;
   9.117 -    for( j = 0; j < _cfg._num_blocks; j++ ) {
   9.118 +    for (uint j = 0; j < _cfg._num_blocks; j++) {
   9.119        buckets[i][j] = NULL;
   9.120      }
   9.121    }
   9.122    // Sort blocks into buckets
   9.123 -  for( i = 0; i < _cfg._num_blocks; i++ ) {
   9.124 -    for( j = 0; j < NUMBUCKS; j++ ) {
   9.125 -      if( (j == NUMBUCKS-1) || (_cfg._blocks[i]->_freq > buckval[j]) ) {
   9.126 +  for (uint i = 0; i < _cfg._num_blocks; i++) {
   9.127 +    for (uint j = 0; j < NUMBUCKS; j++) {
   9.128 +      if ((j == NUMBUCKS - 1) || (_cfg._blocks[i]->_freq > buckval[j])) {
   9.129          // Assign block to end of list for appropriate bucket
   9.130          buckets[j][buckcnt[j]++] = _cfg._blocks[i];
   9.131 -        break;                      // kick out of inner loop
   9.132 +        break; // kick out of inner loop
   9.133        }
   9.134      }
   9.135    }
   9.136    // Dump buckets into final block array
   9.137    uint blkcnt = 0;
   9.138 -  for( i = 0; i < NUMBUCKS; i++ ) {
   9.139 -    for( j = 0; j < buckcnt[i]; j++ ) {
   9.140 +  for (uint i = 0; i < NUMBUCKS; i++) {
   9.141 +    for (uint j = 0; j < buckcnt[i]; j++) {
   9.142        _blks[blkcnt++] = buckets[i][j];
   9.143      }
   9.144    }
   9.145 @@ -207,6 +272,77 @@
   9.146    assert(blkcnt == _cfg._num_blocks, "Block array not totally filled");
   9.147  }
   9.148  
   9.149 +//------------------------------Union------------------------------------------
   9.150 +// union 2 sets together.
   9.151 +void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) {
   9.152 +  uint src = _lrg_map.find(src_n);
   9.153 +  uint dst = _lrg_map.find(dst_n);
   9.154 +  assert(src, "");
   9.155 +  assert(dst, "");
   9.156 +  assert(src < _lrg_map.max_lrg_id(), "oob");
   9.157 +  assert(dst < _lrg_map.max_lrg_id(), "oob");
   9.158 +  assert(src < dst, "always union smaller");
   9.159 +  _lrg_map.uf_map(dst, src);
   9.160 +}
   9.161 +
   9.162 +//------------------------------new_lrg----------------------------------------
   9.163 +void PhaseChaitin::new_lrg(const Node *x, uint lrg) {
   9.164 +  // Make the Node->LRG mapping
   9.165 +  _lrg_map.extend(x->_idx,lrg);
   9.166 +  // Make the Union-Find mapping an identity function
   9.167 +  _lrg_map.uf_extend(lrg, lrg);
   9.168 +}
   9.169 +
   9.170 +
   9.171 +bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) {
   9.172 +  Block *bcon = _cfg._bbs[con->_idx];
   9.173 +  uint cindex = bcon->find_node(con);
   9.174 +  Node *con_next = bcon->_nodes[cindex+1];
   9.175 +  if (con_next->in(0) != con || !con_next->is_MachProj()) {
   9.176 +    return false;               // No MachProj's follow
   9.177 +  }
   9.178 +
   9.179 +  // Copy kills after the cloned constant
   9.180 +  Node *kills = con_next->clone();
   9.181 +  kills->set_req(0, copy);
   9.182 +  b->_nodes.insert(idx, kills);
   9.183 +  _cfg._bbs.map(kills->_idx, b);
   9.184 +  new_lrg(kills, max_lrg_id);
   9.185 +  return true;
   9.186 +}
   9.187 +
   9.188 +//------------------------------compact----------------------------------------
   9.189 +// Renumber the live ranges to compact them.  Makes the IFG smaller.
   9.190 +void PhaseChaitin::compact() {
    9.191 +  // Currently the _uf_map contains a series of short chains which are headed
   9.192 +  // by a self-cycle.  All the chains run from big numbers to little numbers.
   9.193 +  // The Find() call chases the chains & shortens them for the next Find call.
   9.194 +  // We are going to change this structure slightly.  Numbers above a moving
   9.195 +  // wave 'i' are unchanged.  Numbers below 'j' point directly to their
   9.196 +  // compacted live range with no further chaining.  There are no chains or
   9.197 +  // cycles below 'i', so the Find call no longer works.
   9.198 +  uint j=1;
   9.199 +  uint i;
   9.200 +  for (i = 1; i < _lrg_map.max_lrg_id(); i++) {
   9.201 +    uint lr = _lrg_map.uf_live_range_id(i);
   9.202 +    // Ignore unallocated live ranges
   9.203 +    if (!lr) {
   9.204 +      continue;
   9.205 +    }
   9.206 +    assert(lr <= i, "");
   9.207 +    _lrg_map.uf_map(i, ( lr == i ) ? j++ : _lrg_map.uf_live_range_id(lr));
   9.208 +  }
   9.209 +  // Now change the Node->LR mapping to reflect the compacted names
   9.210 +  uint unique = _lrg_map.size();
   9.211 +  for (i = 0; i < unique; i++) {
   9.212 +    uint lrg_id = _lrg_map.live_range_id(i);
   9.213 +    _lrg_map.map(i, _lrg_map.uf_live_range_id(lrg_id));
   9.214 +  }
   9.215 +
   9.216 +  // Reset the Union-Find mapping
   9.217 +  _lrg_map.reset_uf_map(j);
   9.218 +}
   9.219 +
   9.220  void PhaseChaitin::Register_Allocate() {
   9.221  
   9.222    // Above the OLD FP (and in registers) are the incoming arguments.  Stack
   9.223 @@ -231,14 +367,12 @@
   9.224    // all copy-related live ranges low and then using the max copy-related
   9.225    // live range as a cut-off for LIVE and the IFG.  In other words, I can
   9.226    // build a subset of LIVE and IFG just for copies.
   9.227 -  PhaseLive live(_cfg,_names,&live_arena);
   9.228 +  PhaseLive live(_cfg, _lrg_map.names(), &live_arena);
   9.229  
   9.230    // Need IFG for coalescing and coloring
   9.231 -  PhaseIFG ifg( &live_arena );
   9.232 +  PhaseIFG ifg(&live_arena);
   9.233    _ifg = &ifg;
   9.234  
   9.235 -  if (C->unique() > _names.Size())  _names.extend(C->unique()-1, 0);
   9.236 -
   9.237    // Come out of SSA world to the Named world.  Assign (virtual) registers to
   9.238    // Nodes.  Use the same register for all inputs and the output of PhiNodes
   9.239    // - effectively ending SSA form.  This requires either coalescing live
   9.240 @@ -258,9 +392,9 @@
   9.241      _live = NULL;                 // Mark live as being not available
   9.242      rm.reset_to_mark();           // Reclaim working storage
   9.243      IndexSet::reset_memory(C, &live_arena);
   9.244 -    ifg.init(_maxlrg);            // Empty IFG
   9.245 +    ifg.init(_lrg_map.max_lrg_id()); // Empty IFG
   9.246      gather_lrg_masks( false );    // Collect LRG masks
   9.247 -    live.compute( _maxlrg );      // Compute liveness
   9.248 +    live.compute(_lrg_map.max_lrg_id()); // Compute liveness
   9.249      _live = &live;                // Mark LIVE as being available
   9.250    }
   9.251  
   9.252 @@ -270,19 +404,19 @@
   9.253    // across any GC point where the derived value is live.  So this code looks
   9.254    // at all the GC points, and "stretches" the live range of any base pointer
   9.255    // to the GC point.
   9.256 -  if( stretch_base_pointer_live_ranges(&live_arena) ) {
   9.257 -    NOT_PRODUCT( Compile::TracePhase t3("computeLive (sbplr)", &_t_computeLive, TimeCompiler); )
   9.258 +  if (stretch_base_pointer_live_ranges(&live_arena)) {
   9.259 +    NOT_PRODUCT(Compile::TracePhase t3("computeLive (sbplr)", &_t_computeLive, TimeCompiler);)
   9.260      // Since some live range stretched, I need to recompute live
   9.261      _live = NULL;
   9.262      rm.reset_to_mark();         // Reclaim working storage
   9.263      IndexSet::reset_memory(C, &live_arena);
   9.264 -    ifg.init(_maxlrg);
   9.265 -    gather_lrg_masks( false );
   9.266 -    live.compute( _maxlrg );
   9.267 +    ifg.init(_lrg_map.max_lrg_id());
   9.268 +    gather_lrg_masks(false);
   9.269 +    live.compute(_lrg_map.max_lrg_id());
   9.270      _live = &live;
   9.271    }
   9.272    // Create the interference graph using virtual copies
   9.273 -  build_ifg_virtual( );  // Include stack slots this time
   9.274 +  build_ifg_virtual();  // Include stack slots this time
   9.275  
   9.276    // Aggressive (but pessimistic) copy coalescing.
   9.277    // This pass works on virtual copies.  Any virtual copies which are not
   9.278 @@ -296,8 +430,8 @@
   9.279      // given Node and search them for an instance, i.e., time O(#MaxLRG)).
   9.280      _ifg->SquareUp();
   9.281  
   9.282 -    PhaseAggressiveCoalesce coalesce( *this );
   9.283 -    coalesce.coalesce_driver( );
   9.284 +    PhaseAggressiveCoalesce coalesce(*this);
   9.285 +    coalesce.coalesce_driver();
   9.286      // Insert un-coalesced copies.  Visit all Phis.  Where inputs to a Phi do
   9.287      // not match the Phi itself, insert a copy.
   9.288      coalesce.insert_copies(_matcher);
   9.289 @@ -310,28 +444,36 @@
   9.290      _live = NULL;
   9.291      rm.reset_to_mark();           // Reclaim working storage
   9.292      IndexSet::reset_memory(C, &live_arena);
   9.293 -    ifg.init(_maxlrg);
   9.294 +    ifg.init(_lrg_map.max_lrg_id());
   9.295      gather_lrg_masks( true );
   9.296 -    live.compute( _maxlrg );
   9.297 +    live.compute(_lrg_map.max_lrg_id());
   9.298      _live = &live;
   9.299    }
   9.300  
   9.301    // Build physical interference graph
   9.302    uint must_spill = 0;
   9.303 -  must_spill = build_ifg_physical( &live_arena );
   9.304 +  must_spill = build_ifg_physical(&live_arena);
   9.305    // If we have a guaranteed spill, might as well spill now
   9.306 -  if( must_spill ) {
   9.307 -    if( !_maxlrg ) return;
   9.308 +  if (must_spill) {
   9.309 +    if(!_lrg_map.max_lrg_id()) {
   9.310 +      return;
   9.311 +    }
   9.312      // Bail out if unique gets too large (ie - unique > MaxNodeLimit)
   9.313      C->check_node_count(10*must_spill, "out of nodes before split");
   9.314 -    if (C->failing())  return;
   9.315 -    _maxlrg = Split(_maxlrg, &split_arena);  // Split spilling LRG everywhere
   9.316 +    if (C->failing()) {
   9.317 +      return;
   9.318 +    }
   9.319 +
   9.320 +    uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
   9.321 +    _lrg_map.set_max_lrg_id(new_max_lrg_id);
   9.322      // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
   9.323      // or we failed to split
   9.324      C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after physical split");
   9.325 -    if (C->failing())  return;
   9.326 +    if (C->failing()) {
   9.327 +      return;
   9.328 +    }
   9.329  
   9.330 -    NOT_PRODUCT( C->verify_graph_edges(); )
   9.331 +    NOT_PRODUCT(C->verify_graph_edges();)
   9.332  
   9.333      compact();                  // Compact LRGs; return new lower max lrg
   9.334  
   9.335 @@ -340,23 +482,23 @@
   9.336        _live = NULL;
   9.337        rm.reset_to_mark();         // Reclaim working storage
   9.338        IndexSet::reset_memory(C, &live_arena);
   9.339 -      ifg.init(_maxlrg);          // Build a new interference graph
   9.340 +      ifg.init(_lrg_map.max_lrg_id()); // Build a new interference graph
   9.341        gather_lrg_masks( true );   // Collect intersect mask
   9.342 -      live.compute( _maxlrg );    // Compute LIVE
   9.343 +      live.compute(_lrg_map.max_lrg_id()); // Compute LIVE
   9.344        _live = &live;
   9.345      }
   9.346 -    build_ifg_physical( &live_arena );
   9.347 +    build_ifg_physical(&live_arena);
   9.348      _ifg->SquareUp();
   9.349      _ifg->Compute_Effective_Degree();
   9.350      // Only do conservative coalescing if requested
   9.351 -    if( OptoCoalesce ) {
   9.352 +    if (OptoCoalesce) {
   9.353        // Conservative (and pessimistic) copy coalescing of those spills
   9.354 -      PhaseConservativeCoalesce coalesce( *this );
   9.355 +      PhaseConservativeCoalesce coalesce(*this);
   9.356        // If max live ranges greater than cutoff, don't color the stack.
   9.357        // This cutoff can be larger than below since it is only done once.
   9.358 -      coalesce.coalesce_driver( );
   9.359 +      coalesce.coalesce_driver();
   9.360      }
   9.361 -    compress_uf_map_for_nodes();
   9.362 +    _lrg_map.compress_uf_map_for_nodes();
   9.363  
   9.364  #ifdef ASSERT
   9.365      verify(&live_arena, true);
   9.366 @@ -390,13 +532,18 @@
   9.367        }
   9.368      }
   9.369  
   9.370 -    if( !_maxlrg ) return;
   9.371 -    _maxlrg = Split(_maxlrg, &split_arena);  // Split spilling LRG everywhere
   9.372 +    if (!_lrg_map.max_lrg_id()) {
   9.373 +      return;
   9.374 +    }
   9.375 +    uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
   9.376 +    _lrg_map.set_max_lrg_id(new_max_lrg_id);
   9.377      // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
   9.378 -    C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after split");
   9.379 -    if (C->failing())  return;
   9.380 +    C->check_node_count(2 * NodeLimitFudgeFactor, "out of nodes after split");
   9.381 +    if (C->failing()) {
   9.382 +      return;
   9.383 +    }
   9.384  
   9.385 -    compact();                  // Compact LRGs; return new lower max lrg
   9.386 +    compact(); // Compact LRGs; return new lower max lrg
   9.387  
   9.388      // Nuke the live-ness and interference graph and LiveRanGe info
   9.389      {
   9.390 @@ -404,26 +551,26 @@
   9.391        _live = NULL;
   9.392        rm.reset_to_mark();         // Reclaim working storage
   9.393        IndexSet::reset_memory(C, &live_arena);
   9.394 -      ifg.init(_maxlrg);
   9.395 +      ifg.init(_lrg_map.max_lrg_id());
   9.396  
   9.397        // Create LiveRanGe array.
   9.398        // Intersect register masks for all USEs and DEFs
   9.399 -      gather_lrg_masks( true );
   9.400 -      live.compute( _maxlrg );
   9.401 +      gather_lrg_masks(true);
   9.402 +      live.compute(_lrg_map.max_lrg_id());
   9.403        _live = &live;
   9.404      }
   9.405 -    must_spill = build_ifg_physical( &live_arena );
   9.406 +    must_spill = build_ifg_physical(&live_arena);
   9.407      _ifg->SquareUp();
   9.408      _ifg->Compute_Effective_Degree();
   9.409  
   9.410      // Only do conservative coalescing if requested
   9.411 -    if( OptoCoalesce ) {
   9.412 +    if (OptoCoalesce) {
   9.413        // Conservative (and pessimistic) copy coalescing
   9.414 -      PhaseConservativeCoalesce coalesce( *this );
   9.415 +      PhaseConservativeCoalesce coalesce(*this);
   9.416        // Check for few live ranges determines how aggressive coalesce is.
   9.417 -      coalesce.coalesce_driver( );
   9.418 +      coalesce.coalesce_driver();
   9.419      }
   9.420 -    compress_uf_map_for_nodes();
   9.421 +    _lrg_map.compress_uf_map_for_nodes();
   9.422  #ifdef ASSERT
   9.423      verify(&live_arena, true);
   9.424  #endif
   9.425 @@ -435,7 +582,7 @@
   9.426  
   9.427      // Select colors by re-inserting LRGs back into the IFG in reverse order.
   9.428      // Return whether or not something spills.
   9.429 -    spills = Select( );
   9.430 +    spills = Select();
   9.431    }
   9.432  
   9.433    // Count number of Simplify-Select trips per coloring success.
   9.434 @@ -452,9 +599,12 @@
   9.435  
   9.436    // max_reg is past the largest *register* used.
   9.437    // Convert that to a frame_slot number.
   9.438 -  if( _max_reg <= _matcher._new_SP )
   9.439 +  if (_max_reg <= _matcher._new_SP) {
   9.440      _framesize = C->out_preserve_stack_slots();
   9.441 -  else _framesize = _max_reg -_matcher._new_SP;
   9.442 +  }
   9.443 +  else {
   9.444 +    _framesize = _max_reg -_matcher._new_SP;
   9.445 +  }
   9.446    assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough");
   9.447  
   9.448    // This frame must preserve the required fp alignment
   9.449 @@ -462,8 +612,9 @@
   9.450    assert( _framesize >= 0 && _framesize <= 1000000, "sanity check" );
   9.451  #ifndef PRODUCT
   9.452    _total_framesize += _framesize;
   9.453 -  if( (int)_framesize > _max_framesize )
   9.454 +  if ((int)_framesize > _max_framesize) {
   9.455      _max_framesize = _framesize;
   9.456 +  }
   9.457  #endif
   9.458  
   9.459    // Convert CISC spills
   9.460 @@ -475,15 +626,17 @@
   9.461      log->elem("regalloc attempts='%d' success='%d'", _trip_cnt, !C->failing());
   9.462    }
   9.463  
   9.464 -  if (C->failing())  return;
   9.465 +  if (C->failing()) {
   9.466 +    return;
   9.467 +  }
   9.468  
   9.469 -  NOT_PRODUCT( C->verify_graph_edges(); )
   9.470 +  NOT_PRODUCT(C->verify_graph_edges();)
   9.471  
   9.472    // Move important info out of the live_arena to longer lasting storage.
   9.473 -  alloc_node_regs(_names.Size());
   9.474 -  for (uint i=0; i < _names.Size(); i++) {
   9.475 -    if (_names[i]) {           // Live range associated with Node?
   9.476 -      LRG &lrg = lrgs(_names[i]);
   9.477 +  alloc_node_regs(_lrg_map.size());
   9.478 +  for (uint i=0; i < _lrg_map.size(); i++) {
   9.479 +    if (_lrg_map.live_range_id(i)) { // Live range associated with Node?
   9.480 +      LRG &lrg = lrgs(_lrg_map.live_range_id(i));
   9.481        if (!lrg.alive()) {
   9.482          set_bad(i);
   9.483        } else if (lrg.num_regs() == 1) {
   9.484 @@ -537,11 +690,11 @@
   9.485        Node *n = b->_nodes[j];
   9.486        // Pre-color to the zero live range, or pick virtual register
   9.487        const RegMask &rm = n->out_RegMask();
   9.488 -      _names.map( n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0 );
   9.489 +      _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
   9.490      }
   9.491    }
   9.492    // Reset the Union-Find mapping to be identity
   9.493 -  reset_uf_map(lr_counter);
   9.494 +  _lrg_map.reset_uf_map(lr_counter);
   9.495  }
   9.496  
   9.497  
   9.498 @@ -551,7 +704,7 @@
   9.499  void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
   9.500  
   9.501    // Nail down the frame pointer live range
   9.502 -  uint fp_lrg = n2lidx(_cfg._root->in(1)->in(TypeFunc::FramePtr));
   9.503 +  uint fp_lrg = _lrg_map.live_range_id(_cfg._root->in(1)->in(TypeFunc::FramePtr));
   9.504    lrgs(fp_lrg)._cost += 1e12;   // Cost is infinite
   9.505  
   9.506    // For all blocks
   9.507 @@ -566,14 +719,14 @@
   9.508        uint idx = n->is_Copy();
   9.509  
   9.510        // Get virtual register number, same as LiveRanGe index
   9.511 -      uint vreg = n2lidx(n);
   9.512 +      uint vreg = _lrg_map.live_range_id(n);
   9.513        LRG &lrg = lrgs(vreg);
   9.514        if( vreg ) {              // No vreg means un-allocable (e.g. memory)
   9.515  
   9.516          // Collect has-copy bit
   9.517          if( idx ) {
   9.518            lrg._has_copy = 1;
   9.519 -          uint clidx = n2lidx(n->in(idx));
   9.520 +          uint clidx = _lrg_map.live_range_id(n->in(idx));
   9.521            LRG &copy_src = lrgs(clidx);
   9.522            copy_src._has_copy = 1;
   9.523          }
   9.524 @@ -773,8 +926,10 @@
   9.525        }
   9.526        // Prepare register mask for each input
   9.527        for( uint k = input_edge_start; k < cnt; k++ ) {
   9.528 -        uint vreg = n2lidx(n->in(k));
   9.529 -        if( !vreg ) continue;
   9.530 +        uint vreg = _lrg_map.live_range_id(n->in(k));
   9.531 +        if (!vreg) {
   9.532 +          continue;
   9.533 +        }
   9.534  
   9.535          // If this instruction is CISC Spillable, add the flags
   9.536          // bit to its appropriate input
   9.537 @@ -857,7 +1012,7 @@
   9.538    } // end for all blocks
   9.539  
   9.540    // Final per-liverange setup
   9.541 -  for (uint i2=0; i2<_maxlrg; i2++) {
   9.542 +  for (uint i2 = 0; i2 < _lrg_map.max_lrg_id(); i2++) {
   9.543      LRG &lrg = lrgs(i2);
   9.544      assert(!lrg._is_vector || !lrg._fat_proj, "sanity");
   9.545      if (lrg.num_regs() > 1 && !lrg._fat_proj) {
   9.546 @@ -879,7 +1034,7 @@
   9.547  // The bit is checked in Simplify.
   9.548  void PhaseChaitin::set_was_low() {
   9.549  #ifdef ASSERT
   9.550 -  for( uint i = 1; i < _maxlrg; i++ ) {
   9.551 +  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
   9.552      int size = lrgs(i).num_regs();
   9.553      uint old_was_lo = lrgs(i)._was_lo;
   9.554      lrgs(i)._was_lo = 0;
   9.555 @@ -913,7 +1068,7 @@
   9.556  // Compute cost/area ratio, in case we spill.  Build the lo-degree list.
   9.557  void PhaseChaitin::cache_lrg_info( ) {
   9.558  
   9.559 -  for( uint i = 1; i < _maxlrg; i++ ) {
   9.560 +  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
   9.561      LRG &lrg = lrgs(i);
   9.562  
   9.563      // Check for being of low degree: means we can be trivially colored.
   9.564 @@ -949,10 +1104,10 @@
   9.565  
   9.566    // Warm up the lo-degree no-copy list
   9.567    int lo_no_copy = 0;
   9.568 -  for( uint i = 1; i < _maxlrg; i++ ) {
   9.569 -    if( (lrgs(i).lo_degree() && !lrgs(i)._has_copy) ||
   9.570 +  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
   9.571 +    if ((lrgs(i).lo_degree() && !lrgs(i)._has_copy) ||
   9.572          !lrgs(i).alive() ||
   9.573 -        lrgs(i)._must_spill ) {
   9.574 +        lrgs(i)._must_spill) {
   9.575        lrgs(i)._next = lo_no_copy;
   9.576        lo_no_copy = i;
   9.577      }
   9.578 @@ -1163,7 +1318,7 @@
   9.579  OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) {
   9.580  
   9.581    // Check for "at_risk" LRG's
   9.582 -  uint risk_lrg = Find(lrg._risk_bias);
   9.583 +  uint risk_lrg = _lrg_map.find(lrg._risk_bias);
   9.584    if( risk_lrg != 0 ) {
   9.585      // Walk the colored neighbors of the "at_risk" candidate
   9.586      // Choose a color which is both legal and already taken by a neighbor
   9.587 @@ -1179,7 +1334,7 @@
   9.588      }
   9.589    }
   9.590  
   9.591 -  uint copy_lrg = Find(lrg._copy_bias);
   9.592 +  uint copy_lrg = _lrg_map.find(lrg._copy_bias);
   9.593    if( copy_lrg != 0 ) {
   9.594      // If he has a color,
   9.595      if( !(*(_ifg->_yanked))[copy_lrg] ) {
   9.596 @@ -1423,10 +1578,10 @@
   9.597  void PhaseChaitin::copy_was_spilled( Node *src, Node *dst ) {
   9.598    if( _spilled_once.test(src->_idx) ) {
   9.599      _spilled_once.set(dst->_idx);
   9.600 -    lrgs(Find(dst))._was_spilled1 = 1;
   9.601 +    lrgs(_lrg_map.find(dst))._was_spilled1 = 1;
   9.602      if( _spilled_twice.test(src->_idx) ) {
   9.603        _spilled_twice.set(dst->_idx);
   9.604 -      lrgs(Find(dst))._was_spilled2 = 1;
   9.605 +      lrgs(_lrg_map.find(dst))._was_spilled2 = 1;
   9.606      }
   9.607    }
   9.608  }
   9.609 @@ -1471,7 +1626,7 @@
   9.610          MachNode *mach = n->as_Mach();
   9.611          inp = mach->operand_index(inp);
   9.612          Node *src = n->in(inp);   // Value to load or store
   9.613 -        LRG &lrg_cisc = lrgs( Find_const(src) );
   9.614 +        LRG &lrg_cisc = lrgs(_lrg_map.find_const(src));
   9.615          OptoReg::Name src_reg = lrg_cisc.reg();
   9.616          // Doubles record the HIGH register of an adjacent pair.
   9.617          src_reg = OptoReg::add(src_reg,1-lrg_cisc.num_regs());
   9.618 @@ -1554,9 +1709,9 @@
   9.619        Block *startb = _cfg._bbs[C->top()->_idx];
   9.620        startb->_nodes.insert(startb->find_node(C->top()), base );
   9.621        _cfg._bbs.map( base->_idx, startb );
   9.622 -      assert (n2lidx(base) == 0, "should not have LRG yet");
   9.623 +      assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
   9.624      }
   9.625 -    if (n2lidx(base) == 0) {
   9.626 +    if (_lrg_map.live_range_id(base) == 0) {
   9.627        new_lrg(base, maxlrg++);
   9.628      }
   9.629      assert(base->in(0) == _cfg._root &&
   9.630 @@ -1566,7 +1721,7 @@
   9.631    }
   9.632  
   9.633    // Check for AddP-related opcodes
   9.634 -  if( !derived->is_Phi() ) {
   9.635 +  if (!derived->is_Phi()) {
   9.636      assert(derived->as_Mach()->ideal_Opcode() == Op_AddP, err_msg_res("but is: %s", derived->Name()));
   9.637      Node *base = derived->in(AddPNode::Base);
   9.638      derived_base_map[derived->_idx] = base;
   9.639 @@ -1629,9 +1784,9 @@
   9.640  // base pointer that is live across the Safepoint for oopmap building.  The
   9.641  // edge pairs get added in after sfpt->jvmtail()->oopoff(), but are in the
   9.642  // required edge set.
   9.643 -bool PhaseChaitin::stretch_base_pointer_live_ranges( ResourceArea *a ) {
   9.644 +bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
   9.645    int must_recompute_live = false;
   9.646 -  uint maxlrg = _maxlrg;
   9.647 +  uint maxlrg = _lrg_map.max_lrg_id();
   9.648    Node **derived_base_map = (Node**)a->Amalloc(sizeof(Node*)*C->unique());
   9.649    memset( derived_base_map, 0, sizeof(Node*)*C->unique() );
   9.650  
   9.651 @@ -1669,15 +1824,18 @@
   9.652        }
   9.653  
   9.654        // Get value being defined
   9.655 -      uint lidx = n2lidx(n);
   9.656 -      if( lidx && lidx < _maxlrg /* Ignore the occasional brand-new live range */) {
   9.657 +      uint lidx = _lrg_map.live_range_id(n);
   9.658 +      // Ignore the occasional brand-new live range
   9.659 +      if (lidx && lidx < _lrg_map.max_lrg_id()) {
   9.660          // Remove from live-out set
   9.661          liveout.remove(lidx);
   9.662  
   9.663          // Copies do not define a new value and so do not interfere.
   9.664          // Remove the copies source from the liveout set before interfering.
   9.665          uint idx = n->is_Copy();
   9.666 -        if( idx ) liveout.remove( n2lidx(n->in(idx)) );
   9.667 +        if (idx) {
   9.668 +          liveout.remove(_lrg_map.live_range_id(n->in(idx)));
   9.669 +        }
   9.670        }
   9.671  
   9.672        // Found a safepoint?
   9.673 @@ -1695,21 +1853,21 @@
   9.674                    derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
   9.675            // If its an OOP with a non-zero offset, then it is derived.
   9.676            if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) {
   9.677 -            Node *base = find_base_for_derived( derived_base_map, derived, maxlrg );
   9.678 -            assert( base->_idx < _names.Size(), "" );
   9.679 +            Node *base = find_base_for_derived(derived_base_map, derived, maxlrg);
   9.680 +            assert(base->_idx < _lrg_map.size(), "");
   9.681              // Add reaching DEFs of derived pointer and base pointer as a
   9.682              // pair of inputs
   9.683 -            n->add_req( derived );
   9.684 -            n->add_req( base );
   9.685 +            n->add_req(derived);
   9.686 +            n->add_req(base);
   9.687  
   9.688              // See if the base pointer is already live to this point.
   9.689              // Since I'm working on the SSA form, live-ness amounts to
   9.690              // reaching def's.  So if I find the base's live range then
   9.691              // I know the base's def reaches here.
   9.692 -            if( (n2lidx(base) >= _maxlrg ||// (Brand new base (hence not live) or
   9.693 -                 !liveout.member( n2lidx(base) ) ) && // not live) AND
   9.694 -                 (n2lidx(base) > 0)                && // not a constant
   9.695 -                 _cfg._bbs[base->_idx] != b ) {     //  base not def'd in blk)
   9.696 +            if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
   9.697 +                 !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
   9.698 +                 (_lrg_map.live_range_id(base) > 0) && // not a constant
   9.699 +                 _cfg._bbs[base->_idx] != b) { // base not def'd in blk)
   9.700                // Base pointer is not currently live.  Since I stretched
   9.701                // the base pointer to here and it crosses basic-block
   9.702                // boundaries, the global live info is now incorrect.
   9.703 @@ -1721,11 +1879,12 @@
   9.704        } // End of if found a GC point
   9.705  
   9.706        // Make all inputs live
   9.707 -      if( !n->is_Phi() ) {      // Phi function uses come from prior block
   9.708 -        for( uint k = 1; k < n->req(); k++ ) {
   9.709 -          uint lidx = n2lidx(n->in(k));
   9.710 -          if( lidx < _maxlrg )
   9.711 -            liveout.insert( lidx );
   9.712 +      if (!n->is_Phi()) {      // Phi function uses come from prior block
   9.713 +        for (uint k = 1; k < n->req(); k++) {
   9.714 +          uint lidx = _lrg_map.live_range_id(n->in(k));
   9.715 +          if (lidx < _lrg_map.max_lrg_id()) {
   9.716 +            liveout.insert(lidx);
   9.717 +          }
   9.718          }
   9.719        }
   9.720  
   9.721 @@ -1733,11 +1892,12 @@
   9.722      liveout.clear();  // Free the memory used by liveout.
   9.723  
   9.724    } // End of forall blocks
   9.725 -  _maxlrg = maxlrg;
   9.726 +  _lrg_map.set_max_lrg_id(maxlrg);
   9.727  
   9.728    // If I created a new live range I need to recompute live
   9.729 -  if( maxlrg != _ifg->_maxlrg )
   9.730 +  if (maxlrg != _ifg->_maxlrg) {
   9.731      must_recompute_live = true;
   9.732 +  }
   9.733  
   9.734    return must_recompute_live != 0;
   9.735  }
   9.736 @@ -1745,16 +1905,17 @@
   9.737  
   9.738  //------------------------------add_reference----------------------------------
   9.739  // Extend the node to LRG mapping
   9.740 -void PhaseChaitin::add_reference( const Node *node, const Node *old_node ) {
   9.741 -  _names.extend( node->_idx, n2lidx(old_node) );
   9.742 +
   9.743 +void PhaseChaitin::add_reference(const Node *node, const Node *old_node) {
   9.744 +  _lrg_map.extend(node->_idx, _lrg_map.live_range_id(old_node));
   9.745  }
   9.746  
   9.747  //------------------------------dump-------------------------------------------
   9.748  #ifndef PRODUCT
   9.749 -void PhaseChaitin::dump( const Node *n ) const {
   9.750 -  uint r = (n->_idx < _names.Size() ) ? Find_const(n) : 0;
   9.751 +void PhaseChaitin::dump(const Node *n) const {
   9.752 +  uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0;
   9.753    tty->print("L%d",r);
   9.754 -  if( r && n->Opcode() != Op_Phi ) {
   9.755 +  if (r && n->Opcode() != Op_Phi) {
   9.756      if( _node_regs ) {          // Got a post-allocation copy of allocation?
   9.757        tty->print("[");
   9.758        OptoReg::Name second = get_reg_second(n);
   9.759 @@ -1775,11 +1936,13 @@
   9.760    tty->print("/N%d\t",n->_idx);
   9.761    tty->print("%s === ", n->Name());
   9.762    uint k;
   9.763 -  for( k = 0; k < n->req(); k++) {
   9.764 +  for (k = 0; k < n->req(); k++) {
   9.765      Node *m = n->in(k);
   9.766 -    if( !m ) tty->print("_ ");
   9.767 +    if (!m) {
   9.768 +      tty->print("_ ");
   9.769 +    }
   9.770      else {
   9.771 -      uint r = (m->_idx < _names.Size() ) ? Find_const(m) : 0;
   9.772 +      uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
   9.773        tty->print("L%d",r);
   9.774        // Data MultiNode's can have projections with no real registers.
   9.775        // Don't die while dumping them.
   9.776 @@ -1810,8 +1973,10 @@
   9.777    if( k < n->len() && n->in(k) ) tty->print("| ");
   9.778    for( ; k < n->len(); k++ ) {
   9.779      Node *m = n->in(k);
   9.780 -    if( !m ) break;
   9.781 -    uint r = (m->_idx < _names.Size() ) ? Find_const(m) : 0;
   9.782 +    if(!m) {
   9.783 +      break;
   9.784 +    }
   9.785 +    uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
   9.786      tty->print("L%d",r);
   9.787      tty->print("/N%d ",m->_idx);
   9.788    }
   9.789 @@ -1839,7 +2004,7 @@
   9.790      tty->print("{");
   9.791      uint i;
   9.792      while ((i = elements.next()) != 0) {
   9.793 -      tty->print("L%d ", Find_const(i));
   9.794 +      tty->print("L%d ", _lrg_map.find_const(i));
   9.795      }
   9.796      tty->print_cr("}");
   9.797    }
   9.798 @@ -1863,10 +2028,14 @@
   9.799  
   9.800    // Dump LRG array
   9.801    tty->print("--- Live RanGe Array ---\n");
   9.802 -  for(uint i2 = 1; i2 < _maxlrg; i2++ ) {
   9.803 +  for (uint i2 = 1; i2 < _lrg_map.max_lrg_id(); i2++) {
   9.804      tty->print("L%d: ",i2);
   9.805 -    if( i2 < _ifg->_maxlrg ) lrgs(i2).dump( );
   9.806 -    else tty->print_cr("new LRG");
   9.807 +    if (i2 < _ifg->_maxlrg) {
   9.808 +      lrgs(i2).dump();
   9.809 +    }
   9.810 +    else {
   9.811 +      tty->print_cr("new LRG");
   9.812 +    }
   9.813    }
   9.814    tty->print_cr("");
   9.815  
   9.816 @@ -1939,7 +2108,7 @@
   9.817      // Post allocation, use direct mappings, no LRG info available
   9.818      print_reg( get_reg_first(n), this, buf );
   9.819    } else {
   9.820 -    uint lidx = Find_const(n); // Grab LRG number
   9.821 +    uint lidx = _lrg_map.find_const(n); // Grab LRG number
   9.822      if( !_ifg ) {
   9.823        sprintf(buf,"L%d",lidx);  // No register binding yet
   9.824      } else if( !lidx ) {        // Special, not allocated value
   9.825 @@ -1968,7 +2137,7 @@
   9.826    if( WizardMode && (PrintCompilation || PrintOpto) ) {
   9.827      // Display which live ranges need to be split and the allocator's state
   9.828      tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
   9.829 -    for( uint bidx = 1; bidx < _maxlrg; bidx++ ) {
   9.830 +    for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) {
   9.831        if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
   9.832          tty->print("L%d: ", bidx);
   9.833          lrgs(bidx).dump();
   9.834 @@ -2099,14 +2268,17 @@
   9.835  void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
   9.836    tty->print_cr("---dump of L%d---",lidx);
   9.837  
   9.838 -  if( _ifg ) {
   9.839 -    if( lidx >= _maxlrg ) {
   9.840 +  if (_ifg) {
   9.841 +    if (lidx >= _lrg_map.max_lrg_id()) {
   9.842        tty->print("Attempt to print live range index beyond max live range.\n");
   9.843        return;
   9.844      }
   9.845      tty->print("L%d: ",lidx);
   9.846 -    if( lidx < _ifg->_maxlrg ) lrgs(lidx).dump( );
   9.847 -    else tty->print_cr("new LRG");
   9.848 +    if (lidx < _ifg->_maxlrg) {
   9.849 +      lrgs(lidx).dump();
   9.850 +    } else {
   9.851 +      tty->print_cr("new LRG");
   9.852 +    }
   9.853    }
   9.854    if( _ifg && lidx < _ifg->_maxlrg) {
   9.855      tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx));
   9.856 @@ -2121,8 +2293,8 @@
   9.857      // For all instructions
   9.858      for( uint j = 0; j < b->_nodes.size(); j++ ) {
   9.859        Node *n = b->_nodes[j];
   9.860 -      if( Find_const(n) == lidx ) {
   9.861 -        if( !dump_once++ ) {
   9.862 +      if (_lrg_map.find_const(n) == lidx) {
   9.863 +        if (!dump_once++) {
   9.864            tty->cr();
   9.865            b->dump_head( &_cfg._bbs );
   9.866          }
   9.867 @@ -2133,11 +2305,13 @@
   9.868          uint cnt = n->req();
   9.869          for( uint k = 1; k < cnt; k++ ) {
   9.870            Node *m = n->in(k);
   9.871 -          if (!m)  continue;  // be robust in the dumper
   9.872 -          if( Find_const(m) == lidx ) {
   9.873 -            if( !dump_once++ ) {
   9.874 +          if (!m)  {
   9.875 +            continue;  // be robust in the dumper
   9.876 +          }
   9.877 +          if (_lrg_map.find_const(m) == lidx) {
   9.878 +            if (!dump_once++) {
   9.879                tty->cr();
   9.880 -              b->dump_head( &_cfg._bbs );
   9.881 +              b->dump_head(&_cfg._bbs);
   9.882              }
   9.883              dump(n);
   9.884            }
    10.1 --- a/src/share/vm/opto/chaitin.hpp	Mon Apr 15 16:20:05 2013 -0700
    10.2 +++ b/src/share/vm/opto/chaitin.hpp	Tue Apr 16 10:08:41 2013 +0200
    10.3 @@ -265,18 +265,118 @@
    10.4    int effective_degree( uint lidx ) const;
    10.5  };
    10.6  
    10.7 -// TEMPORARILY REPLACED WITH COMMAND LINE FLAG
    10.8 +// The LiveRangeMap class is responsible for storing node to live range id mapping.
    10.9 +// Each node is mapped to a live range id (a virtual register). Nodes that are
   10.10 +// not considered for register allocation are given live range id 0.
   10.11 +class LiveRangeMap VALUE_OBJ_CLASS_SPEC {
   10.12  
   10.13 -//// !!!!! Magic Constants need to move into ad file
   10.14 -#ifdef SPARC
   10.15 -//#define FLOAT_PRESSURE 30  /*     SFLT_REG_mask.Size() - 1 */
   10.16 -//#define INT_PRESSURE   23  /* NOTEMP_I_REG_mask.Size() - 1 */
   10.17 -#define FLOAT_INCREMENT(regs) regs
   10.18 -#else
   10.19 -//#define FLOAT_PRESSURE 6
   10.20 -//#define INT_PRESSURE   6
   10.21 -#define FLOAT_INCREMENT(regs) 1
   10.22 -#endif
   10.23 +private:
   10.24 +
   10.25 +  uint _max_lrg_id;
   10.26 +
   10.27 +  // Union-find map.  Declared as a short for speed.
   10.28 +  // Indexed by live-range number, it returns the compacted live-range number
   10.29 +  LRG_List _uf_map;
   10.30 +
   10.31 +  // Map from Nodes to live ranges
   10.32 +  LRG_List _names;
   10.33 +
   10.34 +  // Straight out of Tarjan's union-find algorithm
   10.35 +  uint find_compress(const Node *node) {
   10.36 +    uint lrg_id = find_compress(_names[node->_idx]);
   10.37 +    _names.map(node->_idx, lrg_id);
   10.38 +    return lrg_id;
   10.39 +  }
   10.40 +
   10.41 +  uint find_compress(uint lrg);
   10.42 +
   10.43 +public:
   10.44 +
   10.45 +  const LRG_List& names() {
   10.46 +    return _names;
   10.47 +  }
   10.48 +
   10.49 +  uint max_lrg_id() const {
   10.50 +    return _max_lrg_id;
   10.51 +  }
   10.52 +
   10.53 +  void set_max_lrg_id(uint max_lrg_id) {
   10.54 +    _max_lrg_id = max_lrg_id;
   10.55 +  }
   10.56 +
   10.57 +  uint size() const {
   10.58 +    return _names.Size();
   10.59 +  }
   10.60 +
   10.61 +  uint live_range_id(uint idx) const {
   10.62 +    return _names[idx];
   10.63 +  }
   10.64 +
   10.65 +  uint live_range_id(const Node *node) const {
   10.66 +    return _names[node->_idx];
   10.67 +  }
   10.68 +
   10.69 +  uint uf_live_range_id(uint lrg_id) const {
   10.70 +    return _uf_map[lrg_id];
   10.71 +  }
   10.72 +
   10.73 +  void map(uint idx, uint lrg_id) {
   10.74 +    _names.map(idx, lrg_id);
   10.75 +  }
   10.76 +
   10.77 +  void uf_map(uint dst_lrg_id, uint src_lrg_id) {
   10.78 +    _uf_map.map(dst_lrg_id, src_lrg_id);
   10.79 +  }
   10.80 +
   10.81 +  void extend(uint idx, uint lrg_id) {
   10.82 +    _names.extend(idx, lrg_id);
   10.83 +  }
   10.84 +
   10.85 +  void uf_extend(uint dst_lrg_id, uint src_lrg_id) {
   10.86 +    _uf_map.extend(dst_lrg_id, src_lrg_id);
   10.87 +  }
   10.88 +
   10.89 +  LiveRangeMap(uint unique)
   10.90 +  : _names(unique)
   10.91 +  , _uf_map(unique)
   10.92 +  , _max_lrg_id(0) {}
   10.93 +
   10.94 +  uint find_id( const Node *n ) {
   10.95 +    uint retval = live_range_id(n);
   10.96 +    assert(retval == find(n),"Invalid node to lidx mapping");
   10.97 +    return retval;
   10.98 +  }
   10.99 +
  10.100 +  // Reset the Union-Find map to identity
  10.101 +  void reset_uf_map(uint max_lrg_id);
  10.102 +
  10.103 +  // Make all Nodes map directly to their final live range; no need for
  10.104 +  // the Union-Find mapping after this call.
  10.105 +  void compress_uf_map_for_nodes();
  10.106 +
  10.107 +  uint find(uint lidx) {
  10.108 +    uint uf_lidx = _uf_map[lidx];
  10.109 +    return (uf_lidx == lidx) ? uf_lidx : find_compress(lidx);
  10.110 +  }
  10.111 +
  10.112 +  // Convert a Node into a Live Range Index - a lidx
  10.113 +  uint find(const Node *node) {
  10.114 +    uint lidx = live_range_id(node);
  10.115 +    uint uf_lidx = _uf_map[lidx];
  10.116 +    return (uf_lidx == lidx) ? uf_lidx : find_compress(node);
  10.117 +  }
  10.118 +
  10.119 +  // Like Find above, but no path compress, so bad asymptotic behavior
  10.120 +  uint find_const(uint lrg) const;
  10.121 +
  10.122 +  // Like Find above, but no path compress, so bad asymptotic behavior
  10.123 +  uint find_const(const Node *node) const {
  10.124 +    if(node->_idx >= _names.Size()) {
  10.125 +      return 0; // not mapped, usual for debug dump
  10.126 +    }
  10.127 +    return find_const(_names[node->_idx]);
  10.128 +  }
  10.129 +};
  10.130  
  10.131  //------------------------------Chaitin----------------------------------------
  10.132  // Briggs-Chaitin style allocation, mostly.
  10.133 @@ -286,7 +386,6 @@
  10.134    int _trip_cnt;
  10.135    int _alternate;
  10.136  
  10.137 -  uint _maxlrg;                 // Max live range number
  10.138    LRG &lrgs(uint idx) const { return _ifg->lrgs(idx); }
  10.139    PhaseLive *_live;             // Liveness, used in the interference graph
  10.140    PhaseIFG *_ifg;               // Interference graph (for original chunk)
  10.141 @@ -294,16 +393,6 @@
  10.142    VectorSet _spilled_once;      // Nodes that have been spilled
  10.143    VectorSet _spilled_twice;     // Nodes that have been spilled twice
  10.144  
  10.145 -  LRG_List _names;              // Map from Nodes to Live RanGes
  10.146 -
  10.147 -  // Union-find map.  Declared as a short for speed.
  10.148 -  // Indexed by live-range number, it returns the compacted live-range number
  10.149 -  LRG_List _uf_map;
  10.150 -  // Reset the Union-Find map to identity
  10.151 -  void reset_uf_map( uint maxlrg );
  10.152 -  // Remove the need for the Union-Find mapping
  10.153 -  void compress_uf_map_for_nodes( );
  10.154 -
  10.155    // Combine the Live Range Indices for these 2 Nodes into a single live
  10.156    // range.  Future requests for any Node in either live range will
  10.157    // return the live range index for the combined live range.
  10.158 @@ -322,7 +411,34 @@
  10.159    // Helper functions for Split()
  10.160    uint split_DEF( Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx );
  10.161    uint split_USE( Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx );
  10.162 -  int clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg );
  10.163 +
  10.164 +  bool clone_projs(Block *b, uint idx, Node *con, Node *copy, LiveRangeMap &lrg_map) {
  10.165 +    bool found_projs = clone_projs_shared(b, idx, con, copy, lrg_map.max_lrg_id());
  10.166 +
  10.167 +    if(found_projs) {
  10.168 +      uint max_lrg_id = lrg_map.max_lrg_id();
  10.169 +      lrg_map.set_max_lrg_id(max_lrg_id + 1);
  10.170 +    }
  10.171 +
  10.172 +    return found_projs;
  10.173 +  }
  10.174 +
  10.175 +  //------------------------------clone_projs------------------------------------
  10.176 +  // After cloning some rematerialized instruction, clone any MachProj's that
  10.177 +  // follow it.  Example: Intel zero is XOR, kills flags.  Sparc FP constants
  10.178 +  // use G3 as an address temp.
  10.179 +  bool clone_projs(Block *b, uint idx, Node *con, Node *copy, uint &max_lrg_id) {
  10.180 +    bool found_projs = clone_projs_shared(b, idx, con, copy, max_lrg_id);
  10.181 +
  10.182 +    if(found_projs) {
  10.183 +      max_lrg_id++;
  10.184 +    }
  10.185 +
  10.186 +    return found_projs;
  10.187 +  }
  10.188 +
  10.189 +  bool clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id);
  10.190 +
  10.191    Node *split_Rematerialize(Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits,
  10.192                              int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru);
  10.193    // True if lidx is used before any real register is def'd in the block
  10.194 @@ -349,20 +465,11 @@
  10.195    PhaseChaitin( uint unique, PhaseCFG &cfg, Matcher &matcher );
  10.196    ~PhaseChaitin() {}
  10.197  
  10.198 -  // Convert a Node into a Live Range Index - a lidx
  10.199 -  uint Find( const Node *n ) {
  10.200 -    uint lidx = n2lidx(n);
  10.201 -    uint uf_lidx = _uf_map[lidx];
  10.202 -    return (uf_lidx == lidx) ? uf_lidx : Find_compress(n);
  10.203 -  }
  10.204 -  uint Find_const( uint lrg ) const;
  10.205 -  uint Find_const( const Node *n ) const;
  10.206 +  LiveRangeMap _lrg_map;
  10.207  
  10.208    // Do all the real work of allocate
  10.209    void Register_Allocate();
  10.210  
  10.211 -  uint n2lidx( const Node *n ) const { return _names[n->_idx]; }
  10.212 -
  10.213    float high_frequency_lrg() const { return _high_frequency_lrg; }
  10.214  
  10.215  #ifndef PRODUCT
  10.216 @@ -374,18 +481,6 @@
  10.217    // all inputs to a PhiNode, effectively coalescing live ranges.  Insert
  10.218    // copies as needed.
  10.219    void de_ssa();
  10.220 -  uint Find_compress( const Node *n );
  10.221 -  uint Find( uint lidx ) {
  10.222 -    uint uf_lidx = _uf_map[lidx];
  10.223 -    return (uf_lidx == lidx) ? uf_lidx : Find_compress(lidx);
  10.224 -  }
  10.225 -  uint Find_compress( uint lidx );
  10.226 -
  10.227 -  uint Find_id( const Node *n ) {
  10.228 -    uint retval = n2lidx(n);
  10.229 -    assert(retval == Find(n),"Invalid node to lidx mapping");
  10.230 -    return retval;
  10.231 -  }
  10.232  
  10.233    // Add edge between reg and everything in the vector.
  10.234    // Same as _ifg->add_vector(reg,live) EXCEPT use the RegMask
    11.1 --- a/src/share/vm/opto/coalesce.cpp	Mon Apr 15 16:20:05 2013 -0700
    11.2 +++ b/src/share/vm/opto/coalesce.cpp	Tue Apr 16 10:08:41 2013 +0200
    11.3 @@ -35,159 +35,11 @@
    11.4  #include "opto/regmask.hpp"
    11.5  
    11.6  //=============================================================================
    11.7 -//------------------------------reset_uf_map-----------------------------------
    11.8 -void PhaseChaitin::reset_uf_map( uint maxlrg ) {
    11.9 -  _maxlrg = maxlrg;
   11.10 -  // Force the Union-Find mapping to be at least this large
   11.11 -  _uf_map.extend(_maxlrg,0);
   11.12 -  // Initialize it to be the ID mapping.
   11.13 -  for( uint i=0; i<_maxlrg; i++ )
   11.14 -    _uf_map.map(i,i);
   11.15 -}
   11.16 -
   11.17 -//------------------------------compress_uf_map--------------------------------
   11.18 -// Make all Nodes map directly to their final live range; no need for
   11.19 -// the Union-Find mapping after this call.
   11.20 -void PhaseChaitin::compress_uf_map_for_nodes( ) {
   11.21 -  // For all Nodes, compress mapping
   11.22 -  uint unique = _names.Size();
   11.23 -  for( uint i=0; i<unique; i++ ) {
   11.24 -    uint lrg = _names[i];
   11.25 -    uint compressed_lrg = Find(lrg);
   11.26 -    if( lrg != compressed_lrg )
   11.27 -      _names.map(i,compressed_lrg);
   11.28 -  }
   11.29 -}
   11.30 -
   11.31 -//------------------------------Find-------------------------------------------
   11.32 -// Straight out of Tarjan's union-find algorithm
   11.33 -uint PhaseChaitin::Find_compress( uint lrg ) {
   11.34 -  uint cur = lrg;
   11.35 -  uint next = _uf_map[cur];
   11.36 -  while( next != cur ) {        // Scan chain of equivalences
   11.37 -    assert( next < cur, "always union smaller" );
   11.38 -    cur = next;                 // until find a fixed-point
   11.39 -    next = _uf_map[cur];
   11.40 -  }
   11.41 -  // Core of union-find algorithm: update chain of
   11.42 -  // equivalences to be equal to the root.
   11.43 -  while( lrg != next ) {
   11.44 -    uint tmp = _uf_map[lrg];
   11.45 -    _uf_map.map(lrg, next);
   11.46 -    lrg = tmp;
   11.47 -  }
   11.48 -  return lrg;
   11.49 -}
   11.50 -
   11.51 -//------------------------------Find-------------------------------------------
   11.52 -// Straight out of Tarjan's union-find algorithm
   11.53 -uint PhaseChaitin::Find_compress( const Node *n ) {
   11.54 -  uint lrg = Find_compress(_names[n->_idx]);
   11.55 -  _names.map(n->_idx,lrg);
   11.56 -  return lrg;
   11.57 -}
   11.58 -
   11.59 -//------------------------------Find_const-------------------------------------
   11.60 -// Like Find above, but no path compress, so bad asymptotic behavior
   11.61 -uint PhaseChaitin::Find_const( uint lrg ) const {
   11.62 -  if( !lrg ) return lrg;        // Ignore the zero LRG
   11.63 -  // Off the end?  This happens during debugging dumps when you got
   11.64 -  // brand new live ranges but have not told the allocator yet.
   11.65 -  if( lrg >= _maxlrg ) return lrg;
   11.66 -  uint next = _uf_map[lrg];
   11.67 -  while( next != lrg ) {        // Scan chain of equivalences
   11.68 -    assert( next < lrg, "always union smaller" );
   11.69 -    lrg = next;                 // until find a fixed-point
   11.70 -    next = _uf_map[lrg];
   11.71 -  }
   11.72 -  return next;
   11.73 -}
   11.74 -
   11.75 -//------------------------------Find-------------------------------------------
   11.76 -// Like Find above, but no path compress, so bad asymptotic behavior
   11.77 -uint PhaseChaitin::Find_const( const Node *n ) const {
   11.78 -  if( n->_idx >= _names.Size() ) return 0; // not mapped, usual for debug dump
   11.79 -  return Find_const( _names[n->_idx] );
   11.80 -}
   11.81 -
   11.82 -//------------------------------Union------------------------------------------
   11.83 -// union 2 sets together.
   11.84 -void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) {
   11.85 -  uint src = Find(src_n);
   11.86 -  uint dst = Find(dst_n);
   11.87 -  assert( src, "" );
   11.88 -  assert( dst, "" );
   11.89 -  assert( src < _maxlrg, "oob" );
   11.90 -  assert( dst < _maxlrg, "oob" );
   11.91 -  assert( src < dst, "always union smaller" );
   11.92 -  _uf_map.map(dst,src);
   11.93 -}
   11.94 -
   11.95 -//------------------------------new_lrg----------------------------------------
   11.96 -void PhaseChaitin::new_lrg( const Node *x, uint lrg ) {
   11.97 -  // Make the Node->LRG mapping
   11.98 -  _names.extend(x->_idx,lrg);
   11.99 -  // Make the Union-Find mapping an identity function
  11.100 -  _uf_map.extend(lrg,lrg);
  11.101 -}
  11.102 -
  11.103 -//------------------------------clone_projs------------------------------------
  11.104 -// After cloning some rematerialized instruction, clone any MachProj's that
  11.105 -// follow it.  Example: Intel zero is XOR, kills flags.  Sparc FP constants
  11.106 -// use G3 as an address temp.
  11.107 -int PhaseChaitin::clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg ) {
  11.108 -  Block *bcon = _cfg._bbs[con->_idx];
  11.109 -  uint cindex = bcon->find_node(con);
  11.110 -  Node *con_next = bcon->_nodes[cindex+1];
  11.111 -  if( con_next->in(0) != con || !con_next->is_MachProj() )
  11.112 -    return false;               // No MachProj's follow
  11.113 -
  11.114 -  // Copy kills after the cloned constant
  11.115 -  Node *kills = con_next->clone();
  11.116 -  kills->set_req( 0, copy );
  11.117 -  b->_nodes.insert( idx, kills );
  11.118 -  _cfg._bbs.map( kills->_idx, b );
  11.119 -  new_lrg( kills, maxlrg++ );
  11.120 -  return true;
  11.121 -}
  11.122 -
  11.123 -//------------------------------compact----------------------------------------
  11.124 -// Renumber the live ranges to compact them.  Makes the IFG smaller.
  11.125 -void PhaseChaitin::compact() {
  11.126 -  // Current the _uf_map contains a series of short chains which are headed
  11.127 -  // by a self-cycle.  All the chains run from big numbers to little numbers.
  11.128 -  // The Find() call chases the chains & shortens them for the next Find call.
  11.129 -  // We are going to change this structure slightly.  Numbers above a moving
  11.130 -  // wave 'i' are unchanged.  Numbers below 'j' point directly to their
  11.131 -  // compacted live range with no further chaining.  There are no chains or
  11.132 -  // cycles below 'i', so the Find call no longer works.
  11.133 -  uint j=1;
  11.134 -  uint i;
  11.135 -  for( i=1; i < _maxlrg; i++ ) {
  11.136 -    uint lr = _uf_map[i];
  11.137 -    // Ignore unallocated live ranges
  11.138 -    if( !lr ) continue;
  11.139 -    assert( lr <= i, "" );
  11.140 -    _uf_map.map(i, ( lr == i ) ? j++ : _uf_map[lr]);
  11.141 -  }
  11.142 -  if( false )                  // PrintOptoCompactLiveRanges
  11.143 -    printf("Compacted %d LRs from %d\n",i-j,i);
  11.144 -  // Now change the Node->LR mapping to reflect the compacted names
  11.145 -  uint unique = _names.Size();
  11.146 -  for( i=0; i<unique; i++ )
  11.147 -    _names.map(i,_uf_map[_names[i]]);
  11.148 -
  11.149 -  // Reset the Union-Find mapping
  11.150 -  reset_uf_map(j);
  11.151 -
  11.152 -}
  11.153 -
  11.154 -//=============================================================================
  11.155  //------------------------------Dump-------------------------------------------
  11.156  #ifndef PRODUCT
  11.157 -void PhaseCoalesce::dump( Node *n ) const {
  11.158 +void PhaseCoalesce::dump(Node *n) const {
  11.159    // Being a const function means I cannot use 'Find'
  11.160 -  uint r = _phc.Find(n);
  11.161 +  uint r = _phc._lrg_map.find(n);
  11.162    tty->print("L%d/N%d ",r,n->_idx);
  11.163  }
  11.164  
  11.165 @@ -235,9 +87,9 @@
  11.166  
  11.167  //------------------------------combine_these_two------------------------------
  11.168  // Combine the live ranges def'd by these 2 Nodes.  N2 is an input to N1.
  11.169 -void PhaseCoalesce::combine_these_two( Node *n1, Node *n2 ) {
  11.170 -  uint lr1 = _phc.Find(n1);
  11.171 -  uint lr2 = _phc.Find(n2);
  11.172 +void PhaseCoalesce::combine_these_two(Node *n1, Node *n2) {
  11.173 +  uint lr1 = _phc._lrg_map.find(n1);
  11.174 +  uint lr2 = _phc._lrg_map.find(n2);
  11.175    if( lr1 != lr2 &&             // Different live ranges already AND
  11.176        !_phc._ifg->test_edge_sq( lr1, lr2 ) ) {  // Do not interfere
  11.177      LRG *lrg1 = &_phc.lrgs(lr1);
  11.178 @@ -306,14 +158,18 @@
  11.179    // I am about to clobber the dst_name, so the copy must be inserted
  11.180    // after the last use.  Last use is really first-use on a backwards scan.
  11.181    uint i = b->end_idx()-1;
  11.182 -  while( 1 ) {
  11.183 +  while(1) {
  11.184      Node *n = b->_nodes[i];
  11.185      // Check for end of virtual copies; this is also the end of the
  11.186      // parallel renaming effort.
  11.187 -    if( n->_idx < _unique ) break;
  11.188 +    if (n->_idx < _unique) {
  11.189 +      break;
  11.190 +    }
  11.191      uint idx = n->is_Copy();
  11.192      assert( idx || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" );
  11.193 -    if( idx && _phc.Find(n->in(idx)) == dst_name ) break;
  11.194 +    if (idx && _phc._lrg_map.find(n->in(idx)) == dst_name) {
  11.195 +      break;
  11.196 +    }
  11.197      i--;
  11.198    }
  11.199    uint last_use_idx = i;
  11.200 @@ -324,24 +180,29 @@
  11.201    // There can be only 1 kill that exits any block and that is
  11.202    // the last kill.  Thus it is the first kill on a backwards scan.
  11.203    i = b->end_idx()-1;
  11.204 -  while( 1 ) {
  11.205 +  while (1) {
  11.206      Node *n = b->_nodes[i];
  11.207      // Check for end of virtual copies; this is also the end of the
  11.208      // parallel renaming effort.
  11.209 -    if( n->_idx < _unique ) break;
  11.210 +    if (n->_idx < _unique) {
  11.211 +      break;
  11.212 +    }
  11.213      assert( n->is_Copy() || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" );
  11.214 -    if( _phc.Find(n) == src_name ) {
  11.215 +    if (_phc._lrg_map.find(n) == src_name) {
  11.216        kill_src_idx = i;
  11.217        break;
  11.218      }
  11.219      i--;
  11.220    }
  11.221    // Need a temp?  Last use of dst comes after the kill of src?
  11.222 -  if( last_use_idx >= kill_src_idx ) {
  11.223 +  if (last_use_idx >= kill_src_idx) {
  11.224      // Need to break a cycle with a temp
  11.225      uint idx = copy->is_Copy();
  11.226      Node *tmp = copy->clone();
  11.227 -    _phc.new_lrg(tmp,_phc._maxlrg++);
  11.228 +    uint max_lrg_id = _phc._lrg_map.max_lrg_id();
  11.229 +    _phc.new_lrg(tmp, max_lrg_id);
  11.230 +    _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1);
  11.231 +
  11.232      // Insert new temp between copy and source
  11.233      tmp ->set_req(idx,copy->in(idx));
  11.234      copy->set_req(idx,tmp);
  11.235 @@ -359,14 +220,14 @@
  11.236  void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
  11.237    // We do LRGs compressing and fix a liveout data only here since the other
  11.238    // place in Split() is guarded by the assert which we never hit.
  11.239 -  _phc.compress_uf_map_for_nodes();
  11.240 +  _phc._lrg_map.compress_uf_map_for_nodes();
  11.241    // Fix block's liveout data for compressed live ranges.
  11.242 -  for(uint lrg = 1; lrg < _phc._maxlrg; lrg++ ) {
  11.243 -    uint compressed_lrg = _phc.Find(lrg);
  11.244 -    if( lrg != compressed_lrg ) {
  11.245 -      for( uint bidx = 0; bidx < _phc._cfg._num_blocks; bidx++ ) {
  11.246 +  for (uint lrg = 1; lrg < _phc._lrg_map.max_lrg_id(); lrg++) {
  11.247 +    uint compressed_lrg = _phc._lrg_map.find(lrg);
  11.248 +    if (lrg != compressed_lrg) {
  11.249 +      for (uint bidx = 0; bidx < _phc._cfg._num_blocks; bidx++) {
  11.250          IndexSet *liveout = _phc._live->live(_phc._cfg._blocks[bidx]);
  11.251 -        if( liveout->member(lrg) ) {
  11.252 +        if (liveout->member(lrg)) {
  11.253            liveout->remove(lrg);
  11.254            liveout->insert(compressed_lrg);
  11.255          }
  11.256 @@ -392,8 +253,9 @@
  11.257          uint cidx = copy->is_Copy();
  11.258          if( cidx ) {
  11.259            Node *def = copy->in(cidx);
  11.260 -          if( _phc.Find(copy) == _phc.Find(def) )
  11.261 -            n->set_req(k,def);
  11.262 +          if (_phc._lrg_map.find(copy) == _phc._lrg_map.find(def)) {
  11.263 +            n->set_req(k, def);
  11.264 +          }
  11.265          }
  11.266        }
  11.267  
  11.268 @@ -401,7 +263,7 @@
  11.269        uint cidx = n->is_Copy();
  11.270        if( cidx ) {
  11.271          Node *def = n->in(cidx);
  11.272 -        if( _phc.Find(n) == _phc.Find(def) ) {
  11.273 +        if (_phc._lrg_map.find(n) == _phc._lrg_map.find(def)) {
  11.274            n->replace_by(def);
  11.275            n->set_req(cidx,NULL);
  11.276            b->_nodes.remove(l);
  11.277 @@ -410,16 +272,18 @@
  11.278          }
  11.279        }
  11.280  
  11.281 -      if( n->is_Phi() ) {
  11.282 +      if (n->is_Phi()) {
  11.283          // Get the chosen name for the Phi
  11.284 -        uint phi_name = _phc.Find( n );
  11.285 +        uint phi_name = _phc._lrg_map.find(n);
  11.286          // Ignore the pre-allocated specials
  11.287 -        if( !phi_name ) continue;
  11.288 +        if (!phi_name) {
  11.289 +          continue;
  11.290 +        }
  11.291          // Check for mismatch inputs to Phi
  11.292 -        for( uint j = 1; j<cnt; j++ ) {
  11.293 +        for (uint j = 1; j < cnt; j++) {
  11.294            Node *m = n->in(j);
  11.295 -          uint src_name = _phc.Find(m);
  11.296 -          if( src_name != phi_name ) {
  11.297 +          uint src_name = _phc._lrg_map.find(m);
  11.298 +          if (src_name != phi_name) {
  11.299              Block *pred = _phc._cfg._bbs[b->pred(j)->_idx];
  11.300              Node *copy;
  11.301              assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
  11.302 @@ -430,18 +294,18 @@
  11.303                // Insert the copy in the predecessor basic block
  11.304                pred->add_inst(copy);
  11.305                // Copy any flags as well
  11.306 -              _phc.clone_projs( pred, pred->end_idx(), m, copy, _phc._maxlrg );
  11.307 +              _phc.clone_projs(pred, pred->end_idx(), m, copy, _phc._lrg_map);
  11.308              } else {
  11.309                const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
  11.310 -              copy = new (C) MachSpillCopyNode(m,*rm,*rm);
  11.311 +              copy = new (C) MachSpillCopyNode(m, *rm, *rm);
  11.312                // Find a good place to insert.  Kinda tricky, use a subroutine
  11.313                insert_copy_with_overlap(pred,copy,phi_name,src_name);
  11.314              }
  11.315              // Insert the copy in the use-def chain
  11.316 -            n->set_req( j, copy );
  11.317 +            n->set_req(j, copy);
  11.318              _phc._cfg._bbs.map( copy->_idx, pred );
  11.319              // Extend ("register allocate") the names array for the copy.
  11.320 -            _phc._names.extend( copy->_idx, phi_name );
  11.321 +            _phc._lrg_map.extend(copy->_idx, phi_name);
  11.322            } // End of if Phi names do not match
  11.323          } // End of for all inputs to Phi
  11.324        } else { // End of if Phi
  11.325 @@ -450,39 +314,40 @@
  11.326          uint idx;
  11.327          if( n->is_Mach() && (idx=n->as_Mach()->two_adr()) ) {
  11.328            // Get the chosen name for the Node
  11.329 -          uint name = _phc.Find( n );
  11.330 -          assert( name, "no 2-address specials" );
  11.331 +          uint name = _phc._lrg_map.find(n);
  11.332 +          assert (name, "no 2-address specials");
  11.333            // Check for name mis-match on the 2-address input
  11.334            Node *m = n->in(idx);
  11.335 -          if( _phc.Find(m) != name ) {
  11.336 +          if (_phc._lrg_map.find(m) != name) {
  11.337              Node *copy;
  11.338              assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
  11.339              // At this point it is unsafe to extend live ranges (6550579).
  11.340              // Rematerialize only constants as we do for Phi above.
  11.341 -            if( m->is_Mach() && m->as_Mach()->is_Con() &&
  11.342 -                m->as_Mach()->rematerialize() ) {
  11.343 +            if(m->is_Mach() && m->as_Mach()->is_Con() &&
  11.344 +               m->as_Mach()->rematerialize()) {
  11.345                copy = m->clone();
  11.346                // Insert the copy in the basic block, just before us
  11.347 -              b->_nodes.insert( l++, copy );
  11.348 -              if( _phc.clone_projs( b, l, m, copy, _phc._maxlrg ) )
  11.349 +              b->_nodes.insert(l++, copy);
  11.350 +              if(_phc.clone_projs(b, l, m, copy, _phc._lrg_map)) {
  11.351                  l++;
  11.352 +              }
  11.353              } else {
  11.354                const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
  11.355 -              copy = new (C) MachSpillCopyNode( m, *rm, *rm );
  11.356 +              copy = new (C) MachSpillCopyNode(m, *rm, *rm);
  11.357                // Insert the copy in the basic block, just before us
  11.358 -              b->_nodes.insert( l++, copy );
  11.359 +              b->_nodes.insert(l++, copy);
  11.360              }
  11.361              // Insert the copy in the use-def chain
  11.362 -            n->set_req(idx, copy );
  11.363 +            n->set_req(idx, copy);
  11.364              // Extend ("register allocate") the names array for the copy.
  11.365 -            _phc._names.extend( copy->_idx, name );
  11.366 +            _phc._lrg_map.extend(copy->_idx, name);
  11.367              _phc._cfg._bbs.map( copy->_idx, b );
  11.368            }
  11.369  
  11.370          } // End of is two-adr
  11.371  
  11.372          // Insert a copy at a debug use for a lrg which has high frequency
  11.373 -        if( b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs) ) {
  11.374 +        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs)) {
  11.375            // Walk the debug inputs to the node and check for lrg freq
  11.376            JVMState* jvms = n->jvms();
  11.377            uint debug_start = jvms ? jvms->debug_start() : 999999;
  11.378 @@ -490,9 +355,11 @@
  11.379            for(uint inpidx = debug_start; inpidx < debug_end; inpidx++) {
  11.380              // Do not split monitors; they are only needed for debug table
  11.381              // entries and need no code.
  11.382 -            if( jvms->is_monitor_use(inpidx) ) continue;
  11.383 +            if (jvms->is_monitor_use(inpidx)) {
  11.384 +              continue;
  11.385 +            }
  11.386              Node *inp = n->in(inpidx);
  11.387 -            uint nidx = _phc.n2lidx(inp);
  11.388 +            uint nidx = _phc._lrg_map.live_range_id(inp);
  11.389              LRG &lrg = lrgs(nidx);
  11.390  
  11.391              // If this lrg has a high frequency use/def
  11.392 @@ -519,8 +386,10 @@
  11.393                // Insert the copy in the basic block, just before us
  11.394                b->_nodes.insert( l++, copy );
  11.395                // Extend ("register allocate") the names array for the copy.
  11.396 -              _phc.new_lrg( copy, _phc._maxlrg++ );
  11.397 -              _phc._cfg._bbs.map( copy->_idx, b );
  11.398 +              uint max_lrg_id = _phc._lrg_map.max_lrg_id();
  11.399 +              _phc.new_lrg(copy, max_lrg_id);
  11.400 +              _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1);
  11.401 +              _phc._cfg._bbs.map(copy->_idx, b);
  11.402                //tty->print_cr("Split a debug use in Aggressive Coalesce");
  11.403              }  // End of if high frequency use/def
  11.404            }  // End of for all debug inputs
  11.405 @@ -583,17 +452,17 @@
  11.406      uint idx;
  11.407      // 2-address instructions have a virtual Copy matching their input
  11.408      // to their output
  11.409 -    if( n->is_Mach() && (idx = n->as_Mach()->two_adr()) ) {
  11.410 +    if (n->is_Mach() && (idx = n->as_Mach()->two_adr())) {
  11.411        MachNode *mach = n->as_Mach();
  11.412 -      combine_these_two( mach, mach->in(idx) );
  11.413 +      combine_these_two(mach, mach->in(idx));
  11.414      }
  11.415    } // End of for all instructions in block
  11.416  }
  11.417  
  11.418  //=============================================================================
  11.419  //------------------------------PhaseConservativeCoalesce----------------------
  11.420 -PhaseConservativeCoalesce::PhaseConservativeCoalesce( PhaseChaitin &chaitin ) : PhaseCoalesce(chaitin) {
  11.421 -  _ulr.initialize(_phc._maxlrg);
  11.422 +PhaseConservativeCoalesce::PhaseConservativeCoalesce(PhaseChaitin &chaitin) : PhaseCoalesce(chaitin) {
  11.423 +  _ulr.initialize(_phc._lrg_map.max_lrg_id());
  11.424  }
  11.425  
  11.426  //------------------------------verify-----------------------------------------
  11.427 @@ -673,10 +542,14 @@
  11.428        // Else work back one in copy chain
  11.429        prev_copy = prev_copy->in(prev_copy->is_Copy());
  11.430      } else {                    // Else collect interferences
  11.431 -      uint lidx = _phc.Find(x);
  11.432 +      uint lidx = _phc._lrg_map.find(x);
  11.433        // Found another def of live-range being stretched?
  11.434 -      if( lidx == lr1 ) return max_juint;
  11.435 -      if( lidx == lr2 ) return max_juint;
  11.436 +      if(lidx == lr1) {
  11.437 +        return max_juint;
  11.438 +      }
  11.439 +      if(lidx == lr2) {
  11.440 +        return max_juint;
  11.441 +      }
  11.442  
  11.443        // If we attempt to coalesce across a bound def
  11.444        if( lrgs(lidx).is_bound() ) {
  11.445 @@ -751,33 +624,43 @@
  11.446  // See if I can coalesce a series of multiple copies together.  I need the
  11.447  // final dest copy and the original src copy.  They can be the same Node.
  11.448  // Compute the compatible register masks.
  11.449 -bool PhaseConservativeCoalesce::copy_copy( Node *dst_copy, Node *src_copy, Block *b, uint bindex ) {
  11.450 +bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block *b, uint bindex) {
  11.451  
  11.452 -  if( !dst_copy->is_SpillCopy() ) return false;
  11.453 -  if( !src_copy->is_SpillCopy() ) return false;
  11.454 +  if (!dst_copy->is_SpillCopy()) {
  11.455 +    return false;
  11.456 +  }
  11.457 +  if (!src_copy->is_SpillCopy()) {
  11.458 +    return false;
  11.459 +  }
  11.460    Node *src_def = src_copy->in(src_copy->is_Copy());
  11.461 -  uint lr1 = _phc.Find(dst_copy);
  11.462 -  uint lr2 = _phc.Find(src_def );
  11.463 +  uint lr1 = _phc._lrg_map.find(dst_copy);
  11.464 +  uint lr2 = _phc._lrg_map.find(src_def);
  11.465  
  11.466    // Same live ranges already?
  11.467 -  if( lr1 == lr2 ) return false;
  11.468 +  if (lr1 == lr2) {
  11.469 +    return false;
  11.470 +  }
  11.471  
  11.472    // Interfere?
  11.473 -  if( _phc._ifg->test_edge_sq( lr1, lr2 ) ) return false;
  11.474 +  if (_phc._ifg->test_edge_sq(lr1, lr2)) {
  11.475 +    return false;
  11.476 +  }
  11.477  
  11.478    // Not an oop->int cast; oop->oop, int->int, AND int->oop are OK.
  11.479 -  if( !lrgs(lr1)._is_oop && lrgs(lr2)._is_oop ) // not an oop->int cast
  11.480 +  if (!lrgs(lr1)._is_oop && lrgs(lr2)._is_oop) { // not an oop->int cast
  11.481      return false;
  11.482 +  }
  11.483  
  11.484    // Coalescing between an aligned live range and a mis-aligned live range?
  11.485    // No, no!  Alignment changes how we count degree.
  11.486 -  if( lrgs(lr1)._fat_proj != lrgs(lr2)._fat_proj )
  11.487 +  if (lrgs(lr1)._fat_proj != lrgs(lr2)._fat_proj) {
  11.488      return false;
  11.489 +  }
  11.490  
  11.491    // Sort; use smaller live-range number
  11.492    Node *lr1_node = dst_copy;
  11.493    Node *lr2_node = src_def;
  11.494 -  if( lr1 > lr2 ) {
  11.495 +  if (lr1 > lr2) {
  11.496      uint tmp = lr1; lr1 = lr2; lr2 = tmp;
  11.497      lr1_node = src_def;  lr2_node = dst_copy;
  11.498    }
  11.499 @@ -916,17 +799,5 @@
  11.500        PhaseChaitin::_conserv_coalesce++;  // Collect stats on success
  11.501        continue;
  11.502      }
  11.503 -
  11.504 -    /* do not attempt pairs.  About 1/2 of all pairs can be removed by
  11.505 -       post-alloc.  The other set are too few to bother.
  11.506 -    Node *copy2 = copy1->in(idx1);
  11.507 -    uint idx2 = copy2->is_Copy();
  11.508 -    if( !idx2 ) continue;
  11.509 -    if( copy_copy(copy1,copy2,b,i) ) {
  11.510 -      i--;                      // Retry, same location in block
  11.511 -      PhaseChaitin::_conserv_coalesce_pair++; // Collect stats on success
  11.512 -      continue;
  11.513 -    }
  11.514 -    */
  11.515    }
  11.516  }
    12.1 --- a/src/share/vm/opto/coalesce.hpp	Mon Apr 15 16:20:05 2013 -0700
    12.2 +++ b/src/share/vm/opto/coalesce.hpp	Tue Apr 16 10:08:41 2013 +0200
    12.3 @@ -41,23 +41,25 @@
    12.4  
    12.5  public:
    12.6    // Coalesce copies
    12.7 -  PhaseCoalesce( PhaseChaitin &chaitin ) : Phase(Coalesce), _phc(chaitin) { }
    12.8 +  PhaseCoalesce(PhaseChaitin &phc)
    12.9 +  : Phase(Coalesce)
   12.10 +  , _phc(phc) {}
   12.11  
   12.12    virtual void verify() = 0;
   12.13  
   12.14    // Coalesce copies
   12.15 -  void coalesce_driver( );
   12.16 +  void coalesce_driver();
   12.17  
   12.18    // Coalesce copies in this block
   12.19 -  virtual void coalesce( Block *b ) = 0;
   12.20 +  virtual void coalesce(Block *b) = 0;
   12.21  
   12.22    // Attempt to coalesce live ranges defined by these 2
   12.23 -  void combine_these_two( Node *n1, Node *n2 );
   12.24 +  void combine_these_two(Node *n1, Node *n2);
   12.25  
   12.26 -  LRG &lrgs( uint lidx ) { return _phc.lrgs(lidx); }
   12.27 +  LRG &lrgs(uint lidx) { return _phc.lrgs(lidx); }
   12.28  #ifndef PRODUCT
   12.29    // Dump internally name
   12.30 -  void dump( Node *n ) const;
   12.31 +  void dump(Node *n) const;
   12.32    // Dump whole shebang
   12.33    void dump() const;
   12.34  #endif
    13.1 --- a/src/share/vm/opto/compile.cpp	Mon Apr 15 16:20:05 2013 -0700
    13.2 +++ b/src/share/vm/opto/compile.cpp	Tue Apr 16 10:08:41 2013 +0200
    13.3 @@ -2127,22 +2127,19 @@
    13.4    }
    13.5    NOT_PRODUCT( verify_graph_edges(); )
    13.6  
    13.7 -  PhaseChaitin regalloc(unique(),cfg,m);
    13.8 +  PhaseChaitin regalloc(unique(), cfg, m);
    13.9    _regalloc = &regalloc;
   13.10    {
   13.11      TracePhase t2("regalloc", &_t_registerAllocation, true);
   13.12 -    // Perform any platform dependent preallocation actions.  This is used,
   13.13 -    // for example, to avoid taking an implicit null pointer exception
   13.14 -    // using the frame pointer on win95.
   13.15 -    _regalloc->pd_preallocate_hook();
   13.16 -
   13.17      // Perform register allocation.  After Chaitin, use-def chains are
   13.18      // no longer accurate (at spill code) and so must be ignored.
   13.19      // Node->LRG->reg mappings are still accurate.
   13.20      _regalloc->Register_Allocate();
   13.21  
   13.22      // Bail out if the allocator builds too many nodes
   13.23 -    if (failing())  return;
   13.24 +    if (failing()) {
   13.25 +      return;
   13.26 +    }
   13.27    }
   13.28  
   13.29    // Prior to register allocation we kept empty basic blocks in case the
   13.30 @@ -2160,9 +2157,6 @@
   13.31      cfg.fixup_flow();
   13.32    }
   13.33  
   13.34 -  // Perform any platform dependent postallocation verifications.
   13.35 -  debug_only( _regalloc->pd_postallocate_verify_hook(); )
   13.36 -
   13.37    // Apply peephole optimizations
   13.38    if( OptoPeephole ) {
   13.39      NOT_PRODUCT( TracePhase t2("peephole", &_t_peephole, TimeCompiler); )
    14.1 --- a/src/share/vm/opto/idealGraphPrinter.cpp	Mon Apr 15 16:20:05 2013 -0700
    14.2 +++ b/src/share/vm/opto/idealGraphPrinter.cpp	Tue Apr 16 10:08:41 2013 +0200
    14.3 @@ -616,7 +616,7 @@
    14.4        buffer[0] = 0;
    14.5        _chaitin->dump_register(node, buffer);
    14.6        print_prop("reg", buffer);
    14.7 -      print_prop("lrg", _chaitin->n2lidx(node));
    14.8 +      print_prop("lrg", _chaitin->_lrg_map.live_range_id(node));
    14.9      }
   14.10  
   14.11      node->_in_dump_cnt--;
    15.1 --- a/src/share/vm/opto/ifg.cpp	Mon Apr 15 16:20:05 2013 -0700
    15.2 +++ b/src/share/vm/opto/ifg.cpp	Tue Apr 16 10:08:41 2013 +0200
    15.3 @@ -286,15 +286,14 @@
    15.4      uint idx;
    15.5      uint last = 0;
    15.6      while ((idx = elements.next()) != 0) {
    15.7 -      assert( idx != i, "Must have empty diagonal");
    15.8 -      assert( pc->Find_const(idx) == idx, "Must not need Find" );
    15.9 -      assert( _adjs[idx].member(i), "IFG not square" );
   15.10 -      assert( !(*_yanked)[idx], "No yanked neighbors" );
   15.11 -      assert( last < idx, "not sorted increasing");
   15.12 +      assert(idx != i, "Must have empty diagonal");
   15.13 +      assert(pc->_lrg_map.find_const(idx) == idx, "Must not need Find");
   15.14 +      assert(_adjs[idx].member(i), "IFG not square");
   15.15 +      assert(!(*_yanked)[idx], "No yanked neighbors");
   15.16 +      assert(last < idx, "not sorted increasing");
   15.17        last = idx;
   15.18      }
   15.19 -    assert( !lrgs(i)._degree_valid ||
   15.20 -            effective_degree(i) == lrgs(i).degree(), "degree is valid but wrong" );
   15.21 +    assert(!lrgs(i)._degree_valid || effective_degree(i) == lrgs(i).degree(), "degree is valid but wrong");
   15.22    }
   15.23  }
   15.24  #endif
   15.25 @@ -342,10 +341,10 @@
   15.26        Node *n = b->_nodes[j-1];
   15.27  
   15.28        // Get value being defined
   15.29 -      uint r = n2lidx(n);
   15.30 +      uint r = _lrg_map.live_range_id(n);
   15.31  
   15.32        // Some special values do not allocate
   15.33 -      if( r ) {
   15.34 +      if (r) {
   15.35  
   15.36          // Remove from live-out set
   15.37          liveout->remove(r);
   15.38 @@ -353,16 +352,19 @@
   15.39          // Copies do not define a new value and so do not interfere.
   15.40          // Remove the copies source from the liveout set before interfering.
   15.41          uint idx = n->is_Copy();
   15.42 -        if( idx ) liveout->remove( n2lidx(n->in(idx)) );
   15.43 +        if (idx) {
   15.44 +          liveout->remove(_lrg_map.live_range_id(n->in(idx)));
   15.45 +        }
   15.46  
   15.47          // Interfere with everything live
   15.48 -        interfere_with_live( r, liveout );
   15.49 +        interfere_with_live(r, liveout);
   15.50        }
   15.51  
   15.52        // Make all inputs live
   15.53 -      if( !n->is_Phi() ) {      // Phi function uses come from prior block
   15.54 -        for( uint k = 1; k < n->req(); k++ )
   15.55 -          liveout->insert( n2lidx(n->in(k)) );
   15.56 +      if (!n->is_Phi()) {      // Phi function uses come from prior block
   15.57 +        for(uint k = 1; k < n->req(); k++) {
   15.58 +          liveout->insert(_lrg_map.live_range_id(n->in(k)));
   15.59 +        }
   15.60        }
   15.61  
   15.62        // 2-address instructions always have the defined value live
   15.63 @@ -394,11 +396,12 @@
   15.64            n->set_req( 2, tmp );
   15.65          }
   15.66          // Defined value interferes with all inputs
   15.67 -        uint lidx = n2lidx(n->in(idx));
   15.68 -        for( uint k = 1; k < n->req(); k++ ) {
   15.69 -          uint kidx = n2lidx(n->in(k));
   15.70 -          if( kidx != lidx )
   15.71 -            _ifg->add_edge( r, kidx );
   15.72 +        uint lidx = _lrg_map.live_range_id(n->in(idx));
   15.73 +        for (uint k = 1; k < n->req(); k++) {
   15.74 +          uint kidx = _lrg_map.live_range_id(n->in(k));
   15.75 +          if (kidx != lidx) {
   15.76 +            _ifg->add_edge(r, kidx);
   15.77 +          }
   15.78          }
   15.79        }
   15.80      } // End of forall instructions in block
   15.81 @@ -542,10 +545,10 @@
   15.82        Node *n = b->_nodes[j - 1];
   15.83  
   15.84        // Get value being defined
   15.85 -      uint r = n2lidx(n);
   15.86 +      uint r = _lrg_map.live_range_id(n);
   15.87  
   15.88        // Some special values do not allocate
   15.89 -      if( r ) {
   15.90 +      if(r) {
   15.91          // A DEF normally costs block frequency; rematerialized values are
   15.92          // removed from the DEF sight, so LOWER costs here.
   15.93          lrgs(r)._cost += n->rematerialize() ? 0 : b->_freq;
   15.94 @@ -556,9 +559,11 @@
   15.95            Node *def = n->in(0);
   15.96            if( !n->is_Proj() ||
   15.97                // Could also be a flags-projection of a dead ADD or such.
   15.98 -              (n2lidx(def) && !liveout.member(n2lidx(def)) ) ) {
   15.99 +              (_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) {
  15.100              b->_nodes.remove(j - 1);
  15.101 -            if( lrgs(r)._def == n ) lrgs(r)._def = 0;
  15.102 +            if (lrgs(r)._def == n) {
  15.103 +              lrgs(r)._def = 0;
  15.104 +            }
  15.105              n->disconnect_inputs(NULL, C);
  15.106              _cfg._bbs.map(n->_idx,NULL);
  15.107              n->replace_by(C->top());
  15.108 @@ -570,7 +575,7 @@
  15.109  
  15.110            // Fat-projections kill many registers which cannot be used to
  15.111            // hold live ranges.
  15.112 -          if( lrgs(r)._fat_proj ) {
  15.113 +          if (lrgs(r)._fat_proj) {
  15.114              // Count the int-only registers
  15.115              RegMask itmp = lrgs(r).mask();
  15.116              itmp.AND(*Matcher::idealreg2regmask[Op_RegI]);
  15.117 @@ -636,12 +641,12 @@
  15.118            // Copies do not define a new value and so do not interfere.
  15.119            // Remove the copies source from the liveout set before interfering.
  15.120            uint idx = n->is_Copy();
  15.121 -          if( idx ) {
  15.122 -            uint x = n2lidx(n->in(idx));
  15.123 -            if( liveout.remove( x ) ) {
  15.124 +          if (idx) {
  15.125 +            uint x = _lrg_map.live_range_id(n->in(idx));
  15.126 +            if (liveout.remove(x)) {
  15.127                lrgs(x)._area -= cost;
  15.128                // Adjust register pressure.
  15.129 -              lower_pressure( &lrgs(x), j-1, b, pressure, hrp_index );
  15.130 +              lower_pressure(&lrgs(x), j-1, b, pressure, hrp_index);
  15.131                assert( pressure[0] == count_int_pressure  (&liveout), "" );
  15.132                assert( pressure[1] == count_float_pressure(&liveout), "" );
  15.133              }
  15.134 @@ -727,18 +732,21 @@
  15.135          // the flags and assumes it's dead.  This keeps the (useless)
  15.136          // flag-setting behavior alive while also keeping the (useful)
  15.137          // memory update effect.
  15.138 -        for( uint k = ((n->Opcode() == Op_SCMemProj) ? 0:1); k < n->req(); k++ ) {
  15.139 +        for (uint k = ((n->Opcode() == Op_SCMemProj) ? 0:1); k < n->req(); k++) {
  15.140            Node *def = n->in(k);
  15.141 -          uint x = n2lidx(def);
  15.142 -          if( !x ) continue;
  15.143 +          uint x = _lrg_map.live_range_id(def);
  15.144 +          if (!x) {
  15.145 +            continue;
  15.146 +          }
  15.147            LRG &lrg = lrgs(x);
  15.148            // No use-side cost for spilling debug info
  15.149 -          if( k < debug_start )
  15.150 +          if (k < debug_start) {
  15.151              // A USE costs twice block frequency (once for the Load, once
  15.152              // for a Load-delay).  Rematerialized uses only cost once.
  15.153              lrg._cost += (def->rematerialize() ? b->_freq : (b->_freq + b->_freq));
  15.154 +          }
  15.155            // It is live now
  15.156 -          if( liveout.insert( x ) ) {
  15.157 +          if (liveout.insert(x)) {
  15.158              // Newly live things assumed live from here to top of block
  15.159              lrg._area += cost;
  15.160              // Adjust register pressure
    16.1 --- a/src/share/vm/opto/live.cpp	Mon Apr 15 16:20:05 2013 -0700
    16.2 +++ b/src/share/vm/opto/live.cpp	Tue Apr 16 10:08:41 2013 +0200
    16.3 @@ -44,7 +44,7 @@
    16.4  // block is put on the worklist.
    16.5  //   The locally live-in stuff is computed once and added to predecessor
    16.6  // live-out sets.  This separate compilation is done in the outer loop below.
    16.7 -PhaseLive::PhaseLive( const PhaseCFG &cfg, LRG_List &names, Arena *arena ) : Phase(LIVE), _cfg(cfg), _names(names), _arena(arena), _live(0) {
    16.8 +PhaseLive::PhaseLive( const PhaseCFG &cfg, const LRG_List &names, Arena *arena ) : Phase(LIVE), _cfg(cfg), _names(names), _arena(arena), _live(0) {
    16.9  }
   16.10  
   16.11  void PhaseLive::compute(uint maxlrg) {
    17.1 --- a/src/share/vm/opto/live.hpp	Mon Apr 15 16:20:05 2013 -0700
    17.2 +++ b/src/share/vm/opto/live.hpp	Tue Apr 16 10:08:41 2013 +0200
    17.3 @@ -80,7 +80,7 @@
    17.4    Block_List *_worklist;        // Worklist for iterative solution
    17.5  
    17.6    const PhaseCFG &_cfg;         // Basic blocks
    17.7 -  LRG_List &_names;             // Mapping from Nodes to live ranges
    17.8 +  const LRG_List &_names;       // Mapping from Nodes to live ranges
    17.9    uint _maxlrg;                 // Largest live-range number
   17.10    Arena *_arena;
   17.11  
   17.12 @@ -91,7 +91,7 @@
   17.13    void add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass );
   17.14  
   17.15  public:
   17.16 -  PhaseLive( const PhaseCFG &cfg, LRG_List &names, Arena *arena );
   17.17 +  PhaseLive(const PhaseCFG &cfg, const LRG_List &names, Arena *arena);
   17.18    ~PhaseLive() {}
   17.19    // Compute liveness info
   17.20    void compute(uint maxlrg);
    18.1 --- a/src/share/vm/opto/postaloc.cpp	Mon Apr 15 16:20:05 2013 -0700
    18.2 +++ b/src/share/vm/opto/postaloc.cpp	Tue Apr 16 10:08:41 2013 +0200
    18.3 @@ -56,7 +56,7 @@
    18.4    int i;
    18.5    for( i=0; i < limit; i++ ) {
    18.6      if( def->is_Proj() && def->in(0)->is_Start() &&
    18.7 -        _matcher.is_save_on_entry(lrgs(n2lidx(def)).reg()) )
    18.8 +        _matcher.is_save_on_entry(lrgs(_lrg_map.live_range_id(def)).reg()))
    18.9        return true;              // Direct use of callee-save proj
   18.10      if( def->is_Copy() )        // Copies carry value through
   18.11        def = def->in(def->is_Copy());
   18.12 @@ -83,7 +83,7 @@
   18.13    // Count 1 if deleting an instruction from the current block
   18.14    if( oldb == current_block ) blk_adjust++;
   18.15    _cfg._bbs.map(old->_idx,NULL);
   18.16 -  OptoReg::Name old_reg = lrgs(n2lidx(old)).reg();
   18.17 +  OptoReg::Name old_reg = lrgs(_lrg_map.live_range_id(old)).reg();
   18.18    if( regnd && (*regnd)[old_reg]==old ) { // Instruction is currently available?
   18.19      value->map(old_reg,NULL);  // Yank from value/regnd maps
   18.20      regnd->map(old_reg,NULL);  // This register's value is now unknown
   18.21 @@ -164,7 +164,7 @@
   18.22    // Not every pair of physical registers are assignment compatible,
   18.23    // e.g. on sparc floating point registers are not assignable to integer
   18.24    // registers.
   18.25 -  const LRG &def_lrg = lrgs(n2lidx(def));
   18.26 +  const LRG &def_lrg = lrgs(_lrg_map.live_range_id(def));
   18.27    OptoReg::Name def_reg = def_lrg.reg();
   18.28    const RegMask &use_mask = n->in_RegMask(idx);
   18.29    bool can_use = ( RegMask::can_represent(def_reg) ? (use_mask.Member(def_reg) != 0)
   18.30 @@ -209,11 +209,12 @@
   18.31  // Skip through any number of copies (that don't mod oop-i-ness)
   18.32  Node *PhaseChaitin::skip_copies( Node *c ) {
   18.33    int idx = c->is_Copy();
   18.34 -  uint is_oop = lrgs(n2lidx(c))._is_oop;
   18.35 +  uint is_oop = lrgs(_lrg_map.live_range_id(c))._is_oop;
   18.36    while (idx != 0) {
   18.37      guarantee(c->in(idx) != NULL, "must not resurrect dead copy");
   18.38 -    if (lrgs(n2lidx(c->in(idx)))._is_oop != is_oop)
   18.39 +    if (lrgs(_lrg_map.live_range_id(c->in(idx)))._is_oop != is_oop) {
   18.40        break;  // casting copy, not the same value
   18.41 +    }
   18.42      c = c->in(idx);
   18.43      idx = c->is_Copy();
   18.44    }
   18.45 @@ -225,8 +226,8 @@
   18.46  int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &value, Node_List &regnd, bool can_change_regs ) {
   18.47    int blk_adjust = 0;
   18.48  
   18.49 -  uint nk_idx = n2lidx(n->in(k));
   18.50 -  OptoReg::Name nk_reg = lrgs(nk_idx ).reg();
   18.51 +  uint nk_idx = _lrg_map.live_range_id(n->in(k));
   18.52 +  OptoReg::Name nk_reg = lrgs(nk_idx).reg();
   18.53  
   18.54    // Remove obvious same-register copies
   18.55    Node *x = n->in(k);
   18.56 @@ -234,9 +235,13 @@
   18.57    while( (idx=x->is_Copy()) != 0 ) {
   18.58      Node *copy = x->in(idx);
   18.59      guarantee(copy != NULL, "must not resurrect dead copy");
   18.60 -    if( lrgs(n2lidx(copy)).reg() != nk_reg ) break;
   18.61 +    if(lrgs(_lrg_map.live_range_id(copy)).reg() != nk_reg) {
   18.62 +      break;
   18.63 +    }
   18.64      blk_adjust += use_prior_register(n,k,copy,current_block,value,regnd);
   18.65 -    if( n->in(k) != copy ) break; // Failed for some cutout?
   18.66 +    if (n->in(k) != copy) {
   18.67 +      break; // Failed for some cutout?
   18.68 +    }
   18.69      x = copy;                   // Progress, try again
   18.70    }
   18.71  
   18.72 @@ -256,7 +261,7 @@
   18.73  
   18.74    if (val == x && nk_idx != 0 &&
   18.75        regnd[nk_reg] != NULL && regnd[nk_reg] != x &&
   18.76 -      n2lidx(x) == n2lidx(regnd[nk_reg])) {
   18.77 +      _lrg_map.live_range_id(x) == _lrg_map.live_range_id(regnd[nk_reg])) {
   18.78      // When rematerialzing nodes and stretching lifetimes, the
   18.79      // allocator will reuse the original def for multidef LRG instead
   18.80      // of the current reaching def because it can't know it's safe to
   18.81 @@ -270,7 +275,7 @@
   18.82    if (val == x) return blk_adjust; // No progress?
   18.83  
   18.84    int n_regs = RegMask::num_registers(val->ideal_reg());
   18.85 -  uint val_idx = n2lidx(val);
   18.86 +  uint val_idx = _lrg_map.live_range_id(val);
   18.87    OptoReg::Name val_reg = lrgs(val_idx).reg();
   18.88  
   18.89    // See if it happens to already be in the correct register!
   18.90 @@ -499,12 +504,12 @@
   18.91      for( j = 1; j < phi_dex; j++ ) {
   18.92        uint k;
   18.93        Node *phi = b->_nodes[j];
   18.94 -      uint pidx = n2lidx(phi);
   18.95 -      OptoReg::Name preg = lrgs(n2lidx(phi)).reg();
   18.96 +      uint pidx = _lrg_map.live_range_id(phi);
   18.97 +      OptoReg::Name preg = lrgs(_lrg_map.live_range_id(phi)).reg();
   18.98  
   18.99        // Remove copies remaining on edges.  Check for junk phi.
  18.100        Node *u = NULL;
  18.101 -      for( k=1; k<phi->req(); k++ ) {
  18.102 +      for (k = 1; k < phi->req(); k++) {
  18.103          Node *x = phi->in(k);
  18.104          if( phi != x && u != x ) // Found a different input
  18.105            u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input
  18.106 @@ -555,10 +560,10 @@
  18.107        // alive and well at the use (or else the allocator fubar'd).  Take
  18.108        // advantage of this info to set a reaching def for the use-reg.
  18.109        uint k;
  18.110 -      for( k = 1; k < n->req(); k++ ) {
  18.111 +      for (k = 1; k < n->req(); k++) {
  18.112          Node *def = n->in(k);   // n->in(k) is a USE; def is the DEF for this USE
  18.113          guarantee(def != NULL, "no disconnected nodes at this point");
  18.114 -        uint useidx = n2lidx(def); // useidx is the live range index for this USE
  18.115 +        uint useidx = _lrg_map.live_range_id(def); // useidx is the live range index for this USE
  18.116  
  18.117          if( useidx ) {
  18.118            OptoReg::Name ureg = lrgs(useidx).reg();
  18.119 @@ -566,7 +571,7 @@
  18.120              int idx;            // Skip occasional useless copy
  18.121              while( (idx=def->is_Copy()) != 0 &&
  18.122                     def->in(idx) != NULL &&  // NULL should not happen
  18.123 -                   ureg == lrgs(n2lidx(def->in(idx))).reg() )
  18.124 +                   ureg == lrgs(_lrg_map.live_range_id(def->in(idx))).reg())
  18.125                def = def->in(idx);
  18.126              Node *valdef = skip_copies(def); // tighten up val through non-useless copies
  18.127              value.map(ureg,valdef); // record improved reaching-def info
  18.128 @@ -594,8 +599,10 @@
  18.129          j -= elide_copy( n, k, b, value, regnd, two_adr!=k );
  18.130  
  18.131        // Unallocated Nodes define no registers
  18.132 -      uint lidx = n2lidx(n);
  18.133 -      if( !lidx ) continue;
  18.134 +      uint lidx = _lrg_map.live_range_id(n);
  18.135 +      if (!lidx) {
  18.136 +        continue;
  18.137 +      }
  18.138  
  18.139        // Update the register defined by this instruction
  18.140        OptoReg::Name nreg = lrgs(lidx).reg();
    19.1 --- a/src/share/vm/opto/reg_split.cpp	Mon Apr 15 16:20:05 2013 -0700
    19.2 +++ b/src/share/vm/opto/reg_split.cpp	Tue Apr 16 10:08:41 2013 +0200
    19.3 @@ -318,9 +318,13 @@
    19.4      for( uint i = 1; i < def->req(); i++ ) {
    19.5        Node *in = def->in(i);
    19.6        // Check for single-def (LRG cannot redefined)
    19.7 -      uint lidx = n2lidx(in);
    19.8 -      if( lidx >= _maxlrg ) continue; // Value is a recent spill-copy
    19.9 -      if (lrgs(lidx).is_singledef()) continue;
   19.10 +      uint lidx = _lrg_map.live_range_id(in);
   19.11 +      if (lidx >= _lrg_map.max_lrg_id()) {
   19.12 +        continue; // Value is a recent spill-copy
   19.13 +      }
   19.14 +      if (lrgs(lidx).is_singledef()) {
   19.15 +        continue;
   19.16 +      }
   19.17  
   19.18        Block *b_def = _cfg._bbs[def->_idx];
   19.19        int idx_def = b_def->find_node(def);
   19.20 @@ -344,26 +348,28 @@
   19.21    if( spill->req() > 1 ) {
   19.22      for( uint i = 1; i < spill->req(); i++ ) {
   19.23        Node *in = spill->in(i);
   19.24 -      uint lidx = Find_id(in);
   19.25 +      uint lidx = _lrg_map.find_id(in);
   19.26  
   19.27        // Walk backwards thru spill copy node intermediates
   19.28        if (walkThru) {
   19.29 -        while ( in->is_SpillCopy() && lidx >= _maxlrg ) {
   19.30 +        while (in->is_SpillCopy() && lidx >= _lrg_map.max_lrg_id()) {
   19.31            in = in->in(1);
   19.32 -          lidx = Find_id(in);
   19.33 +          lidx = _lrg_map.find_id(in);
   19.34          }
   19.35  
   19.36 -        if (lidx < _maxlrg && lrgs(lidx).is_multidef()) {
   19.37 +        if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).is_multidef()) {
   19.38            // walkThru found a multidef LRG, which is unsafe to use, so
   19.39            // just keep the original def used in the clone.
   19.40            in = spill->in(i);
   19.41 -          lidx = Find_id(in);
   19.42 +          lidx = _lrg_map.find_id(in);
   19.43          }
   19.44        }
   19.45  
   19.46 -      if( lidx < _maxlrg && lrgs(lidx).reg() >= LRG::SPILL_REG ) {
   19.47 +      if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).reg() >= LRG::SPILL_REG) {
   19.48          Node *rdef = Reachblock[lrg2reach[lidx]];
   19.49 -        if( rdef ) spill->set_req(i,rdef);
   19.50 +        if (rdef) {
   19.51 +          spill->set_req(i, rdef);
   19.52 +        }
   19.53        }
   19.54      }
   19.55    }
   19.56 @@ -382,7 +388,7 @@
   19.57  #endif
   19.58    // See if the cloned def kills any flags, and copy those kills as well
   19.59    uint i = insidx+1;
   19.60 -  if( clone_projs( b, i, def, spill, maxlrg ) ) {
   19.61 +  if( clone_projs( b, i, def, spill, maxlrg) ) {
   19.62      // Adjust the point where we go hi-pressure
   19.63      if( i <= b->_ihrp_index ) b->_ihrp_index++;
   19.64      if( i <= b->_fhrp_index ) b->_fhrp_index++;
   19.65 @@ -424,17 +430,25 @@
   19.66  //------------------------------prompt_use---------------------------------
   19.67  // True if lidx is used before any real register is def'd in the block
   19.68  bool PhaseChaitin::prompt_use( Block *b, uint lidx ) {
   19.69 -  if( lrgs(lidx)._was_spilled2 ) return false;
   19.70 +  if (lrgs(lidx)._was_spilled2) {
   19.71 +    return false;
   19.72 +  }
   19.73  
   19.74    // Scan block for 1st use.
   19.75    for( uint i = 1; i <= b->end_idx(); i++ ) {
   19.76      Node *n = b->_nodes[i];
   19.77      // Ignore PHI use, these can be up or down
   19.78 -    if( n->is_Phi() ) continue;
   19.79 -    for( uint j = 1; j < n->req(); j++ )
   19.80 -      if( Find_id(n->in(j)) == lidx )
   19.81 +    if (n->is_Phi()) {
   19.82 +      continue;
   19.83 +    }
   19.84 +    for (uint j = 1; j < n->req(); j++) {
   19.85 +      if (_lrg_map.find_id(n->in(j)) == lidx) {
   19.86          return true;          // Found 1st use!
   19.87 -    if( n->out_RegMask().is_NotEmpty() ) return false;
   19.88 +      }
   19.89 +    }
   19.90 +    if (n->out_RegMask().is_NotEmpty()) {
   19.91 +      return false;
   19.92 +    }
   19.93    }
   19.94    return false;
   19.95  }
   19.96 @@ -464,23 +478,23 @@
   19.97    bool                 u1, u2, u3;
   19.98    Block               *b, *pred;
   19.99    PhiNode             *phi;
  19.100 -  GrowableArray<uint>  lidxs(split_arena, _maxlrg, 0, 0);
  19.101 +  GrowableArray<uint>  lidxs(split_arena, maxlrg, 0, 0);
  19.102  
  19.103    // Array of counters to count splits per live range
  19.104 -  GrowableArray<uint>  splits(split_arena, _maxlrg, 0, 0);
  19.105 +  GrowableArray<uint>  splits(split_arena, maxlrg, 0, 0);
  19.106  
  19.107  #define NEW_SPLIT_ARRAY(type, size)\
  19.108    (type*) split_arena->allocate_bytes((size) * sizeof(type))
  19.109  
  19.110    //----------Setup Code----------
  19.111    // Create a convenient mapping from lrg numbers to reaches/leaves indices
  19.112 -  uint *lrg2reach = NEW_SPLIT_ARRAY( uint, _maxlrg );
  19.113 +  uint *lrg2reach = NEW_SPLIT_ARRAY(uint, maxlrg);
  19.114    // Keep track of DEFS & Phis for later passes
  19.115    defs = new Node_List();
  19.116    phis = new Node_List();
  19.117    // Gather info on which LRG's are spilling, and build maps
  19.118 -  for( bidx = 1; bidx < _maxlrg; bidx++ ) {
  19.119 -    if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
  19.120 +  for (bidx = 1; bidx < maxlrg; bidx++) {
  19.121 +    if (lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG) {
  19.122        assert(!lrgs(bidx).mask().is_AllStack(),"AllStack should color");
  19.123        lrg2reach[bidx] = spill_cnt;
  19.124        spill_cnt++;
  19.125 @@ -629,7 +643,7 @@
  19.126            break;
  19.127          }
  19.128          // must be looking at a phi
  19.129 -        if( Find_id(n1) == lidxs.at(slidx) ) {
  19.130 +        if (_lrg_map.find_id(n1) == lidxs.at(slidx)) {
  19.131            // found the necessary phi
  19.132            needs_phi = false;
  19.133            has_phi = true;
  19.134 @@ -651,11 +665,11 @@
  19.135            Reachblock[slidx] = phi;
  19.136  
  19.137            // add node to block & node_to_block mapping
  19.138 -          insert_proj( b, insidx++, phi, maxlrg++ );
  19.139 +          insert_proj(b, insidx++, phi, maxlrg++);
  19.140            non_phi++;
  19.141            // Reset new phi's mapping to be the spilling live range
  19.142 -          _names.map(phi->_idx, lidx);
  19.143 -          assert(Find_id(phi) == lidx,"Bad update on Union-Find mapping");
  19.144 +          _lrg_map.map(phi->_idx, lidx);
  19.145 +          assert(_lrg_map.find_id(phi) == lidx, "Bad update on Union-Find mapping");
  19.146          }  // end if not found correct phi
  19.147          // Here you have either found or created the Phi, so record it
  19.148          assert(phi != NULL,"Must have a Phi Node here");
  19.149 @@ -721,12 +735,12 @@
  19.150      for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
  19.151        Node *n = b->_nodes[insidx];
  19.152        // Find the defining Node's live range index
  19.153 -      uint defidx = Find_id(n);
  19.154 +      uint defidx = _lrg_map.find_id(n);
  19.155        uint cnt = n->req();
  19.156  
  19.157 -      if( n->is_Phi() ) {
  19.158 +      if (n->is_Phi()) {
  19.159          // Skip phi nodes after removing dead copies.
  19.160 -        if( defidx < _maxlrg ) {
  19.161 +        if (defidx < _lrg_map.max_lrg_id()) {
  19.162            // Check for useless Phis.  These appear if we spill, then
  19.163            // coalesce away copies.  Dont touch Phis in spilling live
  19.164            // ranges; they are busy getting modifed in this pass.
  19.165 @@ -744,8 +758,8 @@
  19.166                }
  19.167              }
  19.168              assert( u, "at least 1 valid input expected" );
  19.169 -            if( i >= cnt ) {    // Found one unique input
  19.170 -              assert(Find_id(n) == Find_id(u), "should be the same lrg");
  19.171 +            if (i >= cnt) {    // Found one unique input
  19.172 +              assert(_lrg_map.find_id(n) == _lrg_map.find_id(u), "should be the same lrg");
  19.173                n->replace_by(u); // Then replace with unique input
  19.174                n->disconnect_inputs(NULL, C);
  19.175                b->_nodes.remove(insidx);
  19.176 @@ -793,16 +807,24 @@
  19.177                  while( insert_point > 0 ) {
  19.178                    Node *n = b->_nodes[insert_point];
  19.179                    // Hit top of block?  Quit going backwards
  19.180 -                  if( n->is_Phi() ) break;
  19.181 +                  if (n->is_Phi()) {
  19.182 +                    break;
  19.183 +                  }
  19.184                    // Found a def?  Better split after it.
  19.185 -                  if( n2lidx(n) == lidx ) break;
  19.186 +                  if (_lrg_map.live_range_id(n) == lidx) {
  19.187 +                    break;
  19.188 +                  }
  19.189                    // Look for a use
  19.190                    uint i;
  19.191 -                  for( i = 1; i < n->req(); i++ )
  19.192 -                    if( n2lidx(n->in(i)) == lidx )
  19.193 +                  for( i = 1; i < n->req(); i++ ) {
  19.194 +                    if (_lrg_map.live_range_id(n->in(i)) == lidx) {
  19.195                        break;
  19.196 +                    }
  19.197 +                  }
  19.198                    // Found a use?  Better split after it.
  19.199 -                  if( i < n->req() ) break;
  19.200 +                  if (i < n->req()) {
  19.201 +                    break;
  19.202 +                  }
  19.203                    insert_point--;
  19.204                  }
  19.205                  uint orig_eidx = b->end_idx();
  19.206 @@ -812,8 +834,9 @@
  19.207                    return 0;
  19.208                  }
  19.209                  // Spill of NULL check mem op goes into the following block.
  19.210 -                if (b->end_idx() > orig_eidx)
  19.211 +                if (b->end_idx() > orig_eidx) {
  19.212                    insidx++;
  19.213 +                }
  19.214                }
  19.215                // This is a new DEF, so update UP
  19.216                UPblock[slidx] = false;
  19.217 @@ -832,13 +855,13 @@
  19.218        }  // end if crossing HRP Boundry
  19.219  
  19.220        // If the LRG index is oob, then this is a new spillcopy, skip it.
  19.221 -      if( defidx >= _maxlrg ) {
  19.222 +      if (defidx >= _lrg_map.max_lrg_id()) {
  19.223          continue;
  19.224        }
  19.225        LRG &deflrg = lrgs(defidx);
  19.226        uint copyidx = n->is_Copy();
  19.227        // Remove coalesced copy from CFG
  19.228 -      if( copyidx && defidx == n2lidx(n->in(copyidx)) ) {
  19.229 +      if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) {
  19.230          n->replace_by( n->in(copyidx) );
  19.231          n->set_req( copyidx, NULL );
  19.232          b->_nodes.remove(insidx--);
  19.233 @@ -864,13 +887,13 @@
  19.234            // If inpidx > old_last, then one of these new inputs is being
  19.235            // handled. Skip the derived part of the pair, but process
  19.236            // the base like any other input.
  19.237 -          if( inpidx > old_last && ((inpidx - oopoff) & 1) == DERIVED ) {
  19.238 +          if (inpidx > old_last && ((inpidx - oopoff) & 1) == DERIVED) {
  19.239              continue;  // skip derived_debug added below
  19.240            }
  19.241            // Get lidx of input
  19.242 -          uint useidx = Find_id(n->in(inpidx));
  19.243 +          uint useidx = _lrg_map.find_id(n->in(inpidx));
  19.244            // Not a brand-new split, and it is a spill use
  19.245 -          if( useidx < _maxlrg && lrgs(useidx).reg() >= LRG::SPILL_REG ) {
  19.246 +          if (useidx < _lrg_map.max_lrg_id() && lrgs(useidx).reg() >= LRG::SPILL_REG) {
  19.247              // Check for valid reaching DEF
  19.248              slidx = lrg2reach[useidx];
  19.249              Node *def = Reachblock[slidx];
  19.250 @@ -886,7 +909,7 @@
  19.251                if (def == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
  19.252                  return 0;
  19.253                }
  19.254 -              _names.extend(def->_idx,0);
  19.255 +              _lrg_map.extend(def->_idx, 0);
  19.256                _cfg._bbs.map(def->_idx,b);
  19.257                n->set_req(inpidx, def);
  19.258                continue;
  19.259 @@ -1186,10 +1209,10 @@
  19.260        // ********** Split Left Over Mem-Mem Moves **********
  19.261        // Check for mem-mem copies and split them now.  Do not do this
  19.262        // to copies about to be spilled; they will be Split shortly.
  19.263 -      if( copyidx ) {
  19.264 +      if (copyidx) {
  19.265          Node *use = n->in(copyidx);
  19.266 -        uint useidx = Find_id(use);
  19.267 -        if( useidx < _maxlrg &&       // This is not a new split
  19.268 +        uint useidx = _lrg_map.find_id(use);
  19.269 +        if (useidx < _lrg_map.max_lrg_id() &&       // This is not a new split
  19.270              OptoReg::is_stack(deflrg.reg()) &&
  19.271              deflrg.reg() < LRG::SPILL_REG ) { // And DEF is from stack
  19.272            LRG &uselrg = lrgs(useidx);
  19.273 @@ -1228,7 +1251,7 @@
  19.274          uint member;
  19.275          IndexSetIterator isi(liveout);
  19.276          while ((member = isi.next()) != 0) {
  19.277 -          assert(defidx != Find_const(member), "Live out member has not been compressed");
  19.278 +          assert(defidx != _lrg_map.find_const(member), "Live out member has not been compressed");
  19.279          }
  19.280  #endif
  19.281          Reachblock[slidx] = NULL;
  19.282 @@ -1261,7 +1284,7 @@
  19.283      assert(phi->is_Phi(),"This list must only contain Phi Nodes");
  19.284      Block *b = _cfg._bbs[phi->_idx];
  19.285      // Grab the live range number
  19.286 -    uint lidx = Find_id(phi);
  19.287 +    uint lidx = _lrg_map.find_id(phi);
  19.288      uint slidx = lrg2reach[lidx];
  19.289      // Update node to lidx map
  19.290      new_lrg(phi, maxlrg++);
  19.291 @@ -1296,11 +1319,13 @@
  19.292          int insert = pred->end_idx();
  19.293          while (insert >= 1 &&
  19.294                 pred->_nodes[insert - 1]->is_SpillCopy() &&
  19.295 -               Find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) {
  19.296 +               _lrg_map.find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) {
  19.297            insert--;
  19.298          }
  19.299 -        def = split_Rematerialize( def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false );
  19.300 -        if( !def ) return 0;    // Bail out
  19.301 +        def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false);
  19.302 +        if (!def) {
  19.303 +          return 0;    // Bail out
  19.304 +        }
  19.305        }
  19.306        // Update the Phi's input edge array
  19.307        phi->set_req(i,def);
  19.308 @@ -1316,7 +1341,7 @@
  19.309      }  // End for all inputs to the Phi
  19.310    }  // End for all Phi Nodes
  19.311    // Update _maxlrg to save Union asserts
  19.312 -  _maxlrg = maxlrg;
  19.313 +  _lrg_map.set_max_lrg_id(maxlrg);
  19.314  
  19.315  
  19.316    //----------PASS 3----------
  19.317 @@ -1328,47 +1353,51 @@
  19.318      for( uint i = 1; i < phi->req(); i++ ) {
  19.319        // Grab the input node
  19.320        Node *n = phi->in(i);
  19.321 -      assert( n, "" );
  19.322 -      uint lidx = Find(n);
  19.323 -      uint pidx = Find(phi);
  19.324 -      if( lidx < pidx )
  19.325 +      assert(n, "node should exist");
  19.326 +      uint lidx = _lrg_map.find(n);
  19.327 +      uint pidx = _lrg_map.find(phi);
  19.328 +      if (lidx < pidx) {
  19.329          Union(n, phi);
  19.330 -      else if( lidx > pidx )
  19.331 +      }
  19.332 +      else if(lidx > pidx) {
  19.333          Union(phi, n);
  19.334 +      }
  19.335      }  // End for all inputs to the Phi Node
  19.336    }  // End for all Phi Nodes
  19.337    // Now union all two address instructions
  19.338 -  for( insidx = 0; insidx < defs->size(); insidx++ ) {
  19.339 +  for (insidx = 0; insidx < defs->size(); insidx++) {
  19.340      // Grab the def
  19.341      n1 = defs->at(insidx);
  19.342      // Set new lidx for DEF & handle 2-addr instructions
  19.343 -    if( n1->is_Mach() && ((twoidx = n1->as_Mach()->two_adr()) != 0) ) {
  19.344 -      assert( Find(n1->in(twoidx)) < maxlrg,"Assigning bad live range index");
  19.345 +    if (n1->is_Mach() && ((twoidx = n1->as_Mach()->two_adr()) != 0)) {
  19.346 +      assert(_lrg_map.find(n1->in(twoidx)) < maxlrg,"Assigning bad live range index");
  19.347        // Union the input and output live ranges
  19.348 -      uint lr1 = Find(n1);
  19.349 -      uint lr2 = Find(n1->in(twoidx));
  19.350 -      if( lr1 < lr2 )
  19.351 +      uint lr1 = _lrg_map.find(n1);
  19.352 +      uint lr2 = _lrg_map.find(n1->in(twoidx));
  19.353 +      if (lr1 < lr2) {
  19.354          Union(n1, n1->in(twoidx));
  19.355 -      else if( lr1 > lr2 )
  19.356 +      }
  19.357 +      else if (lr1 > lr2) {
  19.358          Union(n1->in(twoidx), n1);
  19.359 +      }
  19.360      }  // End if two address
  19.361    }  // End for all defs
  19.362    // DEBUG
  19.363  #ifdef ASSERT
  19.364    // Validate all live range index assignments
  19.365 -  for( bidx = 0; bidx < _cfg._num_blocks; bidx++ ) {
  19.366 +  for (bidx = 0; bidx < _cfg._num_blocks; bidx++) {
  19.367      b  = _cfg._blocks[bidx];
  19.368 -    for( insidx = 0; insidx <= b->end_idx(); insidx++ ) {
  19.369 +    for (insidx = 0; insidx <= b->end_idx(); insidx++) {
  19.370        Node *n = b->_nodes[insidx];
  19.371 -      uint defidx = Find(n);
  19.372 -      assert(defidx < _maxlrg,"Bad live range index in Split");
  19.373 +      uint defidx = _lrg_map.find(n);
  19.374 +      assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split");
  19.375        assert(defidx < maxlrg,"Bad live range index in Split");
  19.376      }
  19.377    }
  19.378    // Issue a warning if splitting made no progress
  19.379    int noprogress = 0;
  19.380 -  for( slidx = 0; slidx < spill_cnt; slidx++ ) {
  19.381 -    if( PrintOpto && WizardMode && splits.at(slidx) == 0 ) {
  19.382 +  for (slidx = 0; slidx < spill_cnt; slidx++) {
  19.383 +    if (PrintOpto && WizardMode && splits.at(slidx) == 0) {
  19.384        tty->print_cr("Failed to split live range %d", lidxs.at(slidx));
  19.385        //BREAKPOINT;
  19.386      }
    20.1 --- a/src/share/vm/opto/regalloc.hpp	Mon Apr 15 16:20:05 2013 -0700
    20.2 +++ b/src/share/vm/opto/regalloc.hpp	Tue Apr 16 10:08:41 2013 +0200
    20.3 @@ -113,7 +113,7 @@
    20.4    OptoReg::Name offset2reg( int stk_offset ) const;
    20.5  
    20.6    // Get the register encoding associated with the Node
    20.7 -  int get_encode( const Node *n ) const {
    20.8 +  int get_encode(const Node *n) const {
    20.9      assert( n->_idx < _node_regs_max_index, "Exceeded _node_regs array");
   20.10      OptoReg::Name first = _node_regs[n->_idx].first();
   20.11      OptoReg::Name second = _node_regs[n->_idx].second();
   20.12 @@ -122,15 +122,6 @@
   20.13      return Matcher::_regEncode[first];
   20.14    }
   20.15  
   20.16 -  // Platform dependent hook for actions prior to allocation
   20.17 -  void  pd_preallocate_hook();
   20.18 -
   20.19 -#ifdef ASSERT
   20.20 -  // Platform dependent hook for verification after allocation.  Will
   20.21 -  // only get called when compiling with asserts.
   20.22 -  void  pd_postallocate_verify_hook();
   20.23 -#endif
   20.24 -
   20.25  #ifndef PRODUCT
   20.26    static int _total_framesize;
   20.27    static int _max_framesize;
    21.1 --- a/src/share/vm/runtime/vmStructs.cpp	Mon Apr 15 16:20:05 2013 -0700
    21.2 +++ b/src/share/vm/runtime/vmStructs.cpp	Tue Apr 16 10:08:41 2013 +0200
    21.3 @@ -1115,7 +1115,6 @@
    21.4    c2_nonstatic_field(PhaseChaitin,       _lo_stk_degree,           uint)                                                             \
    21.5    c2_nonstatic_field(PhaseChaitin,       _hi_degree,               uint)                                                             \
    21.6    c2_nonstatic_field(PhaseChaitin,       _simplified,              uint)                                                             \
    21.7 -  c2_nonstatic_field(PhaseChaitin,       _maxlrg,                  uint)                                                             \
    21.8                                                                                                                                       \
    21.9    c2_nonstatic_field(Block,              _nodes,                   Node_List)                                                        \
   21.10    c2_nonstatic_field(Block,              _succs,                   Block_Array)                                                      \

mercurial